blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c04b93099c88eb2c8cfcb1ac710a1df24727bce | 6e74881361ffb2d76a28c7f52e4d4031f86c89ad | /0330/Code05-08.py | 2a1eb5e4f7e465eb5841b1b2a217aac29954e1a9 | [] | no_license | YoungSeok-Choi/Open-Source-Basic-Project | f5d09899ce9b228e5d6f5e10dcb3a9561acad9c0 | 8d3269ce30ea241c95cb3df41459b51282e7ce5e | refs/heads/master | 2022-10-23T13:23:37.832363 | 2020-06-15T06:02:47 | 2020-06-15T06:02:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | score = int(input("점수를 입력하세요 : "))
if score >= 90 :
print("A")
elif score >= 80 :
print("B")
elif score >= 70 :
print("C")
elif score >= 60 :
print("D")
else :
print("F")
print("학점입니다. ^^")
| [
"58320695+dydtjr1515@users.noreply.github.com"
] | 58320695+dydtjr1515@users.noreply.github.com |
61958a15003fa27f422fe61169705ba3de6d4016 | 5f247136f6d937998fd85e48a2bcaf34f80aeee0 | /storeinfo/spiders/c1_towncaredental.py | def94e4073a3b0f991684342edcb6b7eadbdb9a4 | [
"BSD-3-Clause"
] | permissive | mikeym88/Store-Information-Crawler | 9e631429887d3bcb056fe5c634f0f6702701dba2 | 7290acc793a6cf64edd5b95146eb9e2cdf3527e2 | refs/heads/master | 2023-07-15T02:15:38.512530 | 2021-08-28T17:07:54 | 2021-08-28T17:07:54 | 275,984,664 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | from __future__ import unicode_literals
import scrapy
from scrapy.spiders import Spider
from scrapy.http import Request
from storeinfo.items import StoreItem
class towncaredentalSpider(scrapy.Spider):
name = "towncaredental"
start_urls = ['https://www.towncaredental.com/locations/']
def parse(self, response):
stores = response.xpath('//li[@data-id]')
for store in stores:
item = StoreItem()
item['store_name'] = store.xpath('.//h3/a/span/text()').extract_first().strip().replace("\u00a0", " ")
address = store.xpath('.//*[@class="address"]/text()').getall()
item['address'] = ', '.join(address[0:-1]).strip()
city_state_zip_info = address[-1].split(',')
item['city'] = city_state_zip_info[0]
item['state'] = city_state_zip_info[1].split()[0]
item['zip_code'] = city_state_zip_info[1].split()[1]
item['country'] = 'US'
item['phone_number'] = store.xpath('.//*[@class="contact"]/a/text()').extract_first()
yield item
| [
"mikey.x88@gmail.com"
] | mikey.x88@gmail.com |
a60a5159a109716744ac0cbd30f38ff6674cdbdc | c803866a4c71442320ef5a9cfe48797af94c5e61 | /app/auth/views.py | e7a4971f92a7d43c4049ae8699008a0942149842 | [] | no_license | ombima452/pitchcave | 6c451a53752da54116ca6033ed9b11b1e0638f85 | 89c6696ff423fe0e63011d527423880e9c54edb9 | refs/heads/master | 2022-09-19T18:22:50.893232 | 2019-10-01T05:59:11 | 2019-10-01T05:59:11 | 208,234,848 | 0 | 0 | null | 2022-09-16T18:09:45 | 2019-09-13T09:34:52 | Python | UTF-8 | Python | false | false | 1,382 | py | from flask import render_template, redirect, url_for, flash, request
from . import auth
from ..models import User
from .forms import RegistrationForm, LoginForm
from .. import db
from flask_login import login_user, login_required, logout_user
@auth.route('/login',methods=['GET','POST'])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
user = User.query.filter_by(email = login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user,login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or Password')
title = "Pitchcave Login"
return render_template('auth/login.html',login_form = login_form,title=title)
@auth.route('/register',methods = ["GET","POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data,password = form.password.data)
db.session.add(user)
db.session.commit()
return redirect(url_for('auth.login'))
title = "New"
return render_template('auth/register.html',registration_form = form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index")) | [
"christineombima452@gmail"
] | christineombima452@gmail |
e8eeb9fdc04bea633a0a29177b1beb4d70dbf976 | 07c267e23c7bdf1b991e58f60da961d7190dc076 | /python/SSH_CLI_automation.py | 1f095d3d89e65ad795aff8757a656abaed7eee5c | [] | no_license | rdm750/rdm750.github.io | 5817ee07761a992244786e739f1effe66abe4e1b | ce8bab6bf24a3ccda1613449ef6a6b87bbb613cc | refs/heads/master | 2020-04-15T13:36:05.212316 | 2018-11-03T02:04:22 | 2018-11-03T02:04:22 | 58,693,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | import paramiko
import sys
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('127.0.0.1', username= sys.argv[1],
password = sys.argv[2])
list_of_cmds = ['date','pwd','uptime','uname']
test2=["ps -ef | awk '{print $1,$2,$3}' ",'iostat','ifconfig','lpq','netstat -rn','arp -an']
for cmd in list_of_cmds:
stdin, stdout, stderr = ssh.exec_command(cmd)
print stdout.readlines()
| [
"rdm750"
] | rdm750 |
b4f1da558005169fa15bd764349dd3cb1fe14b66 | c3162bbdc94acbe16ecd2fffb4b163e398e7f640 | /Phonebook_Project/LRCphonebook_func.py | a1d8e2da1fbef93f250eac8d3d7d9d49ecf202f6 | [] | no_license | lenniecottrell/Python-Projects | e8496277f0fffa34f892563ddc3cd6408a11451b | 4a2e84a97bd87ea23741a24913c21c18e05357ea | refs/heads/master | 2023-02-13T23:35:01.975359 | 2021-01-08T19:57:36 | 2021-01-08T19:57:36 | 277,906,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,476 | py |
import os
from tkinter import *
import tkinter as tk
import sqlite3
#import other modules to access them
import LRCphonebook_main
import LRCphonebook_gui
def center_window(self, w, h): # pass in the tkinter frame (master) reference and the w and h
#get user's screen width and height
screen_width = self.master.winfo_screenwidth()
screen_height = self.master.winfo_screenheight()
# calculate x and y coordinates to paint the app centered on the user's screen
x = int((screen_width/2) - (w/2))
y = int((screen_height/2) - (h/2))
centerGeo = self.master.geometry('{}x{}+{}+{}'.format(w, h, x, y))
return centerGeo
# catch if the user clicks on the window's upper-right 'x' to double check they want to close
def ask_quit(self):
if messagebox.askokcancel("Exit program", "Okay to exit application?"):
self.master.destroy()
os._exit(0)
#os._exit is important to free up the memory that your program was using and return it to the user's system for their own use
#================================================================
def create_db(self):
conn = sqlite3.connect('phonebook.db')
with conn:
cur = conn.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS tbl_phonebook( \
ID INTEGER PRIMARY KEY AUTOINCREMENT, \
col_fname TEXT, \
col_lname TEXT, \
col_fullname TEXT, \
col_phone TEXT, \
col_email TEXT \
)")
conn.commit()
conn.close()
first_run(self)
#this function puts example data in the database
def first_run(self):
data = ('John', 'Doe', 'John Doe', '111-111-1111', 'jdoe@email.com') #this is a tuple
conn = sqlite3.connect('phonebook.db')
with conn:
cur = conn.cursor()
cur,count = count_records(cur) #see the next function
if count < 1: #if it's less than 1, it's empty, so it only does this the first time the function is run
cur.execute("""INSERT INTO tbl_phonebook (col_fname, col_lname, col_fullname, col_phone, col_email) VALUES (?,?,?,?,?)""", (data))
conn.commit()
conn.close()
#this function counts how many records are in the database
def count_records(cur):
count = ""
cur.execute("""SELECT COUNT(*) FROM tbl_phonebook""")
count = cur.fetchone()[0] #this actually extracts the data and stores it into 'count'
return cur,count
#the functiion selects an item in the ListBox
def onSelect(self,event):
#calling the event is the self.lstList1 widget from LRCphonebook_gui
varList = event.widget #store the widget in 'varList'
select = varList.curselection()[0] #select index 0 from the list and store it in 'select'
value = varList.get(select) #get the information from 'select' and store it in 'value'
conn = sqlite3.connect('phonebook.db')
with conn:
cursor = conn.cursor()
# this selects all the info for the selected fullname of a person from the list
cursor.execute("""SELECT col_fname, col_lname, col_phone, col_email FROM tbl_phonebook WHERE col_fullname = (?)""", [value])
varBody = cursor.fetchall()
#the above returns a tuple and below we can slice it int o 4 parts using data[] during the iteration
for data in varBody:
self.txt_fname.delete(0,END) #clear the textbox
self.txt_fname.insert(0,data[0]) #put new info in it
self.txt_lname.delete(0,END)
self.txt_lname.insert(0,data[1])
self.txt_phone.delete(0,END)
self.txt_phone.insert(0,data[2])
self.txt_email.delete(0,END)
self.txt_email.insert(0,data[3])
def addToList(self):
var_fname = self.txt_fname.get()
var_lname = self.txt_lname.get()
#normalize the data to keep it consistent with the database
var_fname = var_fname.strip() #This will remove any blank spaces before and after the entry
var_lname = var_lname.strip()
var_fname = var_fname.title() #This will ensure that the first character of each word is capitalized
var_lname = var_lname.title()
var_fullname = ("{} {}".format(var_fname, var_lname)) #combine the normalized names into fullname
print("var_fullname: {}".format(var_fullname)) #the user does not see this
var_phone = self.txt_phone.get().strip()
var_email = self.txt_email.get().strip()
if not "@" or not "." in var_email:
print("incorrect email format!!!")
if (len(var_fname) > 0) and (len(var_lname) > 0) and (len(var_phone) > 0) and (len(var_email) > 0): #makes all fields required
conn = sqlite3.connect('phonebook.db')
with conn:
cursor = conn.cursor()
# Check the database for existance of the fullname, if so we will alert the user and disregard request)
cursor.execute("""SELECT COUNT (col_fullname) FROM tbl_phonebook WHERE col_fullname = '()'""".format(var_fullname))
count = cursor.fetchone()[0]
chkName = count
if chkName == 0: # if this is 0 then there is no existance of the fullname and we can add new data
print("chkName: {}".format(chkName))
cursor.execute("""INSERT INTO tbl_phonebook (col_fname, col_lname, col_fullname, col_phone, col_email) VALUES (?,?,?,?,?)""",(var_fname,var_lname,var_fullname,var_phone,var_email))
self.lstList1.insert(END, var_fullname) #also update the listbox
onClear(self) #call the function to clear all of the textboxes
else:
messagebox.showerror("Name Error",'"{}" already sxists in the database! Please choose a different name'.format(var_fullname))
onClear(self) #call the function to clear all of the textboxes
conn.commit()
conn.close()
else:
messagebox.showerror("Missing Text Error","Please ensure that there is data in all four fields.")
# this function deletes something from the database
def onDelete(self):
var_select = self.lstList1.get(self.lstList1.curselection()) #Listbox's selected value
conn = sqlite3.connect('phonebook.db')
with conn:
cur = conn.cursor()
#check count to ensure that this is not the last record in the database...
# cannot delete last record or we will get an error
cur.execute("""SELECT COUNT(*) FROM tbl_phonebook""")
count = cur.fetchone()[0]
if count > 1:
confirm = messagebox.askokcancel("Delete Confirmation", "All information associated with, ({}) \n will be permanently deleted from the database. \n\nProceed with the deletion request?".format(var_select))
if confirm:
conn = sqlite3.connect('phonebook.db')
with conn:
cursor = conn.cursor()
cursor.execute("""DELETE FROM tbl_phonebook WHERE col_fullname = '{}'""".format(var_select))
#call the function to clear all of the textboxes and the seelected index of listbox onRefresh(self)
#update the listbox of the changes
onDeleted(self)
conn.commit()
else:
confirm = messagebox.showerror("Last Record Error", "({}) is the last record in the database and cannot be deleted at this time. \n\n Please add another record first before you can delete ({}).".format(var_select,var_select))
conn.close()
# this function clears text from the textboxes when something is deleted
def onDeleted(self):
# clear the text in these textboxes
self.txt_fname.delete(0,END)
self.txt_lname.delete(0,END)
self.txt_phone.delete(0,END)
self.txt_email.delete(0,END)
# onRefresh(self) #update the listbox of changes
try:
index = self.lstList1.curselection()[0]
self.lstList1.delete(index)
except IndexError:
pass
def onClear(self):
# clear the text in these textboxes
self.txt_fname.delete(0,END)
self.txt_lname.delete(0,END)
self.txt_phone.delete(0,END)
self.txt_email.delete(0,END)
def onRefresh(self):
#populate the listbox, coinciding with the database
self.lstList1.delete(0,END) #deletes the whole listbox
conn = sqlite3.connect('phonebook.db')
with conn:
cursor = conn.cursor()
cursor.execute("""SELECT COUNT(*) FROM tbl_phonebook""")
count = cursor.fetchone()[0]
i = 0
while i < count:
cursor.execute("""SELECT col_fullname FROM tbl_phonebook""")
varList = cursor.fetchall()[i] #fetch an item with the cursor
for item in varList: #with that item, do the next instruction
self.lstList1.insert(0,str(item)) #this repopulates the listbox
i = i + 1
conn.close()
def onUpdate(self):
try:
var_select = self.lstList1.curselection()[0] #index of the list selection
var_value = self.lstList1.get(var_select) # list selection's text value
except:
messagebox.showinfo("Missing selection","No name was selected from the list box. \nCancelling the Update request.")
return
#The user will only be allowed to update changes for phone and email
#for name changes, the user will need to delete the entire record and start over.
var_phone = self.txt_phone.get().strip() #normalize the data to maintain database integrity
var_email = self.txt_email.get().strip()
if (len(var_phone) > 0) and (len(var_email) > 0): #check to make sure the values are not empty
conn = sqlite3.connect('phonebook.db')
with conn:
cur = conn.cursor()
#count records to see if the user's changes are already in the database
# (i.e. there are no changes to update)
cur.execute("""SELECT COUNT(col_phone) FROM tbl_phonebook WHERE col_phone = '{}'""".format(var_phone))
count = cur.fetchone()[0]
print(count) #prints the phone selection
cur.execute("""SELECT COUNT(col_email) FROM tbl_phonebook WHERE col_email = '{}'""".format(var_email))
count2 = cur.fetchone()[0]
print(count2) #prints the email selection
if count == 0 or count2 == 0: #if the proposed changes are not already in the database, then proceed
response = messagebox.askokcancel("Update Request", "The following changes ({}) and ({}) will be implemented for ({}). \n\nProceed with the update request?".format(var_phone,var_email,var_value))
print(response)
if response:
with conn:
cursor = conn.cursor()
cursor.execute("""UPDATE tbl_phonebook SET col_phone = '{0}',col_email = '{1}' WHERE col_fullname = '{2}'""".format(var_phone,var_email,var_value))
onClear(self)
conn.commit()
else:
messagebox.showinfo("Cancel Request","No changes have been made to ({}).".format(var_value))
else:
messagebox.showinfo("No changes detected","Both ({}) and ({}) \nalready exist in the database for this name. \n\nYour update request has been cancelled.".format(var_phone, var_email))
onClear(self)
conn.close()
else:
messagebox.showerror("Missing information","Please select a name from the list. \nThen edit the phone or email information.")
onClear(self)
if __name__ == "__main__":
pass
| [
"64614859+lenniecottrell@users.noreply.github.com"
] | 64614859+lenniecottrell@users.noreply.github.com |
a0fe504dc7c1f9b916085038b62f0c6730b2f12e | 4f1144a15ba86cc183c5c69e578db8019133e660 | /src/official/transformer/v2/optgen_v13.py | 4d5704a53d28e02a0633f5fa29f6e7462359e4ca | [
"MIT"
] | permissive | AspirinCode/cmg | 244b718c64659cde505cedc449a5b65e5ede7c6d | fe5b4d8778df7bd85f78ec463d85185415a1c591 | refs/heads/main | 2023-01-03T09:42:47.433393 | 2020-10-26T21:39:29 | 2020-10-26T21:39:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,494 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the Transformer model in TF 2.0.
Model paper: https://arxiv.org/pdf/1706.03762.pdf
Transformer model code source: https://github.com/tensorflow/tensor2tensor
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from src.official.transformer.model import model_utils
from src.official.transformer.v2 import attention_layer
from src.official.transformer.v2 import beam_search
from src.official.transformer.v2 import embedding_layer
from src.official.transformer.v2 import ffn_layer
from src.official.transformer.v2 import metrics
def get_valnet(logits, embedding_softmax_layer, lstm_layer, output_layer):
ids = tf.keras.backend.argmax(logits, axis=-1)
embedded_inputs = embedding_softmax_layer(ids)
z = lstm_layer(embedded_inputs)
outputs = output_layer(z)
return outputs
def get_propnet(logits, embedding_softmax_layer, lstm_layer, idense_layer, output_layer):
ids = tf.keras.backend.argmax(logits, axis=-1)
embedded_inputs = embedding_softmax_layer(ids)
z = lstm_layer(embedded_inputs)
z = idense_layer(z)
outputs = output_layer(z)
return outputs
def get_simnet(logit1, inputs, embedding_softmax_layer, lstm_layer, idense_layer, output_layer):
ids1 = tf.keras.backend.argmax(logit1, axis=-1)
ids2 = inputs
embedded_input1 = embedding_softmax_layer(ids1)
embedded_input2 = embedding_softmax_layer(ids2)
seq1 = lstm_layer(embedded_input1) # [?, 64]
seq2 = lstm_layer(embedded_input2) # [?, 64]
z = tf.concat([seq1, seq2], axis=1) # [?, 128]
z = idense_layer(z)
outputs = output_layer(z)
return outputs
def create_model(params, is_train):
"""Creates transformer model."""
with tf.name_scope("model"):
if is_train:
inputs = tf.keras.layers.Input((None,), dtype="int64", name="inputs")
targets = tf.keras.layers.Input((None,), dtype="int64", name="targets")
px = tf.keras.layers.Input((3,), dtype="float32", name="px")
py = tf.keras.layers.Input((3,), dtype="float32", name="py")
# sim = tf.keras.layers.Input((1,), dtype="float32", name="sim")
internal_model = Transformer(params, name="optgen_v13")
logits = internal_model([inputs, px, py, targets], training=is_train)
# logits = internal_model([inputs, px, sim, py, targets], training=is_train)
vocab_size = params["vocab_size"]
label_smoothing = params["label_smoothing"]
if params["enable_metrics_in_training"]:
logits = metrics.MetricLayer(vocab_size)([logits, targets])
logits = tf.keras.layers.Lambda(lambda x: x, name="logits")(logits)
valnet_embedding_softmax_layer = embedding_layer.EmbeddingFreezable(
params["vocab_size"], params["valnet_hidden_size"], trainable=False)
valnet_bi_lstm_layer = tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(params["valnet_hidden_size"], name="valnet_lstm", trainable=False))
valnet_output_layer = tf.keras.layers.Dense(1, use_bias=True, activation=tf.nn.sigmoid, name="valnet_output",
trainable=False)
valnet_hat = get_valnet(logits, valnet_embedding_softmax_layer, valnet_bi_lstm_layer, valnet_output_layer)
propnet_embedding_softmax_layer = embedding_layer.EmbeddingFreezable(
params["vocab_size"], params["propnet_hidden_size"], trainable=False)
propnet_bi_lstm_layer = tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(params["propnet_hidden_size"], name="prop_lstm", trainable=False))
propnet_idense_layer = tf.keras.layers.Dense(100, use_bias=True, activation=tf.nn.sigmoid,
name="propnet_idense", trainable=False)
propnet_output_layer = tf.keras.layers.Dense(3, use_bias=True, activation=tf.nn.sigmoid,
name="propnet_output", trainable=False)
propnet_hat = get_propnet(logits, propnet_embedding_softmax_layer, propnet_bi_lstm_layer,
propnet_idense_layer, propnet_output_layer)
simnet_embedding_softmax_layer = embedding_layer.EmbeddingFreezable(
params["vocab_size"], params["simnet_hidden_size"], trainable=False)
simnet_bi_lstm_layer = tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(params["simnet_hidden_size"], name="simnet_lstm", trainable=False))
simnet_idense_layer = tf.keras.layers.Dense(100, use_bias=True, activation=tf.nn.relu, name="simnet_idense",
trainable=False)
simnet_output_layer = tf.keras.layers.Dense(1, use_bias=True, activation=tf.nn.sigmoid, name="simnet_output",
trainable=False)
simnet_hat = get_simnet(logits, inputs, simnet_embedding_softmax_layer, simnet_bi_lstm_layer,
simnet_idense_layer, simnet_output_layer)
model = tf.keras.Model([inputs, px, py, targets], logits)
# model = tf.keras.Model([inputs, px, sim, py, targets], logits)
loss = metrics.transformer_loss(
logits, targets, label_smoothing, vocab_size)
model.add_loss(loss)
valnet_true = tf.ones_like(valnet_hat)
loss_valnet = tf.keras.losses.binary_crossentropy(
valnet_true,
valnet_hat,
from_logits=False,
label_smoothing=0
)
model.add_loss(tf.reduce_sum(loss_valnet))
propnet_true = tf.keras.layers.Lambda(lambda x: x)(py)
loss_propnet = tf.keras.losses.mse(propnet_true, propnet_hat)
model.add_loss(tf.reduce_sum(loss_propnet))
simnet_true = tf.ones_like(simnet_hat)
loss_simnet = tf.keras.losses.binary_crossentropy(
simnet_true,
simnet_hat,
from_logits=False,
label_smoothing=0
)
model.add_loss(tf.reduce_sum(loss_simnet))
return model
else:
inputs = tf.keras.layers.Input((None,), dtype="int64", name="inputs")
px = tf.keras.layers.Input((3,), dtype="float32", name="px")
py = tf.keras.layers.Input((3,), dtype="float32", name="py")
# sim = tf.keras.layers.Input((1,), dtype="float32", name="sim")
internal_model = Transformer(params, name="optgen_v13")
# ret = internal_model([inputs, px, sim, py], training=is_train)
ret = internal_model([inputs, px, py], training=is_train)
outputs, scores = ret["outputs"], ret["scores"]
return tf.keras.Model([inputs, px, py], [outputs, scores])
# return tf.keras.Model([inputs, px, sim, py], [outputs, scores])
class Transformer(tf.keras.Model):
"""Transformer model with Keras.
Implemented as described in: https://arxiv.org/pdf/1706.03762.pdf
The Transformer model consists of an encoder and decoder. The input is an int
sequence (or a batch of sequences). The encoder produces a continuous
representation, and the decoder uses the encoder output to generate
probabilities for the output sequence.
"""
def __init__(self, params, name=None):
"""Initialize layers to build Transformer model.
Args:
params: hyperparameter object defining layer sizes, dropout values, etc.
name: name of the model.
"""
super(Transformer, self).__init__(name=name)
self.params = params
self.embedding_softmax_layer = embedding_layer.EmbeddingSharedWeights(
params["vocab_size"], params["hidden_size"])
self.encoder_stack = EncoderStack(params)
self.decoder_stack = DecoderStack(params)
self.property_emb_layer = tf.keras.layers.Dense(3, use_bias=True, activation=tf.nn.relu,
name="property_embedding")
def get_config(self):
return {
"params": self.params,
}
def call(self, inputs, training):
"""Calculate target logits or inferred target sequences.
Args:
inputs: input tensor list of size 1 or 2.
First item, inputs: int tensor with shape [batch_size, input_length].
Second item (optional), targets: None or int tensor with shape
[batch_size, target_length].
training: boolean, whether in training mode or not.
Returns:
If targets is defined, then return logits for each word in the target
sequence. float tensor with shape [batch_size, target_length, vocab_size]
If target is none, then generate output sequence one token at a time.
returns a dictionary {
outputs: [batch_size, decoded length]
scores: [batch_size, float]}
Even when float16 is used, the output tensor(s) are always float32.
"""
if len(inputs) == 4:
inputs, px, py, targets = inputs[0], inputs[1], inputs[2], inputs[3]
else: # 3
inputs, px, py, targets = inputs[0], inputs[1], inputs[2], None
# if len(inputs) == 5:
# inputs, px, sim, py, targets = inputs[0], inputs[1], inputs[2], inputs[3], inputs[4]
# else: # 4
# inputs, px, sim, py, targets = inputs[0], inputs[1], inputs[2], inputs[3], None
# Variance scaling is used here because it seems to work in many problems.
# Other reasonable initializers may also work just as well.
with tf.name_scope("Transformer"):
# Calculate attention bias for encoder self-attention and decoder
# multi-headed attention layers.
attention_bias = model_utils.get_padding_bias(inputs)
# Run the inputs through the encoder layer to map the symbol
# representations to continuous representations.
encoder_outputs = self.encode(inputs, attention_bias, training)
# encoder_outputs = self.concat_property(encoder_outputs, px, sim, py)
encoder_outputs = self.concat_property(encoder_outputs, px, py)
# Generate output sequence if targets is None, or return logits if target
# sequence is known.
if targets is None:
return self.predict(encoder_outputs, attention_bias, training)
else:
logits = self.decode(targets, encoder_outputs, attention_bias, training)
return logits
def concat_property(self, encoder_outputs, px, py):
# def concat_property(sefl, encoder_outputs, px, sim, py):
"""Generate logits for each value in the target sequence.
Args:
encoder_outputs: continuous representation of input sequence. float tensor
with shape [batch_size, input_length, hidden_size]
px: float tensor with property of x [batch_size, 3]
py: float tensor with property of y [batch_size, 3]
Returns:
float32 tensor with shape [batch_size, input_length, hidden_size+6]
"""
input_length = tf.shape(encoder_outputs)[1]
px = self.property_emb_layer(px)
py = self.property_emb_layer(py)
px = tf.tile(tf.expand_dims(px, axis=1), multiples=[1, input_length, 1])
# sim = tf.tile(tf.expand_dims(sim, axis=1), multiples=[1, input_length, 1])
py = tf.tile(tf.expand_dims(py, axis=1), multiples=[1, input_length, 1])
result = tf.concat([encoder_outputs, px, py], axis=-1)
# result = tf.concat([encoder_outputs, px, sim, py], axis=-1)
return result
def encode(self, inputs, attention_bias, training):
"""Generate continuous representation for inputs.
Args:
inputs: int tensor with shape [batch_size, input_length].
attention_bias: float tensor with shape [batch_size, 1, 1, input_length].
training: boolean, whether in training mode or not.
Returns:
float tensor with shape [batch_size, input_length, hidden_size]
"""
with tf.name_scope("encode"):
# Prepare inputs to the layer stack by adding positional encodings and
# applying dropout.
embedded_inputs = self.embedding_softmax_layer(inputs)
embedded_inputs = tf.cast(embedded_inputs, self.params["dtype"])
inputs_padding = model_utils.get_padding(inputs)
attention_bias = tf.cast(attention_bias, self.params["dtype"])
with tf.name_scope("add_pos_encoding"):
length = tf.shape(embedded_inputs)[1]
pos_encoding = model_utils.get_position_encoding(
length, self.params["hidden_size"])
pos_encoding = tf.cast(pos_encoding, self.params["dtype"])
encoder_inputs = embedded_inputs + pos_encoding
if training:
encoder_inputs = tf.nn.dropout(
encoder_inputs, rate=self.params["layer_postprocess_dropout"])
return self.encoder_stack(
encoder_inputs, attention_bias, inputs_padding, training=training)
def decode(self, targets, encoder_outputs, attention_bias, training):
"""Generate logits for each value in the target sequence.
Args:
targets: target values for the output sequence. int tensor with shape
[batch_size, target_length]
encoder_outputs: continuous representation of input sequence. float tensor
with shape [batch_size, input_length, hidden_size]
attention_bias: float tensor with shape [batch_size, 1, 1, input_length]
training: boolean, whether in training mode or not.
Returns:
float32 tensor with shape [batch_size, target_length, vocab_size]
"""
with tf.name_scope("decode"):
# Prepare inputs to decoder layers by shifting targets, adding positional
# encoding and applying dropout.
decoder_inputs = self.embedding_softmax_layer(targets)
decoder_inputs = tf.cast(decoder_inputs, self.params['dtype'])
attention_bias = tf.cast(attention_bias, self.params["dtype"])
with tf.name_scope("shift_targets"):
# Shift targets to the right, and remove the last element
decoder_inputs = tf.pad(decoder_inputs,
[[0, 0], [1, 0], [0, 0]])[:, :-1, :]
with tf.name_scope("add_pos_encoding"):
length = tf.shape(decoder_inputs)[1]
pos_encoding = model_utils.get_position_encoding(
length, self.params["hidden_size"])
pos_encoding = tf.cast(pos_encoding, self.params["dtype"])
decoder_inputs += pos_encoding
if training:
decoder_inputs = tf.nn.dropout(
decoder_inputs, rate=self.params["layer_postprocess_dropout"])
# Run values
decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(
length, dtype=self.params['dtype'])
outputs = self.decoder_stack(
decoder_inputs,
encoder_outputs,
decoder_self_attention_bias,
attention_bias,
training=training)
logits = self.embedding_softmax_layer(outputs, mode="linear")
logits = tf.cast(logits, tf.float32)
return logits
def _get_symbols_to_logits_fn(self, max_decode_length, training):
"""Returns a decoding function that calculates logits of the next tokens."""
timing_signal = model_utils.get_position_encoding(
max_decode_length + 1, self.params["hidden_size"])
decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(
max_decode_length)
def symbols_to_logits_fn(ids, i, cache):
"""Generate logits for next potential IDs.
Args:
ids: Current decoded sequences. int tensor with shape [batch_size *
beam_size, i + 1]
i: Loop index
cache: dictionary of values storing the encoder output, encoder-decoder
attention bias, and previous decoder attention values.
Returns:
Tuple of
(logits with shape [batch_size * beam_size, vocab_size],
updated cache values)
"""
# Set decoder input to the last generated IDs
decoder_input = ids[:, -1:]
# Preprocess decoder input by getting embeddings and adding timing signal.
decoder_input = self.embedding_softmax_layer(decoder_input)
decoder_input += timing_signal[i:i + 1]
self_attention_bias = decoder_self_attention_bias[:, :, i:i + 1, :i + 1]
decoder_outputs = self.decoder_stack(
decoder_input,
cache.get("encoder_outputs"),
self_attention_bias,
cache.get("encoder_decoder_attention_bias"),
training=training,
cache=cache)
logits = self.embedding_softmax_layer(decoder_outputs, mode="linear")
logits = tf.squeeze(logits, axis=[1])
return logits, cache
return symbols_to_logits_fn
def predict(self, encoder_outputs, encoder_decoder_attention_bias, training):
"""Return predicted sequence."""
# Currently, we always do prediction in float32.
# TODO(reedwm): Add float16 support.
encoder_outputs = tf.cast(encoder_outputs, tf.float32)
batch_size = tf.shape(encoder_outputs)[0]
input_length = tf.shape(encoder_outputs)[1]
max_decode_length = input_length + self.params["extra_decode_length"]
symbols_to_logits_fn = self._get_symbols_to_logits_fn(
max_decode_length, training)
# Create initial set of IDs that will be passed into symbols_to_logits_fn.
initial_ids = tf.ones([batch_size], dtype=tf.int32) # 1: [BEGIN]
# Create cache storing decoder attention values for each layer.
# pylint: disable=g-complex-comprehension
cache = {
"layer_%d" % layer: {
"k": tf.zeros([batch_size, 0, self.params["hidden_size"]]),
"v": tf.zeros([batch_size, 0, self.params["hidden_size"]])
} for layer in range(self.params["num_hidden_layers"])
}
# pylint: enable=g-complex-comprehension
# Add encoder output and attention bias to the cache.
cache["encoder_outputs"] = encoder_outputs
cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias
# Use beam search to find the top beam_size sequences and scores.
decoded_ids, scores = beam_search.sequence_beam_search(
symbols_to_logits_fn=symbols_to_logits_fn,
initial_ids=initial_ids,
initial_cache=cache,
vocab_size=self.params["vocab_size"],
beam_size=self.params["beam_size"],
alpha=self.params["alpha"],
max_decode_length=max_decode_length,
eos_id=2) # 2: [END]
import sys
# tf.print(decoded_ids.shape, output_stream=sys.stderr)
# Get the top sequence for each batch element
# top_decoded_ids = decoded_ids[:, 0, 1:]
# for i in range(self.params["beam_size"]):
# candidate_ids = decoded_ids[:, i, 0:] #should include [begin], [batch, beam_size, length]
#
# get_propnet(params, candidate_ids)
top_decoded_ids = decoded_ids[:, 0, 0:] #should include [begin], [batch, beam_size, length]
top_scores = scores[:, 0]
return {"outputs": top_decoded_ids, "scores": top_scores}
class LayerNormalization(tf.keras.layers.Layer):
  """Applies layer normalization over the final (hidden) dimension."""

  def __init__(self, hidden_size):
    super(LayerNormalization, self).__init__()
    # Size of the last axis; used to shape the scale/bias parameters.
    self.hidden_size = hidden_size

  def build(self, input_shape):
    """Builds the layer."""
    # Passing experimental_autocast=False causes these variables to not be
    # automatically casted to fp16 when mixed precision is used. Since we use
    # float32 in call() for numeric stability, we do not want variables to be
    # casted to fp16.
    self.scale = self.add_weight(
        "layer_norm_scale",
        shape=[self.hidden_size],
        dtype="float32",
        initializer=tf.ones_initializer(),
        experimental_autocast=False)
    self.bias = self.add_weight(
        "layer_norm_bias",
        shape=[self.hidden_size],
        dtype="float32",
        initializer=tf.zeros_initializer(),
        experimental_autocast=False)
    super(LayerNormalization, self).build(input_shape)

  def get_config(self):
    # hidden_size is the only constructor argument, so it fully describes
    # the layer for serialization.
    return {
        "hidden_size": self.hidden_size,
    }

  def call(self, x, epsilon=1e-6):
    """Normalizes x over its last axis.

    Args:
      x: tensor whose last axis has size hidden_size.
      epsilon: small constant added to the variance for numeric stability.

    Returns:
      Normalized tensor with the same shape and dtype as x.
    """
    input_dtype = x.dtype
    if input_dtype == tf.float16:
      # Compute statistics in float32 for stability; cast back on return.
      x = tf.cast(x, tf.float32)
    mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
    variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
    norm_x = (x - mean) * tf.math.rsqrt(variance + epsilon)
    return tf.cast(norm_x * self.scale + self.bias, input_dtype)
class PrePostProcessingWrapper(tf.keras.layers.Layer):
  """Wrapper class that applies layer pre-processing and post-processing.

  Pre-processing: layer normalization of the input (pre-norm architecture).
  Post-processing: dropout (training only) followed by a residual connection
  back to the wrapped layer's input.
  """

  def __init__(self, layer, params):
    super(PrePostProcessingWrapper, self).__init__()
    self.layer = layer
    self.params = params
    self.postprocess_dropout = params["layer_postprocess_dropout"]

  def build(self, input_shape):
    # Create normalization layer
    self.layer_norm = LayerNormalization(self.params["hidden_size"])
    super(PrePostProcessingWrapper, self).build(input_shape)

  def get_config(self):
    # NOTE(review): the wrapped `layer` is not serialized here, so this
    # config alone cannot reconstruct the wrapper — confirm intent.
    return {
        "params": self.params,
    }

  def call(self, x, *args, **kwargs):
    """Calls wrapped layer with same parameters.

    kwargs must contain a boolean "training" entry; all other args/kwargs
    are forwarded to the wrapped layer unchanged.
    """
    # Preprocessing: apply layer normalization
    training = kwargs["training"]
    y = self.layer_norm(x)

    # Get layer output
    y = self.layer(y, *args, **kwargs)

    # Postprocessing: apply dropout and residual connection
    if training:
      y = tf.nn.dropout(y, rate=self.postprocess_dropout)
    return x + y
class EncoderStack(tf.keras.layers.Layer):
  """Transformer encoder stack.

  The encoder stack is made up of N identical layers. Each layer is composed
  of the sublayers:
    1. Self-attention layer
    2. Feedforward network (which is 2 fully-connected layers)
  """

  def __init__(self, params):
    super(EncoderStack, self).__init__()
    self.params = params
    self.layers = []

  def build(self, input_shape):
    """Builds the encoder stack."""
    params = self.params
    for _ in range(params["num_hidden_layers"]):
      # Create sublayers for each layer.
      self_attention_layer = attention_layer.SelfAttention(
          params["hidden_size"], params["num_heads"],
          params["attention_dropout"])
      feed_forward_network = ffn_layer.FeedForwardNetwork(
          params["hidden_size"], params["filter_size"], params["relu_dropout"])
      # Each sublayer is wrapped with pre-norm + dropout + residual.
      self.layers.append([
          PrePostProcessingWrapper(self_attention_layer, params),
          PrePostProcessingWrapper(feed_forward_network, params)
      ])

    # Create final layer normalization layer.
    self.output_normalization = LayerNormalization(params["hidden_size"])
    super(EncoderStack, self).build(input_shape)

  def get_config(self):
    return {
        "params": self.params,
    }

  def call(self, encoder_inputs, attention_bias, inputs_padding, training):
    """Return the output of the encoder layer stacks.

    Args:
      encoder_inputs: tensor with shape [batch_size, input_length, hidden_size]
      attention_bias: bias for the encoder self-attention layer. [batch_size, 1,
        1, input_length]
      inputs_padding: tensor with shape [batch_size, input_length], inputs with
        zero paddings.
      training: boolean, whether in training mode or not.

    Returns:
      Output of encoder layer stack.
      float32 tensor with shape [batch_size, input_length, hidden_size]
    """
    # NOTE(review): inputs_padding is accepted but never used in this body —
    # presumably kept for interface parity with other encoder variants;
    # confirm before removing.
    for n, layer in enumerate(self.layers):
      # Run inputs through the sublayers.
      self_attention_layer = layer[0]
      feed_forward_network = layer[1]
      with tf.name_scope("layer_%d" % n):
        with tf.name_scope("org_encoder_self_attention"):
          encoder_inputs = self_attention_layer(
              encoder_inputs, attention_bias, training=training)
        with tf.name_scope("org_encoder_ffn_org"):
          encoder_inputs = feed_forward_network(
              encoder_inputs, training=training)

    # Final normalization is required because sublayers use pre-norm.
    return self.output_normalization(encoder_inputs)
class DecoderStack(tf.keras.layers.Layer):
  """Transformer decoder stack.

  Like the encoder stack, the decoder stack is made up of N identical layers.
  Each layer is composed of the sublayers:
    1. Self-attention layer
    2. Multi-headed attention layer combining encoder outputs with results from
       the previous self-attention layer.
    3. Feedforward network (2 fully-connected layers)
  """

  def __init__(self, params):
    super(DecoderStack, self).__init__()
    self.params = params
    self.layers = []

  def build(self, input_shape):
    """Builds the decoder stack."""
    params = self.params
    for _ in range(params["num_hidden_layers"]):
      self_attention_layer = attention_layer.SelfAttention(
          params["hidden_size"], params["num_heads"],
          params["attention_dropout"])
      enc_dec_attention_layer = attention_layer.Attention(
          params["hidden_size"], params["num_heads"],
          params["attention_dropout"])
      feed_forward_network = ffn_layer.FeedForwardNetwork(
          params["hidden_size"], params["filter_size"], params["relu_dropout"])
      # Each sublayer is wrapped with pre-norm + dropout + residual.
      self.layers.append([
          PrePostProcessingWrapper(self_attention_layer, params),
          PrePostProcessingWrapper(enc_dec_attention_layer, params),
          PrePostProcessingWrapper(feed_forward_network, params)
      ])
    self.output_normalization = LayerNormalization(params["hidden_size"])
    super(DecoderStack, self).build(input_shape)

  def get_config(self):
    return {
        "params": self.params,
    }

  def call(self,
           decoder_inputs,
           encoder_outputs,
           decoder_self_attention_bias,
           attention_bias,
           training,
           cache=None):
    """Return the output of the decoder layer stacks.

    Args:
      decoder_inputs: tensor with shape [batch_size, target_length, hidden_size]
      encoder_outputs: tensor with shape [batch_size, input_length, hidden_size]
      decoder_self_attention_bias: bias for decoder self-attention layer. [1, 1,
        target_len, target_length]
      attention_bias: bias for encoder-decoder attention layer. [batch_size, 1,
        1, input_length]
      training: boolean, whether in training mode or not.
      cache: (Used for fast decoding) A nested dictionary storing previous
        decoder self-attention values. The items are:
          {layer_n: {"k": tensor with shape [batch_size, i, key_channels],
                     "v": tensor with shape [batch_size, i, value_channels]},
           ...}

    Returns:
      Output of decoder layer stack.
      float32 tensor with shape [batch_size, target_length, hidden_size]
    """
    for n, layer in enumerate(self.layers):
      self_attention_layer = layer[0]
      enc_dec_attention_layer = layer[1]
      feed_forward_network = layer[2]

      # Run inputs through the sublayers.
      layer_name = "layer_%d" % n
      # During incremental decoding, each layer appends the new position's
      # keys/values to its per-layer cache entry.
      layer_cache = cache[layer_name] if cache is not None else None
      with tf.name_scope(layer_name):
        with tf.name_scope("org_decoder_self_attention"):
          decoder_inputs = self_attention_layer(
              decoder_inputs,
              decoder_self_attention_bias,
              training=training,
              cache=layer_cache)
        with tf.name_scope("org_decoder_encdec_attention"):
          decoder_inputs = enc_dec_attention_layer(
              decoder_inputs,
              encoder_outputs,
              attention_bias,
              training=training)
        with tf.name_scope("org_decoder_ffn"):
          decoder_inputs = feed_forward_network(
              decoder_inputs, training=training)

    # Final normalization is required because sublayers use pre-norm.
    return self.output_normalization(decoder_inputs)
| [
"nomolos79@gmail.com"
] | nomolos79@gmail.com |
677f1c9ab1b95908a04a6846f55392ab078d38ed | 7e2ae02c7d7a00479c79c57df3b4b5a67680b9f8 | /CrazyPython/05/transfer_test.py | 5ec3629437f39271bc014bac62e2e4008d72f92a | [] | no_license | ahao214/HundredPY | 32f8e149475e0d07a910ae82a1725de10eeaf5eb | 67a941b15b51dfbf933c61a35efa85ed84d441b8 | refs/heads/master | 2021-09-30T02:45:50.583381 | 2021-09-18T09:33:36 | 2021-09-18T09:33:36 | 196,845,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | # 函数的参数传递机制
def swap(a, b):
    """Exchange the two local bindings and print the result.

    Demonstrates that rebinding parameters does not affect the caller's
    variables: Python passes object references by value.
    """
    tmp = a
    a = b
    b = tmp
    print("在swap函数里,a的值是:", a, ";b的值是,", b)
# Demonstration: swapping inside swap() leaves the caller's a and b intact.
a = 6
b = 9
swap(a, b)
print("交换结束后,变量a的值是:", a, ";变量b的值是:", b)
def swapdict(dw):
    """Swap the values stored under keys 'a' and 'b' of dw, in place.

    Because dw is the caller's dict object, the mutation is visible to the
    caller after this function returns (unlike rebinding in swap()).
    """
    previous_a = dw['a']
    dw['a'] = dw['b']
    dw['b'] = previous_a
    print("在swap函数里,dw['a']的值是:", dw['a'], ";dw['b']的值是,", dw['b'])
# Demonstration: mutating the dict inside swapdict() IS visible here.
dw = {'a': 6, 'b': 9}
swapdict(dw)
print("交换结束后,dw['a']的值是:", dw['a'], ";dw['b']的值是,", dw['b'])
| [
"haochen214@163.com"
] | haochen214@163.com |
4f2a818799590c4fbc98ecdb6e382a0efee81dbc | 006141b07c6bcd0a693a13f51cbb6d7c6009ccdd | /cgi-bin/update.py | 41cdc322b6a55497db29af8efb737ed8d42de66c | [] | no_license | exilaus/3dhome | 7e54181f84cf9cca1c4422217479e7869485eed4 | e07f168da539e2edb3ddf047215825ce8c6a9010 | refs/heads/master | 2021-01-18T20:21:40.674334 | 2013-07-24T17:05:32 | 2013-07-24T17:05:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | #!/usr/bin/pythonRoot
import cgi, os,sys
import subprocess
import urllib2
print """Content-Type: text/html;charset=utf-8\r\n\r\n\r\n\r\n\r\n
<html>
<head>
<title>Update</title><link rel="stylesheet" type="text/css" href="/WEB/style.css">
<script type="text/javascript">
function goBack() {
javascript: history.go(-2);
}
function timer() {
setTimeout("goBack()", 35000);
}
window.onload=timer;
</script>
</head><body>
<h2>Update</h2><center><table><tr><td>"""
url = 'http://exilaus.byethost15.com/3dhome/update.txt' # write the url here
response = urllib2.urlopen(url)
for line in response:
print line.rstrip()
print "<br>"
c = subprocess.Popen(line.rstrip(), shell=True)
print "</td></tr></table></center><br><br><br><br><br><br><center><h1>Update complete. Please wait...</h1></center>"
print "</body>" | [
"exilaus@hotmail.com"
] | exilaus@hotmail.com |
ee57158af40112b19388d679d38127b30806d32a | c9ad6ad969de505b3c8471c6f46dfd782a0fb498 | /0x05-python-exceptions/0-safe_print_list.py | 16119623870bda212d1982f12fcd78d50aa22dde | [] | no_license | enterpreneur369/holbertonschool-higher_level_programming | 002fd5a19b40c8b1db06b34c4344e307f24c17ac | dd7d3f14bf3bacb41e2116d732ced78998a4afcc | refs/heads/master | 2022-06-20T00:57:27.736122 | 2020-05-06T14:26:10 | 2020-05-06T14:26:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | #!/usr/bin/python3
""" safe_print_list
Python function to print the elements of a list
"""
def safe_print_list(my_list=[], x=0):
    """Print up to x elements of my_list on one line.

    Stops early (silently) if the list runs out of elements, always ends the
    line with a newline, and returns the number of elements printed.
    """
    printed = 0
    try:
        for idx in range(x):
            print("{}".format(my_list[idx]), end="")
            printed += 1
    except IndexError:
        pass
    finally:
        print()
        return (printed)
| [
"jose.calderon@holbertonschool.com"
] | jose.calderon@holbertonschool.com |
5d9c86ea45fce01a9865a3fa70adb03dbe1e9476 | 0a2a795c01d83f18d43d90da404231e4f9f921e1 | /src/models/seeds_only.py | a1f0d6b6c2bb620ab74badcab09a8341040a6294 | [] | no_license | aritche/PointCloudExperiments | 609d738800d03adb09a28d855395364a59eb97d0 | ec6905a3dc55a239b0943b9e754b27cdec18deb0 | refs/heads/master | 2022-12-27T01:48:07.315674 | 2020-10-06T15:06:24 | 2020-10-06T15:06:24 | 293,429,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,855 | py | """
A model for generating streamlines for a single tract
Uses 3D TOM input
This is an adaptation of HairNet (Zhou et al. 2018) https://doi.org/10.1007/978-3-030-01252-6_15
"""
import os
import numpy as np
import random
import cv2
import nibabel as nib
from glob import glob
import torch
import torch.nn as nn
from chamfer_python import distChamfer as chamferDistance
from dipy.io.streamline import load_trk
from dipy.tracking.streamline import set_number_of_points, select_random_set_of_streamlines
from nibabel import trackvis
from dipy.tracking import utils
from models.augment import *
# Points sampled per streamline.
num_points = 10
# Streamlines sampled per tractogram example.
num_streamlines = 128
# Model adapted from https://towardsdatascience.com/deep-learning-on-point-clouds-implementing-pointnet-in-google-colab-1fd65cd3a263
class CustomModel(nn.Module):
    """Seed-only streamline generator.

    Encodes the seed point cloud PointNet-style (shared per-point MLPs
    followed by a global max pool) and decodes the resulting 64-d feature
    through fully connected layers into num_streamlines * num_points 3D
    coordinates. The TOM point cloud argument is accepted for interface
    compatibility but is intentionally unused (see module name: seeds only).

    FIX: removed the cv2 debug-visualization residue (named windows,
    imshow/waitKey, and the intermediate-activation dumps) that ran on every
    forward pass; the computed output is unchanged.
    """

    def __init__(self):
        super(CustomModel, self).__init__()
        # tanh/sigmoid are currently unused by forward() but are kept so the
        # module's attribute set (and state_dict layout) is unchanged.
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()
        self.dropout = torch.nn.Dropout(p=0)
        # Shared per-point MLPs over the seed cloud (PointNet-style).
        self.seed_mlp_1 = nn.Conv1d(in_channels=3, out_channels=32, kernel_size=1)
        self.seed_mlp_2 = nn.Conv1d(in_channels=32, out_channels=64, kernel_size=1)
        # Decoder: global feature -> flattened streamline coordinates
        # (3840 = 3 * num_streamlines * num_points).
        self.linear_1 = nn.Linear(in_features=64, out_features=512)
        self.linear_2 = nn.Linear(in_features=512, out_features=1024)
        self.linear_3 = nn.Linear(in_features=1024, out_features=3840)

    def forward(self, tom_cloud, seeds_cloud):
        """Generate streamline points from seed points.

        Args:
            tom_cloud: unused; present for interface compatibility with
                models that also consume the TOM point cloud.
            seeds_cloud: tensor of seed coordinates, channels-first
                (batch, 3, num_seeds).

        Returns:
            Tensor of shape (batch, 3, num_streamlines * num_points).
        """
        # Encode seeds: per-point MLPs, then max-pool over points to a
        # single 64-d global feature per sample.
        s = self.dropout(self.relu(self.seed_mlp_1(seeds_cloud)))
        s = self.dropout(self.relu(self.seed_mlp_2(s)))
        s = nn.MaxPool1d(s.size(-1))(s)
        x = s.view(-1, 64)

        # Decode the global feature into flattened streamline coordinates.
        x = self.dropout(self.relu(self.linear_1(x)))
        x = self.dropout(self.relu(self.linear_2(x)))
        x = self.linear_3(x)
        return x.view(-1, 3, num_streamlines*num_points)
# Custom loss function
def CustomLoss(output, target):
    """Symmetric Chamfer distance between predicted and target streamlines.

    Both inputs are channels-first clouds (batch, 3, num_streamlines *
    num_points); each streamline is flattened to a num_points*3 vector and
    the mean of the two directed Chamfer distances is returned.
    """
    def as_streamline_batch(cloud):
        # (batch, 3, N) -> (batch, num_streamlines, num_points * 3)
        return cloud.permute(0, 2, 1).reshape(-1, num_streamlines, num_points*3)

    predicted = as_streamline_batch(output)
    reference = as_streamline_batch(target)
    dist_fwd, dist_bwd, _, _ = chamferDistance(predicted, reference)
    return (dist_fwd + dist_bwd).mean()
def get_data(tom_fn, tractogram_fn, is_test):
    """Load one (TOM cloud, tractogram) sample pair from .npy files.

    Args:
        tom_fn: path to the TOM point-cloud .npy file.
        tractogram_fn: path to the tractogram point-cloud .npy file.
        is_test: if False, random rotation/scale/displacement/noise
            augmentation is applied consistently to both clouds.

    Returns:
        [[tom, seeds], tractogram] as channels-first float32 torch tensors:
        tom (3, num_tom_points), seeds (3, num_streamlines),
        tractogram (3, num_streamlines * num_points).
    """
    # Load data
    tom_cloud = np.load(tom_fn)
    trk_cloud = np.float32(np.load(tractogram_fn))

    # Sample streamlines from tractogram: one row per streamline
    # (num_points * 3 values), shuffled, then truncated.
    trk_cloud = np.reshape(trk_cloud, (-1, num_points*3))
    np.random.shuffle(trk_cloud)
    trk_cloud = trk_cloud[:num_streamlines,:]
    if len(trk_cloud) < num_streamlines: # pad with zeros if not enough streamlines
        padding_cloud = np.zeros((num_streamlines,3*num_points))
        padding_cloud[:trk_cloud.shape[0],:trk_cloud.shape[1]] = trk_cloud
        trk_cloud = padding_cloud
    trk_cloud = np.reshape(trk_cloud, (num_streamlines*num_points, 3))

    #####################
    # Data augmentation #
    #####################
    # The same random transform parameters are applied to BOTH clouds so the
    # input/label pair stays geometrically consistent (noise is TOM-only).
    if is_test == False:
        # Rotation factors
        x_angle = np.random.uniform(-np.pi/4, np.pi/4)
        y_angle = np.random.uniform(-np.pi/4, np.pi/4)
        z_angle = np.random.uniform(-np.pi/4, np.pi/4)

        # Scale factors
        x_factor = np.random.uniform(0.9, 1.5)
        y_factor = np.random.uniform(0.9, 1.5)
        z_factor = np.random.uniform(0.9, 1.5)

        # Displacement factors
        x_disp = np.random.uniform(-0.1,0.1)
        y_disp = np.random.uniform(-0.1,0.1)
        z_disp = np.random.uniform(-0.1,0.1)

        # Noise stdev factor
        noise_stdev = np.random.uniform(0,0.02)

        # Get the matrices
        rot_matrix = get_rot_matrix(x_angle, y_angle, z_angle)
        scale_matrix = get_scale_matrix(x_factor, y_factor, z_factor)

        # Augment the TOM cloud
        tom_cloud = rotate_tom_cloud(tom_cloud, rot_matrix)
        tom_cloud = displace_tom_cloud(tom_cloud, x_disp, y_disp, z_disp)
        tom_cloud = scale_tom_cloud(tom_cloud, scale_matrix)
        tom_cloud = tom_add_noise(tom_cloud, 0, noise_stdev)

        # Augment the TRK cloud
        trk_cloud = rotate_trk_cloud(trk_cloud, rot_matrix)
        trk_cloud = displace_trk_cloud(trk_cloud, x_disp, y_disp, z_disp)
        trk_cloud = scale_trk_cloud(trk_cloud, scale_matrix)

    # Extract seeds from resulting tractogram (first point of each streamline).
    seeds = np.reshape(trk_cloud, (num_streamlines, num_points, 3))[:,0,:]

    # Convert to torch tensors
    tom = torch.from_numpy(np.float32(tom_cloud))
    tom = tom.permute(1,0) # channels first for pytorch
    tractogram = torch.from_numpy(np.float32(trk_cloud))
    tractogram = tractogram.permute(1, 0) # channels first for pytorch
    seeds = torch.from_numpy(np.float32(seeds))
    seeds = seeds.permute(1,0)

    return [[tom, seeds], tractogram]
class CustomDataset(torch.utils.data.Dataset):
    """Dataset pairing TOM point-cloud files with tractogram files.

    Files are matched positionally after sorting both directory listings,
    so corresponding samples must sort identically in both directories.
    """

    def __init__(self, toms_dir, tractograms_dir, is_test=False):
        # Sorted listings keep each TOM aligned with its tractogram.
        self.toms_fn = sorted(glob(toms_dir + '/*.npy'))
        self.tractograms_fn = sorted(glob(tractograms_dir + '/*.npy'))
        self.is_test = is_test

    def __getitem__(self, idx):
        """Load and return the [[tom, seeds], tractogram] pair at idx."""
        return get_data(self.toms_fn[idx], self.tractograms_fn[idx], self.is_test)

    def __len__(self):
        return len(self.toms_fn)
def OutputToPoints(output):
    """Convert a channels-first (3, N) tensor to an (N, 3) numpy array."""
    return output.permute(1, 0).cpu().detach().numpy()
def OutputToStreamlines(output):
    """Convert a channels-first (3, N) tensor into streamlines.

    Returns a numpy array of shape (N / num_points, num_points, 3): one row
    of num_points 3D coordinates per streamline.
    """
    points = output.permute(1, 0).cpu().detach().numpy()
    return np.reshape(points, (-1, num_points, 3))
| [
"11988281+aritche@users.noreply.github.com"
] | 11988281+aritche@users.noreply.github.com |
7df7be595ce8ffd2733101ec5fb928582a128286 | 85431b353749dd8f6ea308b439b9e5c42b2a7352 | /UnicornLog/users/urls.py | 2de4fefe0f2aed0d126c148fa75e4804122b7173 | [] | no_license | evansimmons/DjangoUnicorn | d018b229f7468e371a939ffb8fbf6a4afc2f1903 | 5e5c5fb6401daeabeb12404ebc390248cb29324d | refs/heads/main | 2023-06-19T19:36:57.036735 | 2021-07-14T19:52:38 | 2021-07-14T19:52:38 | 381,471,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | '''defines url patters for the users app'''
from django.urls import path, include
from . import views
# Namespace for reversing these routes as 'users:<name>'.
app_name= 'users'
urlpatterns = [
    # Django's built-in auth urls (login, logout, password management).
    path('', include('django.contrib.auth.urls')),
    # Registration page.
    path('register/', views.register, name='register'),
]
| [
"etscodelancer@gmail.com"
] | etscodelancer@gmail.com |
e14c3e8ca645ab5648ae1080ad7080f8db18abef | c0377e118c629965a42a05198bb9f2faa789db5f | /ABS/test.py | 81dbaf08cfcd6679e1aef3d0c325f0c6710575a4 | [] | no_license | Craft055/AtCoder | 0e3a078951b1cc3c991d3d18b15b6ddc697c8f1f | 40011b7033717726de593f2781331d4c67b10dd1 | refs/heads/master | 2023-03-01T11:08:29.611377 | 2021-02-07T17:27:35 | 2021-02-07T17:27:35 | 240,670,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import fractions
print(math.cos(10))
print(math.sin(10))
a = 6
b = 4
print(fractions.gcd(a, b))
#print(math.gcd(a, b))
| [
"craftsman.jvyeu@gmail.com"
] | craftsman.jvyeu@gmail.com |
d6a0249802c2b4ec54d3455f60c9a2bcede0ef1d | 5b9f98b59dbade41fc06303623a410c6064d7f1e | /web/admin.py | df2a5fc143fa8646a26272aabf3270570d01778b | [] | no_license | neilus/hello-django | 250c9020fb88ed760dbf0fb0fae3feec7357b046 | 6f768766b10131c33a1c6411240381fd30c86957 | refs/heads/master | 2023-02-17T23:27:39.665641 | 2020-11-20T19:34:40 | 2020-11-20T19:34:40 | 247,371,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Question, Choice
class ChoiceInline(admin.TabularInline):
    """Tabular inline so Choice rows can be edited on the Question page."""
    model = Choice
    # No blank "extra" choice forms by default.
    extra = 0
class QuestionAdmin(admin.ModelAdmin):
    """Admin for Question: grouped fields plus inline Choice editing."""
    fieldsets = [
        (None, {'fields': ['question_text']}),
        # FIX: corrected user-visible typo "Date Infomation".
        ('Date Information', {
            'fields': ['pub_date'],
            # Section rendered collapsed by default.
            'classes': ['collapse']
        }),
    ]
    # Edit related Choices directly on the Question change page.
    inlines = [ChoiceInline]
# Register both models; Question uses the customized admin class above.
admin.site.register(Question, QuestionAdmin)
admin.site.register(Choice)
| [
"neilus@elte.hu"
] | neilus@elte.hu |
cee3e4fc0e62ec435f4307f8189967c2b0311e8f | 117e2fab53a39e14d4aa1c8c60d146c942118ac6 | /gcase/views/da_report_views.py | 270e5e71d7a1d42275aa0ee489808fd47035b9c3 | [] | no_license | j1210030/case-portal | 6e46ae3adf9c7d0905de27c78a4aba830d8eccb3 | 257ca1bdc7760db60a4c9102e59920574ec3975d | refs/heads/master | 2020-03-27T15:14:25.504719 | 2018-08-30T06:53:10 | 2018-08-30T06:53:10 | 146,705,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,761 | py | # -*- coding: utf-8 -*-
"""
@File: da_report_views.py
"""
from .common_views import *
from gcase.models import BacklogReport, PartnerReport,LanguageReport, Partner,ReviewRequestReport
from gcase.views import BacklogReportView, LanguageReportView
from django.utils.functional import lazy
from django.db import DatabaseError
from itertools import chain
from datetime import timedelta
from django.http import StreamingHttpResponse
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Sum, Avg, Max, Min
from django.db import connection
import csv
from io import TextIOWrapper, StringIO
from io import BytesIO
import sys;
from django.template.defaultfilters import default
from django.db.models.query import prefetch_related_objects
log = logging.getLogger(__name__)
class DaReportView(BacklogReportView,LanguageReportView):
template_name='gcase/report/da_report/index.html'
context_dict = {}
def get(self, request):
self.context_dict = {}
data=request.GET.copy()
self.context_dict['view_name'] = 'da_bi_weekly'
self.context_dict['action'] = 'index'
week_list = []
table_data_list = []
dt = None
sunday = None
if 'from' not in data or data['from'] == '':
sunday = get_sunday2(None, None);
#og.info(' Default Sunday: %s ' % sunday)
dt = datetime.strptime(sunday,'%Y-%m-%d')
from_dt = dt + relativedelta(days=-7)
else:
dt = format_dt2(data['from'],False)
sunday = get_sunday2(dt, None);
from_dt = datetime.strptime(sunday,'%Y-%m-%d')
log.info('From: %s ' % from_dt)
week_list.append(from_dt.date())
dt = from_dt + relativedelta(days=-7)
self.context_dict['period'] = '( %02s/%02s ~ %02s/%02s )' % ( dt.month , (dt + relativedelta(days=-6)).day, from_dt.month, from_dt.day )
log.info( ' period: %s ' % self.context_dict['period'] )
week_list.append(dt.date())
log.info(week_list)
until_dt = from_dt + relativedelta(days=-28)
log.info('Until: %s ' % until_dt)
date_filter = Q(week__lte = from_dt, week__gte = until_dt)
data = self.get_backlog_list(date_filter,'asc')
total_assigned=0
total_needinfo =0
total_blocked =0
total_review_requested =0
total_backlog = 0
total_incoming = 0
total_incoming_partner = 0
total_so = 0
total_so_partner = 0
for week in week_list:
for item in data:
if week == item.week:
total_assigned = total_assigned + item.assigned
total_needinfo = total_needinfo + item.needinfo
total_blocked = total_blocked + item.blocked
total_review_requested = total_review_requested + item.review_requested
total_backlog = total_backlog + item.get_backlog()
total_incoming = total_incoming + item.incoming
total_incoming_partner = total_incoming_partner + item.incoming_partner
total_so = total_so + item.so
total_so_partner = total_so_partner + item.so_partner
table_data_list.append(item)
bi_weekly_total = {}
bi_weekly_total['assigned'] = total_assigned
bi_weekly_total['needinfo'] = total_needinfo
bi_weekly_total['blocked'] = total_blocked
bi_weekly_total['review_requested'] = total_review_requested
bi_weekly_total['total_backlog'] = total_backlog
bi_weekly_total['total_incoming'] = total_incoming
bi_weekly_total['total_incoming_partner'] = total_incoming_partner
bi_weekly_total['total_so'] = total_so
bi_weekly_total['total_so_partner'] = total_so_partner
log.info(len(table_data_list))
self.context_dict['bi_weekly_data_list'] = table_data_list
self.context_dict['graph_list'] = data
self.context_dict['biweekly_total'] = bi_weekly_total
self.context_dict['week_list'] = week_list
self.context_dict['total_list'] = self.get_total_backlog(date_filter, 'asc')
self.context_dict['backlog_list'] = data
log.info(len(self.context_dict['backlog_list']))
self.get_language_report(from_dt, until_dt)
self.context_dict['component_report'] = self.get_firebase_component_report(from_dt, until_dt)
self.context_dict['component_report_partner'] = self.get_firebase_component_partners_report(from_dt, until_dt)
self.context_dict['review_list'] = self.get_review_request_report(from_dt, until_dt)
log.info(self.context_dict['language_report_android'])
return render(request, self.template_name,self.context_dict)
def get_language_report(self, from_dt, until_dt):
date_filter = Q(week__lte = from_dt, week__gte = until_dt)
language_report_android = []
language_report_firebase = []
self.context_dict['language_total_report'] = self.get_language_total(date_filter, 'asc')
data = self.get_language_report_by_product(date_filter, 'asc')
for item in data:
if item.product_id == 1:
language_report_android.append(item)
if item.product_id == 2:
language_report_firebase.append(item)
self.context_dict['language_report_android'] = language_report_android
self.context_dict['language_report_firebase'] = language_report_firebase
log.info(language_report_android )
def get_firebase_component_report(self, from_dt, until_dt):
component_report = []
# Adjust until_dt one month to 2weeks
until_dt = from_dt + relativedelta(days=-7)
try:
sql = ''' SELECT count(*) AS case_count ,components.name FROM cases INNER JOIN components ON cases.component_id = components.id
WHERE week <= %s AND week >= %s AND cases.component_id IS NOT NULL AND cases.product_id = 2 GROUP BY components.id
ORDER BY case_count DESC'''
with connection.cursor() as cursor:
cursor.execute(sql, [from_dt, until_dt] )
component_report = cursor.fetchall()
log.info(component_report)
except Exception, ex:
log.exception("SQL Error Encountered in jd search. " + str(ex))
return component_report
def get_firebase_component_partners_report(self, from_dt, until_dt):
component_partners_report = []
# Adjust until_dt one month to 2weeks
until_dt = from_dt + relativedelta(days=-7)
try:
sql = ''' SELECT count(*) AS case_count ,components.name FROM cases INNER JOIN components ON cases.component_id = components.id
WHERE week <= %s AND week >= %s AND cases.component_id IS NOT NULL AND cases.product_id = 2 AND cases.partner_id IS NOT NULL
GROUP BY components.id
ORDER BY case_count DESC'''
with connection.cursor() as cursor:
cursor.execute(sql, [from_dt, until_dt] )
component_partners_report = cursor.fetchall()
log.info(component_report)
except Exception, ex:
log.exception("SQL Error Encountered in jd search. " + str(ex))
return component_partners_report
def get_review_request_report(self, from_dt, until_dt):
date_filter = Q(week__lte = from_dt, week__gte = until_dt)
review_list = []
try:
kwargs = {}
args = ()
review_list = ReviewRequestReport.objects.filter(date_filter, *args, **kwargs).order_by('-week')
except Exception, ex:
log.exception("SQL Error Encountered in jd search. " + str(ex))
return review_list | [
"noreply@github.com"
] | noreply@github.com |
3550302238ab612a294100eb4da88fd18255c303 | 9e1a5cbe2612301018761741a8b2fc8b298d3b4d | /main/view/comment.py | 1daaa8afa45a58f9fc8abd43b2c87f9feea8a0a4 | [
"MIT"
] | permissive | FlyAndNotDown/Blog | dcf4bf37f601b434560e3f6343a497c549d358d2 | 8abf504060e7c0d4942a6763c4e8f34c7ef903ce | refs/heads/master | 2018-12-19T00:54:42.814636 | 2018-09-15T04:50:53 | 2018-09-15T04:50:53 | 118,254,269 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | from main.models import Comment
class CommentPublishRequest:
    """Request object that publishes a new top-level comment.

    Instantiating this class immediately persists a Comment row; the
    instance itself carries no state afterwards.
    """

    def __init__(self, sender, post, context):
        """
        Args:
            sender: pk of the sending user.
            post: pk of the post being commented on.
            context: comment body text.
        """
        # Persist the new top-level comment (no parent, is_child=False).
        new_comment = Comment(
            sender=sender,
            post=post,
            is_child=False,
            context=context,
        )
        new_comment.save()
class CommentReplyRequest:
    """Request object that publishes a reply to an existing comment.

    Instantiating this class immediately persists a child Comment row; the
    instance itself carries no state afterwards.
    """

    def __init__(self, sender, receiver, post, parent, context):
        """
        Args:
            sender: pk of the sending user.
            receiver: pk of the user being replied to.
            post: pk of the post the thread belongs to.
            parent: pk of the parent comment.
            context: comment body text.
        """
        # Persist the reply as a child comment of `parent`.
        reply = Comment(
            sender=sender,
            receiver=receiver,
            post=post,
            is_child=True,
            parent=parent,
            context=context
        )
        reply.save()
| [
"461425614@qq.com"
] | 461425614@qq.com |
db5b4fb0b932d623194070822ee8ae6be653aed4 | 1f6a3165c02238e109c7cd6fd67aa17291b3418d | /create_stage_tables.py | c5bbb7487ad66620a0ad78071618984deb70ff3a | [] | no_license | avlam/TtA | 45dfacf6d74a9f30d06af46daa237ebef4df3232 | 9efe0db72db2554fea51f2d47a2ac7678be8a202 | refs/heads/main | 2023-08-11T23:16:43.180234 | 2021-09-19T14:29:29 | 2021-09-19T14:29:29 | 384,590,402 | 0 | 0 | null | 2021-09-17T19:07:58 | 2021-07-10T02:06:56 | Python | UTF-8 | Python | false | false | 6,574 | py | #!/usr/bin/env python
# coding: utf-8
# Setup
import pandas as pd
from pathlib import Path
import re
from parameters import locations, SPACING_CHAR
from journal_phrases import journal_phrases
output_dir = locations['staging']
game_list = list(locations['raw'].glob('*.csv'))
searches = {
'outcome': re.compile(r'(\w+) is (.*?) as (\w+) \((.*?)\)', re.IGNORECASE),
'game_name': re.compile(r'Game (?P<name>.*?) created.', re.IGNORECASE),
'points': re.compile(r'(?P<points>\d+)', re.IGNORECASE)
}
## Functions
def get_str_from_journal(target_file, *targets):
    """
    helper to extract specific key strings from game journal. (e.g. summary text, game title)
    returns a dictionary with *targets as keys (empty if the file is missing)
    """
    # Line offsets of the known key strings within a journal file.
    key_strings = {
        'results': 1,  # always first line in log after the headers for a completed game
        'creation': -1  # always last line in log IF log is complete
    }
    output = {}
    # BUG FIX: Path.exists is a method. The original `target_file.exists`
    # evaluated the bound method object, which is always truthy, so missing
    # files crashed inside open() instead of taking the warning branch.
    if target_file.exists():
        with open(target_file, 'r') as game_journal:
            lines = game_journal.readlines()
            for target in targets:
                output.update({target: lines[key_strings[target]]})
    else:
        print(f'{target_file} not found.')
    return output
def alias_player(username):
    """
    Map a scraped username to its canonical player name.

    Unknown usernames are reported on stdout and returned unchanged.
    future changes: move aliases to separate file and prompt for input
    when a new username is encountered.
    """
    known_aliases = {
        'david li': 'david',
        'li david': 'david',
        'micah yospe': 'micah',
        'teddy yeh': 'teddy',
        'x l': 'xan',
    }
    try:
        return known_aliases[username]
    except KeyError:
        print(f'new username found: {username}')
        return username
def parse_score(score_str):
    """
    Given a str score_str, try to extract the leading score digits.

    :return: the matched digit string, or None when score_str has no leading
             digits (or is not a string at all).
    """
    try:
        match = re.match(searches['points'], score_str)
    except TypeError:
        # e.g. score_str is None/not a string; the original bare ``except``
        # silently hid every failure mode — keep only this expected one.
        return None
    # re.match returns None when nothing matches; the original called
    # .group() on it and relied on the bare except to swallow the error.
    return match.group() if match else None
def parse_summary(game_file):
    """
    Parse the results line of *game_file* into per-player summary data.

    Calls get_str_from_journal() for the summary string, aliases the player
    names, normalises the scores, and returns the frame transposed so that
    players become columns.
    """
    results_line = get_str_from_journal(game_file, 'results')['results']
    outcomes = re.findall(searches['outcome'], results_line.lower())
    df = pd.DataFrame(outcomes)
    df = df.rename(columns={0: 'result', 1: 'name', 2: 'player', 3: 'score'})
    df = df.set_index('player')
    df['name'] = df['name'].apply(alias_player)
    df['score'] = df['score'].apply(parse_score)
    return df.transpose()
def generate_tables(game, *tables):
    """
    handler for stage table creation. reads game data based on Path object game and passes common information to each table generator.
    game: Path object to game journal file
    *tables: tables to generate
    """
    # need to add arguments to pass through mode and save options
    # NOTE(review): this function looks unfinished — ``tables`` is never
    # iterated, ``game_data``/``game_id`` are computed but unused, and nothing
    # is returned. Presumably it should dispatch to the table generators
    # (e.g. table_games / parse_journal) — confirm intent before relying on it.
    game_data = pd.read_csv(game, index_col=0)
    game_id = game.stem
def table_games(game, mode='add', save=False):
    """
    creates stage table 'games' or adds game to existing stage table 'games'
    returns dataframe of data
    game: Path object to game journal
    mode: ['add', 'create']
    save: bool - determines if resulting dataframe is saved to file
    """
    GAMES_FILENAME = 'games.csv'
    GAMES_PATH = locations['staging'].joinpath(GAMES_FILENAME)
    if mode == 'create':
        games = pd.DataFrame(columns=['game_name', 'num_turns', 'start_date', 'end_date'])
    elif mode == 'add':
        if GAMES_PATH.exists():
            games = pd.read_csv(GAMES_PATH, index_col=0)
        else:
            # BUG FIX: the original did ``raise(f'...')``, which raises a plain
            # str — itself a TypeError in Python 3. Raise a real exception.
            raise FileNotFoundError(f"{GAMES_FILENAME} does not exist. Use mode='create'")
    else:
        raise ValueError(f'mode {mode} not found. Must be either "add" or "create"')
    game_id = game.stem
    game_data = pd.read_csv(game, index_col=0)
    find_name = re.search(searches['game_name'], get_str_from_journal(game, 'creation')['creation'])
    if find_name:
        game_name = find_name.group('name')
    else:
        game_name = ''
    summary = {
        'game_name': game_name,
        # offset by one to account for post-game scoring listed as a turn in journal
        'num_turns': game_data['round'].max() - 1,
        'end_date': game_data['time'].max(),
        'start_date': game_data['time'].min()
    }
    games = games.append(pd.DataFrame(summary, index=[game_id]))
    if save:
        games.to_csv(GAMES_PATH)
    return games
def parse_journal(game, save=False):
    """
    Create set of stage tables parsing each journal entry phrase defined in dict journal_phrases
    Input is path to individual game file
    returns a dict of dfs representing each generated table.
    """
    journal = pd.read_csv(game, index_col=0)
    journal['game_id'] = game.stem
    output = {}
    # One stage table (and one csv file) per known journal phrase template.
    for phrase, template in journal_phrases.items():
        # print(f'parsing {phrase}')
        file = f'{phrase}.csv'
        filepath = locations['staging'].joinpath(file)
        search = re.compile(template, re.IGNORECASE)
        # Match every journal line against this phrase; non-matching rows are
        # None and dropped, leaving re.Match objects indexed by journal row.
        matches = journal['text'].apply(lambda logentry: re.match(search, logentry))
        matches.dropna(inplace=True)
        # Expand named capture groups into columns, keeping the journal index
        # so the join below aligns row-for-row with the source entries.
        parsed_df = pd.DataFrame(matches.apply(lambda x: x.groupdict()).to_list(), index=matches.index)
        parsed_df = parsed_df.join(journal[['time','age','round','game_id','text']])
        # Accumulate onto previously staged data for this phrase, if any.
        if filepath.exists():
            existing_data = pd.read_csv(filepath, index_col=0)
            parsed_df = existing_data.append(parsed_df)
        parsed_df.reset_index(drop=True, inplace=True)
        output[phrase] = parsed_df
        if save:
            parsed_df.to_csv(filepath)
    return output
# Generate Tables
players = pd.DataFrame()
scores = pd.DataFrame()
for game in game_list:
    game_data = pd.read_csv(game, index_col=0)
    game_id = game.stem
    parse_journal(game, save=True)
    summary_df = parse_summary(game)
    summary_df = summary_df.transpose().reset_index()
    summary_df['game_id'] = game_id
    players = players.append(summary_df.loc[:,['game_id','player','name']])
    # BUG FIX: the original did ``scores = players.append(summary_df)``, so
    # ``scores`` was rebuilt from the player table each pass and only ever
    # contained the LAST game's summary. Accumulate on ``scores`` itself.
    scores = scores.append(summary_df)
# Store Tables
players.reset_index().to_csv(output_dir.joinpath('players.csv'))
scores.reset_index().to_csv(output_dir.joinpath('scores.csv'))
| [
"14320735+avlam@users.noreply.github.com"
] | 14320735+avlam@users.noreply.github.com |
8a1230827541d821262fb3f1280ea53c87565736 | 8c618e16b15ad33a6ab6dcc4e0511e7a3acba094 | /remcall/schema/__init__.py | 8eb3a63f088ff55fb93ab052c031ca2d24a80f9d | [
"MIT"
] | permissive | luphord/remcall | 0bef9bbf13be697645f7b93fbd9a5e3ee9afd97b | 31419ff0f5c21ea2d90f9cabdaec85b6eebcaa12 | refs/heads/trunk | 2021-12-25T23:44:39.888706 | 2021-12-03T08:15:58 | 2021-12-03T08:15:58 | 165,920,464 | 0 | 0 | MIT | 2021-12-03T08:15:59 | 2019-01-15T20:42:12 | Python | UTF-8 | Python | false | false | 634 | py | from .core import Type, Interface, Enum, Record, Primitive, Method, \
string, int8, int16, int32, int64, uint8, uint16, \
uint32, uint64, float32, float64, void, boolean, \
date, datetime, time, primitive_types, Array, Schema
from .base import assert_name
__all__ = ['Type', 'Interface', 'Enum', 'Record', 'Primitive', 'Method',
'string', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16',
'uint32', 'uint64', 'float32', 'float64', 'void', 'boolean',
'date', 'datetime', 'time', 'assert_name', 'primitive_types',
'Array', 'Schema']
| [
"luphord@protonmail.com"
] | luphord@protonmail.com |
53789cb282c13c39b41a5a7f45aec84fa89e69b1 | fa8e268778bc6a86eb62dbc6460714a052c286fc | /leetcode/two_sum.py | b5ff1942444b84d783ee8fddd7fa8c16fa64836a | [] | no_license | Cardenaz/codegym | f4da4b1d95611cb8ac80d23f2779f6e468f8d466 | cf6718c0dcd59f435b666c6f31a9633b6172121e | refs/heads/master | 2023-02-18T23:48:39.371988 | 2021-01-19T10:20:38 | 2021-01-19T10:20:38 | 325,789,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | def twoSum(nums, target):
pointers = []
numsIdx = 0
numsNextIdx = 1
while numsIdx < len(nums) -1:
if nums[numsIdx] + nums[numsNextIdx] == target:
pointers.append(numsIdx)
pointers.append(numsNextIdx)
numsNextIdx += 1
else:
numsNextIdx +=1
| [
"william@cardenas.se"
] | william@cardenas.se |
fef0f186e3b388ef8dbb58d698766de6b8a4cbb0 | dee9432b12b8d5667ba3f58889344f89a032229d | /food/robots.py | 62e74a1df46393c50327b29f48029c5a8199bdf9 | [] | no_license | rolllyroman/lucas | a39743d697483f962617428bc61bfc053e9b4095 | e219ed3fc69ad36132ac4361c1766b279269323c | refs/heads/master | 2020-04-16T06:48:55.329438 | 2019-01-24T06:20:44 | 2019-01-24T06:20:44 | 150,229,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,006 | py | #coding:utf-8
import requests
import time
from lxml import etree
import json
# import MySQLdb
import pymysql
import random
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
from constant import USER_AGENT
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
chromeOptions = webdriver.ChromeOptions()
# 设置代理
chromeOptions.add_argument("--proxy-server=http://112.85.167.11:9999")
# 一定要注意,=两边不能有空格,不能是这样--proxy-server = http://202.20.16.82:10152
driver = webdriver.Chrome(chrome_options = chromeOptions)
# 设置无头
# chrome_options = Options()
# chrome_options.add_argument('--headless')
# driver = webdriver.Chrome(chrome_options=chrome_options)
# driver = webdriver.Chrome()
HEADERS = {'Accept': 'text/html, application/xhtml+xml, image/jxr, */*',
'Accept-Language':'zh-Hans-CN, zh-Hans; q=0.5',
'Connection':'Keep-Alive',
# 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063'}
'User-Agent':random.choice(USER_AGENT),
}
BASIC_URL = "https://weixin.sogou.com/weixin?query=%s&_sug_type_=&s_from=input&_sug_=n&type=1&page=%s&ie=utf8"
conn = pymysql.connect(host="119.23.52.3",user="root",passwd="168mysql",db="haha",charset="utf8")
conn.autocommit(1) # conn.autocommit(True)
cursor = conn.cursor()
proxies_queue = []
# def put_proxy_queue():
# url = "https://proxyapi.mimvp.com/api/fetchsecret.php?orderid=862060912114100297&num=5&http_type=3&result_fields=1,2,3"
# resp = requests.get(url)
# content = resp.content
# datas = content.split('\r\n')
# for data in datas:
# http_ip = data.split(',')[0]
# https_ip = http_ip.split(":")[0] + data.split(',')[-1]
# proxies = {
# "http":http_ip,
# "https":https_ip,
# }
# try:
# print "测试结果:%s"%requests.get("http://www.baidu.com",proxies=proxies)
# except:
# print "失败proxies:%s"%proxies
# else:
# proxies_queue.append(proxies)
# print "now proxies_queue:%s"%proxies_queue
# def get_proxies():
# print "now proxies_queue:%s"%proxies_queue
# if len(proxies_queue) < 20:
# for i in range(1,6):
# print "wait for put proxy... %s"%i
# time.sleep(1)
# put_proxy_queue()
# res = random.choice(proxies_queue)
# try:
# requests.get("http://www.baidu.com",proxies=res)
# except:
# proxies_queue.remove(res)
# return get_proxies()
# else:
# return res
def if_list_code(weixins,detail_srcs):
if len(weixins) == 1:
code = raw_input("请输入验证码:")
code_label = driver.find_element_by_name("c")
code_label.send_keys(" ") # 防止发送不成功
code_label.clear()
code_label.send_keys(code)
submit_label = driver.find_element_by_id("submit")
submit_label.click()
time.sleep(1)
content = driver.page_source.encode("utf-8")
html = etree.HTML(content)
weixins = html.xpath("//label/text()")
detail_srcs = html.xpath("//li//div/p[@class='tit']/a/@href")
print "weixins:%s"%weixins
if len(weixins) == 1:
return if_list_code(weixins,detail_srcs)
return weixins,detail_srcs
def search_list(word):
print "search_list:%s"%word
for i in range(1,11):
url = BASIC_URL%(word,i)
# resp = requests.get(url,headers=HEADERS)
driver.get(url)
time.sleep(1)
content = driver.page_source.encode("utf-8")
html = etree.HTML(content)
# print resp.content.decode()
# print "============="
# print url
# print "============="
# print resp.status_code
weixins = html.xpath("//label/text()")
detail_srcs = html.xpath("//li//div/p[@class='tit']/a/@href")
weixins,detail_srcs = if_list_code(weixins,detail_srcs)
if not weixins:
break
deal_detail(weixins,detail_srcs)
def get_words():
    """Fetch the Baidu 'hundred family surnames' page and return the set of
    every distinct character appearing in the response body."""
    url = "https://hanyu.baidu.com/s?wd=%E7%99%BE%E5%AE%B6%E5%A7%93&from=poem"
    resp = requests.get(url,headers=HEADERS)
    resp.encoding = "utf-8"
    # Iterating a string yields its characters; set() deduplicates them.
    return set(resp.text)
def main():
    # Crawl WeChat accounts for every character from the surname list,
    # skipping characters already recorded in the ``got_word`` table.
    # NOTE: this file is Python 2 (print statements, raw_input elsewhere).
    print "main start..."
    words = get_words()
    for w in words:
        # Skip characters searched in a previous run.
        sql = "select word from got_word where word = %s"
        cursor.execute(sql,(w,))
        if cursor.fetchone():
            print "%s 已搜过,跳过..."%w
            continue
        print "开始搜索:%s"%w
        search_list(w)
        # Record the character so the next run skips it.
        sql = "insert into got_word(word) values(%s)"
        cursor.execute(sql,(w,))
def if_detail_code(heads,names):
# 弹出详情验证码
if not names:
code = raw_input("请输入验证码:")
code_label = driver.find_element_by_id("input")
code_label.send_keys(" ") # 防止发送不成功
code_label.clear()
code_label.send_keys(code)
submit_label = driver.find_element_by_id("bt")
submit_label.click()
time.sleep(1)
content = driver.page_source.encode("utf-8")
html = etree.HTML(content)
heads = html.xpath("//div//span/img/@src")
names = html.xpath("//strong/text()")
if not names:
return if_detail_code(heads,names)
return heads,names
def deal_detail(weixins,detail_srcs):
print "deal_detail start..."
for i,weixin in enumerate(weixins):
sql = "select weixin from robot where weixin = %s"
cursor.execute(sql,(weixin,))
res = cursor.fetchone()
if res:
continue
src = detail_srcs[i]
# 详情名字和头像
# resp = requests.get(src,headers=HEADERS)
# html = etree.HTML(resp.content)
driver.get(src)
content = driver.page_source.encode("utf-8")
html = etree.HTML(content)
heads = html.xpath("//div//span/img/@src")
names = html.xpath("//strong/text()")
heads,names = if_detail_code(heads,names)
head = heads[0].replace("http","https")
name = names[0].strip()
sql = "insert into robot(weixin,name,head) values(%s,%s,%s)"
cursor.execute(sql,(weixin,name,head))
print weixin,name,head,"ok!"
time.sleep(1)
# def test2():
# url = "https://weixin.sogou.com/weixin?query=%E6%9D%8E&_sug_type_=&s_from=input&_sug_=n&type=1&page=222&ie=utf8"
# resp = requests.get(url,headers=HEADERS)
# html = etree.HTML(resp.content)
# weixins = html.xpath("//label/text()")
# print "==========================="
# print weixins
# print "==========================="
if __name__ == "__main__":
main()
cursor.close()
conn.close()
driver.close()
| [
"1983654762@qq.com"
] | 1983654762@qq.com |
78191fe526f5ea9adcdf791fd3a27433f46ea781 | c1dd4501e134e3cfef5b7dc82d0e022e3ec7e9b6 | /project/celery.py | 616e6f5f026f581e9deffe767328d2b21c6846bc | [] | no_license | artemmj/set_up_jwt_django | a189cd1d59eac5fe9d02772284f794480f7525f7 | 8ba80f83b8516e5a2226e005ec22a821997c319f | refs/heads/master | 2023-04-25T01:22:41.274238 | 2021-04-28T11:47:50 | 2021-04-28T11:47:50 | 362,447,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | from __future__ import absolute_import, unicode_literals
import os
from django.conf import settings
from celery import Celery
# Point Celery at the Django settings module before the app is created,
# unless the environment already provides one.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings.common')
# Celery application: configured from Django settings and set to
# auto-discover tasks.py modules in every installed app.
app = Celery('config')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| [
"webep4@gmail.com"
] | webep4@gmail.com |
9dcc562873902522127abfd68618df28662b37b9 | ca3cb1e721d8efab78f099f0d0b52e5248e3a89f | /merge_img.py | efcc54d3578397c7dea3f8d4f79b088c8f3dd5cb | [] | no_license | CyrusVorwald2/dashcam-poc | 6ac669700f4891fbab65fe10e6c924a1218d68f4 | 41675aeb2a8e3f59a0afad7e766f2cdbd50f4ec1 | refs/heads/master | 2022-12-06T06:30:19.405527 | 2015-12-18T16:14:15 | 2015-12-18T16:14:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | import cv2
# Blend two images (70% / 30%) and display the result.
# NOTE(review): cv2.addWeighted requires both images to have identical
# dimensions and channel count — the commented shape checks below suggest
# this was being debugged; confirm the inputs match before relying on it.
img1 = cv2.imread('e_profile.jpg')
img2 = cv2.imread('e_caption.png')
# img1 = cv2.imread('img1.png')
# img2 = cv2.imread('logo.png')
# height, width, depth = img1.shape
# print height, width, depth
#
# height, width, depth = img2.shape
# print height, width, depth
dst = cv2.addWeighted(img1,0.7,img2,0.3,0)
cv2.imshow('dst',dst)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"Eugene.Chung@mezocliq.com"
] | Eugene.Chung@mezocliq.com |
62a21b6c0ce11b03bd02fca8f1dd74e4aa7d8bc2 | 94c18fb640dbd4108a69f0446af0ad85db05aa9a | /python_quizzup/pyquiz/templatetags/sub.py | 48e3a9c6a5f119584e43809eadf070b43f045b25 | [
"MIT"
] | permissive | viveksoundrapandi/iamvivek | 4ecec6595c93e1dc664255fcea0bfe74b047825f | ad4a1b2e55302bfb5c55bdf73c5480536b0dcd91 | refs/heads/master | 2022-03-20T18:36:19.887110 | 2019-10-19T12:41:39 | 2019-10-19T12:41:39 | 107,754,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | from django import template
register = template.Library()
@register.filter
def sub(value, arg):
    # Template filter: subtract ``arg`` from ``value`` (both coerced to int).
    # Usage in templates: {{ value|sub:arg }}.
    return int(value) - int(arg)
| [
"vivekhas3@gmail.com"
] | vivekhas3@gmail.com |
7dd54bed4c22108fdd325ab8efa1459e4fdd1d11 | a47192d5abd5f34f63b2c0e27b954ae07de47302 | /day20/range.py | d17de1cba89cc621b63647419a191c9a16be7aa0 | [] | no_license | Godsmith/adventofcode2016 | 46639af6e015f0a024cde32ba0a1f98268899f4f | e036fb68bb53b9c79aa143b6c4645db218f77862 | refs/heads/master | 2020-06-15T04:21:21.012830 | 2017-01-10T21:52:30 | 2017-01-10T21:52:30 | 75,330,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | class Range:
    def __init__(self, low, high):
        # Inclusive interval endpoints: the range covers low..high.
        self.low = low
        self.high = high
def __repr__(self):
return 'Range<%s-%s>' % (self.low, self.high)
def __hash__(self):
return hash(tuple([self.low, self.high]))
def __eq__(self, other):
return self.low == other.low and self.high == other.high
@classmethod
def combine(cls, ranges):
lowest = min([r.low for r in ranges])
highest = max([r.high for r in ranges])
return cls(lowest, highest)
def can_be_combined(self, range_):
return not (self.high < range_.low - 1 or self.low > range_.high + 1)
| [
"filip.lange@gmail.com"
] | filip.lange@gmail.com |
17204f9e5d9bc658b029fb4341dd4d71a9ad058a | 2544d05926c8cdfa28d5104ad566e49a36ebeb0c | /Plots/stackedbarchartOlympic.py | 341b49eb9c752e30629c42d3f56bd256c43e2311 | [] | no_license | Gingerhouse/ITSC_3155_VisualizationLab | 002946cda06bb9e4289be2a6eca0b1a01c1e06cf | 8efd15277b0ff4cdf5d4423c652fe6fc6649d662 | refs/heads/master | 2023-03-29T18:23:20.920831 | 2021-03-31T00:37:55 | 2021-03-31T00:37:55 | 350,495,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | import pandas as pd
import plotly.offline as pyo
import plotly.graph_objs as go
# Load CSV file from Datasets folder
df = pd.read_csv('../Datasets/Olympic2016Rio.csv')
# Removing empty spaces to avoid errors
df = df.apply(lambda x: x.str.strip() if x.dtype == "object" else x)
# Creating sum of number of cases group by Country Column
new_df = df.groupby(['NOC']).agg(
{'Total': 'sum', 'Gold': 'sum', 'Silver': 'sum', 'Bronze': 'sum'}).reset_index()
# Sorting values and select 20 first value
new_df = new_df.sort_values(by=['Total'], ascending=[False]).head(20).reset_index()
# Preparing data
trace1 = go.Bar(x=new_df['NOC'], y=new_df['Gold'], name='Gold', marker={'color': '#FFD700'})
trace2 = go.Bar(x=new_df['NOC'], y=new_df['Silver'], name='Silver', marker={'color': '#9EA0A1'})
trace3 = go.Bar(x=new_df['NOC'], y=new_df['Bronze'], name='Bronze', marker={'color': '#CD7F32'})
data = [trace1, trace2, trace3]
# Preparing layout
layout = go.Layout(title='Total Medals of the Olympics 2016 of Top 20 Countries', xaxis_title="Country",
yaxis_title="Medals", barmode='stack')
# Plot the figure and saving in a html file
fig = go.Figure(data=data, layout=layout)
pyo.plot(fig, filename='stackedbarchartOlympic.html') | [
"abelvillape@gmail.com"
] | abelvillape@gmail.com |
cc748c6aadec1a2627e7132cfd476d19c690933c | f7127398e6bc60cdece53014dfebb58aa99c0fbd | /aiogram_dialog/widgets/kbd/checkbox.py | b6a4e010a29614fdc9277b51a146f248f8d6f885 | [] | no_license | drforse/aiogram_dialog | 25fcae2579e9b37c43a41303232d009e04316c6a | 984496ee7818d7896235d20f30bb662f56293385 | refs/heads/master | 2023-02-28T21:39:53.331894 | 2021-02-05T05:50:15 | 2021-02-05T05:50:15 | 336,158,550 | 0 | 0 | null | 2021-02-05T03:58:44 | 2021-02-05T03:58:43 | null | UTF-8 | Python | false | false | 1,300 | py | from typing import Callable, Optional, Union, Dict, Awaitable
from aiogram.types import CallbackQuery
from aiogram_dialog.manager.manager import DialogManager
from aiogram_dialog.widgets.text import Text, Case
from .button import Button
OnStateChanged = Callable[[CallbackQuery, "Checkbox", DialogManager], Awaitable]
class Checkbox(Button):
    """Two-state toggle button: renders ``checked_text`` or ``unchecked_text``
    based on a boolean stored in the dialog context under ``widget_id``."""
    def __init__(self, checked_text: Text, unchecked_text: Text,
                 id: str,
                 on_state_changed: Optional[OnStateChanged] = None,
                 when: Union[str, Callable] = None):
        # The rendered text is selected at render time from the current state.
        text = Case({True: checked_text, False: unchecked_text}, selector=self._is_text_checked)
        super().__init__(text, id, self._on_click, when)
        self.on_state_changed = on_state_changed
    async def _on_click(self, c: CallbackQuery, button: Button, manager: DialogManager):
        # Flip the stored state, then notify the optional callback.
        manager.context.set_data(self.widget_id, not self.is_checked(manager), internal=True)
        if self.on_state_changed:
            await self.on_state_changed(c, self, manager)
    def _is_text_checked(self, data: Dict, case: Case, manager: DialogManager) -> bool:
        # Selector used by the Case text above.
        return self.is_checked(manager)
    def is_checked(self, manager: DialogManager) -> bool:
        # Defaults to False when no state has been stored yet.
        return manager.context.data(self.widget_id, False, internal=True)
| [
"tishka17@mail.ru"
] | tishka17@mail.ru |
911fa601ad7bdf4df8cdd0b0452942bd3b675e77 | 38697ac1686dc523dc03e74c0b526847a7724742 | /webapp/flota/admin.py | ba2f57c5d5a06b0f3fe453967abe14fe83697c44 | [] | no_license | giovannyc28/flota | 94edaf03090d720016e662874399c2e310751b0a | 7b757c0fca3e6ead7eb9ce40460b8ac8f34039a4 | refs/heads/master | 2020-03-28T10:03:55.397326 | 2018-09-10T06:51:54 | 2018-09-10T06:51:54 | 148,080,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | from django.contrib import admin
from .models import Vehiculos, Propietarios
# Inline editor so an owner's vehicles can be managed on the owner's page.
class VehiculosInline(admin.TabularInline):
    model = Vehiculos
    extra = 0
    fields = ["placa", "tipo_vehiculo", "marca", "modelo", "cilindraje", "tipo_servicio"]
# Admin for owners, with their vehicles editable inline.
class PropietariosAdmin(admin.ModelAdmin):
    inlines = [VehiculosInline]
    list_display = ('nuip', 'nombres', 'apellidos')
    search_fields = ['nuip', 'nombres', 'apellidos']
# Admin for vehicles; searchable by plate, make and owner name.
class VehiculosAdmin(admin.ModelAdmin):
    list_display = ('placa', 'tipo_vehiculo', 'marca', 'modelo', 'cilindraje', 'tipo_servicio', 'propietario')
    search_fields = ['placa', 'marca', 'propietario__nombres', 'propietario__apellidos']
    list_filter = ['tipo_vehiculo', 'marca', 'modelo']
admin.site.register(Propietarios, PropietariosAdmin)
admin.site.register(Vehiculos, VehiculosAdmin)
| [
"giovanny@localhost.localdomain"
] | giovanny@localhost.localdomain |
f9b20190b7c9f4fd3c83e3e9cc298a4768dcfc86 | 50da3cfb8cc7c0acc8c4348a5647651e08e65fe9 | /doc-dcm/SAMPLEZ/albertca-NanScan-cd0decd/NanScan/__init__.py | a8ddb19c8cc8944603201128586619ba59887123 | [] | no_license | hpcgam/dicomimport | f8692520e6b4a7c76efed39acc831541ddf6d502 | 1f265b1a5c9e631a536333633893ab525da87f16 | refs/heads/master | 2021-03-12T19:57:51.078509 | 2011-06-08T05:25:01 | 2011-06-08T05:25:01 | 1,189,259 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | # Copyright (C) 2008-2009 by Albert Cervera i Areny
# albert@nan-tic.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
| [
"hpcgam@gmail.com"
] | hpcgam@gmail.com |
891bc1be4c906960cbcbcdf7103c5f48108c1a41 | 848ddb87d475c2ef65f1221a8c0bf0fa71dd1dfd | /maindnc (copy).py | 03b06c86c41ba56b28d7c6d0055f58c628edfaee | [] | no_license | XSMUBC/Lifelong-learning_xsm | 03d404b21e597c4a085b44e964e7a479ec28993b | b74d6d841b1f590616f85e442463c20868855441 | refs/heads/master | 2022-04-17T12:42:06.597115 | 2020-04-18T19:26:10 | 2020-04-18T19:26:10 | 256,835,323 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,920 | py | import torch
import tensorflow as tf
import numpy as np
import statistics
from torch.nn import functional as F
import torch.distributions as tdist
import visual_visdom
import visual_plt
import utils
import matplotlib.pyplot as plt
#########################################################
## maindnc xsm code ##
#########################################################
def maindnc(self, size, batch_index,z0,task,tasks,t_label):
'''
if list(z0.size())[0]!=0:
#estimation of the mean and variance
zx=z0
mean=(zx.mean(dim=1)).mean(dim=0)
var=(zx.std(dim=1)).mean(dim=0)
#print('xsm mean',mean)
#print('xsm xsm var',var)
else:
#estimate in begining
mean=0
var=1.6
'''
mean=0
var=1.6
n = tdist.Normal(mean, var)
z1 =n.sample((size, self.z_dim)).to(self._device())
t_label =n.sample((size, self.z_dim)).to(t_label)
if (task<=round((tasks+1)/2)):
z2=torch.cat((z0,z1,z1), 0)
else:
z2=torch.cat((z0,z1), 0)
dl=64
m=int(list(z1.size())[0]/dl)
n=int(list(z0.size())[0]/dl)
if list(z0.size())[0]!=0:
for i in range(m):
rows1 =z1[i*dl:i*dl+dl,:]
tensor_similarity=0
for j in range(n):
rows2 = z0[j*dl:j*dl+dl,:]
x = rows1
y = rows2
cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
tensor_similarity+=torch.sum(cos(x, y))
if (tensor_similarity<0):
z2=torch.cat((z2,torch.reshape(rows1, (dl, 100))), 0)
image_tensor=z1
print('xsm xsm xsm xsm z2',z2[:,:(-1)])
plt.imsave('./plots/save.png', image_tensor.numpy() , cmap='gray')
if batch_index==2000:
torch.save(z2, 'dnc.pt')
return z2,t_label
| [
"noreply@github.com"
] | noreply@github.com |
b783d5bf51d4bb8dd0b44dab30f43382f53dfeb2 | bb9ab2b88c990377e58fd2b719a60f2e4a4689ce | /est-sfs/01_vcf_to_estsfs.py | 8304ae2a0ee0641c79e0ee2e8fe764171fc6c5b3 | [] | no_license | silvewheat/biocal-cli | 7ded0e05c134c932a7dd45130c546cd607b443b9 | 134a0bf4f0d318de50a92a1e72d18c13580e64e2 | refs/heads/master | 2022-12-11T21:04:25.240272 | 2022-11-28T02:40:02 | 2022-11-28T02:40:02 | 147,090,111 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,551 | py | # -*- coding: utf-8 -*-
"""
Created on 2022 10-14
@author: Yudongcai
@Email: yudong_cai@163.com
"""
import re
import typer
import numpy as np
from cyvcf2 import VCF
from collections import Counter, defaultdict
def convert_gts(gt_bases):
    """Flatten diploid genotype strings (e.g. 'A/T' or 'C|G') into a flat
    list of allele characters, preserving input order."""
    separator = re.compile(r'[/|]')
    alleles = []
    for genotype in gt_bases:
        alleles += separator.split(genotype)
    return alleles
def main(vcffile: str = typer.Argument(..., help="input vcf file"),
focalsamples: str = typer.Argument(..., help="sample list for focal samples"),
outgroup1: str = typer.Argument(..., help="sample list for outgroup1"),
outgroup2: str = typer.Argument(..., help="sample list for outgroup2"),
outgroup3: str = typer.Argument(..., help="sample list for outgroup3"),
outprefix: str = typer.Argument(..., help="output prefix")):
focal_samples = [x.strip() for x in open(focalsamples)]
outgroup1_samples = [x.strip() for x in open(outgroup1)]
outgroup2_samples = [x.strip() for x in open(outgroup2)]
outgroup3_samples = [x.strip() for x in open(outgroup3)]
samples = focal_samples + outgroup1_samples + outgroup2_samples + outgroup3_samples
print(f'focal samples: {len(focal_samples)}\noutgroup1: {len(outgroup1_samples)}\noutgroup2: {len(outgroup2_samples)}\noutgroup3: {len(outgroup3_samples)}')
with open(f'{outprefix}_siteInfo.tsv', 'w') as f1, open(f'{outprefix}_datafile', 'w') as f2:
base2index = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
f1.write('CHROM\tPOS\tREF\tALT\tmajorAllele\tminorAllele\n')
vcf = VCF(vcffile, gts012=True, samples=samples)
focal_selection = [True if x in focal_samples else False for x in vcf.samples]
outgroup1_selection = [True if x in outgroup1_samples else False for x in vcf.samples]
outgroup2_selection = [True if x in outgroup2_samples else False for x in vcf.samples]
outgroup3_selection = [True if x in outgroup3_samples else False for x in vcf.samples]
outgroup_selections = (outgroup1_selection, outgroup2_selection, outgroup3_selection)
for variant in vcf:
alleles = [variant.REF] + variant.ALT
f1.write(f'{variant.CHROM}\t{variant.POS}\t{variant.REF}\t' + ','.join(variant.ALT) + '\t')
counter_gts_focal = Counter(convert_gts(variant.gt_bases[focal_selection]))
major_allele = counter_gts_focal.most_common()[0][0]
try:
minor_allele = counter_gts_focal.most_common()[1][0]
except IndexError:
minor_allele = list(set(alleles) - set(major_allele))[0]
f1.write(f'{major_allele}\t{minor_allele}\n')
f2.write(f"{counter_gts_focal.get('A', 0)},{counter_gts_focal.get('C', 0)},{counter_gts_focal.get('G', 0)},{counter_gts_focal.get('T', 0)}")
for selection in outgroup_selections:
counts = ['0', '0', '0', '0'] # A C G T
counter_gts = Counter(convert_gts(variant.gt_bases[selection])).most_common()
first_base, first_count = counter_gts[0]
try:
second_base, second_count = counter_gts[1]
except IndexError:
second_count = 0
# 两种allele数量相等时按缺失处理
if (first_count > second_count) and (first_base != '.'):
counts[base2index[first_base]] = '1'
f2.write('\t'+','.join(counts))
f2.write('\n')
if __name__ == '__main__':
typer.run(main) | [
"silverwheat@163.com"
] | silverwheat@163.com |
b32a8968f5ecf5ed069b54be4452c5c54f3954c0 | a35f464d49dd892f26acffa22ade7d4c622ddf5f | /wheat/wallet/did_wallet/did_wallet_puzzles.py | 50e6ea91f658c1be3b934b753164a0cee89b7312 | [
"Apache-2.0"
] | permissive | Jsewill/wheat-blockchain | 7b2cffd97cf5fabc68c7b0fd7c34ec21640f1fa5 | 75e412edd095ed1e51b3b70f6238dda7241f6456 | refs/heads/main | 2023-06-20T05:56:00.842418 | 2021-07-13T23:51:39 | 2021-07-13T23:51:39 | 385,761,992 | 0 | 0 | Apache-2.0 | 2021-07-13T23:44:33 | 2021-07-13T23:44:32 | null | UTF-8 | Python | false | false | 3,484 | py | from clvm_tools import binutils
from wheat.types.blockchain_format.sized_bytes import bytes32
from wheat.types.blockchain_format.program import Program
from typing import List, Optional, Tuple
from blspy import G1Element
from wheat.types.blockchain_format.coin import Coin
from wheat.types.coin_solution import CoinSolution
from wheat.util.ints import uint64
from wheat.wallet.puzzles.load_clvm import load_clvm
from wheat.types.condition_opcodes import ConditionOpcode
SINGLETON_TOP_LAYER_MOD = load_clvm("singleton_top_layer.clvm")
LAUNCHER_PUZZLE = load_clvm("singleton_launcher.clvm")
DID_INNERPUZ_MOD = load_clvm("did_innerpuz.clvm")
SINGLETON_LAUNCHER = load_clvm("singleton_launcher.clvm")
def create_innerpuz(pubkey: bytes, identities: List[bytes], num_of_backup_ids_needed: uint64) -> Program:
backup_ids_hash = Program(Program.to(identities)).get_tree_hash()
# MOD_HASH MY_PUBKEY RECOVERY_DID_LIST_HASH NUM_VERIFICATIONS_REQUIRED
return DID_INNERPUZ_MOD.curry(pubkey, backup_ids_hash, num_of_backup_ids_needed)
def create_fullpuz(innerpuz, genesis_id) -> Program:
mod_hash = SINGLETON_TOP_LAYER_MOD.get_tree_hash()
return SINGLETON_TOP_LAYER_MOD.curry(mod_hash, genesis_id, LAUNCHER_PUZZLE.get_tree_hash(), innerpuz)
def get_pubkey_from_innerpuz(innerpuz: Program) -> G1Element:
ret = uncurry_innerpuz(innerpuz)
if ret is not None:
pubkey_program = ret[0]
else:
raise ValueError("Unable to extract pubkey")
pubkey = G1Element.from_bytes(pubkey_program.as_atom())
return pubkey
def is_did_innerpuz(inner_f: Program):
"""
You may want to generalize this if different `CC_MOD` templates are supported.
"""
return inner_f == DID_INNERPUZ_MOD
def is_did_core(inner_f: Program):
return inner_f == SINGLETON_TOP_LAYER_MOD
def uncurry_innerpuz(puzzle: Program) -> Optional[Tuple[Program, Program]]:
    """
    Take a puzzle and return `None` if it is not a DID inner puzzle, or the
    pair `(pubkey, recovery_did_list_hash)` of its curried arguments if it is.

    NOTE(review): the previous docstring described a `CC_MOD` triple, which
    does not match this code — it checks DID_INNERPUZ_MOD and returns a pair.
    """
    r = puzzle.uncurry()
    if r is None:
        return r
    inner_f, args = r
    if not is_did_innerpuz(inner_f):
        return None
    # num_of_backup_ids_needed is unpacked but intentionally not returned.
    pubkey, id_list, num_of_backup_ids_needed = list(args.as_iter())
    return pubkey, id_list
def get_innerpuzzle_from_puzzle(puzzle: Program) -> Optional[Program]:
r = puzzle.uncurry()
if r is None:
return None
inner_f, args = r
if not is_did_core(inner_f):
return None
mod_hash, genesis_id, inner_puzzle = list(args.as_iter())
return inner_puzzle
def create_recovery_message_puzzle(recovering_coin_id: bytes32, newpuz: bytes32, pubkey: G1Element):
puzstring = f"(q . ((0x{ConditionOpcode.CREATE_COIN_ANNOUNCEMENT.hex()} 0x{recovering_coin_id.hex()}) (0x{ConditionOpcode.AGG_SIG_UNSAFE.hex()} 0x{bytes(pubkey).hex()} 0x{newpuz.hex()})))" # noqa
puz = binutils.assemble(puzstring)
return Program.to(puz)
def create_spend_for_message(parent_of_message, recovering_coin, newpuz, pubkey):
puzzle = create_recovery_message_puzzle(recovering_coin, newpuz, pubkey)
coin = Coin(parent_of_message, puzzle.get_tree_hash(), uint64(0))
solution = Program.to([])
coinsol = CoinSolution(coin, puzzle, solution)
return coinsol
# inspect puzzle and check it is a DID puzzle
def check_is_did_puzzle(puzzle: Program):
r = puzzle.uncurry()
if r is None:
return r
inner_f, args = r
return is_did_core(inner_f)
| [
"86817720+wheatnetwork@users.noreply.github.com"
] | 86817720+wheatnetwork@users.noreply.github.com |
7e50f3a8f442d6fed03fbf5af82ec1695c6de101 | 3268ece38499e4d2f2604eada8c05c6cc77c3071 | /provisioner/src/singleton.py | 24c29c2179e5f06049a6db318b8fd7d4088afde5 | [] | no_license | nicwaller/provisioner | 3b04fb3d875155c945b9d2669c3777fbc9e4cdc5 | 5bcc66711a7b7590bc28609bb6d3a523601ec5ee | refs/heads/main | 2023-07-27T20:48:38.989974 | 2021-08-31T23:40:32 | 2021-08-31T23:40:32 | 369,907,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | """
It would be bad to have two provisioners running at once on the same system.
So let's try to be sure we're the only one around.
"""
import logging
import os
logger = logging.getLogger("Singleton")
class Singleton(object):
    """Context manager enforcing a single running provisioner via a pidfile.

    On enter: refuses to start if the pidfile names a live process; replaces
    a stale/corrupt pidfile with our pid. On exit: removes the pidfile.
    """

    pidfile = "/var/run/provisioner.pid"

    def __enter__(self):
        logger.info("Starting up...")
        if os.path.exists(self.pidfile):
            with open(self.pidfile, "r+") as file:
                try:
                    other_pid = int(file.read())
                except ValueError:
                    # Corrupt or empty pidfile: treat it as stale.
                    other_pid = None
                if other_pid is not None and pid_exists(other_pid):
                    logger.critical(
                        f"Another provisioner is already running! pid={other_pid}"
                    )
                    raise RuntimeError("Duplicate process") from None
                logger.warning("Removing stale pidfile")
                # BUG FIX: after read() the file position is at EOF, so
                # truncate() was a no-op and write() appended after the old
                # pid. Rewind first so the file holds only our pid.
                file.seek(0)
                file.truncate()
                file.write(str(os.getpid()))
        else:
            with open(self.pidfile, "w") as file:
                file.write(str(os.getpid()))
                logger.debug("Wrote pidfile.")
        logger.debug("No conflicting processes found.")

    # noinspection PyShadowingBuiltins
    def __exit__(self, _, value, traceback):
        os.unlink(self.pidfile)
        logger.info("Done")  # TODO: log the time spent, and maybe update metrics
def pid_exists(pid) -> bool:
    """
    Check for the existence of a unix pid.

    Sending signal 0 performs error checking only, without delivering a signal.
    https://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid-in-python
    """
    try:
        os.kill(pid, 0)
    except ProcessLookupError:
        # No such process.
        return False
    except PermissionError:
        # FIX: the process exists but belongs to another user; the old code
        # lumped this into OSError and wrongly reported it as not running.
        return True
    except OSError:
        return False
    else:
        return True
| [
"code@nicwaller.com"
] | code@nicwaller.com |
bc7f5da07b7c4f1adc6515a085503a7bbb62b5ae | 679af513b9051ce9fca1ba998210fea12a277556 | /runTests.py | 9698834d08d4dabb7bcc9ac537ff8392525b8c3b | [] | no_license | umbertoDifa/SequentialRecommenderSystem | 5ba75fb398810e9b6a5d2919de700273ace7aa60 | 38a8c50a6762d777b9f205d097caeafde1fea649 | refs/heads/master | 2020-12-31T00:03:59.668344 | 2017-03-29T10:13:29 | 2017-03-29T10:13:29 | 86,566,676 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | from tests.FPM_tests import FPM_recommender_tests
from tests.Markov_tests import Markov_tests
from tests.data_expansion_tests import Data_expansion_tests
import unittest

# The test-case classes are imported into this module's namespace so that
# unittest.main() (which inspects __main__) can discover and run them.

if __name__ == "__main__":
    # Guarded so importing this module no longer runs (and exits on) the suite.
    unittest.main()
"lupen4th@hotmail.it"
] | lupen4th@hotmail.it |
b423a890a31fd737e8602dfbfd3438058faf4964 | d8ec038cc8b2a49a779fdeb668c34058b4b11db1 | /ecm/urls.py | 6e2f85f0567f33a5776f8098b4f698dbffef3e58 | [] | no_license | ngoctutrang/Django-Shopping-Cart | d619ee2217d202513d639f882bbe5a6e5d12a2b7 | 308b0e3214d3c64be97f720fc4e876f5eeb881c1 | refs/heads/master | 2022-12-01T01:06:18.082535 | 2020-08-06T04:24:23 | 2020-08-06T04:24:23 | 284,867,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | """ecm URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static

urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # All storefront routes are delegated to the ``store`` app.
    path('', include('store.urls')),
]

# Serve user-uploaded media files (effective only while DEBUG is True).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"tdngoctu@gmail.com"
] | tdngoctu@gmail.com |
4707d1638995a2fd49ac8ec1d7bb1cd1c54ed94b | fd5b3e587fb41e595946324bf3293331180bcbdd | /www/apis.py | 2c72544a764932bb9fda103823920c2f33fa7e06 | [] | no_license | labulaka6/webapp | 48890fd0b4f8a49d60acf9d5f95d8c5b890319bd | 221dfbbb205ea9a4c127aeafea6df10fb66e9875 | refs/heads/master | 2021-03-03T12:45:28.197166 | 2020-03-09T09:09:43 | 2020-03-09T09:09:43 | 244,323,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,780 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
JSON API definition.
'''
__author__ = 'labulaka6'
import json
import logging
import inspect
import functools
# A small hierarchy of API error exceptions meant to be raised by handlers.
class APIError(Exception):
    """Base API exception carrying an error code, the offending data and a message."""

    def __init__(self, error, data='', message=''):
        super().__init__(message)
        self.error = error
        self.data = data
        self.message = message


class APIValueError(APIError):
    """Raised when an input value is invalid; *field* names the bad form field."""

    def __init__(self, field, message=''):
        super().__init__('value:invalid', field, message)


class APIResourceNotFoundError(APIError):
    """Raised when a requested resource cannot be found; *field* names it."""

    def __init__(self, field, message=''):
        super().__init__('value:notfound', field, message)


class APIPermissionError(APIError):
    """Raised when the caller lacks permission. The *field* argument is kept
    only for signature compatibility; the data is always 'permission'."""

    def __init__(self, field, message=''):
        super().__init__('permission:forbidden', 'permission', message)
# Pagination helper.
class Page(object):
    """Given a total item count, compute the slice (offset/limit) for one
    page plus navigation flags.

    item_count: total number of items to paginate
    page_index: 1-based page to display
    page_size:  number of items per page
    """

    def __init__(self, item_count, page_index=1, page_size=10):
        self.item_count = item_count
        self.page_size = page_size
        # Ceiling division: number of pages needed to show every item.
        self.page_count = -(-item_count // page_size)
        if item_count == 0 or page_index > self.page_count:
            # Nothing to display: clamp to an empty first page.
            self.page_index = 1
            self.offset = 0
            self.limit = 0
        else:
            self.page_index = page_index
            self.offset = page_size * (page_index - 1)
            self.limit = page_size
        self.has_next = self.page_index < self.page_count
        self.has_previous = self.page_index > 1

    def __str__(self):
        # Human-readable summary; also used as repr (e.g. by dict()).
        return (f'item_count: {self.item_count}, page_count: {self.page_count}, '
                f'page_index: {self.page_index}, page_size: {self.page_size}, '
                f'offset: {self.offset}, limit: {self.limit}')

    __repr__ = __str__
| [
"13399226198@163.com"
] | 13399226198@163.com |
6d3042bd052f2e41a4cd141a774a174ab4c4cf9d | 2272daa9ace06ad9c043bacce089c3285d4a8dfd | /0.49_4_exercise_while_loops.py | 9a65e1352e61636c5ccc2756fdac51ffef2d5983 | [] | no_license | Eddy-Arora/Hello-world | 1c522aa010bc656ad6ff61b38f9b60212c81b710 | 7b903af7360bf014cfa5138f68946a4c237f2d3a | refs/heads/main | 2023-04-12T00:39:33.627847 | 2021-04-13T11:30:51 | 2021-04-13T11:30:51 | 351,726,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | n = int(input("Enter n: "))
# Print every even number from 2 up to (and including) n.
for i in range(2, n + 1, 2):
    print(i)
"rajeev.arora1908@gmail.com"
] | rajeev.arora1908@gmail.com |
1939165261e9bc871d33a3d26d3408e0baaf61a6 | cd0591c773702d66d964e325f494b17918617949 | /hgvs/utils/altseq_to_hgvsp.py | f47725b71196b3a2abc09f81931572745eaf3ced | [
"Apache-2.0"
] | permissive | SunbyMoon/hgvs | 9de0f1a2ddc134f072e490b989982d6e90bd164a | 2f348d53ee542576d0035a54757daa5dcd077e6b | refs/heads/master | 2020-03-22T15:51:35.995761 | 2018-07-03T04:19:09 | 2018-07-03T04:19:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,292 | py | # -*- coding: utf-8 -*-
"""Utility class for creating an hgvsp SequenceVariant object,
given a transcript with variants applied.
Used in hgvsc to hgvsp conversion.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import hgvs
from ..edit import (AAExt, AAFs, AARefAlt, AASub, Dup)
from ..exceptions import (HGVSError)
from ..location import (AAPosition, Interval)
from ..posedit import (PosEdit)
from six.moves import range
DBG = False
class AltSeqToHgvsp(object):
def __init__(self, ref_data, alt_data):
"""Constructor
:param ref_data: reference transcript record
:type ref_data: recordtype
:param alt_data: alt transcript record
:type ref_data: recordtype
"""
self._ref_data = ref_data
self._alt_data = alt_data
self._protein_accession = self._ref_data.protein_accession
self._ref_seq = self._ref_data.aa_sequence
self._alt_seq = self._alt_data.aa_sequence
self._is_frameshift = self._alt_data.is_frameshift
self._frameshift_start = self._alt_data.frameshift_start
self._is_substitution = self._alt_data.is_substitution
self._is_ambiguous = self._alt_data.is_ambiguous
if DBG:
print("len ref seq:{} len alt seq:{}".format(len(self._ref_seq), len(self._alt_seq)))
print("fs start:{} protein ac:{}".format(self._frameshift_start, self._protein_accession))
print(self._ref_seq)
print(self._alt_seq)
print("aa variant start: {}".format(self._alt_data.variant_start_aa))
print(self._ref_data.transcript_sequence)
print(self._alt_data.transcript_sequence)
def build_hgvsp(self):
"""Compare two amino acid sequences; generate an hgvs tag from the output
:return list of variants in sequence order
:rtype list of dict
"""
variants = []
if not self._is_ambiguous and len(self._alt_seq) > 0:
do_delins = True
if self._ref_seq == self._alt_seq:
# Silent p. variant
start = self._alt_data.variant_start_aa
if start - 1 < len(self._ref_seq):
deletion = self._ref_seq[start - 1]
insertion = deletion
else:
start = ""
deletion = ""
insertion = ""
self._is_frameshift = False
variants.append({"start": start, "ins": insertion, "del": deletion})
do_delins = False
elif self._is_substitution:
if len(self._ref_seq) == len(self._alt_seq):
diff_pos = [(i, self._ref_seq[i], self._alt_seq[i]) for i in range(len(self._ref_seq))
if self._ref_seq[i] != self._alt_seq[i]]
if len(diff_pos) == 1:
(start, deletion, insertion) = diff_pos[0]
variants.append({"start": start + 1, "ins": insertion, "del": deletion})
do_delins = False
elif (self._alt_seq[self._alt_data.variant_start_aa - 1] == "*"
and self._ref_seq[self._alt_data.variant_start_aa - 1] != "*"):
# introduced stop codon
deletion = self._ref_seq[self._alt_data.variant_start_aa - 1:]
variants.append({"start": self._alt_data.variant_start_aa, "ins": "*", "del": deletion})
do_delins = False
if do_delins:
if self._alt_data.is_frameshift:
start = self._alt_data.variant_start_aa - 1
aa_start = self._alt_data.variant_start_aa
while self._ref_seq[start] == self._alt_seq[start]:
start += 1
aa_start += 1
insertion = list(self._alt_seq[start:])
deletion = list(self._ref_seq[start:])
variants.append({"start": aa_start, "ins": insertion, "del": deletion})
else: # non-frameshifting delins or dup
# get size diff from diff in ref/alt lengths
start = self._alt_data.variant_start_aa - 1
aa_start = self._alt_data.variant_start_aa
delta = len(self._alt_seq) - len(self._ref_seq)
while self._ref_seq[start] == self._alt_seq[start]:
start += 1
aa_start += 1
offset = start + abs(delta)
if delta > 0: # net insertion
insertion = list(self._alt_seq[start:offset])
deletion = []
ref_sub = self._ref_seq[start:]
alt_sub = self._alt_seq[offset:]
elif delta < 0: # net deletion
insertion = []
deletion = list(self._ref_seq[start:offset])
ref_sub = self._ref_seq[offset:]
alt_sub = self._alt_seq[start:]
else:
insertion = []
deletion = []
ref_sub = self._ref_seq[start:]
alt_sub = self._alt_seq[start:]
# from start, get del/ins out to last difference
diff_indices = [i for i in range(len(ref_sub)) if ref_sub[i] != alt_sub[i]]
if diff_indices:
max_diff = diff_indices[-1] + 1
insertion.extend(list(alt_sub[:max_diff]))
deletion.extend(list(ref_sub[:max_diff]))
variants.append({"start": aa_start, "ins": insertion, "del": deletion})
if DBG:
print(variants)
if self._is_ambiguous:
var_ps = [
self._create_variant(None, None, '', '', acc=self._protein_accession, is_ambiguous=self._is_ambiguous)
]
elif len(self._alt_seq) == 0:
var_ps = [
self._create_variant(
None,
None,
'',
'',
acc=self._protein_accession,
is_ambiguous=self._is_ambiguous,
is_no_protein=True)
]
else:
var_ps = [self._convert_to_sequence_variants(x, self._protein_accession) for x in variants]
if len(var_ps) > 1:
raise HGVSError("Got multiple AA variants - not supported")
return var_ps[0]
#
# internal methods
#
def _convert_to_sequence_variants(self, variant, acc):
"""Convert AA variant to an hgvs representation
:param variant: contains start, del, and ins
:type variant: dict
:param acc: protein accession
:type acc: str
:return hgvs string
:rtype str
"""
start = variant['start']
insertion = ''.join(variant['ins'])
deletion = ''.join(variant['del'])
# defaults
is_dup = False # assume not dup
fsext_len = None # fs or ext length
is_sub = False
is_ext = False
if start == 1: # initial methionine is modified
aa_start = aa_end = AAPosition(base=start, aa=deletion)
ref = ''
alt = ''
self._is_ambiguous = True # side-effect
if insertion and insertion.find("*") == 0: # stop codon at variant position
aa_start = aa_end = AAPosition(base=start, aa=deletion[0])
ref = ''
alt = '*'
is_sub = True
elif start == len(self._ref_seq): # extension
if self._alt_seq[-1] == '*':
fsext_len = len(insertion) - len(deletion) # don't include the former stop codon
else:
fsext_len = '?'
subst_at_stop_codon = insertion[0]
aa_start = aa_end = AAPosition(base=start, aa='*')
ref = ''
alt = subst_at_stop_codon
is_ext = True
elif self._is_frameshift: # frameshift
aa_start = aa_end = AAPosition(base=start, aa=deletion[0])
ref = ''
try:
fsext_len = str(insertion.index("*") + 1) # start w/ 1st change; ends w/ * (inclusive)
except ValueError:
fsext_len = "?"
alt = insertion[0]
else: # no frameshift - sub/delins/dup
if insertion == deletion: # silent
aa_start = aa_end = AAPosition(base=start, aa=deletion)
ref = alt = ''
elif len(insertion) == len(deletion) == 1: # substitution
aa_start = aa_end = AAPosition(base=start, aa=deletion)
ref = ''
alt = insertion
is_sub = True
elif len(deletion) > 0: # delins OR deletion OR stop codon at variant position
ref = deletion
end = start + len(deletion) - 1
if len(insertion) > 0: # delins
aa_start = AAPosition(base=start, aa=deletion[0])
if end > start:
aa_end = AAPosition(base=end, aa=deletion[-1])
else:
aa_end = aa_start
alt = insertion
else: # deletion OR stop codon at variant position
if len(deletion) + start == len(self._ref_seq): # stop codon at variant position
aa_start = AAPosition(base=start, aa=deletion[0])
aa_end = AAPosition(base=start, aa=deletion[0])
ref = ''
alt = '*'
is_sub = True
else: # deletion
aa_start = AAPosition(base=start, aa=deletion[0])
if end > start:
aa_end = AAPosition(base=end, aa=deletion[-1])
else:
aa_end = aa_start
alt = None
elif len(deletion) == 0: # insertion OR duplication OR extension
is_dup, dup_start = self._check_if_ins_is_dup(start, insertion)
if is_dup: # duplication
dup_end = dup_start + len(insertion) - 1
aa_start = AAPosition(base=dup_start, aa=insertion[0])
aa_end = AAPosition(base=dup_end, aa=insertion[-1])
ref = alt = None
else: # insertion
start -= 1
end = start + 1
aa_start = AAPosition(base=start, aa=self._ref_seq[start - 1])
aa_end = AAPosition(base=end, aa=self._ref_seq[end - 1])
ref = None
alt = insertion
else: # should never get here
raise ValueError("unexpected variant: {}".format(variant))
var_p = self._create_variant(
aa_start,
aa_end,
ref,
alt,
fsext_len=fsext_len,
is_dup=is_dup,
acc=acc,
is_ambiguous=self._is_ambiguous,
is_sub=is_sub,
is_ext=is_ext)
return var_p
def _check_if_ins_is_dup(self, start, insertion):
"""Helper to identify an insertion as a duplicate
:param start: 1-based insertion start
:type start: int
:param insertion: sequence
:type insertion: str
:return (is duplicate, variant start)
:rtype (bool, int)
"""
is_dup = False # assume no
variant_start = None
dup_candidate_start = start - len(insertion) - 1
dup_candidate = self._ref_seq[dup_candidate_start:dup_candidate_start + len(insertion)]
if insertion == dup_candidate:
is_dup = True
variant_start = dup_candidate_start + 1
return is_dup, variant_start
def _create_variant(self,
start,
end,
ref,
alt,
fsext_len=None,
is_dup=False,
acc=None,
is_ambiguous=False,
is_sub=False,
is_ext=False,
is_no_protein=False):
"""Creates a SequenceVariant object"""
if is_ambiguous:
posedit = None
else:
interval = Interval(start=start, end=end)
# Note - order matters
if is_no_protein:
edit = '0'
elif is_sub:
edit = AASub(ref=ref, alt=alt)
elif is_ext:
edit = AAExt(ref=ref, alt=alt, aaterm='*', length=fsext_len)
elif self._is_frameshift:
edit = AAFs(ref=ref, alt=alt, length=fsext_len)
elif is_dup:
edit = Dup()
elif ref == alt == '':
edit = AARefAlt(ref='', alt='')
else:
edit = AARefAlt(ref=ref, alt=alt)
posedit = PosEdit(pos=interval, edit=edit, uncertain=hgvs.global_config.mapping.inferred_p_is_uncertain)
var_p = hgvs.sequencevariant.SequenceVariant(acc, 'p', posedit)
return var_p
# <LICENSE>
# Copyright 2018 HGVS Contributors (https://github.com/biocommons/hgvs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# </LICENSE>
| [
"reecehart@gmail.com"
] | reecehart@gmail.com |
8c8b678d13701ba585b3238bd029821548cc4783 | f7550c4964dc8f3c59dbcebe39e947bd6a264dba | /1.Recorsions - 1/String into Int.py | 49eb468cd5f79f87fb2aa7dff14c15aa4c47eb1d | [] | no_license | Jashwanth-k/Data-Structures-and-Algorithms | db5e2e30932e0a35db578c19ae6cff9f147b7c3d | 1ebf9986999a474cb094f3ab04616a46f2887043 | refs/heads/main | 2023-08-25T02:57:17.394322 | 2021-10-11T15:27:56 | 2021-10-11T15:27:56 | 402,448,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | def str_to_int(s):
l = len(s)
if l == 1:
return ord(s[0]) - ord('0')
a = str_to_int(s[1:])
b = ord(s[0]) - ord('0')
output = b*(10**(l-1)) + a
return output
s = ''
print(str_to_int(s)) | [
"noreply@github.com"
] | noreply@github.com |
80c66729e6cbcb7721e17efef2dc1381872cf87d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_minefields.py | 9a6f4d39f05827d2da9dbb885032211575fb3e49 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py |
#calss header
class _MINEFIELDS():
def __init__(self,):
self.name = "MINEFIELDS"
self.definitions = minefield
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['minefield']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
078e44a48834d3337ead8e1e1c6507f4e79f8e16 | b9538a3f41d3cf983a19ef432ed8d125cd5a9e5e | /di5cheng-IT-PaaS/venv/Lib/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/cli/chardetect.py | ae5b452050890a535b2dd7aec999cfea9b6a8bf2 | [] | no_license | xutusheng/di5cheng | 61997c473bed5eb78b646447bdbfb13bd930cb02 | 5d8dcefe8967ba4d238623f54f6775bc8c188e85 | refs/heads/master | 2023-03-13T20:57:47.190609 | 2020-04-10T07:56:55 | 2020-04-10T07:56:55 | 197,309,931 | 0 | 0 | null | 2021-03-25T23:37:50 | 2019-07-17T03:42:00 | Python | UTF-8 | Python | false | false | 2,775 | py | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from pip._vendor.chardet import __version__
from pip._vendor.chardet.compat import PY2
from pip._vendor.chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
    """
    Return a string describing the probable encoding of a file or
    list of strings.

    :param lines: The lines to get the encoding of.
    :type lines: Iterable of bytes
    :param name: Name of file or collection of lines
    :type name: str
    """
    u = UniversalDetector()
    for line in lines:
        line = bytearray(line)
        u.feed(line)
        # shortcut out of the loop to save reading further - particularly useful if we read a BOM.
        if u.done:
            break
    u.close()
    result = u.result
    if PY2:
        name = name.decode(sys.getfilesystemencoding(), 'ignore')
    if result['encoding']:
        return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
                                                     result['confidence'])
    else:
        # Fixed typo in user-facing message: was "no resport".
        return '{0}: no result'.format(name)
def main(argv=None):
    """
    Handles command line arguments and gets things started.

    :param argv: List of arguments, as if specified on the command-line.
                 If None, ``sys.argv[1:]`` is used instead.
    :type argv: list of str
    """
    # Get command line arguments
    parser = argparse.ArgumentParser(
        description="Takes one or more file paths and reports their detected \
encodings")
    # Files are opened in binary mode: the detector consumes raw bytes.
    parser.add_argument('input',
                        help='File whose encoding we would like to determine. \
(default: stdin)',
                        type=argparse.FileType('rb'), nargs='*',
                        default=[sys.stdin if PY2 else sys.stdin.buffer])
    parser.add_argument('--version', action='version',
                        version='%(prog)s {0}'.format(__version__))
    args = parser.parse_args(argv)

    for f in args.input:
        if f.isatty():
            # Interactive stdin: tell the user how to terminate their input.
            print("You are running chardetect interactively. Press " +
                  "CTRL-D twice at the start of a blank line to signal the " +
                  "end of your input. If you want help, run chardetect " +
                  "--help\n", file=sys.stderr)
        print(description_of(f, f.name))


if __name__ == '__main__':
    main()
| [
"576265944@qq.com"
] | 576265944@qq.com |
20dd8bac432917f44ec65e02ad42a37c002d8dc7 | dd6c759081c1490c624de00f9519216613de5293 | /src/ui/__init__.py | 02186177946aec017837c2690ac545a6690800ea | [
"MIT"
] | permissive | forcemain/SwarmOps | 76151fd31dff5288f3bc66a24c03547c6d9bb142 | 07675b362c83ce74bae13cb1c9ee627dc4ee25ed | refs/heads/master | 2021-06-18T12:41:11.960706 | 2017-05-10T01:04:44 | 2017-05-10T01:04:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,826 | py | # -*- coding:utf-8 -*-
#
# SwarmOps views for ui
#
from flask import Blueprint, render_template, url_for, redirect, g, abort
from utils.public import logger, login_required
# Blueprint for the server-rendered HTML pages; templates and static assets
# live alongside this package. Every view below simply renders a template
# (page data is loaded client-side) and requires an authenticated session.
ui_blueprint = Blueprint("ui", __name__, template_folder="templates", static_folder='static')

''' swarm route'''


@ui_blueprint.route("/")
@ui_blueprint.route("/swarm/")
@login_required
def index():
    # Swarm cluster overview; also the site landing page.
    return render_template("swarm/swarm.html")


@ui_blueprint.route("/swarm/add/")
@login_required
def swarm_add():
    return render_template("swarm/add.html")


@ui_blueprint.route("/swarm/init/")
@login_required
def swarm_init():
    return render_template("swarm/init.html")


'''service route'''


@ui_blueprint.route("/service/")
@login_required
def service():
    return render_template("service/service.html")


@ui_blueprint.route("/service/delete/")
@login_required
def service_delete():
    return render_template("service/delete.html")


@ui_blueprint.route("/service/update/")
@login_required
def service_update():
    return render_template("service/update.html")


@ui_blueprint.route("/service/create/")
@login_required
def service_create():
    return render_template("service/create.html")


@ui_blueprint.route("/service/detail/")
@login_required
def service_detail():
    return render_template("service/detail.html")


@ui_blueprint.route("/service/nginx/")
@login_required
def service_nginx():
    return render_template("service/nginx.html")


'''node route'''


@ui_blueprint.route("/node/")
@login_required
def node():
    return render_template("node/node.html")


@ui_blueprint.route("/node/add/")
@login_required
def node_add():
    return render_template("node/add.html")


@ui_blueprint.route("/node/update/")
@login_required
def node_update():
    return render_template("node/update.html")


@ui_blueprint.route("/node/delete/")
@login_required
def node_delete():
    return render_template("node/delete.html")


'''misc route'''


@ui_blueprint.route("/misc/")
@login_required
def misc():
    return render_template("misc.html")


@ui_blueprint.route("/storage/")
@login_required
def storage():
    return render_template("misc/storage.html")


'''network route'''


@ui_blueprint.route("/network/")
@login_required
def network():
    return render_template("network/network.html")


'''registry route'''


@ui_blueprint.route("/registry/")
@login_required
def registry():
    return render_template("registry/registry.html")


@ui_blueprint.route("/registry/<namespace>/<repository_name>/")
@login_required
def registryImageName(namespace, repository_name):
    # "_/<name>" is the namespace prefix for official images; strip it so the
    # template receives the short display name.
    return render_template("registry/imageName.html", imageName="{}/{}".format(namespace, repository_name).replace("_/", ""))


@ui_blueprint.route("/registry/<imageId>/")
@login_required
def registryImageId(imageId):
    return render_template("registry/imageId.html", imageId=imageId)
| [
"staugur@vip.qq.com"
] | staugur@vip.qq.com |
8ce177826d5aee9c149d47b05f96be769a59424f | ec733aa667eab2a925bb09b55f8a2d13f431ad94 | /youtbenew.py | c06942fc55c30f24aeed37cf9eebe68cbf740a81 | [
"MIT"
] | permissive | ChavdiTrending/Youtube | 1a51594db1431b6107713f6151e26c737dcda444 | 4dc5addc894effd351213a5fdc72110d1b65e4cd | refs/heads/master | 2021-01-24T01:58:47.693127 | 2018-04-14T15:20:46 | 2018-04-14T15:20:46 | 122,828,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | import requests
import json

# SECURITY: this API key is committed to source control; it should be revoked
# and loaded from an environment variable instead of being hard-coded.
API_KEY = 'AIzaSyA46dBqdBBtnfgxr8aRvSBE2Q7qiNyhWrk'
PAGES_TO_FETCH = 2  # each page of the API returns up to 50 videos

# Flat list of [link, title, thumbnail-url, link, title, ...] triples,
# preserved in this layout for compatibility with the JSON consumers.
links = []
next_page = ''
session = requests.Session()  # reuse one connection instead of one per request
for _ in range(PAGES_TO_FETCH):
    payload = {
        'part': 'snippet',
        'key': API_KEY,
        'maxResults': '50',
        'chart': 'mostPopular',
        'regionCode': 'IN',
        'pageToken': next_page,
    }
    resp = session.get('https://www.googleapis.com/youtube/v3/videos', params=payload)
    resp_dict = resp.json()
    print(resp_dict)
    for item in resp_dict['items']:
        links.append("https://youtu.be/" + item['id'])
        links.append(item['snippet']['title'])
        links.append(item['snippet']['thumbnails']['high']['url'])
    # FIX: the last page of results has no 'nextPageToken'; the old code
    # raised KeyError there. Stop cleanly instead.
    next_page = resp_dict.get('nextPageToken')
    if next_page is None:
        break

with open('Videolinks.json', 'w') as outfile:
    json.dump({'links': links}, outfile)
| [
"savannahar68@gmail.com"
] | savannahar68@gmail.com |
789e14b2928fb55c758fdba579f23eae73d39023 | 2ed188020fbf744185102b1f76084e4ca90dc08f | /elements/element.py | a20794700fdec2c9cc78a0e333d07449a66a0ba0 | [
"MIT"
] | permissive | Sage-Bionetworks/GENIE-FMI-converter | ddfe02bcb23027dbbf133b37de03c06b8607d866 | fed7d05a5b0b01a6f5cbcf0df14a63eecdbc5471 | refs/heads/master | 2022-10-01T08:15:50.445156 | 2022-08-30T21:17:08 | 2022-08-30T21:17:08 | 220,875,930 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,515 | py | import re
rr = "{http://integration.foundationmedicine.com/reporting}"
vr = "{http://foundationmedicine.com/compbio/variant-report-external}"
schema = "{http://www.w3.org/2001/XMLSchema}"
instance = "{http://www.w3.org/2001/XMLSchema-instance}"
matches = [(val, re.compile(r"^{}".format(val))) for val in
(rr, vr, schema, instance)]
class empty_node(object):
    """Stand-in for an XML node that has no tag, no text and no attributes."""

    def __init__(self):
        self.tag = ""
        self.text = ""
        self.attrib = {}
# strips the URL suffix
def suffix(matches, inp):
    """Return (prefix, remainder) for the first anchored pattern in *matches*
    that matches the start of *inp*; ("", inp) when none match."""
    for prefix, pattern in matches:
        if pattern.match(inp):
            return prefix, pattern.sub("", inp)
    return "", inp
class element:
    """Lightweight wrapper around an XML node: namespace-stripped tag name,
    trimmed text, its attributes, and recursively wrapped children."""

    def __init__(self, matches, node):
        # Split the namespace prefix (Clark notation "{uri}tag") off the tag.
        (self.prefix, self.name) = suffix(matches, node.tag)
        self.attrib = node.attrib
        self.text = ""
        if node.text:
            self.text = node.text.strip()
        # Populated externally (see decompose()); empty for leaf wrappers.
        self.children = []

    def attr(self, inp):
        """Return the stripped attribute value for *inp*, or None if absent/empty."""
        return self.attrib.get(inp).strip() if self.attrib.get(inp) else None

    def find(self, element_name):
        """Return all shallowest descendants (including self) named *element_name*."""
        ret = []
        self.find_path(element_name, ret)
        return ret

    def find_path(self, element_name, pre):
        # Depth-first accumulation into *pre*. Note the elif: a matching
        # node's own subtree is NOT searched further, so only the shallowest
        # match along each path is collected.
        if self.name == element_name:
            pre.append(self)
        elif self.children:
            for child in self.children:
                child.find_path(element_name, pre)

    # returns first elements matching full path
    def get(self, item, *items):
        """Walk a path of tag names and return the first matching element.

        Returns an empty element (never None) when the path does not exist,
        so chained .get(...) calls are always safe.
        """
        if items and item == self.name:
            return self.get(*items)
        for entry in self.children:
            if entry.name == item:
                if items:
                    return entry.get(*items)
                else:
                    return entry
        return element([], empty_node())

    def __str__(self):
        return self.__to_str__(0)

    def __to_str__(self, depth):
        # Indented human-readable dump of this subtree.
        ret = "\n" + " " * depth + self.name
        if self.text:
            ret += ": '{}'".format(self.text)
        if self.attrib:
            # NOTE(review): key[1] takes the second character of each
            # attribute key -- presumably the keys are pairs or carry a
            # leading sigil; confirm against the parser that fills attrib.
            ret += " {{{} }}".format(
                ", ".join("{}: '{}' ".format(key[1], val)
                          for key, val in self.attrib.items()))
        if self.children:
            for child in self.children:
                ret += " " * (depth + 1) + child.__to_str__(depth + 1)
            ret += "\n"
        return ret
def decompose(node):
    """Recursively convert an ElementTree node into an `element` tree,
    stripping the module-level namespace prefixes."""
    wrapped = element(matches, node)
    wrapped.children = [decompose(child) for child in node]
    return wrapped
| [
"jlb152@duke.edu"
] | jlb152@duke.edu |
7119d5cf9fe49a141201fecf3ee52c63d62af40c | 47925a3da43b0136f34bfebadd0c8eb3536cfba3 | /Problem_048.py | 8d4007ba9eb2a9a4d6d80230353911051d4c1471 | [] | no_license | arm-out/ProjectEuler | a1a8801eb45cdd9b3d595f11292c85e8f0e1adc6 | 2fdf5b4096bc2a479086794b656cd205359ceeae | refs/heads/master | 2021-07-10T13:24:20.668103 | 2020-07-03T00:00:48 | 2020-07-03T00:00:48 | 161,202,031 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | a = 0
# Project Euler 48: sum i**i for i = 1..1000 and print the last ten digits.
a = sum(i ** i for i in range(1, 1001))
print(str(a)[-10:])
| [
"noreply@github.com"
] | noreply@github.com |
2aa7e05f460ae0b7d0f6ea6a66312db082a1ce07 | da052c0bbf811dc4c29a83d1b1bffffd41becaab | /core/serial_number_expired_date/models/stock_picking.py | 6b51aa2baf397a198e54c46a84b406b3800e23da | [] | no_license | Muhammad-SF/Test | ef76a45ad28ac8054a4844f5b3826040a222fb6e | 46e15330b5d642053da61754247f3fbf9d02717e | refs/heads/main | 2023-03-13T10:03:50.146152 | 2021-03-07T20:28:36 | 2021-03-07T20:28:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,983 | py | # -*- coding: utf-8 -*-
import logging
from odoo import models, fields, api , _
import datetime
# from dateutil.relativedelta import relativedelta
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT, float_compare, float_round
class PackOperation(models.Model):
    """Extend pack-operation lots with a user-entered expiry date."""
    _inherit = 'stock.pack.operation.lot'

    # Propagated onto the production lot when it is created (see Picking).
    expired_date = fields.Datetime(string='Expiry Date', store=True)
class Picking(models.Model):
    _inherit = "stock.picking"

    def _create_lots_for_picking(self):
        """Create production lots for pack-operation lots that have none yet,
        copying the operation's expiry date onto the new lot."""
        Lot = self.env['stock.production.lot']
        for pack_op_lot in self.mapped('pack_operation_ids').mapped('pack_lot_ids'):
            if not pack_op_lot.lot_id:
                # use_date/expired_date are passed so StockProductionLot.create
                # can derive the remaining product dates from them.
                lot = Lot.create({'name': pack_op_lot.lot_name, 'product_id': pack_op_lot.operation_id.product_id.id, 'use_date': pack_op_lot.expired_date, 'expired_date': pack_op_lot.expired_date})
                pack_op_lot.write({'lot_id': lot.id})
        # TDE FIXME: this should not be done here
        self.mapped('pack_operation_ids').mapped('pack_lot_ids').filtered(lambda op_lot: op_lot.qty == 0.0).unlink()

    # Public alias kept for callers using the un-prefixed name.
    create_lots_for_picking = _create_lots_for_picking
class Quant(models.Model):
    _inherit = "stock.quant"

    # Mirrors the lot's use_date so quants can be filtered/sorted by expiry.
    expired_date = fields.Date(related='lot_id.use_date', string='Expiry Date', store=True)
class StockProductionLot(models.Model):
    _inherit = 'stock.production.lot'

    # User-entered expiry date; the other lot dates are derived from it.
    expired_date = fields.Datetime(string='Expiry Date', store=True)

    # Assign dates according to products data
    @api.model
    def create(self, vals):
        """Derive use/life/alert/removal dates from the entered expiry date.

        The expiry date is treated as the product's removal date; each other
        date is back-computed from it using the product's configured
        use_time / life_time / alert_time offsets (all in days).
        """
        dates = self._get_dates(vals.get('product_id'))
        product_id = vals.get('product_id')
        exp_date = vals.get('expired_date')
        if exp_date:
            expired_date = datetime.datetime.strptime(exp_date, DEFAULT_SERVER_DATETIME_FORMAT)
        else:
            # No expiry supplied: fall back to "now" as the reference point.
            expired_date = datetime.datetime.now()
        product = self.env['product.product'].browse(product_id)
        if product:
            # Reference point for all offsets = expiry minus removal lead time.
            for d in dates.keys():
                if d in ['use_date']:
                    date = (expired_date - datetime.timedelta(days=product.removal_time)) + datetime.timedelta(days=product.use_time)
                    vals['use_date'] = fields.Datetime.to_string(date)
                if d in ['life_date']:
                    date = (expired_date - datetime.timedelta(days=product.removal_time)) + datetime.timedelta(days=product.life_time)
                    vals['life_date'] = fields.Datetime.to_string(date)
                if d in ['alert_date']:
                    date = (expired_date - datetime.timedelta(days=product.removal_time)) + datetime.timedelta(days=product.alert_time)
                    vals['alert_date'] = fields.Datetime.to_string(date)
                if d in ['removal_date']:
                    # The removal date is exactly the entered expiry date.
                    date = expired_date
                    vals['removal_date'] = fields.Datetime.to_string(date)
        return super(StockProductionLot, self).create(vals)
| [
"jbalu2801@gmail.com"
] | jbalu2801@gmail.com |
d697ab63f80e3a49fe00bec7ff8ce8df45f20e9d | f3646538e70fa52e55ebfcb85d9bf5e131cb158d | /Cryptopals Set 4/set4_challenge32.py | 0fce97d203d674209c52e8dd5e00bf967512f2f5 | [
"MIT"
] | permissive | rajKarra69420/CryptoPals | 76f1342e72235137b83504691d74c4594c7cd7ee | 6ade402ed652a902249a82cce53aedf2081a5db9 | refs/heads/master | 2023-02-10T23:36:12.708235 | 2021-01-06T10:31:27 | 2021-01-06T10:31:27 | 294,879,718 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | import set4_challenge31 as break_hmac
# We reuse the timing-leak server from challenge 31; only the artificial
# delay differs. When the challenge-31 attack starts failing (shorter
# delays), we compensate by collecting more timing samples per byte.

if __name__ == "__main__":
    # 100 samples per byte may be more than needed, but it guarantees the
    # attack still succeeds at the lower sleep times.
    print(break_hmac.get_hmac("test_file", 100))
| [
"noreply@github.com"
] | noreply@github.com |
469e38e83b1d2afb5cf82b1f1a90849485818ff4 | fa08376603d6136ec81f958510a363192c8ced83 | /site-packages/amuse/community/huayno/interface.py | b5b2c5802b0f60328edef50762add7d292694ac4 | [] | no_license | BrianTCook/amuse_env | e8da14e0bfd917179c3973e54daab1f980ae434c | 2e7eff89e82a859020604b692fb94bdd67ed7798 | refs/heads/master | 2021-05-18T21:14:52.897911 | 2020-04-04T16:11:58 | 2020-04-04T16:11:58 | 251,420,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,952 | py | from amuse.community import *
from amuse.community.interface.gd import GravitationalDynamicsInterface,GravityFieldInterface
from amuse.community.interface.gd import GravitationalDynamics,GravityFieldCode
class HuaynoInterface(CodeInterface,
        LiteratureReferencesMixIn,
        GravitationalDynamicsInterface,
        StoppingConditionInterface,
        GravityFieldInterface):
    """
    HUAYNO is a code to solve the astrophysical N-body problem. It uses
    recursive Hamiltonian splitting to generate multiple-timestep integrators
    which conserve momentum to machine precision. A number of different
    integrators are available. The code has been developed within the
    AMUSE environment. It can make use of GPUs - for this an OpenCL
    version can be compiled.
    .. [#] Pelupessy, Federico I.; J\"anes, J\"urgen; Portegies Zwart, Simon, New Astronomy, Volume 17, Issue 8, p. 711-719
    .. [#] J\"anes, J\"urgen; Pelupessy, Federico I.; Portegies Zwart, Simon, A&A, Volume 570, October 2014 (for CC, OK methods)
    """
    # C header compiled into the generated worker code.
    include_headers = ['worker_code.h']
    __so_module__ = 'huayno_cython'
    # Acceleration modes accepted by __init__'s `mode` argument.
    MODE_OPENCL='opencl'
    MODE_OPENMP='openmp'
    def name_of_worker(self,mode):
        # Map the requested mode onto the matching worker executable;
        # the plain (serial CPU) worker is the default for mode=None or
        # any unrecognised value.
        if mode==self.MODE_OPENCL:
            return 'huayno_worker_cl'
        if mode==self.MODE_OPENMP:
            return 'huayno_worker_mp'
        return 'huayno_worker'
    def __init__(self, mode=None, **options):
        # `mode` selects the worker binary (None, MODE_OPENCL or MODE_OPENMP);
        # remaining options are passed through to CodeInterface.
        CodeInterface.__init__(self, name_of_the_worker = self.name_of_worker(mode), **options)
        LiteratureReferencesMixIn.__init__(self)
    # Each @legacy_function below declares the remote-call signature of one
    # function exported by the C worker: parameter names, dtypes, directions
    # ('d' double, 'i' int, 'int64') and the integer error-code result.
    @legacy_function
    def get_time():
        function = LegacyFunctionSpecification()
        function.addParameter('time', dtype='d', direction=function.OUT)
        function.result_type = 'i'
        return function
    @legacy_function
    def commit_particles():
        function = LegacyFunctionSpecification()
        function.result_type = 'i'
        return function
    @legacy_function
    def get_kinetic_energy():
        function = LegacyFunctionSpecification()
        function.addParameter('kinetic_energy', dtype='d', direction=function.OUT)
        function.result_type = 'i'
        return function
    @legacy_function
    def get_potential_energy():
        function = LegacyFunctionSpecification()
        function.addParameter('potential_energy', dtype='d', direction=function.OUT)
        function.result_type = 'i'
        return function
    @legacy_function
    def initialize_code():
        function = LegacyFunctionSpecification()
        function.result_type = 'i'
        return function
    @legacy_function
    def evolve_model():
        function = LegacyFunctionSpecification()
        function.addParameter('time_end', dtype='d', direction=function.IN)
        function.result_type = 'i'
        return function
    # Getter/setter pairs for the code's tunable parameters (wired into the
    # AMUSE parameter set by Huayno.define_parameters below).
    @legacy_function
    def get_timestep_parameter():
        function = LegacyFunctionSpecification()
        function.addParameter('time_param', dtype='d', direction=function.OUT)
        function.result_type = 'i'
        return function
    @legacy_function
    def set_timestep_parameter():
        function = LegacyFunctionSpecification()
        function.addParameter('time_param', dtype='d', direction=function.IN)
        function.result_type = 'i'
        return function
    @legacy_function
    def get_timestep():
        function = LegacyFunctionSpecification()
        function.addParameter('timestep', dtype='d', direction=function.OUT)
        function.result_type = 'i'
        return function
    @legacy_function
    def set_timestep():
        function = LegacyFunctionSpecification()
        function.addParameter('timestep', dtype='d', direction=function.IN)
        function.result_type = 'i'
        return function
    @legacy_function
    def get_verbosity_parameter():
        function = LegacyFunctionSpecification()
        function.addParameter('verbosity', dtype='i', direction=function.OUT)
        function.result_type = 'i'
        return function
    @legacy_function
    def set_verbosity_parameter():
        function = LegacyFunctionSpecification()
        function.addParameter('verbosity', dtype='i', direction=function.IN)
        function.result_type = 'i'
        return function
    @legacy_function
    def get_number_of_particles():
        function = LegacyFunctionSpecification()
        function.addParameter('number_of_particles', dtype='i', direction=function.OUT)
        function.result_type = 'i'
        return function
    # Integrator selection; valid values are listed in Huayno.inttypes.
    @legacy_function
    def get_inttype_parameter():
        function = LegacyFunctionSpecification()
        function.addParameter('inttype', dtype='i', direction=function.OUT)
        function.result_type = 'i'
        return function
    @legacy_function
    def set_inttype_parameter():
        function = LegacyFunctionSpecification()
        function.addParameter('inttype', dtype='i', direction=function.IN)
        function.result_type = 'i'
        return function
    # Gravitational softening length squared (eps^2).
    @legacy_function
    def get_eps2_parameter():
        function = LegacyFunctionSpecification()
        function.addParameter('eps2', dtype='d', direction=function.OUT)
        function.result_type = 'i'
        return function
    @legacy_function
    def set_eps2_parameter():
        function = LegacyFunctionSpecification()
        function.addParameter('eps2', dtype='d', direction=function.IN)
        function.result_type = 'i'
        return function
    def set_eps2(self, e):
        # Convenience alias; the Huayno parameter definitions refer to
        # "set_eps2"/"get_eps2" rather than the *_parameter names.
        return self.set_eps2_parameter(e)
    def get_eps2(self):
        return self.get_eps2_parameter()
    @legacy_function
    def get_evolve_statistics():
        # Integrator bookkeeping counters accumulated during evolve calls
        # (time/kick/drift totals and their per-step variants).
        function = LegacyFunctionSpecification()
        function.addParameter('ttot', dtype='int64', direction=function.OUT)
        function.addParameter('ktot', dtype='int64', direction=function.OUT)
        function.addParameter('dtot', dtype='int64', direction=function.OUT)
        function.addParameter('tstot', dtype='int64', direction=function.OUT)
        function.addParameter('kstot', dtype='int64', direction=function.OUT)
        function.addParameter('dstot', dtype='int64', direction=function.OUT)
        function.result_type = 'i'
        return function
class Huayno(GravitationalDynamics,GravityFieldCode):
    """High-level AMUSE wrapper around HuaynoInterface: exposes parameters,
    unit-aware methods, particle sets and stopping conditions."""
    __interface__ = HuaynoInterface
    class inttypes(object):
        # Enumeration of the integrator methods understood by the worker's
        # set_inttype_parameter (plain class-attribute enum).
        # http://stackoverflow.com/questions/36932/whats-the-best-way-to-implement-an-enum-in-python
        SHARED2=1
        EXTRAPOLATE=5
        PASS_KDK=2
        PASS_DKD=7
        HOLD_KDK=3
        HOLD_DKD=8
        PPASS_DKD=9
        BRIDGE_KDK=4
        BRIDGE_DKD=10
        CC=11
        CC_KEPLER=12
        OK=13
        KEPLER=14
        SHARED4=15
        SHARED6=18
        SHARED8=19
        SHARED10=20
        SHAREDBS=21
        CCC=22
        CCC_KEPLER=23
        CC_BS=24
        CCC_BS=25
        BS_CC_KEPLER=26
        CC_BSA=27
        CCC_BSA=28
        SHARED2_COLLISIONS=29
        SHARED4_COLLISIONS=30
        SHARED6_COLLISIONS=31
        SHARED8_COLLISIONS=32
        SHARED10_COLLISIONS=33
        @classmethod
        def _list(cls):
            # All public integrator names (everything not underscored).
            return set([x for x in list(cls.__dict__.keys()) if not x.startswith('_')])
    def __init__(self, convert_nbody = None, **options):
        # convert_nbody: optional nbody<->SI unit converter; options are
        # forwarded to the low-level interface (e.g. mode=...).
        self.stopping_conditions = StoppingConditions(self)
        legacy_interface = self.__interface__(**options)
        # self.legacy_doc = legacy_interface.__doc__
        GravitationalDynamics.__init__(
            self,
            legacy_interface,
            convert_nbody,
            **options
        )
    def define_parameters(self, handler):
        # Register the tunables exposed through Huayno.parameters, each backed
        # by a getter/setter pair on the low-level interface.
        self.stopping_conditions.define_parameters(handler)
        handler.add_method_parameter(
            "get_eps2",
            "set_eps2",
            "epsilon_squared",
            "smoothing parameter for gravity calculations",
            default_value = 0.0 | nbody_system.length * nbody_system.length
        )
        handler.add_method_parameter(
            "get_timestep_parameter",
            "set_timestep_parameter",
            "timestep_parameter",
            "timestep parameter for gravity calculations",
            default_value = 0.03
        )
        handler.add_method_parameter(
            "get_timestep",
            "set_timestep",
            "timestep",
            "timestep for evolve calls",
            default_value = 0.0 | nbody_system.time
        )
        handler.add_method_parameter(
            "get_verbosity_parameter",
            "set_verbosity_parameter",
            "verbosity_parameter",
            "verbosity parameter (0 mean silent)",
            default_value = 0
        )
        handler.add_method_parameter(
            "get_inttype_parameter",
            "set_inttype_parameter",
            "inttype_parameter",
            "integrator method to use",
            default_value = 8
        )
        handler.add_method_parameter(
            "get_begin_time",
            "set_begin_time",
            "begin_time",
            "model time to start the simulation at",
            default_value = 0.0 | nbody_system.time
        )
    def define_methods(self, handler):
        # Attach units / error-code handling to the raw interface calls.
        GravitationalDynamics.define_methods(self, handler)
        handler.add_method(
            "get_eps2",
            (),
            (nbody_system.length * nbody_system.length, handler.ERROR_CODE,)
        )
        handler.add_method(
            "set_eps2",
            (nbody_system.length * nbody_system.length, ),
            (handler.ERROR_CODE,)
        )
        handler.add_method(
            "get_timestep_parameter",
            (),
            (handler.NO_UNIT, handler.ERROR_CODE,)
        )
        handler.add_method(
            "set_timestep_parameter",
            (handler.NO_UNIT, ),
            (handler.ERROR_CODE,)
        )
        handler.add_method(
            "get_timestep",
            (),
            (nbody_system.time, handler.ERROR_CODE,)
        )
        handler.add_method(
            "set_timestep",
            (nbody_system.time, ),
            (handler.ERROR_CODE,)
        )
        handler.add_method(
            "get_inttype_parameter",
            (),
            (handler.NO_UNIT, handler.ERROR_CODE,)
        )
        handler.add_method(
            "set_inttype_parameter",
            (handler.NO_UNIT, ),
            (handler.ERROR_CODE,)
        )
        self.stopping_conditions.define_methods(handler)
    def define_particle_sets(self, handler):
        GravitationalDynamics.define_particle_sets(self, handler)
        self.stopping_conditions.define_particle_set(handler)
    def define_state(self, handler):
        # Energy queries are only legal once the code is in the RUN state.
        GravitationalDynamics.define_state(self, handler)
        handler.add_method('RUN', 'get_kinetic_energy')
        handler.add_method('RUN', 'get_potential_energy')
        self.stopping_conditions.define_state(handler)
| [
"btcook@umich.edu"
] | btcook@umich.edu |
67aa6f8fa5bdeb12b1ed91673b7baa7322637490 | 0608a5858ad9b7804c68de57abe9be0e278a9897 | /venasaur.py | a4aada9d48f3f507c0ab498dd4a5d41a3909b22c | [] | no_license | ryanpoon/Pokemon-Battle-Game | b95f268a5af5e95f6626c8ce812394d221879299 | 46fae9d0a2b1129a380389e5784b7b64adae623f | refs/heads/master | 2020-06-10T20:21:18.623957 | 2017-01-18T22:25:12 | 2017-01-18T22:25:12 | 75,884,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,499 | py | import random
class Venasaur:
poketype = ['Grass', 'Poison']
description = "There is a large flower on Venusaur's back. The flower is said to take on vivid colors if it gets plenty of nutrition and sunlight. The flower's aroma soothes the emotions of people."
pokemon = 'Venasaur'
def __init__(self, name='Venasaur', startcp = None, level=1):
if startcp == None:
startcp = int((random.randint(20, 30)/10)*random.randint(10,level*25))
if name == 'Ivysaur':
name = 'Venasaur'
self.attack = 198 + random.randint(1, 15)
self.defense = 200 + random.randint(1, 15)
self.stamina = 160 + random.randint(1, 15)
self.cp = int((random.randint(12, 18)/10)*startcp)
self.name = name
self.hp = int(self.cp/9)
self.maxhp = int(self.cp/9)
#Generating moves
moves = random.randint(1,7)
if moves == 1:
self.moves = ('Razor Leaf', 'Power Whip')
elif moves == 2:
self.moves = ('Razor Leaf', 'Sludge Bomb')
elif moves == 3:
self.moves = ('Razor Leaf', 'Solar Beam')
elif moves == 4:
self.moves = ('Vine Whip', 'Power Whip')
elif moves == 5:
self.moves = ('Vine Whip', 'Sludge Bomb')
else:
self.moves = ('Vine Whip', 'Solar Beam')
#Generating size stats
self.height = float(random.randint(150, 250))/100
self.weight = float(random.randint(7000,13000))/100
| [
"ryanpoon2004@gmail.com"
] | ryanpoon2004@gmail.com |
37fab74a8dd7f9a2728b0b9ef84f75a1d3b6d256 | 2904b0b42ff2b93b2834fdc79a0e2af9ed733f59 | /source/tests/py_tests/take_operator_test.py | d8b5c6160ea70c84fb0d5c6fdc9a91fb6cf10fba | [
"BSD-3-Clause"
] | permissive | web-slave/U-00DC-Sprache | 02e9a04c31323310b93ecbed1c7d89b762162942 | 9576f93d8cb2abffded1071f9f0328c2385446a8 | refs/heads/master | 2022-10-09T06:16:26.759090 | 2020-05-30T09:09:16 | 2020-05-31T15:51:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,732 | py | from py_tests_common import *
def TakeForPart_Test0():
	"""take() from a struct member: the value moves out, the member is default-reconstructed."""
	c_program_text= """
		// Take from struct.
		struct S
		{
			i32 x;
			fn constructor() ( x= 0 ) {}
			fn constructor( i32 in_x ) ( x= in_x ) {}
			fn destructor() { x= -1; }
			fn constructor( mut this, S &imut other )= delete;
			op=( mut this, S &imut other )= delete;
		}
		struct T{ S s; }
		fn Foo()
		{
			var T mut t{ .s(666) };
			var S s= take(t.s);
			halt if( t.s.x != 0 );
			halt if( s.x != 666 );
		}
	"""
	tests_lib.build_program( c_program_text )
	tests_lib.run_function( "_Z3Foov" )
def TakeForPart_Test1():
	"""take() from an array element: moved out, the element is default-reconstructed."""
	c_program_text= """
		// Take from array.
		struct S
		{
			i32 x;
			fn constructor() ( x= 0 ) {}
			fn constructor( i32 in_x ) ( x= in_x ) {}
			fn destructor() { x= -1; }
			fn constructor( mut this, S &imut other )= delete;
			op=( mut this, S &imut other )= delete;
		}
		fn Foo()
		{
			var [ S, 3 ] mut arr[ (55), (77), (99) ];
			var S s= take(arr[1]);
			halt if( arr[1].x != 0 );
			halt if( s.x != 77 );
		}
	"""
	tests_lib.build_program( c_program_text )
	tests_lib.run_function( "_Z3Foov" )
def TakeForValueVariable_Test0():
	"""take() applied to a temporary value is accepted."""
	c_program_text= """
		// Take temp value.
		struct S
		{
			i32 x;
			fn constructor( i32 in_x ) ( x= in_x ) {}
			fn destructor() { x= -1; }
			fn constructor( mut this, S &imut other )= delete;
			op=( mut this, S &imut other )= delete;
		}
		fn Foo()
		{
			var S s= take(S(56789));
			halt if(s.x != 56789);
		}
	"""
	tests_lib.build_program( c_program_text )
	tests_lib.run_function( "_Z3Foov" )
def TakeForValueVariable_Test1():
	"""take() applied to a value returned from a function call."""
	c_program_text= """
		// Take value, returned from function.
		struct S
		{
			i32 x;
			fn constructor( i32 in_x ) ( x= in_x ) {}
			fn destructor() { x= -1; }
			fn constructor( mut this, S &imut other )= delete;
			op=( mut this, S &imut other )= delete;
		}
		fn GetS() : S
		{
			return S(321);
		}
		fn Foo()
		{
			var S s= take(GetS());
			halt if(s.x != 321);
		}
	"""
	tests_lib.build_program( c_program_text )
	tests_lib.run_function( "_Z3Foov" )
def TakeForValueVariable_Test2():
	"""take() applied to an explicitly moved value."""
	c_program_text= """
		// Take moved value.
		struct S
		{
			i32 x;
			fn constructor( i32 in_x ) ( x= in_x ) {}
			fn destructor() { x= -1; }
			fn constructor( mut this, S &imut other )= delete;
			op=( mut this, S &imut other )= delete;
		}
		fn Foo()
		{
			var S mut s0(5555);
			var S s1= take(move(s0));
			halt if(s1.x != 5555);
		}
	"""
	tests_lib.build_program( c_program_text )
	tests_lib.run_function( "_Z3Foov" )
def TakeForConstReference_Test0():
	"""take() on an immutable struct member must be rejected with ExpectedReferenceValue."""
	c_program_text= """
		// Take from struct.
		struct S
		{
			i32 x;
			fn constructor() ( x= 0 ) {}
			fn constructor( i32 in_x ) ( x= in_x ) {}
			fn destructor() { x= -1; }
			fn constructor( mut this, S &imut other )= delete;
			op=( mut this, S &imut other )= delete;
		}
		struct T{ S s; }
		fn Foo()
		{
			var T t{ .s(666) };
			take(t.s);
		}
	"""
	errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
	assert( len(errors_list) > 0 )
	assert( errors_list[0].error_code == "ExpectedReferenceValue" )
	assert( errors_list[0].file_pos.line == 16 )
def TakeForConstReference_Test1():
	"""take() on an element of an immutable array must be rejected with ExpectedReferenceValue."""
	c_program_text= """
		// Take from array.
		struct S
		{
			i32 x;
			fn constructor() ( x= 0 ) {}
			fn constructor( i32 in_x ) ( x= in_x ) {}
			fn destructor() { x= -1; }
			fn constructor( mut this, S &imut other )= delete;
			op=( mut this, S &imut other )= delete;
		}
		fn Foo()
		{
			var [ S, 3 ] arr[ (1), (2), (3) ];
			take(arr[1]);
		}
	"""
	errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
	assert( len(errors_list) > 0 )
	assert( errors_list[0].error_code == "ExpectedReferenceValue" )
	assert( errors_list[0].file_pos.line == 15 )
def TakenVariableHaveReferences_Test0():
	"""take() is rejected while a reference to the enclosing variable exists."""
	c_program_text= """
		struct S
		{
			i32 x;
			fn constructor( i32 in_x ) ( x= in_x ) {}
			fn constructor( mut this, S &imut other )= delete;
			op=( mut this, S &imut other )= delete;
		}
		struct T{ S s; }
		fn Foo()
		{
			var T mut t{ .s(666) };
			auto& ref= t; // Reference to variable.
			take(t.s);
		}
	"""
	errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
	assert( len(errors_list) > 0 )
	assert( errors_list[0].error_code == "MovedVariableHaveReferences" )
	assert( errors_list[0].file_pos.line == 14 )
def TakenVariableHaveReferences_Test1():
	"""take() is rejected while an immutable reference to the taken member exists."""
	c_program_text= """
		struct S
		{
			i32 x;
			fn constructor( i32 in_x ) ( x= in_x ) {}
			fn constructor( mut this, S &imut other )= delete;
			op=( mut this, S &imut other )= delete;
		}
		struct T{ S s; }
		fn Foo()
		{
			var T mut t{ .s(666) };
			auto& ref= t.s; // Reference to member.
			take(t.s);
		}
	"""
	errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
	assert( len(errors_list) > 0 )
	assert( errors_list[0].error_code == "MovedVariableHaveReferences" )
	assert( errors_list[0].file_pos.line == 14 )
def TakenVariableHaveReferences_Test2():
	"""take() is rejected while a mutable reference to the taken member exists."""
	c_program_text= """
		struct S
		{
			i32 x;
			fn constructor( i32 in_x ) ( x= in_x ) {}
			fn constructor( mut this, S &imut other )= delete;
			op=( mut this, S &imut other )= delete;
		}
		struct T{ S s; }
		fn Foo()
		{
			var T mut t{ .s(666) };
			auto &mut ref= t.s; // Mutable reference to member.
			take(t.s);
		}
	"""
	errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
	assert( len(errors_list) > 0 )
	assert( errors_list[0].error_code == "MovedVariableHaveReferences" )
	assert( errors_list[0].file_pos.line == 14 )
def TakenVariableHaveReferences_Test3():
	"""take() argument may not be referenced by another argument of the same call."""
	c_program_text= """
		struct S
		{
			i32 x;
			fn constructor()( x= 0 ) {}
			fn constructor( i32 in_x ) ( x= in_x ) {}
		}
		struct T{ S s; }
		fn Bar(S &imut a, S b){}
		fn Foo()
		{
			var T mut t{ .s(666) };
			Bar(t.s, take(t.s)); // Reference exists in argument.
		}
	"""
	errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
	assert( len(errors_list) > 1 )
	assert( errors_list[1].error_code == "MovedVariableHaveReferences" )
	assert( errors_list[1].file_pos.line == 13 )
def InnereReferenceTransferedInTakeOperator_Test0():
	"""Inner references travel with the taken value; mutating the referenced variable afterwards must raise ReferenceProtectionError."""
	c_program_text= """
		struct S
		{
			i32& r;
			auto constexpr default_value= 0;
			fn constructor()( r= default_value ) {}
			fn constructor( this'tag0', i32 &'tag1 in_r ) ' tag0 <- tag1 ' ( r= in_r ) {}
		}
		fn Foo()
		{
			var i32 mut x= 0;
			var S mut s0;
			{
				var S mut s1(x);
				s0= take(s1);
			}
			++x; // 's0' contains reference to 'x'
		}
	"""
	errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
	assert( len(errors_list) > 0 )
	assert( errors_list[0].error_code == "ReferenceProtectionError" )
	assert( errors_list[0].file_pos.line == 18 )
| [
"AntiPanzerschrek@yandex.ru"
] | AntiPanzerschrek@yandex.ru |
5333d7458b459a6a072ba62c4edc50cabd724bff | 9af64bf1d6a9fb05f314ae3949e10c1699251491 | /question1_etl.py | 2b2c4e2f4eda78c05d58481876a11a54fb00704f | [] | no_license | Spoetnik1/werkspot_de_assignment | 1e5b9c6a0aa623df30bda3bc6c81c243366133a5 | cb30c0357594689db2071a31ed989fb335fcf46e | refs/heads/main | 2022-12-30T10:45:59.899531 | 2020-10-22T20:23:44 | 2020-10-22T20:23:44 | 306,448,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,189 | py | import mysql.connector
from sqlalchemy import create_engine
import pandas as pd
# sqlalchemy + pymysql give pandas a DB-API engine for the to_sql calls below.
db_connection_str = 'mysql+pymysql://root:root@127.0.0.1/professional_activity'
db_connection = create_engine(db_connection_str)

### Extract data and apply basic transformations
df = pd.read_csv('event_log.csv', sep=';')
df = df.rename(columns={"professional_id_anonymized": "prof_id_anonymized", "created_at": "time_stamp"})
df['time_stamp'] = pd.to_datetime(df.time_stamp, format='%Y-%m-%d %H:%M:%S')

### Perform required transformations
# .copy() so the column assignments below operate on an independent frame
# instead of a view of `df` (avoids the chained-assignment pitfall and
# pandas' SettingWithCopyWarning).
df_proposals = df[df.event_type == 'proposed'].copy()
df_proposals['meta_data'] = df_proposals.meta_data.str.replace('-', ' ')
# meta_data layout: "<service_id>_<name_nl>_<name_en>_<lead_fee>"
df_proposals[['service_id',
              'name_nl',
              'name_en',
              'lead_fee']] = df_proposals.meta_data.str.split('_', expand=True)
df_services = df_proposals[['service_id',
                            'name_nl',
                            'name_en']].drop_duplicates()
df_proposals = df_proposals[['event_id',
                             'prof_id_anonymized',
                             'service_id',
                             'lead_fee',
                             'time_stamp']]
df_account_activity = df[df.event_type.isin(['created_account',
                                             'became_able_to_propose',
                                             'became_unable_to_propose'])]
df_account_activity = df_account_activity.drop('meta_data', axis=1)

### Load results to MySQL
df_account_activity.to_sql('account_status_events',
                           con=db_connection,
                           if_exists='append',
                           chunksize=1000,
                           index=False)
df_services.to_sql('services_info',
                   con=db_connection,
                   if_exists='append',
                   chunksize=1000,
                   index=False)
df_proposals.to_sql('proposal_events',
con=db_connection,
if_exists='append',
chunksize=1000,
index=False) | [
"noreply@github.com"
] | noreply@github.com |
7c96592c9fbdbb9b5aaf8888bb01b0f067723362 | 0480be6e95e56afefed53299352d952f6539d57a | /spe-py-nwsim/event.py | fa110c6330235df4dc3e012ba640a8d9ed34e2e1 | [] | no_license | AntonioLonga/Spe_second_assignment | e077c6db15276355e7f0788ba3d29893b964ae8b | ca879a6e561f987a4461eeae982ef4f64e650639 | refs/heads/master | 2021-09-14T15:56:10.624773 | 2018-05-15T19:47:26 | 2018-05-15T19:47:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,735 | py | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2016 Michele Segata <segata@ccs-labs.org>
from events import Events
class Event:
    """
    A single scheduled occurrence in the simulation: when it fires, what
    kind it is, which module produced it and which module must handle it.
    """
    def __init__(self, event_time, event_type, destination, source, obj=None):
        """
        Creates an event.
        :param event_time: time at which the event should be scheduled
        :param event_type: type of event
        :param destination: destination module that should be notified
        :param source: module generating the event
        :param obj: optional object to be attached to the event
        """
        self.event_time = event_time
        self.event_type = event_type
        self.destination = destination
        self.source = source
        self.obj = obj
    def get_time(self):
        """Scheduled firing time of this event."""
        return self.event_time
    def get_type(self):
        """Event type identifier (one of the Events constants)."""
        return self.event_type
    def get_destination(self):
        """Module that must be notified when the event fires."""
        return self.destination
    def get_source(self):
        """Module that generated the event."""
        return self.source
    def get_obj(self):
        """Payload object attached to the event, or None."""
        return self.obj
    def dump_event(self):
        """
        Prints the event in a human readable format
        """
        print("Event time: %f" % self.event_time)
        t = ""
        # First matching label wins, exactly like the original if/elif chain;
        # an unknown type keeps the empty label.
        for event_code, label in (
            (Events.PACKET_ARRIVAL, "ARRIVAL"),
            (Events.START_TX, "START_TX"),
            (Events.START_RX, "START_RX"),
            (Events.END_TX, "END_TX"),
            (Events.END_RX, "END_RX"),
            (Events.END_PROC, "END_PROC"),
        ):
            if self.event_type == event_code:
                t = label
                break
        print("Event type: %s" % t)
        print("Source node: %d" % self.source.get_id())
        print("Destination node: %d\n" % self.destination.get_id())
| [
"longaantonio@gmail.com"
] | longaantonio@gmail.com |
0d8023f60e892e1cd8258b5296c92061892d15dd | 0ba1ca59a95f3bfbb395aac00d0fb875b47f74d6 | /main_app/migrations/0016_auto_20210208_1756.py | d8ce537aac997788e9d6a0405206544b1a44c8a2 | [] | no_license | mcholbrook/seedling | 8df5102ea259bd14659c802f5f999816ffa17c32 | 1ed160bbdf0dcd8da35feedb43e1572ee5d0c8be | refs/heads/main | 2023-03-01T21:47:53.033588 | 2021-02-12T02:19:25 | 2021-02-12T02:19:25 | 335,401,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | # Generated by Django 3.1.6 on 2021-02-08 17:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations`; avoid editing already-applied
    # migrations -- create a new one instead.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('main_app', '0015_auto_20210208_1702'),
    ]
    operations = [
        # Alters Message.recipient: blank=True makes the field optional in
        # forms while keeping cascade deletion and the 'recipient' reverse
        # accessor on the user model.
        migrations.AlterField(
            model_name='message',
            name='recipient',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='recipient', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"mirandacholbrook@gmail.com"
] | mirandacholbrook@gmail.com |
61e4c35990e787f9e9bb9d2878b778a677f464ba | fb66aa1174f7c2e204af1974b6dd45b7106317f5 | /python_client/protocol/rainbow.py | b975e3f44deb95aa0aa52afeaa07a07970785ceb | [] | no_license | aboychen/xxd | 5b20f21d907bf64c4ac000ece010ca199956b101 | dcfd8fa6d5f7c49ee04dec350233c99a6f49e0d3 | refs/heads/master | 2020-04-08T19:39:43.039713 | 2016-11-11T05:11:27 | 2016-11-11T05:11:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,774 | py | #!/usr/bin/evn python
# -*- coding: utf8 -*-
import struct
import interface
class InfoUp(object):
    """Client request for the module-23 status payload (action 1); no body."""
    _module = 23
    _action = 1
    def __init__(self):
        pass
    def encode(self):
        # Header-only packet: module id followed by action id.
        return bytearray(struct.pack('<BB', self._module, self._action))
    def decode(self, raw_msg):
        # Requests of this type carry no payload, so nothing to parse.
        pass
    @staticmethod
    def size():
        # 2-byte header, no payload.
        return 2
class InfoDown(object):
    """Server reply carrying the module-23 status fields (action 1)."""
    _module = 23
    _action = 1
    # Payload layout after the 2-byte header (little-endian, no padding).
    _PAYLOAD_FMT = '<hbbbhhbh'
    def __init__(self):
        self.segment_num = 0
        self.level_order = 0
        self.status = 0
        self.reset_num = 0
        self.max_segment_can_jump = 0
        self.max_pass_segment = 0
        self.auto_fight_num = 0
        self.buy_times = 0
    def encode(self):
        head = struct.pack('<BB', self._module, self._action)
        body = struct.pack(self._PAYLOAD_FMT,
                           self.segment_num, self.level_order, self.status,
                           self.reset_num, self.max_segment_can_jump,
                           self.max_pass_segment, self.auto_fight_num,
                           self.buy_times)
        return bytearray(head + body)
    def decode(self, raw_msg):
        # raw_msg starts at the payload (header already consumed upstream).
        (self.segment_num, self.level_order, self.status, self.reset_num,
         self.max_segment_can_jump, self.max_pass_segment,
         self.auto_fight_num, self.buy_times) = struct.unpack_from(self._PAYLOAD_FMT, raw_msg, 0)
    @staticmethod
    def size():
        # Payload bytes only: 2+1+1+1+2+2+1+2.
        return 12
class ResetUp(object):
    """Client request to reset (module 23, action 2); header only."""
    _module = 23
    _action = 2
    def __init__(self):
        pass
    def encode(self):
        return bytearray(struct.pack('<BB', self._module, self._action))
    def decode(self, raw_msg):
        # No payload on this request.
        pass
    @staticmethod
    def size():
        return 2
class ResetDown(object):
    """Server acknowledgement for a reset (module 23, action 2); empty body."""
    _module = 23
    _action = 2
    def __init__(self):
        pass
    def encode(self):
        return bytearray(struct.pack('<BB', self._module, self._action))
    def decode(self, raw_msg):
        # No payload on this reply.
        pass
    @staticmethod
    def size():
        # Payload size excluding the header.
        return 0
class AwardInfoUp(object):
    """Client request for award info (module 23, action 3); header only."""
    _module = 23
    _action = 3
    def __init__(self):
        pass
    def encode(self):
        return bytearray(struct.pack('<BB', self._module, self._action))
    def decode(self, raw_msg):
        # No payload on this request.
        pass
    @staticmethod
    def size():
        return 2
class AwardInfoDown(object):
    """Server reply listing award entries (module 23, action 3)."""
    _module = 23
    _action = 3
    def __init__(self):
        self.award = []
    def encode(self):
        # Header, then a one-byte element count, then each element's payload.
        buff = bytearray(struct.pack('<BBB', self._module, self._action, len(self.award)))
        for entry in self.award:
            buff.extend(entry.encode())
        return buff
    def decode(self, raw_msg):
        count = struct.unpack_from("<B", raw_msg, 0)[0]
        offset = 1
        for _ in range(count):
            entry = AwardInfoDownAward()
            entry.decode(raw_msg[offset:])
            offset += entry.size()
            self.award.append(entry)
    def size(self):
        # One byte for the count plus the per-element payload sizes.
        return 1 + sum(entry.size() for entry in self.award)
class AwardInfoDownAward(object):
    """One element of AwardInfoDown.award: a single signed `order` byte.

    Bugfix: encode() used to prepend a module/action header read from
    self._module / self._action, but this nested element class defines
    neither attribute, so encode() raised AttributeError at runtime.
    decode() and size() both treat the payload as exactly one byte, so
    encode() now mirrors them and emits only the `order` byte.
    """
    def __init__(self):
        self.order = 0
    def encode(self):
        return bytearray(struct.pack("<b", self.order))
    def decode(self, raw_msg):
        self.order = struct.unpack_from("<b", raw_msg, 0)[0]
    @staticmethod
    def size():
        # One signed byte.
        return 1
class TakeAwardUp(object):
    """Client request to take the award at (pos1, pos2) (module 23, action 4)."""
    _module = 23
    _action = 4
    def __init__(self):
        self.pos1 = 0
        self.pos2 = 0
    def encode(self):
        return bytearray(struct.pack('<BBbb', self._module, self._action,
                                     self.pos1, self.pos2))
    def decode(self, raw_msg):
        self.pos1, self.pos2 = struct.unpack_from("<bb", raw_msg, 0)
    @staticmethod
    def size():
        # 2-byte header plus two signed position bytes.
        return 4
class TakeAwardDown(object):
    """Server reply to a take-award request: a single next_level flag."""
    _module = 23
    _action = 4
    def __init__(self):
        self.next_level = False
    def encode(self):
        return bytearray(struct.pack('<BB?', self._module, self._action,
                                     self.next_level))
    def decode(self, raw_msg):
        self.next_level = struct.unpack_from("<?", raw_msg, 0)[0]
    @staticmethod
    def size():
        # Payload: one boolean byte.
        return 1
class JumpToSegmentUp(object):
    """Client request to jump to a segment (module 23, action 5)."""
    _module = 23
    _action = 5
    def __init__(self):
        self.segment = 0
    def encode(self):
        return bytearray(struct.pack('<BBh', self._module, self._action,
                                     self.segment))
    def decode(self, raw_msg):
        self.segment = struct.unpack_from("<h", raw_msg, 0)[0]
    @staticmethod
    def size():
        # 2-byte header plus a signed 16-bit segment number.
        return 4
class JumpToSegmentDown(object):
    """Server acknowledgement for a segment jump (module 23, action 5); empty body."""
    _module = 23
    _action = 5
    def __init__(self):
        pass
    def encode(self):
        return bytearray(struct.pack('<BB', self._module, self._action))
    def decode(self, raw_msg):
        # No payload on this reply.
        pass
    @staticmethod
    def size():
        # Payload size excluding the header.
        return 0
class AutoFightUp(object):
    """Client request to auto-fight a segment (module 23, action 6)."""
    _module = 23
    _action = 6
    def __init__(self):
        self.segment = 0
    def encode(self):
        return bytearray(struct.pack('<BBh', self._module, self._action,
                                     self.segment))
    def decode(self, raw_msg):
        self.segment = struct.unpack_from("<h", raw_msg, 0)[0]
    @staticmethod
    def size():
        # 2-byte header plus a signed 16-bit segment number.
        return 4
class AutoFightDown(object):
    """Server reply with auto-fight rewards (module 23, action 6)."""
    _module = 23
    _action = 6
    # Payload after the header: two 32-bit awards plus two box position bytes.
    _PAYLOAD_FMT = '<llbb'
    def __init__(self):
        self.awardCoin = 0
        self.awardExp = 0
        self.awardBoxPos1 = 0
        self.awardBoxPos2 = 0
    def encode(self):
        head = struct.pack('<BB', self._module, self._action)
        body = struct.pack(self._PAYLOAD_FMT,
                           self.awardCoin, self.awardExp,
                           self.awardBoxPos1, self.awardBoxPos2)
        return bytearray(head + body)
    def decode(self, raw_msg):
        (self.awardCoin, self.awardExp,
         self.awardBoxPos1, self.awardBoxPos2) = struct.unpack_from(self._PAYLOAD_FMT, raw_msg, 0)
    @staticmethod
    def size():
        # Payload bytes only: 4+4+1+1.
        return 10
class RainbowModule(interface.BaseModule):
    """Dispatcher for module-23 replies: decodes packets and fans out callbacks."""
    # Action id -> reply decoder class.
    decoder_map = {
        1: InfoDown,
        2: ResetDown,
        3: AwardInfoDown,
        4: TakeAwardDown,
        5: JumpToSegmentDown,
        6: AutoFightDown,
    }
    # NOTE(review): class-level dict is shared by every RainbowModule instance
    # until clear_callback() rebinds it per-instance -- confirm this sharing
    # is intended before relying on multiple module instances.
    receive_callback = {}
    def decode(self, message):
        """Parse a reply: byte 0 is the action id, the rest is its payload."""
        first = message[0]
        # Py2 str indexing yields a 1-char string (needs ord); Py3 bytes
        # indexing already yields an int.
        action = first if isinstance(first, int) else ord(first)
        decoder_maker = self.decoder_map[action]
        msg = decoder_maker()
        msg.decode(message[1:])
        return msg
    def add_callback(self, action, callback):
        # dict.has_key() was removed in Python 3; setdefault is the portable
        # equivalent of the old has_key/append-or-create dance.
        self.receive_callback.setdefault(action, []).append(callback)
    def clear_callback(self):
        self.receive_callback = {}
    def add_info(self, callback):
        self.add_callback(1, callback)
    def add_reset(self, callback):
        self.add_callback(2, callback)
    def add_award_info(self, callback):
        self.add_callback(3, callback)
    def add_take_award(self, callback):
        self.add_callback(4, callback)
    def add_jump_to_segment(self, callback):
        self.add_callback(5, callback)
    def add_auto_fight(self, callback):
        self.add_callback(6, callback)
| [
"tangweichen@163.com"
] | tangweichen@163.com |
5ed07c539c06aabbc980fc17582e082144b08f35 | c59efda0f3aa08b2558d3d8390b807571a12cdb2 | /python/sgdp/make_sgdp_contig_h5.py | 44fbb46c0a580db9169563c28c6abc353f47edde | [] | no_license | gmcvicker/MHC | a6c534a98ca9eb635aa727b1c8020d8ed3632438 | eed39f639cb2d59c10eb17419a1ec9373c12e1c0 | refs/heads/master | 2016-09-15T18:58:41.937586 | 2015-11-17T00:19:23 | 2015-11-17T00:19:23 | 40,500,807 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,323 | py | import sys
import numpy as np
import pysam
import tables
SAMPLES_FILE = "/home/gm114/data/SGDP/C-team-mag/261.samples"
CONTIG_H5 = "/home/gm114/data/SGDP/mag_mhc_contigs.h5"
DEPTH_H5 = "/home/gm114/data/SGDP/mag_mhc_depth.h5"
MHC_START = 29000000
MHC_END = 34000000
N_SAMPLES = 261
def create_contig_h5_file():
    """Create the MHC contig HDF5 file and return its (sites x samples) CArray."""
    # initialize HDF5 output file
    h5f = tables.openFile(CONTIG_H5, "w")
    # One byte per (site, sample) cell, defaulting to '.' (no base recorded).
    atom = tables.UInt8Atom(dflt=ord('.'))
    n_sites = MHC_END - MHC_START + 1
    shape = (n_sites, N_SAMPLES)
    carray = h5f.createCArray(h5f.root, "contigs", atom, shape,
                              filters=tables.Filters(complevel=1, complib="zlib"))
    # NOTE(review): this returns the CArray node while main() calls .close()
    # on the result and treats the sibling depth file as a tables.File --
    # confirm whether the open file handle was intended here instead.
    return carray
def create_depth_h5_file():
    """Create DEPTH_H5 with an int8 (n_sites, N_SAMPLES) "depth" CArray
    and return the open tables.File handle.

    FIX: the original fell through and returned None, but main() calls
    .getNode("/depth") and .close() on the return value, which would
    raise AttributeError on None.
    """
    h5f = tables.openFile(DEPTH_H5, "w")
    atom = tables.Int8Atom(dflt=0)
    n_sites = MHC_END - MHC_START + 1
    shape = (n_sites, N_SAMPLES)
    # Created for its side effect; main() re-fetches the node via getNode.
    h5f.createCArray(h5f.root, "depth", atom, shape,
                     filters=tables.Filters(complevel=1, complib="zlib"))
    return h5f
def main():
    """Tally per-base read depth over the MHC region for every SGDP
    sample BAM listed in SAMPLES_FILE.

    NOTE(review): the original function was truncated/buggy -- the depth
    assignment line was incomplete ("depth_array[start:end] ="), numpy
    was used as `np` without being imported, and the per-sample column
    index was reset to 0 instead of advanced.  Repairs are marked below;
    confirm against the intended analysis.
    """
    import numpy as np  # FIX: `np` was used but never imported at file level

    f = open(SAMPLES_FILE, "r")
    contig_h5 = create_contig_h5_file()
    depth_h5 = create_depth_h5_file()
    depth_carray = depth_h5.getNode("/depth")
    mhc_len = MHC_END - MHC_START + 1
    indv_index = 0
    for l in f:
        words = l.rstrip().split()
        cteam_id = words[1]
        bam_filename = "/home/gm114/data/SGDP/mag_mhc_mapped/%s.sort.bam" % cteam_id
        bamfile = pysam.Samfile(bam_filename, "rb")
        sys.stderr.write("%s\n" % cteam_id)
        depth_array = np.zeros(mhc_len)
        for read in bamfile.fetch("chr6", MHC_START, MHC_END):
            for blk in read.get_blocks():
                # Clamp each aligned block to the MHC window.
                start = blk[0] - MHC_START
                end = blk[1] - MHC_START
                if start < 0:
                    start = 0
                if end < 0:
                    end = 0
                if start > mhc_len:
                    start = mhc_len
                if end > mhc_len:
                    end = mhc_len
                # FIX: original line was truncated; accumulate coverage
                # over the clamped block -- TODO confirm += 1 is intended.
                depth_array[start:end] += 1
                print("%d-%d" % (blk[0], blk[1]))
        # TODO(review): depth_array is never written into depth_carray;
        # presumably depth_carray[:, indv_index] should be assigned here.
        # FIX: advance to the next sample column (original reset it to 0).
        indv_index += 1
        bamfile.close()
    contig_h5.close()
    depth_h5.close()
    f.close()
main()
| [
"gmcvicker@gmail.com"
] | gmcvicker@gmail.com |
5a161ea6d1e896ed253bfc435f525d755fd47e55 | 15595ff491fbddf2518126c3aed1f8b360cbf923 | /public/utils.py | 471aad43606eb45e45032d12a1592a1ccd9deb0d | [] | no_license | TiannaLopes/skateSharpen | 54e171b9347723a4f07eae0ace2f95c85c22dfde | 32bd0b317ea3072364a6f75dcd56408bd17217b0 | refs/heads/master | 2023-06-06T05:22:47.486937 | 2021-06-13T21:32:57 | 2021-06-13T21:32:57 | 376,644,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | def jsend_response(success:bool, payload:dict):
return {
'status': 'success' if success else 'failure',
'data': payload
}
| [
"63357829+TiannaLopes@users.noreply.github.com"
] | 63357829+TiannaLopes@users.noreply.github.com |
1b4daeedaade35bbdf704edc9591e83126e98f90 | 3805c40a5f037cb6439798f4ffc6babc5cddc004 | /dogpile/__init__.py | 345ce1cbe636b5f78423505e2638f10c3e99b39a | [
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | thestick613/dogpile.cache | ba1df3a8e71b7f22cc4b3bc52f32a7d90555b125 | a0939e132dc0964a315137787903a561f5fa5f06 | refs/heads/master | 2022-07-15T13:34:37.923493 | 2022-06-23T08:02:02 | 2022-06-23T08:02:02 | 205,935,922 | 0 | 0 | NOASSERTION | 2022-06-23T08:02:03 | 2019-09-02T21:00:39 | Python | UTF-8 | Python | false | false | 106 | py | __version__ = "0.7.2"
from .lock import Lock # noqa
from .lock import NeedRegenerationException # noqa
| [
"mike_mp@zzzcomputing.com"
] | mike_mp@zzzcomputing.com |
be5c2e81ae57b3cc706e183b70ee73451806702c | f74d1cb7119536594b1c167bec70fdcce9a4016f | /scrapyModel/scrapyModel/spiders/BookSpider.py | 19f110cc649710e742b4c57acc92e5c453e58514 | [] | no_license | Magicxiangye/python_crawler_demo1 | 22f4fe555f879ade8637509cbc73ded3397fe542 | 06aa2afba6797413c4eefa8a25616e615b7f754b | refs/heads/master | 2023-06-05T00:12:42.062902 | 2021-06-29T16:03:13 | 2021-06-29T16:03:13 | 314,501,775 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,386 | py | # 使用的是Scrapy框架来进行book网页的书籍爬取
import scrapy
import os
import sys
# sys.path.append(r"E:\PycharmProject\python_crawler_demo1\scrapyModel\scrapyModel")
current_directory = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath(os.path.dirname(current_directory) + os.path.sep + ".")
sys.path.append(root_path)
from scrapyModel.items import BookItem
# 继承scrapy.Spider
class BookSpider(scrapy.Spider):
    """Spider that crawls books.toscrape.com, yielding name/price items."""
    # `name` uniquely identifies this spider inside the project; it is the
    # handle used on the command line, e.g.:
    #   scrapy crawl books -o books.csv
    name = "books"
    # Crawl entry points; there may be several, here just one.
    start_urls = ['http://books.toscrape.com/']
    # After a page is downloaded, Scrapy invokes the page-parse callback
    # (parse() by default).  A parse callback typically:
    #   1. extracts the wanted fields (via XPath or CSS selectors), and
    #   2. extracts links and schedules follow-up download requests.
    # It is usually written as a generator: every scraped item and every
    # follow-up Request is handed to the Scrapy engine with `yield`.
    def parse(self, response):
        # Extract data: each book sits in an <article class="product_pod">;
        # select them all with css() and iterate.
        for book in response.css('article.product_pod'):
            name = book.xpath('./h3/a/@title').extract_first()
            price = book.css('p.price_color::text').extract_first()
            yield {
                'name': name,
                'price': price,
            }
        # Extract the next-page link from ul.pager > li.next > a, e.g.
        #   <li class="next"><a href="catelogue/page-2.html">next</a></li>
        next_url = response.css('ul.pager li.next a::attr(href)').extract_first()
        if next_url:
            # If a next page exists, build its absolute URL and schedule
            # it with the same callback.
            next_url = response.urljoin(next_url)
            yield scrapy.Request(next_url, callback=self.parse)
| [
"30616925+Magicxiangye@users.noreply.github.com"
] | 30616925+Magicxiangye@users.noreply.github.com |
614999da01d0970b0db97ba37e2e14bbb3af0abf | b6901ce5a4cd3fdcc2e4bdb29d54c34caf294fea | /eval.py | 9136298ce347da4fee3a48d2b41860b357575546 | [
"MIT"
] | permissive | ComputationalRadiationPhysics/InFlow | d00d22174b998602021ad4385bbf1b83820859ec | d10d219b26273029dab3d5ee7c422d839fbbdb2c | refs/heads/main | 2023-06-14T06:02:56.953578 | 2021-07-01T13:33:07 | 2021-07-01T13:33:07 | 319,951,364 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,184 | py |
# coding: utf-8
######################################################################################
#
##################Out of distribution detection using InFlow##########################
# This notebook will demonstrate the out-of-distribution (OoD) detection using InFlow
#
######################################################################################
# import packges
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')
import torch
import torch.nn as nn
import torch.utils.data
import numpy as np
from tqdm import tqdm
import sys, os
from INN.modules import PermuteRandom, Concat
from INN.framework import InputNode, OutputNode, ConditionNode, Node, ReversibleGraphNet, GraphINN
from alibi_detect.cd import MMDDrift
from alibi_detect.cd.pytorch import preprocess_drift
from torch import distributions
import torchvision
import torchvision.transforms as transforms
import data
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
from typing import Callable, Union, Tuple, Iterable, List
from torch import Tensor
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
import tensorflow as tf
from sklearn import metrics
from ood_metrics import fpr_at_95_tpr
import math
#############################################################################################################################
# set random seed and hyperparameters
seed = 0
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
print(device)
sub_n = 10
p_value = 0.05
batch_size = 250
num_workers = 8
n_dim = 3 # number of dimensions of the RGB image
c_in = 3 # number of input channels
c_out = 3 # number of output channels
encoding_dim = 32
gpu_ids = [0]
#############################################################################################################################
# import CIFAR 10 dataset
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.cifar10.load_data()
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255
y_train = y_train.astype('int64').reshape(-1,)
y_test = y_test.astype('int64').reshape(-1,)
#############################################################################################################################
# choose the subset of CIFAR 10 training images as in-distribution samples for training
n_data = X_train.shape[0]
idx = np.random.choice(n_data, size=n_data // sub_n, replace=False)
idx_h0 = np.delete(np.arange(n_data), idx, axis=0)
X_ref = X_train[idx]
print(X_ref.shape)
#permute the CIFAR 10 channels to fit as a pytorch tensor
def permute_c(x):
    """Convert an NHWC image batch to float32 NCHW (PyTorch layout)."""
    as_float = x.astype(np.float32)
    return as_float.transpose(0, 3, 1, 2)
X_ref_pt = permute_c(X_ref)
#############################################################################################################################
# define encoder architecture
encoder_net = nn.Sequential(
nn.Conv2d(3, 64, 4, stride=2, padding=0),
nn.ReLU(),
nn.Conv2d(64, 128, 4, stride=2, padding=0),
nn.ReLU(),
nn.Conv2d(128, 512, 4, stride=2, padding=0),
nn.ReLU(),
nn.Flatten(),
nn.Linear(2048, encoding_dim)
).to(device).eval()
# define preprocessing function
preprocess_fn = partial(preprocess_drift, model=encoder_net, device=device, batch_size=512)
# initialise the attention mechanism
cd = MMDDrift(X_ref_pt, backend='pytorch', p_val=.05,
preprocess_fn=preprocess_fn, n_permutations=100)
#############################################################################################################################
# import CIFAR training dataset
train_cifar10_dataset = torchvision.datasets.ImageFolder(root='/home/......../CIFAR 10/test/',
transform=transforms.Compose([transforms.Resize((32,32)),
transforms.CenterCrop(32),
transforms.ToTensor(),]))
train_cifar10_loader = torch.utils.data.DataLoader(train_cifar10_dataset,batch_size=batch_size,shuffle=False,
num_workers=num_workers,pin_memory=True)
#############################################################################################################################
# import CIFAR testing dataset
test_cifar10_dataset = torchvision.datasets.ImageFolder(root='/home/......../CIFAR 10/test/',
transform=transforms.Compose([transforms.Resize((32,32)),
transforms.CenterCrop(32),
transforms.ToTensor(),]))
test_cifar10_loader = torch.utils.data.DataLoader(test_cifar10_dataset,batch_size=batch_size,shuffle=False,
num_workers=num_workers,pin_memory=True)
#############################################################################################################################
# import CelebA testing dataset
test_celeba_dataset = torchvision.datasets.ImageFolder(root='/home/......../CelebA/test/',
transform=transforms.Compose([transforms.Resize((32,32)),
transforms.CenterCrop(32),
transforms.ToTensor(),]))
test_celeba_loader = torch.utils.data.DataLoader(test_celeba_dataset,batch_size=batch_size,shuffle=False,
num_workers=num_workers,pin_memory=True)
#############################################################################################################################
# import SVHN testing dataset
test_svhn_dataset = torchvision.datasets.ImageFolder(root='/home/......../SVHN/test/',
transform=transforms.Compose([transforms.Resize((32,32)),
transforms.CenterCrop(32),
transforms.ToTensor(),]))
test_svhn_loader = torch.utils.data.DataLoader(test_svhn_dataset,batch_size=batch_size,shuffle=False,
num_workers=num_workers,pin_memory=True)
#############################################################################################################################
# import MNIST testing dataset
test_mnist_dataset = torchvision.datasets.ImageFolder(root='/home/......../MNIST/test/',
transform=transforms.Compose([transforms.Resize((32,32)),
transforms.CenterCrop(32),
transforms.ToTensor(),]))
test_mnist_loader = torch.utils.data.DataLoader(test_mnist_dataset,batch_size=batch_size,shuffle=False,
num_workers=num_workers,pin_memory=True)
#############################################################################################################################
# import FashionMNIST testing dataset
test_fashionmnist_dataset = torchvision.datasets.ImageFolder(root='/home/......../FashionMNIST/test/',
transform=transforms.Compose([transforms.Resize((32,32)),
transforms.CenterCrop(32),
transforms.ToTensor(),]))
test_fashionmnist_loader = torch.utils.data.DataLoader(test_fashionmnist_dataset,batch_size=batch_size,shuffle=False,
num_workers=num_workers,pin_memory=True)
#############################################################################################################################
use_cuda = torch.cuda.is_available()
dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
#############################################################################################################################
#define the sub networks 's' and 't' architecture
def subnet_conv(c_in, c_out):
    """Build the coupling subnet used for both 's' and 't':
    Conv3x3(c_in -> 256) -> ReLU -> Conv3x3(256 -> c_out).
    padding=1 keeps the spatial size unchanged.
    """
    hidden = 256
    layers = [
        nn.Conv2d(c_in, hidden, 3, padding=1),
        nn.ReLU(),
        nn.Conv2d(hidden, c_out, 3, padding=1),
    ]
    return nn.Sequential(*layers)
#############################################################################################################################
#construct the Invertible module for building the flow architecture
class InvertibleModule(nn.Module):
    """Base class for invertible network modules.

    Subclasses must implement forward(...) (with rev/jac flags) and
    output_dims(...); this base class only records the input and
    condition dimension lists.
    """

    def __init__(self, dims_in: Iterable[Tuple[int]],
                 dims_c: Iterable[Tuple[int]] = None):
        super().__init__()
        # Keep concrete lists; a None condition means "no conditions".
        self.dims_in = list(dims_in)
        self.dims_c = [] if dims_c is None else list(dims_c)

    def forward(self, x_or_z: Iterable[Tensor], c: Iterable[Tensor] = None,
                rev: bool = False, jac: bool = True) \
            -> Tuple[Tuple[Tensor], Tensor]:
        """Must be overridden: run the module, optionally in reverse."""
        raise NotImplementedError(
            f"{self.__class__.__name__} does not provide forward(...) method")

    def jacobian(self, *args, **kwargs):
        """Deprecated entry point kept only to fail loudly."""
        raise DeprecationWarning("module.jacobian(...) is deprecated. "
                                 "module.forward(..., jac=True) returns a "
                                 "tuple (out, jacobian) now.")

    def output_dims(self, input_dims: List[Tuple[int]]) -> List[Tuple[int]]:
        """Must be overridden: map input dims to output dims."""
        raise NotImplementedError(
            f"{self.__class__.__name__} does not provide output_dims(...)")
#############################################################################################################################
#construct the base coupling block
class _BaseCouplingBlock(InvertibleModule):
    # Shared plumbing for two-way coupling blocks: splits the input
    # channels into two halves, validates/records condition dimensions,
    # and sets up the soft-clamping applied to the scale subnet outputs.
    def __init__(self, dims_in, dims_c=[],
                 clamp: float = 2.,
                 clamp_activation: Union[str, Callable] = "ATAN"):
        super().__init__(dims_in, dims_c)
        self.channels = dims_in[0][0] # if input is 3 channels then it would be 3
        # ndims means the rank of tensor strictly speaking.
        # i.e. 1D, 2D, 3D tensor, etc.
        self.ndims = len(dims_in[0])
        self.split_len1 = self.channels // 2 # if input 3 channels then len1 = 1
        self.split_len2 = self.channels - self.channels // 2 # if input 3 channels then len2 = 2
        self.clamp = clamp
        # Every condition must match the input's non-channel dims.
        assert all([tuple(dims_c[i][1:]) == tuple(dims_in[0][1:]) for i in range(len(dims_c))]), "Dimensions of input and one or more conditions don't agree."
        self.conditional = (len(dims_c) > 0)
        self.condition_length = sum([dims_c[i][0] for i in range(len(dims_c))])
        # f_clamp squashes subnet scale outputs into (-1, 1) before being
        # multiplied by `clamp`, keeping exp(scale) numerically stable.
        if isinstance(clamp_activation, str):
            if clamp_activation == "ATAN":
                self.f_clamp = (lambda u: 0.636 * torch.atan(u))
            elif clamp_activation == "TANH":
                self.f_clamp = torch.tanh
            elif clamp_activation == "SIGMOID":
                self.f_clamp = (lambda u: 2. * (torch.sigmoid(u) - 0.5))
            else:
                raise ValueError(f'Unknown clamp activation "{clamp_activation}"')
        else:
            self.f_clamp = clamp_activation
    def forward(self, x, c=[], rev=False, jac=True):
        '''See base class docstring'''
        # notation:
        # x1, x2: two halves of the input
        # y1, y2: two halves of the output
        # *_c: variable with condition concatenated
        # j1, j2: Jacobians of the two coupling operations
        x1, x2= torch.split(x[0], [self.split_len1, self.split_len2], dim=1)
        # Scalar gate read from one element of the first condition tensor;
        # assumes a 4-D, constant-valued condition is always supplied
        # (indexing fails otherwise) -- TODO confirm with callers.
        s = c[0][0,0,0,0]
        if not rev:
            x2_c = torch.cat([x2, *c], 1) if self.conditional else x2
            y1, j1 = self._coupling1(s,x1, x2_c)
            y1_c = torch.cat([y1, *c], 1) if self.conditional else y1
            y2, j2 = self._coupling2(s,x2, y1_c)
        else:
            # names of x and y are swapped for the reverse computation
            x1_c = torch.cat([x1, *c], 1) if self.conditional else x1
            y2, j2 = self._coupling2(s,x2, x1_c, rev=True)
            y2_c = torch.cat([y2, *c], 1) if self.conditional else y2
            y1, j1 = self._coupling1(s,x1, y2_c, rev=True)
        return (torch.cat((y1, y2), 1),), j1 + j2
    def _coupling1(self,s, x1, u2, rev=False):
        # Subclass hook: transform half 1 given half 2 (+ condition).
        raise NotImplementedError()
    def _coupling2(self, s,x2, u1, rev=False):
        # Subclass hook: transform half 2 given half 1 (+ condition).
        raise NotImplementedError()
    def output_dims(self, input_dims):
        '''See base class for docstring'''
        if len(input_dims) != 1:
            raise ValueError("Can only use 1 input")
        return input_dims
#############################################################################################################################
#construct the InFlow coupling block
class InFlowCouplingBlock(_BaseCouplingBlock):
    # Affine coupling block gated by the scalar condition `s`:
    # forward computes y = exp(s * scale) * x + s * shift, so s == 0
    # turns the block into the identity and s == 1 gives a standard
    # affine coupling.
    def __init__(self, dims_in, dims_c=[],
                 subnet_constructor: Callable = None,
                 clamp: float = 2.,
                 clamp_activation: Union[str, Callable] = "ATAN"):
        super().__init__(dims_in, dims_c, clamp, clamp_activation)
        # Four subnets: scale/shift for each half, each fed the other
        # half concatenated with the condition channels.
        self.subnet_s1 = subnet_constructor(self.split_len1 + self.condition_length, self.split_len2)
        self.subnet_t1 = subnet_constructor(self.split_len1 + self.condition_length, self.split_len2)
        self.subnet_s2 = subnet_constructor(self.split_len2 + self.condition_length, self.split_len1)
        self.subnet_t2 = subnet_constructor(self.split_len2 + self.condition_length, self.split_len1)
    def _coupling1(self, s, x1, u2, rev=False):
        s2, t2 = self.subnet_s2(u2), self.subnet_t2(u2)
        # Soft-clamp the log-scale for numerical stability.
        s2 = self.clamp * self.f_clamp(s2)
        # Jacobian term from the un-gated log-scale; NOTE(review): the
        # gate `s` is not folded into j1, and the reverse path below
        # ignores `s` entirely, so rev only inverts forward when s == 1.
        # Callers appear to drop the Jacobian when s == 0 -- confirm.
        j1 = torch.sum(s2, dim=tuple(range(1, self.ndims + 1)))
        if rev:
            y1 = (x1 - t2) * torch.exp(-s2)
            return y1, -j1
        else:
            y1 = torch.exp(s * s2) * x1 + s * t2
            return y1, j1
    def _coupling2(self, s, x2, u1, rev=False):
        s1, t1 = self.subnet_s1(u1), self.subnet_t1(u1)
        s1 = self.clamp * self.f_clamp(s1)
        j2 = torch.sum(s1, dim=tuple(range(1, self.ndims + 1)))
        if rev:
            y2 = (x2 - t1) * torch.exp(-s1)
            return y2, -j2
        else:
            y2 = torch.exp(s * s1) * x2 + s * t1
            return y2, j2
#############################################################################################################################
#Stack all the coupling blocks including the permute blocks and the conditional nodes
in1 = InputNode(3,32,32, name='input1')
cond = ConditionNode(3,32,32, name='Condition')
layer1 = Node(in1,InFlowCouplingBlock,{'subnet_constructor':subnet_conv, 'clamp':2.0},conditions=cond,name=F'coupling_{0}')
layer2 = Node(layer1, PermuteRandom,{'seed':0}, name=F'permute_{0}')
layer3 = Node(layer2,InFlowCouplingBlock,{'subnet_constructor':subnet_conv, 'clamp':2.0},conditions=cond,name=F'coupling_{1}')
layer4 = Node(layer3,PermuteRandom,{'seed':1},name=F'permute_{1}')
out1 = OutputNode(layer4, name='output1')
model = GraphINN([in1, cond, layer1, layer2, layer3, layer4, out1]).cuda()
#############################################################################################################################
#Load thetrained InFlow Model
state_dicts = torch.load('/home/......./ckptdir/199.pt')
model.load_state_dict(state_dicts['net'])
#############################################################################################################################
# Define the inference scheme for unknown test samples
dist = math.sqrt(2) * tf.math.erfinv(1- p_value).numpy()
D = 3 * 32 * 32
prior = distributions.MultivariateNormal(torch.zeros(D).cuda(),
dist * torch.eye(D).cuda())
def get_loss_vals_trained(loader, net, cd, batch_size):
    """Score every batch in *loader* with the conditional flow *net*.

    For each batch the MMD drift detector *cd* decides whether the
    batch is in-distribution (p >= 0.05) or OOD (p < 0.05).  That
    decision is fed to the flow as a constant condition tensor (all 1s
    for in-distribution, all 0s for OOD) and controls whether the
    log|det J| term enters the log-likelihood (omitted for OOD batches,
    where the gated flow acts as the identity).

    Returns (loss_vals, cx): per-sample log-likelihood scores and the
    per-batch condition labels.
    """
    loss_vals = []
    cx = []
    with torch.no_grad():
        with tqdm(total=len(loader.dataset)) as progress_bar:
            for x, _ in loader:
                x_numpy = x.detach().cpu().numpy()
                pval = cd.predict(x_numpy, return_p_val=True)['data']['p_val']
                # In-distribution iff the drift test does NOT reject.
                in_dist = pval >= 0.05
                fill = 1. if in_dist else 0.
                x_c = torch.full((batch_size, 3, 32, 32), fill).cuda()
                cx.append(np.full(batch_size, fill))
                x = x.cuda()
                # Forward pass through the conditional flow.
                # FIX: the original called the global `model`, ignoring
                # the `net` argument it was given.
                z, log_jac_det = net(x, x_c)
                z = z.reshape((z.shape[0], -1))
                prior_ll = prior.log_prob(z)
                if in_dist:
                    losses = prior_ll - log_jac_det
                else:
                    # Identity (gated-off) branch: Jacobian term dropped.
                    losses = prior_ll
                loss_vals.extend([loss.item() for loss in losses])
                progress_bar.update(x.size(0))
    return np.array(loss_vals), cx
#############################################################################################################################
#define the inference scheme for the in-distribution CIFAR 10 samples
def get_loss_vals_ones(loader, net, cd, batch_size):
    """Score every batch assuming it is in-distribution (condition = 1s).

    Used for the CIFAR-10 training reference scores.  Returns
    (loss_vals, cx) exactly like get_loss_vals_trained.
    """
    loss_vals = []
    cx = []
    with torch.no_grad():
        with tqdm(total=len(loader.dataset)) as progress_bar:
            for x, _ in loader:
                # NOTE(review): the drift p-value is computed but never
                # used in this function; kept for parity with the
                # original behavior -- consider removing the cd.predict
                # call if it has no side effects you rely on.
                x_numpy = x.detach().cpu().numpy()
                pval = cd.predict(x_numpy, return_p_val=True)['data']['p_val']
                x_c = torch.ones(batch_size, 3, 32, 32).cuda()
                cx.append(np.ones(batch_size))
                x = x.cuda()
                # Forward pass through the conditional flow.
                # FIX: the original called the global `model`, ignoring
                # the `net` argument it was given.
                z, log_jac_det = net(x, x_c)
                z = z.reshape((z.shape[0], -1))
                prior_ll = prior.log_prob(z)
                losses = prior_ll - log_jac_det
                loss_vals.extend([loss.item() for loss in losses])
                progress_bar.update(x.size(0))
    return np.array(loss_vals), cx
#############################################################################################################################
#Start inference and calculate the log-likelihood scores
train_cifar10_loss_vals_attention, cx = get_loss_vals_ones(train_cifar10_loader, model, cd, batch_size)
test_cifar10_loss_vals_attention, cx = get_loss_vals_trained(test_cifar10_loader, model, cd, batch_size)
test_celeba_loss_vals_attention, cx = get_loss_vals_trained(test_celeba_loader, model, cd, batch_size)
test_svhn_loss_vals_attention, cx = get_loss_vals_trained(test_svhn_loader, model, cd, batch_size)
test_mnist_loss_vals_attention, cx = get_loss_vals_trained(test_mnist_loader, model, cd, batch_size)
test_fashionmnist_loss_vals_attention, cx = get_loss_vals_trained(test_fashionmnist_loader, model, cd, batch_size)
#############################################################################################################################
#definition for plotting the ROC Curve
def plot_roc_curve(fpr, tpr):
    """Draw the ROC curve with the chance diagonal and display it."""
    # The ROC curve itself.
    plt.plot(fpr, tpr, color='orange', label='ROC')
    # Chance-level reference diagonal.
    plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
    axis_labels = ('False Positive Rate', 'True Positive Rate')
    plt.xlabel(axis_labels[0])
    plt.ylabel(axis_labels[1])
    plt.title('Receiver Operating Characteristic (ROC) Curve')
    plt.legend()
    plt.show()
#############################################################################################################################
#Calculate the AUCROC, AUCPR and FPR95 scores for CelebA test dataset (Note: Follow similar steps for the other datasets
combined = np.concatenate((train_cifar10_loss_vals_attention, test_celeba_loss_vals_attention))
label_1 = np.ones(len(train_cifar10_loss_vals_attention))
label_2 = np.zeros(len(test_celeba_loss_vals_attention))
label = np.concatenate((label_1, label_2))
fpr, tpr, thresholds = metrics.roc_curve(label, combined, pos_label=0)
precision, recall, thresholds_ = metrics.precision_recall_curve(label, combined, pos_label=0)
plot_roc_curve(fpr, tpr)
rocauc = metrics.auc(fpr, tpr)
aucpr = metrics.auc(recall, precision)
print('AUCROC for CelebA OOD: ', rocauc)
print('AUCPR for CelebA OOD: ', aucpr)
print('FPR95 for CelebA OOD: ', 1- fpr_at_95_tpr(combined, label))
#############################################################################################################################
| [
"nishant.kumar@mailbox.tu-dresden.de"
] | nishant.kumar@mailbox.tu-dresden.de |
d1d9d03d08324df4aefc9c1fbbf15156ae007e15 | 6e437e2dff4ce620e181513d5327a2b271a08b0e | /sampleScripts/constantsLocalizer.py | ff6174cd343a1216b05dc4c72c14b9fa457bcfd9 | [] | no_license | samyag1/ExperimentFramework | 9b710c57aaa4116ac8fb2fe9716c8aa497312a9a | f2a7e74896d118d0457113766e698458906dbff2 | refs/heads/master | 2021-01-10T21:31:13.788196 | 2011-11-16T04:06:43 | 2011-11-16T04:06:43 | 2,784,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | # Localizer constants
OBJECT_LOCALIZER_RUN_NOS = [1,2]
RETINOTOPY_LOCALIZER_RUN_NOS = [3,4,5,6]
LOCALIZER_DUMMIES = 5
LOCALIZER_OBJECTS_PATH = 'objects'
LOCALIZER_OBJECTS_ORIGINALS_PATH = "originals"
LOCALIZER_OBJECTS_CREATEFILES_PATH = 'createFiles'
LOCALIZER_RETINOTOPY_PATH = 'retinotopy'
LOCALIZER_RET_CCW_PATH = 'counterClockwise'
LOCALIZER_RET_CW_PATH = 'clockwise'
LOCALIZER_RET_CONCENTRIC_PATH = 'concentric'
LOCALIZER_RET_EXPANDING_PATH = 'expanding'
LOCALIZER_OBJECT_BREAK_LENGTH = 160 # taken from the Gallant lab original stimOrder files
LOCALZER_OBJECT_FOLDERS = ['Body_parts', 'Faces', 'Objects', 'Scenes', 'Scrambled_objects']
# Stim creation constants
BLANK_FRAME_FILENAME = 'blankImage.jpg'
GROUPINGS = 4
IMAGES_PER_FOLDER = 80
IMAGES_PER_GROUPING = IMAGES_PER_FOLDER / GROUPINGS
GROUPING_ORDERINGS = [(1,2,3,4,5),(5,1,3,2,4),(4,1,5,2,3),(3,1,4,2,5)]
TYPE1INDEX1_FILENAME = 'Type1Index1List.csv'
| [
"samyag1@berkeley.edu"
] | samyag1@berkeley.edu |
e2ad3046de681b93c653af54d57e2a3159bffe5a | 3260f21b7d1508d50d6f790905355e48f9f97ac5 | /zmeiduo/apps/verifications/views.py | cb29726a3b7ffd95d3ccb3842e47d81a31bc10d9 | [] | no_license | qqqcai/md_project | 75ee5e65cf8c04d2104b40a54ced7cb38fcf89e7 | f375465fd07d5b3047602e99b8016be5c80711f9 | refs/heads/master | 2022-01-16T19:53:54.818362 | 2019-06-15T08:35:27 | 2019-06-15T08:35:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,460 | py | from random import randint
from django.views import View
from django.http import HttpResponse, JsonResponse
from django_redis import get_redis_connection
from . import constants
from verifications.libs.captcha.captcha import captcha
from zmeiduo.utils.response_code import RETCODE
# from celery_tasks.sms.tasks import send_sms_code
class ImageCodeView(View):
    """Serve a CAPTCHA image tied to a client-generated UUID."""
    def get(self, request, uuid):
        """
        :param request: the request object
        :param uuid: unique id tying this image code to one user
        :return: image/jpg response
        """
        # Generate the captcha: `name` is a unique identifier, `text` the
        # expected answer, `image` the rendered JPEG bytes.
        name, text, image = captcha.generate_captcha()
        # Store the answer under the "verify_code" redis alias
        # (configured in settings) for later verification.
        redis_conn = get_redis_connection("verify_code")
        # Second argument is the TTL in seconds.
        redis_conn.setex('img_%s' % uuid, constants.IMAGE_CODE_REDIS_EXPIRES, text)
        # Respond with the raw image bytes.
        return HttpResponse(image, content_type='image/jpg')
class SMSCodeView(View):
    """Send an SMS verification code (throttled, captcha-gated)."""
    def get(self, request, mobile):
        # Throttle: if a send-flag for this mobile still lives in redis,
        # refuse and report the remaining cool-down in seconds.
        redis_conn = get_redis_connection("verify_code")
        send_flag = redis_conn.get(f"send_flag_{mobile}")
        if send_flag:
            sms_code_delay = redis_conn.ttl(f"send_flag_{mobile}")
            print("短信再获取剩余时间:", sms_code_delay)
            print(type(sms_code_delay))
            # Return the remaining seconds for a better user experience.
            return JsonResponse({'code':RETCODE.THROTTLINGERR, 'errmsg':"获取短信验证码过于频繁,请稍后再试", 'sms_code_delay':sms_code_delay})
        # Fetch query parameters.
        image_code_client = request.GET.get("image_code")
        uuid = request.GET.get("uuid")
        # Verify all required parameters are present.
        if not all([image_code_client, uuid]):
            return JsonResponse({"code": RETCODE.NECESSARYPARAMERR, "errmsg": "缺少必要的参数"})
        # Compare the stored image code with what the client submitted;
        # bail out on any mismatch or expiry.
        image_code_server = redis_conn.get("img_%s" % uuid)
        if image_code_server is None:
            return JsonResponse({"code": RETCODE.IMAGECODEERR, "errmsg": "图形验证码失效"})
        # Delete the image code so it cannot be replayed maliciously.
        redis_conn.delete("img_%s" % uuid)
        if image_code_server.decode().lower() != image_code_client.lower():
            return JsonResponse({"code": RETCODE.IMAGECODEERR, "errmsg": "图形验证码错误"})
        # Generate a 6-digit random code (zero-padded).
        sms_code = "%06d" % randint(0, 999999)
        # Store the SMS code and the throttle flag atomically via a pipeline.
        pl = redis_conn.pipeline()
        pl.setex(f"sms_{mobile}", constants.SMS_CODE_REDIS_EXPIRES, sms_code)
        pl.setex(f"send_flag_{mobile}", constants.SEND_SMS_CODE_INTERVAL, 1)
        pl.execute()
        print(f"生成了短信随机码 {sms_code} ")
        # Send the verification code to `mobile`.
        # sendTemplateSMS(phone, [sms code, expiry shown in SMS - minutes], provider template)
        # Without Celery, use this:
        # result = sendTemplateSMS(mobile, [sms_code, constants.SMS_CODE_REDIS_EXPIRES // 60], constants.SEND_SMS_TEMPLATE_ID)
        # With Celery, use this (produces an async task):
        # send_sms_code.delay(mobile, sms_code)
        return JsonResponse({"code": RETCODE.OK, "errmsg": "发送短信成功"})
| [
"qqqcai@126.com"
] | qqqcai@126.com |
67a0bd9928efe52cd33a51befd468b75aed79b57 | 0a5d95b6d01465ddb68e94d426c2feee4c28bc64 | /shi_tomasi_corner_detection.py | dbabc00a28c407624cd21c50c80eb123ace77e56 | [] | no_license | vandita-chapadia/OPENCV_FUNDAMENTALS | f91bb49b477717f1da867d537030822566b61142 | bcfd34f75e72f0a3ae5b2d6c4558b5de86f13ced | refs/heads/main | 2023-05-08T00:50:15.281299 | 2021-06-10T09:54:42 | 2021-06-10T09:54:42 | 375,647,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | import cv2
import numpy as np
img = cv2.imread('shapes.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(gray , 30 , 0.01 , 10)
corners = np.int0(corners)
for i in corners:
x,y = i.ravel()
cv2.circle(img , (x,y) , 3 , (0,225,255) , -1)
cv2.imshow("image",img)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"vanditachapadia296@gmail.com"
] | vanditachapadia296@gmail.com |
df42fb81ab121a9776879d10e34a82753afc05d5 | 8cf5d738aa1bf604c1215bff0e57aef0218a5194 | /0x1F-pascal_triangle/0-pascal_triangle.py | 570ddb16f491d2e0ae1e2b7f26f319cb0f7f6d38 | [] | no_license | PilarPinto/holbertonschool-interview | 3493bdb41fbc437e4dcf58db99cebcc350c2029f | b58bbce825426e9a15fee67dec65768f0ae0d724 | refs/heads/master | 2023-07-13T09:28:56.071905 | 2021-08-27T03:29:44 | 2021-08-27T03:29:44 | 281,306,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | #!/usr/bin/python3
'''
Module where the integers representing the Pascal’s triangle
'''
def pascal_triangle(n):
    '''Return the first n rows of Pascal's triangle as a list of lists.

    For n <= 0 an empty list is returned.  Row i (0-based) has i + 1
    entries; each interior entry is the sum of the two entries above it.
    Replaces the original index-based map/lambda construction with an
    equivalent, idiomatic pairwise-sum comprehension.
    '''
    if n <= 0:
        return []
    rows = [[1]]
    for _ in range(1, n):
        prev = rows[-1]
        # Interior entries are pairwise sums of the previous row.
        rows.append([1] + [a + b for a, b in zip(prev, prev[1:])] + [1])
    return rows
| [
"piapintoch@unal.edu.co"
] | piapintoch@unal.edu.co |
12e23d45d86604712c62c27d9d5d24bbd21d6e2f | c325866c577343752f0d4394c3d96e599674df0e | /models/nosis_configuracion.py | b133bf8a84cf2a4d4f2ff5dd7f1a714f0cc0ee4e | [] | no_license | levislibra/financiera_nosis | ff11f4f8417917d48220d40c1524f91d5f1a4d24 | 3227e9258e2f8519880081232070734e929af3f8 | refs/heads/master | 2023-01-05T20:23:01.509995 | 2022-12-22T18:33:05 | 2022-12-22T18:33:05 | 236,527,122 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,340 | py | # -*- coding: utf-8 -*-
from openerp import models, fields, api
from datetime import datetime, timedelta
from dateutil import relativedelta
from openerp.exceptions import UserError, ValidationError
import time
import requests
ENDPOINT_NOSIS = 'https://ws01.nosis.com/rest/variables'
class FinancieraNosisConfiguracion(models.Model):
_name = 'financiera.nosis.configuracion'
name = fields.Char('Nombre')
usuario = fields.Char('Usuario')
token = fields.Char('Token')
id_informe = fields.Integer('Id proximo informe', default=1)
id_cuestionario = fields.Integer('Id proximo cuestionario', default=1)
ejecutar_cda_al_solicitar_informe = fields.Boolean('Ejecutar CDAs al solicitar informe')
solicitar_informe_enviar_a_revision = fields.Boolean('Solicitar informe al enviar a revision')
vr = fields.Integer('Grupo de variables')
nro_grupo_vid = fields.Integer('Grupo VID')
nro_grupo_vid2 = fields.Integer('Grupo VID 2do intento')
nosis_variable_1 = fields.Char('Variable 1')
nosis_variable_2 = fields.Char('Variable 2')
nosis_variable_3 = fields.Char('Variable 3')
nosis_variable_4 = fields.Char('Variable 4')
nosis_variable_5 = fields.Char('Variable 5')
asignar_nombre_cliente = fields.Boolean('Asignar Nombre al cliente')
asignar_nombre_cliente_variable = fields.Char('Variable para el Nombre', default='VI_RazonSocial')
asignar_direccion_cliente = fields.Boolean('Asignar Direccion al cliente')
asignar_calle_cliente_variable = fields.Char('Variable para la calle', default='VI_DomAF_Calle')
asignar_nro_cliente_variable = fields.Char('Variable para el Nro', default='VI_DomAF_Nro')
asignar_piso_cliente_variable = fields.Char('Variable para el Piso', default='VI_DomAF_Piso')
asignar_departamento_cliente_variable = fields.Char('Variable para el Departamento', default='VI_DomAF_Dto')
asignar_ciudad_cliente = fields.Boolean('Asignar Ciudad a direccion')
asignar_ciudad_cliente_variable = fields.Char('Variable para la ciudad', default='VI_DomAF_Loc')
asignar_cp_cliente = fields.Boolean('Asignar CP a direccion')
asignar_cp_cliente_variable = fields.Char('Variable para el CP', default='VI_DomAF_CP')
asignar_provincia_cliente = fields.Boolean('Asignar Provincia a direccion')
asignar_provincia_cliente_variable = fields.Char('Variable para la Provincia', default='VI_DomAF_Prov')
asignar_identificacion_cliente = fields.Boolean('Asignar identificacion al cliente')
asignar_identificacion_cliente_variable = fields.Char('Variable para la identificacion', default='VI_Identificacion')
asignar_genero_cliente = fields.Boolean('Asignar genero al cliente')
asignar_genero_cliente_variable = fields.Char('Variable para genero', default='VI_Sexo')
company_id = fields.Many2one('res.company', 'Empresa', required=False, default=lambda self: self.env['res.company']._company_default_get('financiera.nosis.configuracion'))
    @api.one
    def test_conexion(self):
        """Ping the Nosis endpoint with the stored credentials.

        Always raises a UserError so the outcome is surfaced in the UI.
        NOTE(review): HTTP 400 is interpreted as "account connected" —
        presumably the endpoint answers 400 to a bare credential ping;
        confirm against the Nosis API documentation.
        """
        params = {
            'usuario': self.usuario,
            'token': self.token,
        }
        response = requests.get(ENDPOINT_NOSIS, params)
        if response.status_code == 400:
            raise UserError("La cuenta esta conectada.")
        else:
            raise UserError("Error de conexion.")
class ExtendsResCompany(models.Model):
    """Extend res.company with a link to its Nosis configuration record."""
    _name = 'res.company'
    _inherit = 'res.company'
    # Configuration used when this company queries Nosis.
    nosis_configuracion_id = fields.Many2one('financiera.nosis.configuracion', 'Configuracion Nosis')
| [
"levislibra@hotmail.com"
] | levislibra@hotmail.com |
3f0368c913e52dff7e4bfd737ca4028f9e25ee32 | f4b7fce1c5277ab81e66a72746553554b9ae9a7b | /MothersVengeance/classes/enemy.py | 5d35fd6beefdce236444b45d58628aaf22690a9d | [] | no_license | deco93/MomsVengeance | 9bf468e57ad48e57357f5c37b574a63153abd0e5 | c1ce281bf5e124667bd6d5e343f91c729f5fb00c | refs/heads/master | 2022-12-12T08:06:00.830909 | 2020-09-13T13:48:24 | 2020-09-13T13:48:24 | 290,578,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,548 | py | import pygame
class Enemy(object):
    """Patrolling goblin enemy for the side-scroller.

    Walks back and forth between ``x`` and ``end``, draws an 11-frame walk
    animation plus a health bar, and loses one HP per registered hit.
    """
    def __init__(self, x, y, width, height, end):
        # Sprite position/size and the right-hand patrol boundary.
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.end = end
        # Patrol interval [left, right]; vel > 0 means walking right.
        self.path = [self.x, self.end]
        self.walkCount = 0
        self.vel = 3
        # 11 frames per direction; each frame shown for 3 ticks (33 total).
        self.walkRight = [pygame.image.load('./imgs/R1E.png'), pygame.image.load('./imgs/R2E.png'), pygame.image.load('./imgs/R3E.png'), pygame.image.load('./imgs/R4E.png'), pygame.image.load('./imgs/R5E.png'), pygame.image.load('./imgs/R6E.png'), pygame.image.load('./imgs/R7E.png'), pygame.image.load('./imgs/R8E.png'), pygame.image.load('./imgs/R9E.png'), pygame.image.load('./imgs/R10E.png'), pygame.image.load('./imgs/R11E.png')]
        self.walkLeft = [pygame.image.load('./imgs/L1E.png'), pygame.image.load('./imgs/L2E.png'), pygame.image.load('./imgs/L3E.png'), pygame.image.load('./imgs/L4E.png'), pygame.image.load('./imgs/L5E.png'), pygame.image.load('./imgs/L6E.png'), pygame.image.load('./imgs/L7E.png'), pygame.image.load('./imgs/L8E.png'), pygame.image.load('./imgs/L9E.png'), pygame.image.load('./imgs/L10E.png'), pygame.image.load('./imgs/L11E.png')]
        # Collision box is narrower than the raw sprite image.
        self.hitbox = (self.x + 12, self.y, self.width - 20, self.height)
        self.health = 10
        self.visible = True
        self.healthbar_width = 50
        self.healthbar_height = 10
    def draw(self, win):
        """Advance the patrol, then draw sprite and health bar onto *win*."""
        self.move()
        if self.visible:
            if self.walkCount +1 >= 33:
                self.walkCount = 0
            if self.vel > 0:
                win.blit(self.walkRight[self.walkCount//3], (self.x, self.y))
                self.walkCount += 1
            else:
                win.blit(self.walkLeft[self.walkCount//3], (self.x, self.y))
                self.walkCount += 1
            # Red background bar, then green foreground scaled by health/10.
            pygame.draw.rect(win, (255, 0, 0), (self.hitbox[0], self.hitbox[1] - 20, self.healthbar_width, self.healthbar_height), 0)
            pygame.draw.rect(win, (0, 255, 0), (self.hitbox[0], self.hitbox[1] - 20, self.health * (self.healthbar_width/10), self.healthbar_height), 0)
            # NOTE(review): the hitbox is refreshed *after* the bars are
            # drawn, so the bars use the previous frame's position.
            self.hitbox = (self.x + 12, self.y, self.width - 20, self.height)
            # pygame.draw.rect(win, (255,0,0), (self.x + 12, self.y, self.width - 20, self.height), 2)
    def move(self):
        """Step along the patrol path, reversing direction at a boundary."""
        if self.vel > 0:
            if self.x + self.vel < self.path[1]:
                self.x += self.vel
            else:
                self.vel = self.vel * -1
                self.walkCount = 0
        else:
            if self.x + self.vel > self.path[0]:
                self.x += self.vel
            else:
                self.vel = self.vel * -1
                self.walkCount = 0
    def hit(self, score):
        """Register one hit: -1 HP, +1 score, play the hit sound.

        Returns the updated score; hides the enemy when HP reaches 0.
        """
        if self.health > 0:
            self.health -= 1
            score += 1
            pygame.mixer.Channel(1).play(pygame.mixer.Sound('./sounds/hit.wav'))
        if self.health == 0:
            self.visible = False
        print('GOBLIN HIT...')
        return score
| [
"saranshwali@yahoo.in"
] | saranshwali@yahoo.in |
f10521bec9c35ed9de1f626cde80d9f4c3eccfd2 | 3b5c46ce2daa75e1e157838d0f6cfd92469471a0 | /plastering/inferencers/scrabble/ground_truth_gen.py | 06262e23e98f035aa786f957245812f56a341b1c | [
"MIT"
] | permissive | plastering/plastering | 1b4e9c04fce4b26b22fe5ade05af9baf644b4eaa | 26ffeecb38844ebb122fde5d9bd2276a7b4150a0 | refs/heads/master | 2023-04-04T07:50:59.087529 | 2021-05-17T23:31:40 | 2021-05-17T23:31:40 | 149,086,461 | 37 | 17 | MIT | 2023-03-24T23:19:24 | 2018-09-17T07:32:17 | Python | UTF-8 | Python | false | false | 3,069 | py | import pdb
import json
import argparse
parser = argparse.ArgumentParser()
# Which building's metadata to process.
parser.add_argument(choices=['ap_m','ebu3b', 'bml'], dest='building')
args = parser.parse_args()
import pandas as pd
from brick_parser import equipTagsetList as equip_tagsets, \
                         locationTagsetList as location_tagsets,\
                         pointSubclassDict as point_subclass_dict,\
                         equipSubclassDict as equip_subclass_dict,\
                         locationSubclassDict as location_subclass_dict
# Merge the Brick point/equip/location subclass maps into one lookup table
# and add a few sentinel labels that have no subclasses.
subclass_dict = dict()
subclass_dict.update(point_subclass_dict)
subclass_dict.update(equip_subclass_dict)
subclass_dict.update(location_subclass_dict)
subclass_dict['networkadapter'] = list()
subclass_dict['none'] = list()
subclass_dict['unknown'] = list()
building = args.building
# Inputs: sensor schema labels (CSV) plus per-sensor token labels and
# tokenized point names (JSON) for the chosen building.
sensor_df = pd.read_csv('metadata/{0}_sensor_types_location.csv'\
                        .format(building)).set_index('Unique Identifier')
with open('metadata/{0}_label_dict_justseparate.json'\
          .format(building), 'r') as fp:
    label_dict = json.load(fp)
with open('metadata/{0}_sentence_dict_justseparate.json'\
          .format(building), 'r') as fp:
    sentence_dict = json.load(fp)
# Tagsets that denote equipment/location, i.e. everything but point types.
nonpoint_tagsets = equip_tagsets + location_tagsets + ['networkadapter']
def find_nonpoint_tagsets(tagset):
    """Return *tagset* when its base class (text before the first '-') is an
    equipment/location tagset, otherwise the empty string."""
    base_tagset = tagset.split('-')[0]
    return tagset if base_tagset in nonpoint_tagsets else ''
truth_dict = dict()
for srcid, label_list in label_dict.items():
    sentence = sentence_dict[srcid]
    phrase_list = list()
    truth_list = list()
    # Keep only meaningful (token, label) pairs for this point name.
    sentence_meanings = [(token,label)
                         for token, label
                         in zip(sentence, label_list)
                         if label not in ['none', 'unknown']]
    right_identifier_buffer = ''
    for (token, label) in sentence_meanings:
        # Identifier tokens are skipped; the merge logic is disabled below.
        if label=='leftidentifier':
            # phrase_list[-1] += ('-' + token)
            continue
        elif label=='rightidentifier':
            # right_identifier_buffer += token
            continue
        phrase_list.append(label)
    # Dead in practice: the buffer is never filled while the assignment
    # above stays commented out.
    if right_identifier_buffer:
        phrase_list[-1] += ('-' + right_identifier_buffer)
    # Keep only equipment/location phrases.
    truth_list = [phrase
                  for phrase
                  in phrase_list
                  if find_nonpoint_tagsets(phrase)]
    # Drop a tagset when more than one member of truth_list falls under its
    # subclasses. NOTE(review): the inner comprehension variable 'tagset'
    # shadows the outer loop variable, so the membership test only checks
    # members of truth_list against 'subclasses' — verify this is intended.
    removing_tagsets = list()
    for tagset in truth_list:
        subclasses = subclass_dict[tagset.split('-')[0]]
        if sum([True if tagset in subclasses else False
                for tagset in truth_list]) > 1:
            removing_tagsets.append(tagset)
    for tagset in removing_tagsets:
        truth_list = list(filter(tagset.__ne__, truth_list))
    # Append the hand-labelled schema label for this sensor, if present.
    try:
        truth_list.append(sensor_df['Schema Label'][srcid].replace(' ', '_'))
    except:
        print(srcid, 'failed')
    truth_dict[srcid] = list(set(truth_list))
    # TODO: add all labels to a dict (except point type info)
with open('metadata/{0}_ground_truth.json'.format(building), 'w') as fp:
    json.dump(truth_dict, fp, indent=2)
| [
"bk7749@gmail.com"
] | bk7749@gmail.com |
2bfcd3ada8bcb27eca2b547c6824f1c5cae3645b | d51781836c4750d5e8116cc0d815ac9fb5fabe04 | /pya/cli.py | 56b565d2dc54e1a926f38365c0c1c8b522387191 | [
"MIT"
] | permissive | gsemet/pya | 1dc689752e96bb4107cfb59b1ed3f1647e51943b | 578e1c0fef58fa6a97fc3bfba62d82c3cdb16828 | refs/heads/master | 2022-12-14T05:09:34.554969 | 2018-06-28T06:09:02 | 2018-06-28T06:09:02 | 138,974,579 | 0 | 0 | MIT | 2021-06-01T22:31:25 | 2018-06-28T06:15:39 | Python | UTF-8 | Python | false | false | 523 | py | # -*- coding: utf-8 -*-
"""Console script for pya."""
import sys
import click
@click.command()
@click.argument('package')
def main(package, args=None):
    """Console script for pya."""
    click.echo("Python Application Installer")
    import subprocess
    # Stream pip's help output line by line, stripping trailing newlines.
    proc = subprocess.Popen(
        ['pip', 'install', '--help'],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    for raw_line in proc.stdout:
        print(raw_line.decode('utf-8').replace('\n', ''))
    return 0


if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
| [
"gaetan@xeberon.net"
] | gaetan@xeberon.net |
054fc7189e60b62900473e12995542239a2b4346 | 729d4da94810b2c8b09de7eacd62f6a3deb90cc6 | /Day3/app.py | 30d29ed7a00bedd661c4e65e560fb4d2974944d6 | [] | no_license | vishwajeetsinghrana8/Flask_For_ML_7Days_Workshop | b09c90da991a231eb2c05f069c3858d3be7e7314 | 9d92bae818ae2236f14f21507c483387afbafe0f | refs/heads/main | 2023-07-15T22:55:21.613338 | 2021-09-03T14:46:23 | 2021-09-03T14:46:23 | 402,436,243 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | from flask import Flask, render_template, request
app = Flask(__name__)


@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')


@app.route('/signup')
def signup():
    """Serve the signup form page."""
    return render_template('signup.html')


@app.route('/thankyou')
def thankyou():
    """Confirmation page; echoes the submitted first/last name.

    Reads 'first' and 'last' from the query string; either may be None
    when the corresponding form field is absent.
    """
    first = request.args.get('first')
    last = request.args.get('last')
    return render_template('thankyou.html', first=first, last=last)


@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page with a 404 status."""
    return render_template('404.html'), 404
if __name__ == '__main__':
app.run(debug=True) | [
"noreply@github.com"
] | noreply@github.com |
563eba447c671fd512d395f592dacda7801a7acf | 1b9075ffea7d4b846d42981b41be44238c371202 | /2008/devel/applications/office/abiword/actions.py | e75fb6419d91ba18ae0f32bc78933b9d14e7ebfc | [] | no_license | pars-linux/contrib | bf630d4be77f4e484b8c6c8b0698a5b34b3371f4 | 908210110796ef9461a1f9b080b6171fa022e56a | refs/heads/master | 2020-05-26T20:35:58.697670 | 2011-07-11T11:16:38 | 2011-07-11T11:16:38 | 82,484,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2
# See the file http://www.gnu.org/copyleft/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
    """Configure the source tree (X11, ImageMagick, libxml2, zlib, libpng,
    popt, printing and GNOME UI enabled)."""
    autotools.configure("--with-x \
                         --with-ImageMagick \
                         --with-libxml2 \
                         --with-zlib \
                         --with-libpng \
                         --with-popt \
                         --enable-printing \
                         --enable-gnomeui")
def build():
    """Compile with make."""
    autotools.make()
def install():
    """Install into the PiSi package staging directory and ship the docs."""
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("docs/Abi*", "docs/NonLatin1UnixLocales.abw")
| [
"MeW@a748b760-f2fe-475f-8849-a8a11d7a3cd2"
] | MeW@a748b760-f2fe-475f-8849-a8a11d7a3cd2 |
b5a99dafbe66010b9ca89c8e85b7ff6848b672a2 | 6627d27fc69922f179b14b612b366bbf0bc4eff9 | /old/robots/claymore/netconsole.py | 8bb255b6187ac8cb4eeb6db65d0f2c73d2075da6 | [] | no_license | errorcodexero/newarch | 1a4773377197174ae58b6e4ef6d670bf197c643b | e69a864012e09548014ad208affeb8901835a654 | refs/heads/master | 2021-06-03T16:28:41.840622 | 2020-03-15T18:15:08 | 2020-03-15T18:15:08 | 139,747,384 | 9 | 6 | null | 2020-01-31T05:35:34 | 2018-07-04T16:54:36 | C++ | UTF-8 | Python | false | false | 2,488 | py | #!/usr/bin/env python
# Copyright (c) Robert Blair Mason Jr. (rbmj) rbmj@verizon.net
# see LICENSE for license information.
import socket
import sys
import threading
import atexit
import time
import os
#allow import in both python 2.x and 3.x
try:
    from Queue import Queue, Empty  # Python 2
except ImportError:
    from queue import Queue, Empty  # Python 3
#ports
# NOTE(review): 6666/6668 look like the robot netconsole UDP ports —
# confirm against the target robot's documentation.
UDP_IN_PORT=6666
UDP_OUT_PORT=6668
#set up recieving socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind( ('',UDP_IN_PORT) )
#set up sending socket - use separate socket to avoid race condition
out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
out.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
out.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
out.bind( ('',UDP_OUT_PORT) ) #bind is necessary for escoteric reasons stated on interwebs
#set up atexit handler to close sockets
def atexit_func():
    """Release both UDP sockets when the interpreter shuts down."""
    sock.close()
    out.close()
atexit.register(atexit_func)
#set up threads to emulate non-blocking io
#thread-level emulation required for compatibility with windows
# Reader threads push into these; the main loop polls with get_nowait().
stdin_queue = Queue()
sock_queue = Queue()
def enqueue_output_file(f, q):
    """Pump lines from file object *f* into queue *q* until EOF (b'')."""
    while True:
        current_line = f.readline()
        if current_line == b'':
            break
        q.put(current_line)
def enqueue_output_sock(s, q):
    """Forward every datagram received on socket *s* into queue *q*.

    Runs forever; intended to live on a daemon thread.
    """
    while True:
        q.put(s.recv(4096))
stdin_reader = threading.Thread(target = enqueue_output_file, args = (sys.stdin, stdin_queue))
sock_reader = threading.Thread(target = enqueue_output_sock, args = (sock, sock_queue))
# Daemon threads so they do not keep the process alive at exit.
stdin_reader.daemon = True
sock_reader.daemon = True
stdin_reader.start()
sock_reader.start()
#send a message out the socket
def send_msg(msg):
    """Broadcast *msg* to the netconsole input port.

    Bug fix: the original body sent the global ``line`` instead of the
    ``msg`` parameter; it only worked because the sole caller happened to
    pass that same global as the argument.
    """
    out.sendto(msg, ('255.255.255.255', UDP_OUT_PORT))
#main loop
have_now=''  # partial line carried over between datagrams
group=[]  # lines buffered for (re)display
clear_banner = 'CLEAR_SCREEN'  # magic token: clear the terminal when seen
try:
    while True:
        # Drain one chunk of robot output, if any, and split into lines.
        try: msg = sock_queue.get_nowait()
        except Empty:
            pass # no output
        else:
            sp=msg.split('\n')
            assert len(sp)
            # Glue the carried-over partial line onto the first piece and
            # keep the (possibly empty) trailing partial for next time.
            sp[0]=have_now+sp[0]
            have_now=sp[-1]
            sp=sp[:-1]
            for elem in sp:
                # Flush the previously buffered group — clearing the screen
                # whenever the banner token appears — then start a new one.
                for ele in group:
                    if clear_banner in ele:
                        os.system('clear')
                    else:
                        print ele
                group=[]
                group.append(elem)
            #for i,elem in enumerate(sp):
            #    if 'in:' in elem:
            #        os.system('clear')
            #    sys.stdout.write(elem)
            #    if i+1!=len(sp):
            #        sys.stdout.write('\n')
            #sys.stdout.write(msg)
        # Forward one line of local stdin to the robot, if any.
        try: line = stdin_queue.get_nowait()
        except Empty:
            pass # no input
        else:
            send_msg(line)
        #time.sleep(0.05)
# Bare except: any error (including KeyboardInterrupt on this Python 2
# script) falls through to flush whatever is still buffered.
except:
    for elem in group: print elem
| [
"butchg@comcast.net"
] | butchg@comcast.net |
a0ef3ab113554de3c81c5dcf8b264d3133083b8b | 6a8fd1d54d5c1ff7f8599d850fa12d2ee1b6cb87 | /floorWall/blog/feeds.py | 9c8ebfb4c4e8f52b294583d6c4e9b7212bf7c9c2 | [] | no_license | Deathcharge/MASTER | 6dde2c9924181b1531c3edd9918aa92e1a7904b7 | 7129111c21b98812a117a4372a28b9314567f9c6 | refs/heads/master | 2022-11-22T13:04:52.581713 | 2020-07-19T12:34:06 | 2020-07-19T12:34:06 | 280,714,733 | 0 | 0 | null | 2020-07-25T08:35:32 | 2020-07-18T18:23:13 | Python | UTF-8 | Python | false | false | 802 | py | from django.contrib.syndication.views import Feed
from django.template.defaultfilters import truncatewords
from .models import Post
#from django.urls import reverse
class LatestPostsFeed(Feed):
    """RSS feed of blog posts, via django.contrib.syndication."""
    title = "My blog"
    link = ""
    description = "New posts of my blog."

    def items(self):
        # status=1 — presumably the "published" state; confirm on the Post model.
        return Post.objects.filter(status=1)

    def item_title(self, item):
        return item.title

    def item_description(self, item):
        # First 30 words of the body as the feed summary.
        return truncatewords(item.content, 30)

    # Only needed if the model has no get_absolute_url method
    # def item_link(self, item):
    #     return reverse("post_detail", args=[item.slug])
from django.utils.feedgenerator import Atom1Feed
class AtomSiteNewsFeed(LatestPostsFeed):
feed_type = Atom1Feed
subtitle = LatestPostsFeed.description | [
"ward.andrew32@gmail.com"
] | ward.andrew32@gmail.com |
7512288e5822c7a47a7857cd42a28c73b87de179 | 1ee5060bca6c2138e9b933494cc98297f595b458 | /tests/bq_test16.py | 201a6b9672c724aaf4361aa0608a47a678c22ba0 | [] | no_license | OmarKhayyam/UGym | e051acc474e88285301d23b8cf968d3b4109fe35 | f8c9066c4a2a78fe24f9174cc657912cdbdbfaf3 | refs/heads/main | 2023-05-04T19:48:41.011630 | 2021-05-30T11:55:51 | 2021-05-30T11:55:51 | 372,201,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | #!/usr/bin/env python
import sys
sys.path.insert(1, '../BQDetailsObject')
from BQDetailsObject import BQDetailsObject
b = BQDetailsObject()
print("Testing getSumOfNumericStructMembers()...")
# Enumerate every dataset, collect its tables, then exercise the helper on
# one known table ('rns_db_6').
b.getAllDatasets()
for ds in b.dsls:
    b.getAllTablesForDataset(ds.dataset_id)
for t in b.tblls:
    dtset = t.dataset_id
    tab = t.table_id
    if tab == 'rns_db_6':
        mytablst = b.getColumnDetails(dtset,tab)
        print(b.getSumOfNumericStructMembers(mytablst))
print("Test for getSumOfNumericStructMembers() succeeded.")
| [
"shringar@amazon.com"
] | shringar@amazon.com |
8d706c35396cf8ae9ce4978e5e8218b88d2004f6 | 1d2845117bf4ccc6af503d6133fc2fc1d31a8a1a | /pytorch_code/main.py | 93c6ca782c42b2819fed7c736efddf5479ee3536 | [] | no_license | wellimbharath/xmlcnn-public | 54b19951703a02409f60e5213bec1730ea43df13 | b75e7fe483735a31181c39ed71ce92c9babf2450 | refs/heads/master | 2022-05-01T22:38:56.035884 | 2018-05-01T07:30:51 | 2018-05-01T07:30:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,496 | py | from datetime import datetime as dt
import torch.optim as optim
import torch.nn as nn
import os
import time
import argparse
import pickle
import torch
from IPython.core.debugger import Pdb
from torch.utils.data import Dataset, DataLoader
import utils
import xml_dataset
import cnntext
import numpy as np
import data_samplers
import train_xml
import error_analysis
# Flipped to torch.cuda.is_available() inside main().
cuda = False


def main(args):
    """End-to-end XML-CNN train/eval loop driven by parsed CLI args.

    Loads the vocab pickle, builds model/loss/data loaders, then runs
    args.num_epochs train+eval cycles, checkpointing every epoch and
    tracking the best validation score.
    """
    global cuda
    cuda = torch.cuda.is_available()
    if cuda:
        # Propagate the device flag into the helper modules.
        train_xml.cuda = cuda
        cnntext.cuda = cuda
        #criterion.cuda()
    utils.log('start loading vocab')
    vocabs = pickle.load(open(args.vocab_path, 'rb'))
    utils.log('done loading vocab')
    # Side effect only: initializes the dataset class' shared vocabulary.
    _ = xml_dataset.xml_dataset(max_length= args.max_length, vocab_size = args.vocab_size, min_count = args.min_count, word_counts = vocabs['word_counts'], vocabulary_inv= vocabs['vocabulary_inv'], only_initialize_vocab = True)
    vocab_size = xml_dataset.xml_dataset.vocab_size
    embedding_init = vocabs['embedding_init']
    embedding_init = embedding_init[:vocab_size]
    num_labels = len(vocabs['labels_inv'])
    model = cnntext.select_model(args, num_labels, vocab_size, embedding_init)
    my_loss_fn = train_xml.get_loss_fn(args)
    (train_loader, val_loader) = xml_dataset.get_data_loaders(args,vocabs)
    utils.log('done loading vocab')
    #optimizer = optim.SGD(model.parameters(), momentum = 0.9, lr = lr, weight_decay = 0.0005)
    optimizer = optim.Adam(model.parameters(), lr=args.lr,
                           weight_decay=args.decay)
    # Experiment name encodes every hyper-parameter for logs/checkpoints.
    exp_name = '{}_lr_{}_nf_{}_ch_{}_decay_{}_hd_{}_kernels_{}_ml_{}_vmc_{}_vs_{}_l_{}_a_{}'.format ( args.exp_name, args.lr, args.num_features, args.channels, args.decay, args.hidden_size, '-'.join([str(x) for x in args.kernels]), args.max_length, args.min_count, args.vocab_size, args.lstm, args.attn)
    log_file = '{}.csv'.format(exp_name)
    checkpoint_file = os.path.join(args.output_path, '{}_checkpoint.pth'.format(exp_name))
    best_checkpoint_file = os.path.join(args.output_path, '{}_best_checkpoint.pth'.format(exp_name))
    utils.log('save checkpoints at {} and best checkpoint at : {}'.format(checkpoint_file, best_checkpoint_file))
    if not os.path.exists(args.output_path):
        try:
            os.makedirs(args.output_path)
        except:
            # Another process may have created it concurrently.
            pass
    #
    if args.checkpoint != '':
        # Resume: append to the log and restore model/optimizer state.
        utils.log('start from checkpoint: {}'.format(args.checkpoint))
        tfh = open(os.path.join(args.output_path, log_file), 'a')
        load_checkpoint_file = args.checkpoint
        cp = torch.load(os.path.join(args.output_path, args.checkpoint))
        model.load_state_dict(cp['model'])
        optimizer.load_state_dict(cp['optimizer'])
        # Override the restored optimizer hyper-parameters with CLI values.
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr
            param_group['weight_decay'] = args.decay
    else:
        tfh = open(os.path.join(args.output_path, log_file), 'w')
    # NOTE(review): start_epoch stays 0 even when resuming, although the
    # checkpoint stores 'epoch' — confirm whether resuming should continue
    # from cp['epoch'].
    start_epoch = 0
    num_epochs = args.num_epochs
    # Pdb().set_trace()
    utils.log('start train/validate cycle')
    best_score = 0
    for epoch in range(start_epoch, num_epochs):
        train_xml.compute_xml_title(epoch, model, train_loader,
                                    optimizer, 'train', tfh, args.backprop_batch_size, [args.lr, exp_name],loss_fn = my_loss_fn)
        rec,i,topk_pred,actual_labels,ec = train_xml.compute_xml_title(epoch, model, val_loader,
                                                                       None, 'eval', tfh, args.backprop_batch_size, [args.lr, exp_name], loss_fn = my_loss_fn)
        is_best = False
        utils.log('best score: {}, this score: {}'.format(best_score, rec[i]))
        if rec[i] > best_score:
            best_score = rec[i]
            is_best = True
        #
        utils.save_checkpoint( {
            'epoch': epoch,
            'best_score': best_score,
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'is_best': is_best
            } , epoch, is_best, checkpoint_file, best_checkpoint_file)
    #
    tfh.close()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--exp_name', help='exp name',
                        type=str, default='eurlex')
    parser.add_argument('--lstm', help=' should apply lstm?',
                        action='store_true')
    parser.add_argument('--attn', help=' should apply attn',
                        action='store_true')
    # Data locations (numericalized pickles produced by preprocessing).
    parser.add_argument('--train_path',
                        help='numericalized data in pickle', type=str,
                        default='../data/eurlex/train2.pkl')
    parser.add_argument('--val_path',
                        help='val numericalized data in pickle', type=str,
                        default='../data/eurlex/test2.pkl')
    parser.add_argument('--vocab_path',
                        help='vocab in pickle', type=str,
                        default='../data/eurlex/vocab2.pkl')
    parser.add_argument('--output_path',
                        help='output path', type=str,
                        default='../output/eurlex/best_models')
    # Model hyper-parameters.
    parser.add_argument('--kernels',
                        help='number of filter sizes (could be a list of integer)', type=int,
                        default=[2, 4, 8], nargs='+')
    parser.add_argument('--channels',
                        help='number of filters (i.e. kernels) in CNN model', type=int,
                        default=128)
    parser.add_argument('--num_features',
                        help='number of pooling units in 1D pooling layer', type=int,
                        default=8)
    parser.add_argument('--hidden_size',
                        help='number of hidden units', type=int,
                        default=512)
    parser.add_argument('--lstm_hidden_size',
                        help='number of hidden units in lstm', type=int,
                        default=256)
    # Optimization settings.
    parser.add_argument('--batch_size',
                        help='number of batch size', type=int,
                        default=256)
    parser.add_argument('--num_epochs',
                        help='number of epcohs for training', type=int,
                        default=200)
    parser.add_argument('--lr',
                        help='learning rate', type=float,
                        default=0.001)
    parser.add_argument('--decay',
                        help='decay in adam', type=float,
                        default=0.0)
    # Vocabulary/sequence settings. NOTE(review): the help strings of
    # --vocab_size and --min_count look copy-pasted from --max_length.
    parser.add_argument('--max_length',
                        help='max_sentence length', type=int,
                        default=500)
    parser.add_argument('--vocab_size',
                        help='max_sentence length', type=int,
                        default=75000)
    parser.add_argument('--min_count',
                        help='max_sentence length', type=int,
                        default=2)
    parser.add_argument('--backprop_batch_size',
                        help='batch size for backprop', type = int, default=256)
    parser.add_argument('--has_titles', help=' has titles in addn to description',
                        action='store_true')
    parser.add_argument('--title_hidden_size', help=' has titles in addn to description',type=int,
                        default = 128)
    parser.add_argument('--checkpoint',
                        help='continue from this checkpoint', type=str,
                        default='')
    args = parser.parse_args()
    #Pdb().set_trace()
    main(args)
| [
"yatin.nandwani@gmail.com"
] | yatin.nandwani@gmail.com |
f0c3e3f7455a44a35b9adec11036b034f8b83fe1 | be969df481f98b0094fff83eb6d78c7ff3673aba | /tool_error/error.py | ac2ab2764266969e6a3507aafc71ff7e5461ce7d | [
"Apache-2.0"
] | permissive | hustcsxg/pytools | 68b6683209435d5e47741ee47b6299f360c21c75 | 2180509847a6a88906dd110a01164425ed902ac7 | refs/heads/master | 2021-07-03T17:29:36.336247 | 2020-12-03T09:45:23 | 2020-12-03T09:45:23 | 205,327,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,070 | py | """
错误码定义示例文件
"""
import copy
# Prefixes distinguishing our own error codes from wrapped third-party ones.
ERROR_CODE_PREFIX_SYSTEM = '0'
ERROR_CODE_PREFIX_THIRD_PARTY = '1'


def wrap_error_code(code):
    """Wrap an error code returned by a third-party system.

    Prepends the third-party prefix so wrapped codes can be told apart
    from codes produced by this system.
    """
    return '%s%s' % (ERROR_CODE_PREFIX_THIRD_PARTY, str(code))
class BaseException(Exception):
    """Root of this module's exception hierarchy.

    NOTE(review): the name shadows the builtin ``BaseException`` inside
    this module; renaming would change the public API, so it is kept.
    """
    pass
class RequestThirdPartyException(BaseException):
    """Raised when a request to a third-party system fails.

    :param raw_exc: underlying exception (expected to carry ``.message``)
    :param system_name: name of the third-party system
    :param interface_name: name of the interface that was called
    """
    def __init__(self, raw_exc, system_name, interface_name):
        self.raw_exc = raw_exc
        self.system_name = system_name
        self.interface_name = interface_name

    def __str__(self):
        # Delegate so the two renderings cannot drift apart
        # (same pattern as RequestSSLException below).
        return self.get_message()

    def get_message(self):
        """Error message shown to the end user."""
        return u'Component request third-party system [%s] interface [%s] error: %s, '\
            'please try again later or contact component developer to handle this'\
            % (self.system_name, self.interface_name, self.raw_exc.message)
class RequestSSLException(BaseException):
    """SSL error while requesting a third-party system, with an explicit
    user-facing message pointing at the certificate directory."""
    def __init__(self, raw_exc, system_name, interface_name):
        self.raw_exc = raw_exc
        self.system_name = system_name
        self.interface_name = interface_name

    def __str__(self):
        return self.get_message()

    def get_message(self):
        """Error message shown to the end user.

        Bug fix: the original rewrote ``self.raw_exc.cert`` in place when
        it was a tuple; the joined text is now kept in a local so the
        wrapped exception is left untouched.
        """
        cert = self.raw_exc.cert
        if isinstance(cert, tuple):
            cert = u', '.join(cert)
        return u'Component request third-party system [%s] interface [%s] SSL error: '\
            'SSL configuration file [%s] does not exist or is illegal, '\
            'please get the certificates from the documentation and unzip it into [%s]' % (
                self.system_name, self.interface_name, cert, self.raw_exc.SSL_ROOT_DIR)
class TestHostNotFoundException(BaseException):
    """
    Raised when a SmartHost with no host_test entry is accessed from the
    test environment.
    """
    pass


class RequestBlockedException(BaseException):
    """
    Raised after the current request has been blocked.
    """
    pass
class APIError(BaseException):
    """Exception wrapping an ErrorCode so it can be raised and rendered."""
    def __init__(self, code):
        self.code = code
        BaseException.__init__(self, code.prompt)

    def __str__(self):
        return "<APIError %s[%s]: %s>" \
            % (self.code.status, self.code.code, self.code.prompt)

    def format_prompt(self, prompt=None, replace=False, args=(), kwargs=None):
        """Use a customized prompt for this ErrorCode.

        A shallow copy of the wrapped code is taken first so the shared
        ErrorCode instance is never mutated.

        :param prompt: extra text; replaces the prompt when ``replace`` is
            True, otherwise appended after a comma.
        :param args: positional values %-interpolated into the prompt.
        :param kwargs: mapping %-interpolated into the prompt. Fixed: the
            default used to be a mutable ``{}``; ``None`` behaves the same
            because an empty mapping was never applied anyway.
        :returns: ``self``, to allow chaining.
        """
        self.code = copy.copy(self.code)
        if prompt:
            if replace:
                self.code.prompt = prompt
            else:
                self.code.prompt += ', %s' % prompt
        # Render prompt string
        if args:
            self.code.prompt = self.code.prompt % args
        if kwargs:
            self.code.prompt = self.code.prompt % kwargs
        return self
class ErrorCode(object):
    """One error definition: symbolic name, numeric code, user prompt and
    the HTTP status to respond with (200 by default)."""
    def __init__(self, code_name, code, prompt, status=200):
        self.code_name = code_name
        self.code = code
        self.prompt = prompt
        self.status = status

    def as_dict(self):
        """Serialize to the standard failed-response payload."""
        return dict(
            result=False,
            code=self.code,
            data=None,
            message=self.prompt,
        )
class ErrorCodes(object):
    """Error code convention.

    Seven-digit integers of the form 1306xxx: "13" = BlueKing PaaS,
    "06" = ESB, and the last three digits are free to assign.
    """
    error_codes = (
        # 13064xx, user error
        ErrorCode('OPERATOR_REQUIRED', 1306401, 'You must specify the current operator'),
        ErrorCode('USER_PERMISSION_DENIED', 1306402, 'User permission is insufficient'),
        ErrorCode('APP_PERMISSION_DENIED', 1306403, 'APP permission is insufficient'),
        ErrorCode('COMPONENT_NOT_FOUND', 1306404, 'Not found, component class not found'),
        ErrorCode('INACTIVE_CHANNEL', 1306405, 'Not found, inactive channel'),
        ErrorCode('ARGUMENT_ERROR', 1306406, 'Parameters error'),
        ErrorCode('BUFFET_CANNOT_FORMAT_PATH', 1306407, "The component's destination request path cannot be formatted"),
        ErrorCode('RATE_LIMIT_RESTRICTION', 1306429, 'Access frequency limit'),
        # Generic error code, used where the system has no specific code yet
        ErrorCode('COMMON_ERROR', 1306000, 'System error'),
        # 13062xx, third-party system errors
        ErrorCode('REQUEST_THIRD_PARTY_ERROR', 1306201, 'Request third-party interface error'),
        ErrorCode('REQUEST_SSL_ERROR', 1306203, 'Request third-party interface error'),
        ErrorCode('TEST_HOST_NOT_FOUND', 1306206, 'Error, the component does not support access third-party test environment'), # noqa
        ErrorCode('REQUEST_BLOCKED', 1306207, 'Request to the third-party system is blocked'),
        ErrorCode('THIRD_PARTY_RESULT_ERROR', 1306208, '%s system interface results in an unknown format'),
        ErrorCode('REQEUST_DEST_METHOD_ERROR', 1306209, 'The system interface does not support the request method'),
    )

    # Init dict
    _error_codes_dict = {}
    for error_code in error_codes:
        _error_codes_dict[error_code.code_name] = error_code

    def __getattr__(self, code_name):
        """Look up an ErrorCode by name; returned already wrapped in APIError
        so callers can ``raise error_codes.SOME_NAME`` directly."""
        error_code = self._error_codes_dict[code_name]
        return APIError(error_code)
class RequestThirdPartyErrorCodes(object):
    """Messages for HTTP status codes returned by third-party systems."""
    error_codes = {
        'STATUS_CODE_500': 'Third-party system internal error',
        'STATUS_CODE_502': 'Third-party system Bad Gateway',
        'STATUS_CODE_403': 'Third-party system prohibit access to this interface',
        'STATUS_CODE_404': 'Third-party system does not find this interface',
        'STATUS_CODE_302': 'Third-party system redirects this interface',
    }


# Module-level singletons used by the rest of the project.
error_codes = ErrorCodes()
request_third_party_error_codes = RequestThirdPartyErrorCodes()
class CommonAPIError(APIError):
    """
    Shortcut for returning an error response
    """
    def __init__(self, message, error_code=None, status=None):
        """Build a generic error with a customized message.

        :param str message: customized error message
        :param error_code: code placed on the response, defaults to 1306000
        :param status: optional HTTP status override
        """
        self.message = message
        # format_prompt() copies the shared COMMON_ERROR code before
        # mutating it, so the overrides below are safe.
        code = error_codes.COMMON_ERROR.format_prompt(message, replace=True).code
        if error_code:
            code.code = error_code
        if status:
            code.status = status
        super(CommonAPIError, self).__init__(code)
| [
"xigang@xsyxsc.com"
] | xigang@xsyxsc.com |
970e9469619a802e49f76d3ff6847358c83718f2 | 95d9e1701d9c09188bb14ac9c9f34a553942e276 | /venv/bin/melody_rnn_train | 25a3191142235c6e51aa744d5f7c9b6215465030 | [
"MIT"
] | permissive | abinayadinesh1/Expressiva | aef9c65b4a7c2d9cdf30574ce60b071f347e7321 | 1f97456ea183c1ceb4ccea7424e65b809b3d6d6b | refs/heads/master | 2023-01-10T11:45:22.853552 | 2020-11-15T16:08:39 | 2020-11-15T16:08:39 | 312,972,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | #!/Users/abi3/Downloads/aiexperiments-ai-duet-master/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from magenta.models.melody_rnn.melody_rnn_train import console_entry_point
if __name__ == '__main__':
    # Strip setuptools' "-script.pyw"/".exe" suffix from argv[0] so the
    # entry point sees a clean program name, then hand off to magenta.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(console_entry_point())
| [
"abi3@abinayas-mbp.lan"
] | abi3@abinayas-mbp.lan | |
9dd4e56b0b77bc9d2e0135a47038c8ea171a9e01 | 0c81a31bb625e30330a35c81794334f3720e80c3 | /yield curve.py | a797f492829770bb59cbe06c053a1c5d5bb82b91 | [] | no_license | Niels555/my_python_files | 1aaa0668625379ded2fe6e89d8ba95df48700bf2 | 51190045eeb2acc419ad7bcf190b65aa523fc6f5 | refs/heads/master | 2020-08-29T01:31:19.304426 | 2019-10-27T16:13:11 | 2019-10-27T16:13:11 | 217,880,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,926 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 13 11:04:44 2019
@author: jonkm
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
import scipy.integrate as integrate
import math
def Theta(ita, lbda, t):  # theta function
    """Time-dependent theta term used by A(); parameters: ita (presumably a
    volatility), lbda (presumably mean-reversion speed), time t."""
    linear_part = 0.06 * t + 0.15 + 0.06 / lbda
    decay = 1 - np.exp(-2 * lbda * t)
    return linear_part + (ita ** 2) / (2 * lbda ** 2) * decay
#
def B(lbda, t, T):  # B_r function
    """B(t,T) coefficient of the short rate in the bond-price exponent;
    equals (exp(-lbda*(T-t)) - 1) / lbda."""
    tau = T - t
    return (np.exp(-lbda * tau) - 1) / lbda
#
def A(eta,lbda,t,T,acc):#A_r function
    """A(t,T) term of the affine bond-price formula exp(A + B*r).

    Combines the closed-form part C with the integral of
    Theta(s)*B(s,T) over [t, T], approximated by the trapezium rule on
    ``acc`` equally spaced points.
    """
    tau = T-t
    steps = np.linspace(t,T,acc) #making the steps for calculating the integral part
    dt = tau/(acc*1.0-1)
    #C is A - the integral part
    C= eta**(2)/(4*lbda**(3))*(np.exp(-2*lbda*tau)*(4*np.exp(lbda*tau)-1)-3)+(eta**(2)*tau)/(2*lbda**2)
    beta = [0]*len(steps) #this will be the integral part
    beta[0] = 0 #initial value
    for i in range(0,len(steps)-1): #we will use the trapezium rule
        beta[i+1] = beta[i]+0.5*dt*(Theta(eta, lbda,steps[i])*B(lbda,steps[i],T) + Theta(eta, lbda,steps[i+1])*B(lbda,steps[i+1],T))
    return lbda*beta[-1]+C #A=lbda*integralPart+C
def P(t0,T,NoOfSteps,y,r0,eta,lbda,acc):
    """Zero-coupon bond price P(t0,T) = exp(A(t0,T) + B(t0,T)*r0).

    NOTE(review): ``NoOfSteps`` and ``y`` are unused here; presumably kept
    for signature compatibility with callers — confirm before removing.
    """
    P = np.exp(A(eta,lbda,t0,T,acc)+B(lbda,t0,T)*r0)
    return P
def priceIRSwap(K, T, p):
    """Value at t0 of an interest-rate swap with fixed rate K.

    T is the array of payment dates (T[0] = t0) and p the strip of
    discount factors P(t0, Ti). Value = 1 - P(t0, Tn) - K * annuity,
    where the annuity sums the first len(T)-1 discount factors.
    """
    annuity = 0
    for discount_factor in p[:len(T) - 1]:
        annuity = annuity + discount_factor
    return 1 - p[len(T) - 2] - K * annuity
def Jaco(K,T,p): #input is array with strike prices, array with maturity times/payment dates, p array of discount factors
    """3x3 Jacobian d(swap value_i)/d(discount factor_j), central differences.

    Swap i pays annually up to maturity T[i]; each entry perturbs one
    discount factor by +/- dp/2 and divides the value change by dp.
    NOTE(review): np.array([T1,T2,T3]) is ragged (lengths 2/3/4); NumPy
    >= 1.24 raises for this unless dtype=object — verify on the target
    NumPy version.
    """
    dp = 10**(-5)
    T1 = np.linspace(0,T[0],2) #array of payments for swap 1
    T2 = np.linspace(0,T[1],3) #array of payments for swap 2
    T3 = np.linspace(0,T[2],4)
    Ti = np.array([T1,T2,T3])
    J = np.zeros((3,3))
    for i in range(0,3): #row
        for j in range(0,3): #column
            dpV = np.zeros(len(p)) #vector with only dp on position of pi
            dpV[j] = 0.5*dp #on position p[j] we want to at dp
            J[i,j] = (priceIRSwap(K[i],Ti[i],p+dpV) - priceIRSwap(K[i],Ti[i],p-dpV))/dp #central difference scheme
    return J
def MultiNR(K,T,pi,q,error = 10**(-4),maxIter = 20): #pi is initual guess, error is tolerance
    """Multivariate Newton-Raphson for the three discount factors that make
    the swap values equal the market prices q.

    The outer loop searches over initial guesses (pi shifted by
    ``increment``) until the inner Newton iteration converges: the step
    change is below ``error`` and all three components actually moved.
    NOTE(review): ``y`` and ``dp`` are unused; ``p1`` from the last inner
    iteration is returned even if the outer search exits without
    converging.
    """
    dp=10**(-5) #h-->0
    T1 = np.linspace(0,T[0],2) #array of payments for swap 1
    T2 = np.linspace(0,T[1],3) #array of payments for swap 2
    T3 = np.linspace(0,T[2],4)
    Ti = np.array([T1,T2,T3])
    increment = 0.01 #for searchloop
    #pi is initual guess
    y = np.zeros(len(pi)); #this is going to be the output
    while np.min(pi) <= 20: #discount factors wont be 10, stop searching for initual guess
        p0 = pi
        n=1
        while n<= maxIter:
            pv = np.array([priceIRSwap(K[0],Ti[0],p0),priceIRSwap(K[1],Ti[1],p0),priceIRSwap(K[2],Ti[2],p0)])-q
            Jinv = np.linalg.inv(Jaco(K,T,p0))
            p1 = p0 - np.matmul(Jinv, pv)
            if abs(np.min(p1)-np.min(p0)) <error and p0[0]!=p1[0] and p0[1]!=p1[1] and p0[2]!=p1[2]:
                break #goes outside while loop
            p0 = p1
            n = n+1
        if abs(np.min(p1)-np.min(p0)) < error and p0[0]!=p1[0] and p0[1]!=p1[1] and p0[2]!=p1[2]: #if error is small and they are not the same, we have found solution
            break #found solution, go outside first while loop
        #if not the case, we are still in while loop, try new initial guess:
        pi = pi+increment
        if np.min(pi) >= 10: #pi wont get zo big
            pi = np.zeros(len(p0))
            increment = increment/2 #try new initial search for good initual guess, now with smaller increments for searching
    df = p1
    return df
def Maincalculation():
    """Demo driver: bootstrap discount factors from three par swaps and
    print the intermediate results.

    NOTE(review): lbda, eta, acc, t0, NoOfSteps, T0 and y are assigned but
    never used below -- presumably leftovers from a larger script.
    """
    lbda = 0.5
    eta = 0.03
    acc = 40 #accuracy of integral of Ar
    t0 = 0
    NoOfSteps = 100
    T0 = 0 #Tm = T0
    T1 = 1
    T2 = 2
    T3 = 3
    y=1 #one year
    T = np.linspace(T1,T3,3)
    # fixed rates at which each swap prices to zero at t0
    K1= 0.01
    K2 = 0.0214
    K3 = 0.038
    K = np.array([K1,K2,K3])
    qm = np.zeros(3) # market swap prices are zero at inception
    #pi = np.array([0.8,0.6,0.4]) #initual guess for set of discount factors ,
    pi = np.array([0,0,0])
    V0 = priceIRSwap(K[0],T,pi)
    print('example swap', V0)
    J = Jaco(K,T,pi)
    print('solution of Jacobian matrix',J)
    print(np.linalg.inv(J))
    spinePoints = MultiNR(K,T,pi,qm,error = 10**(-5),maxIter = 20)
    print('optimal spine points:',spinePoints)
    # NOTE(review): the whole K array is passed here, so priceIRSwap returns
    # an array of prices (one per strike) -- confirm this is intended.
    print('swap prices',priceIRSwap(K,T,spinePoints))
# Run the demo when the module is executed.
Maincalculation()
"noreply@github.com"
] | noreply@github.com |
98e0e1153c797c4af9dc93ebe684c88e04078fdb | 848471f05a8df7e7fe0314b0ab5acaf3d075d0a6 | /Komunikazioa/appBatuketa.py | bbce0ffc0899e8bf543fe536a2dca835b83addbb | [] | no_license | ekaitzhara/EkaitzHurtado_GrAL | bb04b6d13b7d2ae92f97e1da474435d3beb22314 | 560e7c2ca88b8147bc0de3155ee5a69562a3faa7 | refs/heads/master | 2022-11-09T20:15:20.537849 | 2020-06-24T22:14:49 | 2020-06-24T22:14:49 | 274,714,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | from flask import Flask, request, jsonify
import sys
app = Flask(__name__)
batuketaTotala = 0  # running sum of all numbers received so far
kont = 0  # number of POST submissions processed so far
@app.route('/', methods=['POST', 'GET'])
def index():
    """Accumulate numbers POSTed by clients; report the running sum on GET.

    After the tenth POST the final total is logged and returned. Note that
    POST requests before the tenth one fall through without an explicit
    return value (mirroring the original behaviour).
    """
    global kont, batuketaTotala
    method = request.method
    if method == 'POST':  # a client submitted a number
        zenbakia = request.json['zenbakia']
        print('Lortutako zenbakia: ' + str(zenbakia), file=sys.stderr)
        batuketaTotala += zenbakia
        print('Batuketa osoa: ' + str(batuketaTotala), file=sys.stderr)
        kont += 1
        if kont == 10:  # tenth submission: log and return the final total
            for line in ('########################',
                         'Azken emaitza: ' + str(batuketaTotala),
                         '########################'):
                print(line, file=sys.stderr)
            return jsonify("Azken emaitza: " + str(batuketaTotala))
    if method == 'GET':  # status query
        print("Orain arteko batuketa: " + str(batuketaTotala), file=sys.stderr)
        print("Zein iterazioan gaude: " + str(kont), file=sys.stderr)
        return jsonify("Orain arteko batuketa: " + str(batuketaTotala))
"noreply@github.com"
] | noreply@github.com |
365d03142d2a760e74ebd64925d6f47d0a8f9b47 | dad35c2e4e166f2e9271b9ace39727df5474cd5f | /Python/TCP/server_multi.py | 4376b0242431df9d7cb5f0a3f3f27ab28f2b74fd | [] | no_license | MattMarti/Practice | 60d43fa9f690fee12442ada88527236b37709749 | 48d06eaa0b45d447b160e8200875ec241d79f630 | refs/heads/master | 2023-03-14T14:20:53.867943 | 2021-03-13T21:24:51 | 2021-03-13T21:24:51 | 266,005,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,275 | py | # server_multi.py
#
# Opens TCP socket and accepts data from multiple clients and prints
# to command line
import socket
import selectors
import types
def run():
    """Multi-client TCP echo server on localhost:65432.

    Uses a selector-based event loop with non-blocking sockets: one
    listening socket accepts new clients, and each client connection
    echoes back whatever it sends. Runs forever (interrupt to stop).
    """
    host = "localhost"
    port = 65432
    # AF_INET is IPv4 protocol
    # SOCK_STREAM specifies TCP
    sel = selectors.DefaultSelector() # Set listener
    lsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Opens TCP connection
    lsock.bind((host, port)) # Associates socket with specific network interface
    lsock.listen() # Enables the server to accept connections. Basically this establishes this program as a server
    print("Listenting on ", (host, port));
    # Configure the socket to non-blocking mode. Calls made to socket will no longer block.
    lsock.setblocking(False)
    # data=None marks the listening socket, so the event loop can tell it
    # apart from client connections (which carry a SimpleNamespace).
    sel.register(lsock, selectors.EVENT_READ, data = None)
    def accept_wrapper(sock):
        # Accept a new client and register it for both read and write events.
        connection, address = sock.accept()
        print("Accepted connection from ", address)
        connection.setblocking(False) # Set non-blocking so the server never 'hangs'
        data = types.SimpleNamespace(address=address, inb=b'', outb=b'')
        events = selectors.EVENT_READ | selectors.EVENT_WRITE
        sel.register(connection, events, data=data)
    def service_connection(key, mask):
        # Echo service for one established client connection.
        sock = key.fileobj
        data = key.data
        if mask & selectors.EVENT_READ: # If socket is ready for reading
            recv_data = sock.recv(1024)
            if recv_data:
                data.outb += recv_data
            else:
                # An empty read means the peer closed the connection.
                print("Closing connection to", data.address)
                sel.unregister(sock)
                sock.close()
        if mask & selectors.EVENT_WRITE:
            if data.outb:
                print("Echo from", data.address, ":", repr(data.outb))
                sent = sock.send(data.outb)
                # Keep whatever the kernel did not accept for the next round.
                data.outb = data.outb[sent:]
    # Event loop
    while True:
        events = sel.select(timeout = None)
        for key, mask in events:
            if key.data is None:
                accept_wrapper(key.fileobj)
            else:
                service_connection(key, mask)
# Start the echo server when executed as a script.
if __name__ == "__main__":
    run()
"matthew.marti@gmail.com"
] | matthew.marti@gmail.com |
250b1d292457924a49b4539472683815fd9263e8 | 0749fdebffc1dee69406174381305ab405c82f8b | /pydockerreg/shells/index.py | da087290259929651ecf60aef9c9199d9c641c4c | [] | no_license | woosley/pydockerreg | 179733e7671ce543b1c43dd8715233246bd4f386 | 18c0c12499c40969336751821e0e67194c439eed | refs/heads/master | 2021-01-20T18:27:51.369866 | 2016-06-27T13:27:20 | 2016-06-27T13:27:20 | 61,873,937 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | from __future__ import unicode_literals
import click
from six.moves.urllib_parse import urlparse
from .base import Base
from ..utils import opt_manager, gather_opts, set_completer_var
from .repo import Repo
@gather_opts
class Index(Base):
    """Top-level interactive shell for a Docker registry: lists the
    repositories and drops into a per-repository sub-shell."""
    def get_prompt(self):
        # The prompt shows just the registry host, e.g. "registry.example.com> ".
        host = urlparse(self.session.url).netloc
        return host + "> "
    @opt_manager()
    def ls(self):
        """List all repositories on the registry."""
        repos = self.repos() or []
        for i in repos:
            click.echo(i)
    @opt_manager("repo", help="repo to browse")
    def cd(self):
        """Enter the sub-shell for the repository named by --repo."""
        if self.args.repo not in self.repos():
            click.echo("No such repo")
        else:
            Repo(self.session, self.args.repo).run()
    @set_completer_var("repo")
    def repos(self):
        """Return the repository names from the registry's v2 /_catalog endpoint."""
        res = self.session.get("_catalog").json()['repositories']
        return res
| [
"woosley.xu@gmail.com"
] | woosley.xu@gmail.com |
dc9d3cb48eb30aeeebe9eb2a807410065c170b8b | 7f29c186de0b50d3829066b767e5082a6e9e5947 | /truva_installer/config.py | 1cdb36f7956617f051f8793e85e7c5955f881349 | [] | no_license | caylakpenguen/anatolya | 6ffbd30944ec11b178bc3284dd6626058cfe992f | 6d95135ed1ce709ac1270cc8d00d4a8cd8f2d021 | refs/heads/master | 2020-05-18T19:30:15.319698 | 2015-03-14T18:53:59 | 2015-03-14T18:53:59 | 32,226,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | #Kurulum sonrasi ayarlari yapiliyor
# Post-install configuration: run commands inside the freshly installed
# system (mounted at `mntdir`) via chroot -- rebuild the linker and font
# caches, make the rc.* service scripts executable, and install fstab.
# NOTE(review): commands are built with %-interpolation of `mntdir` and run
# through os.system; acceptable for a trusted installer path, but
# subprocess.run with an argument list would be safer against odd paths.
setup_1 = ('chroot %s /sbin/ldconfig' %mntdir)
os.system(setup_1)
setup_2 = ('chroot %s /usr/X11R6/bin/fc-cache -f' %mntdir)
os.system(setup_2)
# Install the keymap init script and make it executable.
shutil.copyfile("/truva_installer/files/rc.keymap","%s/etc/rc.d/rc.keymap" %mntdir)
setup_3 = ('chmod 755 %s/etc/rc.d/rc.keymap' %mntdir)
os.system(setup_3)
# Install the console font init script and make it executable.
shutil.copyfile("/truva_installer/files/rc.font","%s/etc/rc.d/rc.font" %mntdir)
setup_4 = ('chmod 755 %s/etc/rc.d/rc.font' %mntdir)
os.system(setup_4)
setup_5 = ('chmod 755 %s/etc/rc.d/rc.postinstall' %mntdir)
os.system(setup_5)
setup_6 = ('chmod 755 %s/etc/rc.d/rc.messagebus' %mntdir)
os.system(setup_6)
setup_7 = ('chmod 755 %s/etc/rc.d/rc.hald' %mntdir)
os.system(setup_7)
# Install the prepared filesystem table.
shutil.copyfile("/truva_installer/files/fstab","%s/etc/fstab" %mntdir)
| [
"caylakpenguen@gmail.com"
] | caylakpenguen@gmail.com |
f982f49bded21d3ec480ed23147785cb1e622b6f | e4007870b4d75ba23c2f12ac6646f272cf17865c | /Types/Detection_3D.py | 33c52d337085600db6cc52e4e9c38d9631902223 | [
"MIT"
] | permissive | knut0815/PythonUtility | 385ce332ff34501be7ad21ac7948eb609770e72a | 0062e1e60dc151776b963d13bc4c1763eb90d333 | refs/heads/master | 2023-01-10T09:58:14.619531 | 2020-11-10T12:22:47 | 2020-11-10T12:22:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,453 | py | import numpy as np
from Utility.Classes.Frozen_Class import FrozenClass
class Detection3D(FrozenClass):
    """One KITTI-format 3D detection/tracking record (a single object in a
    single frame)."""
    def __init__(self, frame, track_id, detection_type, truncation, occlusion, obs_angle, bbox, dimensions, location, rotation_y, score):
        self.frame = frame
        self.track_id = track_id
        # detection_type: 'Car', 'Van', 'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram', 'Misc' or 'DontCare'
        self.detection_type = detection_type
        # truncated: Float from 0 (non-truncated) to 1 (truncated)
        self.truncation = truncation
        # occluded: integer (0,1,2,3) indicating occlusion state:
        # 0 = fully visible, 1 = partly occluded, 2 = largely occluded, 3 = unknown
        self.occlusion = occlusion
        # observation angle of object, ranging [-pi..pi]
        self.obs_angle = obs_angle
        # 2D bounding box of object in the image (0-based index): contains left, top, right, bottom pixel coordinates
        self.bbox = bbox
        # 3D object dimensions: height, width, length (in meters)
        self.dimensions = dimensions
        # 3D object location x,y,z in camera coordinates (in meters)
        self.location = location
        # Rotation ry around Y-axis in camera coordinates [-pi..pi]
        self.rotation_y = rotation_y
        # detection confidence; may be None when absent from the input line
        self.score = score
    @classmethod
    def from_string_list(cls, string_list):
        """Build a Detection3D from one whitespace-split KITTI label line.

        The optional 18th field is the detection score; score is None when
        it is absent.
        """
        return cls(
            frame=int(float(string_list[0])), # frame
            track_id=int(float(string_list[1])), # id
            detection_type=string_list[2].lower(), # object type [car, pedestrian, cyclist, ...]
            truncation=float(string_list[3]), # truncation [0..1]
            occlusion=int(float(string_list[4])), # occlusion [0,1,2]
            obs_angle=float(string_list[5]), # observation angle [rad]
            bbox=np.array([float(string_list[6]), float(string_list[7]), float(string_list[8]), float(string_list[9])], dtype=float), # left [px], top [px], right [px], bottom [px]
            dimensions=np.array([float(string_list[10]), float(string_list[11]), float(string_list[12])], dtype=float), # height [m], width [m], length [m]
            location=np.array([float(string_list[13]), float(string_list[14]), float(string_list[15])], dtype=float), # X [m]
            rotation_y=float(string_list[16]), # yaw angle [rad]
            score=float(string_list[17]) if len(string_list) >= 18 else None
        )
)
| [
"sebastian.bullinger@iosb.fraunhofer.de"
] | sebastian.bullinger@iosb.fraunhofer.de |
c805b342485e670743486773449b5dfe5ee5d797 | 5c269629ca7d5ffb3a6035d056ae88f90fd8153a | /pandas/tests/series/test_dtypes.py | 6864eac603ded8a41a02dd6bd6d298bf10d41607 | [
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive",
"BSD-2-Clause"
] | permissive | bdrosen96/pandas | 416e5cb1941b21cee38a30346056a257b7d2b0ce | 506520bd35331aa82db50686c07d96594cac0c10 | refs/heads/master | 2021-01-15T09:20:22.851970 | 2016-07-19T02:06:18 | 2016-07-19T02:06:23 | 63,601,381 | 0 | 0 | NOASSERTION | 2019-11-21T13:08:56 | 2016-07-18T12:31:49 | Python | UTF-8 | Python | false | false | 5,127 | py | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import sys
from datetime import datetime
import string
from numpy import nan
import numpy as np
from pandas import Series
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range, u
from pandas import compat
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesDtypes(TestData, tm.TestCase):
    """Unit tests for Series dtype introspection and astype conversions
    (float/int casts, object/str/unicode casts, datetime and complex
    handling). Written against the legacy pandas/py2 test framework."""
    _multiprocess_can_split_ = True
    def test_astype(self):
        s = Series(np.random.randn(5), name='foo')
        for dtype in ['float32', 'float64', 'int64', 'int32']:
            astyped = s.astype(dtype)
            self.assertEqual(astyped.dtype, dtype)
            self.assertEqual(astyped.name, s.name)
    def test_dtype(self):
        self.assertEqual(self.ts.dtype, np.dtype('float64'))
        self.assertEqual(self.ts.dtypes, np.dtype('float64'))
        self.assertEqual(self.ts.ftype, 'float64:dense')
        self.assertEqual(self.ts.ftypes, 'float64:dense')
        assert_series_equal(self.ts.get_dtype_counts(), Series(1, ['float64']))
        assert_series_equal(self.ts.get_ftype_counts(), Series(
            1, ['float64:dense']))
    def test_astype_cast_nan_int(self):
        # casting NaN to int must raise, not silently truncate
        df = Series([1.0, 2.0, 3.0, np.nan])
        self.assertRaises(ValueError, df.astype, np.int64)
    def test_astype_cast_object_int(self):
        arr = Series(["car", "house", "tree", "1"])
        self.assertRaises(ValueError, arr.astype, int)
        self.assertRaises(ValueError, arr.astype, np.int64)
        self.assertRaises(ValueError, arr.astype, np.int8)
        arr = Series(['1', '2', '3', '4'], dtype=object)
        result = arr.astype(int)
        self.assert_series_equal(result, Series(np.arange(1, 5)))
    def test_astype_datetimes(self):
        import pandas.tslib as tslib
        s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))
        s = s.astype('O')
        self.assertEqual(s.dtype, np.object_)
        s = Series([datetime(2001, 1, 2, 0, 0)])
        s = s.astype('O')
        self.assertEqual(s.dtype, np.object_)
        s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
        s[1] = np.nan
        self.assertEqual(s.dtype, 'M8[ns]')
        s = s.astype('O')
        self.assertEqual(s.dtype, np.object_)
    def test_astype_str(self):
        # GH4405
        digits = string.digits
        s1 = Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)])
        s2 = Series([digits * 10, tm.rands(63), tm.rands(64), nan, 1.0])
        types = (compat.text_type, np.str_)
        for typ in types:
            for s in (s1, s2):
                res = s.astype(typ)
                expec = s.map(compat.text_type)
                assert_series_equal(res, expec)
        # GH9757
        # Test str and unicode on python 2.x and just str on python 3.x
        for tt in set([str, compat.text_type]):
            ts = Series([Timestamp('2010-01-04 00:00:00')])
            s = ts.astype(tt)
            expected = Series([tt('2010-01-04')])
            assert_series_equal(s, expected)
            ts = Series([Timestamp('2010-01-04 00:00:00', tz='US/Eastern')])
            s = ts.astype(tt)
            expected = Series([tt('2010-01-04 00:00:00-05:00')])
            assert_series_equal(s, expected)
            td = Series([Timedelta(1, unit='d')])
            s = td.astype(tt)
            expected = Series([tt('1 days 00:00:00.000000000')])
            assert_series_equal(s, expected)
    def test_astype_unicode(self):
        # GH7758
        # a bit of magic is required to set default encoding encoding to utf-8
        digits = string.digits
        test_series = [
            Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
            Series([u('データーサイエンス、お前はもう死んでいる')]),
        ]
        former_encoding = None
        if not compat.PY3:
            # in python we can force the default encoding for this test
            former_encoding = sys.getdefaultencoding()
            reload(sys)  # noqa
            sys.setdefaultencoding("utf-8")
        if sys.getdefaultencoding() == "utf-8":
            test_series.append(Series([u('野菜食べないとやばい')
                                       .encode("utf-8")]))
        for s in test_series:
            res = s.astype("unicode")
            expec = s.map(compat.text_type)
            assert_series_equal(res, expec)
        # restore the former encoding
        if former_encoding is not None and former_encoding != "utf-8":
            reload(sys)  # noqa
            sys.setdefaultencoding(former_encoding)
    def test_complexx(self):
        # GH4819
        # complex access for ndarray compat
        a = np.arange(5, dtype=np.float64)
        b = Series(a + 4j * a)
        tm.assert_numpy_array_equal(a, b.real)
        tm.assert_numpy_array_equal(4 * a, b.imag)
        b.real = np.arange(5) + 5
        tm.assert_numpy_array_equal(a + 5, b.real)
        tm.assert_numpy_array_equal(4 * a, b.imag)
| [
"jeff@reback.net"
] | jeff@reback.net |
e8b2650b9dc9cecf5f0c9ee376a71fc41204fd5f | f2a77ee7c886b4f09c73d6e67133953ee80f213a | /all7/all7.py | 54933dc0db4b2f48dcec4922fdf85b970d402899 | [] | no_license | pjpalla/new_movc | d4ac3ace71d9d1b319bb7d71f52951b6e00d4fa1 | 3559e5ff3fbab0da50c4b00f57ee4038bcfbb327 | refs/heads/master | 2021-01-19T14:24:20.719137 | 2018-05-30T08:45:37 | 2018-05-30T08:45:37 | 88,159,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,181 | py | __author__ = 'pg'
from openpyxl import *
from movc.province_data import *
from all7.all7_consts import *
import re
class All7:
    def __init__(self, template_file_path, year, province):
        """Prepare an ALL7 aggregation for ``year``/``province`` using the
        Excel template at ``template_file_path``."""
        self.year = int(year)
        self.template = load_workbook(template_file_path)
        self.province = province
    def load_xl(self, movc_xl_path):
        """Open a monthly MOVC workbook (cell values only) and cache its
        sheet names, dropping a trailing 'Riepilogo' summary sheet."""
        self.movc = load_workbook(movc_xl_path, data_only=True)
        self.sheets = self.movc.get_sheet_names()
        if self.sheets[-1] == 'Riepilogo':
            self.sheets = self.sheets[0:-1]
def get_alberghi(self, type='arrivi', category='residenti'):
tot = 0
for sheet_name in self.sheets:
current_sheet = self.movc.get_sheet_by_name(sheet_name)
# print(current_sheet.title)
RANGE = RESIDENTS_RANGE if category == 'residenti' else NON_RESIDENTS_RANGE
for i in RANGE:
idx = 'O' + str(i) if type == 'arrivi' else 'P' + str(i)
# print(current_sheet[idx].value)
tot += int(current_sheet[idx].value)
#print(tot)
return(tot)
def get_alloggi(self, type='arrivi', category='residenti'):
tot = 0
#sheet_name = self.sheets[0]
for sheet_name in self.sheets:
current_sheet = self.movc.get_sheet_by_name(sheet_name)
# print(current_sheet.title)
RANGE = RESIDENTS_RANGE if category == 'residenti' else NON_RESIDENTS_RANGE
for row_idx in RANGE:
for k in ALLOGGI_DICT.keys():
value_idx = 0 if (type == 'arrivi') else 1
idx = ALLOGGI_DICT[k][value_idx] + str(row_idx)
tot += current_sheet[idx].value
#print("row id: " + str(row_idx) + " " + str(tot_arrivi))
return(tot)
def get_campeggi(self, type="arrivi", category='residenti'):
tot = 0
for sheet_name in self.sheets:
current_sheet = self.movc.get_sheet_by_name(sheet_name)
RANGE = RESIDENTS_RANGE if category == 'residenti' else NON_RESIDENTS_RANGE
for row_idx in RANGE:
for k in CAMPEGGI_DICT.keys():
value_idx = 0 if (type == "arrivi") else 1
idx = CAMPEGGI_DICT[k][value_idx] + str(row_idx)
# print(idx)
tot += current_sheet[idx].value
return(tot)
def get_altri_alloggi(self, type='arrivi', category='residenti'):
tot = 0
for sheet_name in self.sheets:
current_sheet = self.movc.get_sheet_by_name(sheet_name)
RANGE = RESIDENTS_RANGE if category == 'residenti' else NON_RESIDENTS_RANGE
for row_idx in RANGE:
for k in ALTRI_ALLOGGI_DICT.keys():
value_idx = 0 if (type == "arrivi") else 1
idx = ALTRI_ALLOGGI_DICT[k][value_idx] + str(row_idx)
# print(idx)
tot += current_sheet[idx].value
return(tot)
def get_giornate_letto(self):
tot = 0
for sheet_name in self.sheets:
# print(sheet_name)
current_sheet = self.movc.get_sheet_by_name(sheet_name)
idx = 'P11'
tot += int(current_sheet[idx].value)
return(tot)
def get_giornate_camere(self, type = 'disponibili'):
tot = 0
idx = 'P12' if (type == 'disponibili') else 'P13'
for sheet_name in self.sheets[0:3]:
# print(sheet_name)
current_sheet = self.movc.get_sheet_by_name(sheet_name)
tot += int(current_sheet[idx].value)
return(tot)
def build_xl(self, movc_dir, output_file):
template_sheets = self.template.get_sheet_names()
file_names = []
buffer_residents = []
buffer_no_residents = []
totali_res = [0]*8
totali_no_res = [0]*8
totali = [0]*8
totali_giornate = [0]*3
try:
file_names = os.listdir(movc_dir)
except NotADirectoryError():
print("Wrong directory! Please enter the correct movc directory name")
return
self.check_movc_files(movc_dir)
for file in file_names:
file_path = os.path.join(movc_dir, file)
self.load_xl(file_path)
if(not(re.search(self.check_province(self.movc), self.province)) and int(self.check_year(self.movc))!= self.year):
# if (self.check_province(self.movc)!= self.province and self.check_year(self.movc) != self.year):
print("Wrong province or year selected!")
return
month = self.movc.active['A3'].value
#### Residents
idx_res = ALL7_RESIDENTS_RANGE[MONTHS_DICT[month]]
arrivi_alberghi_res = self.get_alberghi()
presenze_alberghi_res = self.get_alberghi(type='presenze')
arrivi_alloggi_res = self.get_alloggi()
presenze_alloggi_res = self.get_alloggi(type='presenze')
arrivi_campeggi_res = self.get_campeggi()
presenze_campeggi_res = self.get_campeggi(type = 'presenze')
arrivi_altri_alloggi_res = self.get_altri_alloggi()
presenze_altri_alloggi_res = self.get_altri_alloggi(type='presenze')
buffer_residents = [arrivi_alberghi_res, presenze_alberghi_res, arrivi_alloggi_res, presenze_alloggi_res, arrivi_campeggi_res, presenze_campeggi_res,
arrivi_altri_alloggi_res, presenze_altri_alloggi_res]
totali_res = [sum(x) for x in zip(totali_res, buffer_residents)]
###NON Residents
idx_no_res = ALL7_NON_RESIDENTS_RANGE[MONTHS_DICT[month]]
arrivi_alberghi_no_res = self.get_alberghi(category='non residenti')
presenze_alberghi_no_res = self.get_alberghi(type='presenze', category='non residenti')
arrivi_alloggi_no_res = self.get_alloggi(category='non residenti')
presenze_alloggi_no_res = self.get_alloggi(type='presenze', category='non residenti')
arrivi_campeggi_no_res = self.get_campeggi(category='non residenti')
presenze_campeggi_no_res = self.get_campeggi(type='presenze', category='non residenti')
arrivi_altri_alloggi_no_res = self.get_altri_alloggi(category='non residenti')
presenze_altri_alloggi_no_res = self.get_altri_alloggi(type='presenze', category='non residenti')
buffer_no_residents = [arrivi_alberghi_no_res, presenze_alberghi_no_res, arrivi_alloggi_no_res, presenze_alloggi_no_res, arrivi_campeggi_no_res, presenze_campeggi_no_res,
arrivi_altri_alloggi_no_res, presenze_altri_alloggi_no_res]
totali_no_res = [sum(x) for x in zip(totali_no_res, buffer_no_residents)]
totali = [sum(z) for z in zip(totali_res, totali_no_res)]
self.template.active = 0
buff_idx = 0
for c in ALL7_COL1:
self.template.active.cell(row=idx_res, column=c, value=buffer_residents[buff_idx])
self.template.active.cell(row=idx_no_res, column=c, value=buffer_no_residents[buff_idx])
buff_idx += 1
### Totals for residents and non residents
tot_idx = 0
for c in ALL7_COL1:
self.template.active.cell(row = TOT_RES_IDX, column=c, value=totali_res[tot_idx])
self.template.active.cell(row=TOT_NO_RES_IDX, column=c, value=totali_no_res[tot_idx])
self.template.active.cell(row=TOT_IDX, column=c, value=totali[tot_idx])
tot_idx += 1
### Available bed days
self.template.active = 1
giornate_letto = self.get_giornate_letto()
giornate_camere = self.get_giornate_camere()
giornate_camere_occupate = self.get_giornate_camere(type='occupate')
buffer_giornate = [giornate_letto, giornate_camere, giornate_camere_occupate]
totali_giornate = [sum(x) for x in zip(totali_giornate, buffer_giornate)]
day_idx = ALL7_DAYS_RANGE[MONTHS_DICT[month]]
g_idx = 0
for c in ALL7_COL2:
self.template.active.cell(row = day_idx, column = c, value = buffer_giornate[g_idx])
g_idx += 1
tot_day_idx = 0
for c in ALL7_COL2:
self.template.active.cell(row=TOT_DAYS_IDX, column = c, value = totali_giornate[tot_day_idx])
tot_day_idx += 1
print("...\n")
self.template.save(output_file)
def check_movc_files(self, movc_dir):
if (not os.path.isdir(movc_dir)):
print("invalid movc directory!")
return
files = os.listdir(movc_dir)
if len(files) != NUM_OF_MOVC:
print("invalid number of movc modules")
return
def check_province(self, workbook):
ws = workbook.active
province = ws[PROVINCE_CELL].value
return province
def check_year(self, workbook):
ws = workbook.active
year = ws[YEAR_CELL].value
return year | [
"palla.pg@gmail.com"
] | palla.pg@gmail.com |
952498fe3ce65449fb818515ea9a956611e27c3a | 37f48a90a33015a6e51d8b4ad839f5741a0c320f | /NoSQL_Cassandra/4_where_clause.py | 68a612d995a381451d49b6fabe6b8caf595c9534 | [] | no_license | Hadryan/Data_Engineering | 90376170a9a6a9700d1a1f32ea4b6efe6cdcbd98 | f02db4f2ffb592277b44c2807884443c910725b1 | refs/heads/master | 2020-12-14T11:01:33.399933 | 2019-12-21T15:36:33 | 2019-12-21T15:36:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,248 | py | # Since NoSQL has no JOINs, where becomes imperative
import cassandra
from cassandra.cluster import Cluster
# Demo script: in Cassandra there are no JOINs, so tables are modelled per
# query and WHERE clauses must follow the primary-key order
# (year -> artist_name -> album_name).
# NOTE(review): the broad `except Exception` blocks only print and continue,
# so a failed connection cascades NameErrors further down -- acceptable for
# a tutorial, not for production code.
print('create connection to database \n')
try:
    cluster = Cluster(['127.0.0.1'])
    session = cluster.connect()
except Exception as e:
    print(e)
print('create keyspace/database \n')
try:
    session.execute("""
    CREATE KEYSPACE IF NOT EXISTS udacity
    WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}""")
except Exception as e:
    print(e)
# connect to key space
print('connect to key space \n')
try:
    session.set_keyspace('udacity')
except Exception as e:
    print(e)
# create table with query impression : 4 queries
# query 1 = all albums in a given year
# query 2 = album realeased by 'The Beatles'
# query 3 = select city from year=1970 & artist_name=The Beatles
print('create table \n')
query = "CREATE TABLE IF NOT EXISTS songs_library "
query = query + \
    '(year int, artist_name text, album_name text, city text, PRIMARY KEY (year, artist_name, album_name))'
try:
    session.execute(query)
except Exception as e:
    print(e)
# Insert 5 rows
print('insert rows \n')
query = "INSERT INTO songs_library (year, artist_name, album_name, city)"
query = query + "values(%s, %s, %s, %s)"
try:
    session.execute(query, (1970, "The Beatles", "Let It Be", 'Liverpool'))
except Exception as e:
    print(e)
try:
    session.execute(query, (1965, "The Beatles", "Rubber Soul", 'Oxford'))
except Exception as e:
    print(e)
try:
    session.execute(query, (1965, "The Who", "My Generation", 'London'))
except Exception as e:
    print(e)
try:
    session.execute(query, (1966, "The Monkees", "The Monkees", 'Los Angeles'))
except Exception as e:
    print(e)
try:
    session.execute(query, (1970, "The Carpenters",
                            "Close To You", 'San Diego'))
except Exception as e:
    print(e)
# validate that data was inserted
print('query 1 = all albums in a given year=1970 \n')
query = "SELECT * FROM songs_library WHERE year=1970"
try:
    rows = session.execute(query)
except Exception as e:
    print(e)
for row in rows:
    print(row.year, row.artist_name, row.album_name, row.city)
print("\n query 2 = album realeased by 'The Beatles' where year=1970 \n")
query = "SELECT * FROM songs_library WHERE year=1970 AND artist_name='The Beatles' "
try:
    rows = session.execute(query)
except Exception as e:
    print(e)
for row in rows:
    print(row.year, row.artist_name, row.album_name, row.city)
print("\n query 3 = album released year=1970 AND artist_name='The Beatles' AND album_name='Let IT BE' \n ")
query = "SELECT city FROM songs_library WHERE year = 1970 AND artist_name = 'The Beatles' AND album_name = 'Let It Be' "
try:
    rows = session.execute(query)
except Exception as e:
    print(e)
for row in rows:
    print(row.city)
# drop table
print("\n drop table \n")
query = "DROP TABLE songs_library"
try:
    rows = session.execute(query)
except Exception as e:
    print(e)
# close session & cluster connection
print('close session & connection \n')
session.shutdown()
cluster.shutdown()
| [
"noreply@github.com"
] | noreply@github.com |
16671546e785ab5c884f8db12fe6dd201b9ae9e7 | a3b36f0c29241e5d4f96b1bfb06a1edf7f44117d | /organizer/organizer/asgi.py | 9373a72befcbee44b38a86c37c6bb1210b09ee3f | [] | no_license | druidmaciek/skillhq | 48d75057aaa6b68d62725a3a375bad467339cadc | 6be97540a32e58bcbd7273932ce3d1074b1c409e | refs/heads/master | 2022-11-30T23:44:42.585938 | 2020-08-06T21:14:53 | 2020-08-06T21:14:53 | 280,944,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
ASGI config for organizer project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Select the project's settings module before the ASGI app is instantiated.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "organizer.settings")
# Module-level ASGI callable that ASGI servers (daphne, uvicorn, ...) serve.
application = get_asgi_application()
| [
"maciekjanowski42@icloud.com"
] | maciekjanowski42@icloud.com |
046a8a61f00b1937867df9402f663f9cc3f5cff4 | ebf50a1aa0aa84a020a1e5a7dfdc8b4428e1eeb7 | /reminder_app/manage.py | 034315700cc1cb26ce8196c37cc1ca2740b5fd24 | [] | no_license | puchmichal/reminder_app | c487d5304b3c4fec52ab6f5031fdc9c72011fbee | 3aa6ff4f1f291f3ee763a3cb7119959cbffdb48f | refs/heads/master | 2020-06-07T20:21:29.392208 | 2019-06-25T19:35:51 | 2019-06-25T19:35:51 | 193,086,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility with this project's settings."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reminder_app.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint, keeping the original cause chained.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"alicja.kocieniewska@op.pl"
] | alicja.kocieniewska@op.pl |
823cdff25f1566335481806d80b1386c3ff00042 | 7b568c2420bb13be6978305d908e9b9561da5695 | /Barcos.py | a28b7510cb1bd1c38b3d94c252e111a596c3550e | [] | no_license | alexdjukic/test | 691d930add0e73e4de4f1d3ee02ca683e87e79bd | 290be80a93ca745185b8b923d8cddb5d6b48524b | refs/heads/master | 2021-03-15T13:00:26.079188 | 2020-03-12T14:23:29 | 2020-03-12T14:23:29 | 246,852,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | class Barcos:
def __init__(self,pos_x,pos_y,vidas):
self.pos_x = pos_x
self.pos_y = pos_y
self.vidas = vidas | [
"alejandrodjukic99@gmail.com"
] | alejandrodjukic99@gmail.com |
4088843b646eab6f6b40d2158cddb8ac622154dd | f0acc407f95b758fa734f5ed5f6506a8b20d2706 | /tests/test_tutorial/test_options/test_name/test_tutorial004_an.py | 087b436d55d07adedb8c0365657f3f42ab29d946 | [
"MIT"
] | permissive | shnups/typer | ede6d86c5b169e8caa7823b0552f8531ed041f84 | e0b207f3f577cb2e59fdd60da39686a2f5ed0e77 | refs/heads/master | 2023-08-31T01:54:21.168547 | 2023-08-01T09:36:09 | 2023-08-01T09:36:09 | 313,047,732 | 0 | 0 | MIT | 2020-11-15T14:22:06 | 2020-11-15T14:22:05 | null | UTF-8 | Python | false | false | 1,018 | py | import subprocess
import sys
import typer
from typer.testing import CliRunner
from docs_src.options.name import tutorial004_an as mod
runner = CliRunner()  # drives the Typer app in-process for the tests below
app = typer.Typer()
app.command()(mod.main)  # mount the tutorial's main() as the app's single command
def test_option_help():
    """--help must advertise -n/--user-name (TEXT) but not the default --name."""
    result = runner.invoke(app, ["--help"])
    assert result.exit_code == 0
    for expected in ("-n", "--user-name", "TEXT"):
        assert expected in result.output
    assert "--name" not in result.output
def test_call():
    """The short -n flag greets the given user."""
    outcome = runner.invoke(app, ["-n", "Camila"])
    assert outcome.exit_code == 0
    assert "Hello Camila" in outcome.output
def test_call_long():
    """The long --user-name flag greets the given user."""
    outcome = runner.invoke(app, ["--user-name", "Camila"])
    assert outcome.exit_code == 0
    assert "Hello Camila" in outcome.output
def test_script():
    """Running the tutorial module under coverage prints its usage text."""
    cmd = [sys.executable, "-m", "coverage", "run", mod.__file__, "--help"]
    completed = subprocess.run(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding="utf-8",
    )
    assert "Usage" in completed.stdout
| [
"noreply@github.com"
] | noreply@github.com |
02840fe2d32009474eb5ee755a0eebc44dec3007 | 80444ee4732e227e28e392d0cedbabc47445c32f | /Scripts/Tutorials/20-11-30-MLM_StableGAN.py | 7904e788b6a02c30f836e9a92ebd70e5625f083a | [] | no_license | canafarci/MARCH_Repo | 1432a9f4b99f99bc30280ef8a0a8035f62cd5fc1 | 5a061e8dd65c8bef66d8ec6883eba1ed4ba1b57a | refs/heads/main | 2023-06-04T22:59:24.678920 | 2021-06-23T19:43:21 | 2021-06-23T19:43:21 | 320,902,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,060 | py | from os import makedirs
from numpy import expand_dims
from numpy import zeros
from numpy import ones
from numpy.random import randn
from numpy.random import randint
from keras.datasets.mnist import load_data
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Reshape
from keras.layers import Flatten, Dropout
from keras.layers import Conv2D
from keras.layers import Conv2DTranspose
from keras.layers import LeakyReLU
from keras.layers import BatchNormalization
from keras.initializers import RandomNormal
from matplotlib import pyplot
# define the standalone discriminator model
def define_discriminator(in_shape=(28,28,1)):
    """Build and compile the discriminator: a small CNN that maps a
    28x28x1 image to a real(1)/fake(0) probability.

    Returns the compiled Keras model (binary cross-entropy, Adam 2e-4).
    """
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # define model
    model = Sequential()
    # downsample to 14x14
    model.add(Conv2D(16, (4,4), strides=(2,2), padding="same", kernel_initializer=init, input_shape=in_shape))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    # downsample to 7x7
    model.add(Conv2D(16, (3,3), strides=(2,2), padding="same", kernel_initializer=init))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    # classifier
    model.add(Flatten())
    model.add(Dense(1, activation="sigmoid"))
    # compile model
    # NOTE(review): `lr` is the legacy Keras argument name (newer releases
    # use `learning_rate`) -- fine for the Keras version this targets.
    opt = Adam(lr=0.0002, beta_1=0.5)
    model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
    return model
# define the standalone generator model
def define_generator(latent_dim):
    """Build the generator mapping a latent vector to a 28x28x1 image in [-1, 1]."""
    weight_init = RandomNormal(stddev=0.02)
    gen = Sequential()
    # project the latent vector onto a 7x7x128 feature volume
    foundation_nodes = 128 * 7 * 7
    gen.add(Dense(foundation_nodes, kernel_initializer=weight_init, input_dim=latent_dim))
    gen.add(LeakyReLU(alpha=0.2))
    gen.add(Reshape((7, 7, 128)))
    # upsample 7x7 -> 14x14
    gen.add(Conv2DTranspose(128, (4,4), strides=(2,2), padding="same", kernel_initializer=weight_init))
    gen.add(BatchNormalization())
    gen.add(LeakyReLU(alpha=0.2))
    # upsample 14x14 -> 28x28
    gen.add(Conv2DTranspose(128, (4,4), strides=(2,2), padding="same", kernel_initializer=weight_init))
    gen.add(BatchNormalization())
    gen.add(LeakyReLU(alpha=0.2))
    # single-channel output; tanh matches the [-1, 1] image scaling
    gen.add(Conv2D(1, (7,7), activation="tanh", padding="same", kernel_initializer=weight_init))
    return gen
# define the combined generator and discriminator model, for updating the generator
def define_gan(generator, discriminator):
    """Stack generator -> frozen discriminator; used only to update the generator."""
    # freeze discriminator weights while gradients flow back to the generator
    discriminator.trainable = False
    combined = Sequential()
    combined.add(generator)
    combined.add(discriminator)
    combined.compile(loss="binary_crossentropy", optimizer=Adam(lr=0.0002, beta_1=0.5))
    return combined
# load mnist images
def load_real_samples():
    """Return MNIST training images of the digit 8, scaled to [-1, 1], with a channel axis."""
    (train_images, train_labels), (_, _) = load_data()
    volume = expand_dims(train_images, axis=-1)   # (N, 28, 28) -> (N, 28, 28, 1)
    volume = volume[train_labels == 8]            # keep only the digit-8 examples
    volume = volume.astype("float32")
    return (volume - 127.5) / 127.5               # [0, 255] -> [-1, 1]
# select real samples
def generate_real_samples(dataset, n_samples):
    """Pick n_samples random rows of *dataset*, paired with "real" labels (ones)."""
    picks = randint(0, dataset.shape[0], n_samples)
    samples = dataset[picks]
    labels = ones((n_samples, 1))
    return samples, labels
# generate points in latent space as input for the generator
def generate_latent_points(latent_dim, n_samples):
    """Draw n_samples standard-normal latent vectors, shaped (n_samples, latent_dim)."""
    flat = randn(latent_dim * n_samples)
    return flat.reshape(n_samples, latent_dim)
# use the generator to generate n fake examples, with class labels
def generate_fake_samples(generator, latent_dim, n_samples):
    """Synthesise n_samples images from *generator*, paired with "fake" labels (zeros)."""
    latent = generate_latent_points(latent_dim, n_samples)
    images = generator.predict(latent)
    labels = zeros((n_samples, 1))
    return images, labels
# generate samples and save as a plot and save the model
def summarize_performance(step, g_model, latent_dim, n_samples=100):
    """Save a 10x10 grid of generated images and a generator checkpoint for *step*."""
    # prepare fake examples
    X, _ = generate_fake_samples(g_model, latent_dim, n_samples)
    # scale from [-1,1] to [0,1] for display
    X = (X + 1) / 2.0
    # plot images on a 10x10 grid (requires n_samples >= 100)
    for i in range(10 * 10):
        # define subplot
        pyplot.subplot(10, 10, 1 + i)
        # turn off axis
        pyplot.axis("off")
        # plot raw pixel data
        pyplot.imshow(X[i, :, :, 0], cmap="gray_r")
    # save plot to file (NOTE: Windows-style separators; the directories must already exist)
    pyplot.savefig("__ganResults\\StableGAN\\results_baseline\\generated_plot_%03d.png" % (step+1))
    pyplot.close()
    # save the generator model
    g_model.save("_models\\_StableGAN\\results_baseline\\model_%03d.h5" % (step+1))
# create a line plot of loss for the gan and save to file
def plot_history(d1_hist, d2_hist, g_hist, a1_hist, a2_hist):
    """Plot loss histories (top) and discriminator accuracies (bottom), then save to disk."""
    # plot loss
    pyplot.subplot(2, 1, 1)
    # FIX: legend labels contained mojibake quote characters ("✬d-real✬"); restored plain text
    pyplot.plot(d1_hist, label='d-real')
    pyplot.plot(d2_hist, label='d-fake')
    pyplot.plot(g_hist, label='gen')
    pyplot.legend()
    # plot discriminator accuracy
    pyplot.subplot(2, 1, 2)
    pyplot.plot(a1_hist, label='acc-real')
    pyplot.plot(a2_hist, label='acc-fake')
    pyplot.legend()
    # save plot to file (Windows-style path; the directory must already exist)
    pyplot.savefig("__ganResults\\StableGAN\\results_baseline\\plot_line_plot_loss.png")
    pyplot.close()
# train the generator and discriminator
def train(g_model, d_model, gan_model, dataset, latent_dim, n_epochs=10, n_batch=128):
    """Alternate discriminator and generator updates over *dataset*.

    Each step trains d_model on half a batch of real and half a batch of
    generated images, then updates the generator through gan_model using
    inverted ("real") labels.  Once per epoch a sample grid and checkpoint are
    written; the loss/accuracy history is plotted after the final step.
    """
    # calculate the number of batches per epoch
    bat_per_epo = int(dataset.shape[0] / n_batch)
    # calculate the total iterations based on batch and epoch
    n_steps = bat_per_epo * n_epochs
    # calculate the number of samples in half a batch
    half_batch = int(n_batch / 2)
    # prepare lists for storing stats each iteration
    d1_hist, d2_hist, g_hist, a1_hist, a2_hist = list(), list(), list(), list(), list()
    # manually enumerate epochs
    for i in range(n_steps):
        # get randomly selected 'real' samples
        X_real, y_real = generate_real_samples(dataset, half_batch)
        # update discriminator model weights
        d_loss1, d_acc1 = d_model.train_on_batch(X_real, y_real)
        # generate 'fake' examples
        X_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)
        # update discriminator model weights
        d_loss2, d_acc2 = d_model.train_on_batch(X_fake, y_fake)
        # prepare points in latent space as input for the generator
        X_gan = generate_latent_points(latent_dim, n_batch)
        # create inverted labels for the fake samples
        y_gan = ones((n_batch, 1))
        # update the generator via the discriminator's error
        g_loss = gan_model.train_on_batch(X_gan, y_gan)
        # summarize loss on this batch
        print(">%d, d1=%.3f, d2=%.3f g=%.3f, a1=%d, a2=%d" % (i+1, d_loss1, d_loss2, g_loss, int(100*d_acc1), int(100*d_acc2)))
        # record history
        d1_hist.append(d_loss1)
        d2_hist.append(d_loss2)
        g_hist.append(g_loss)
        a1_hist.append(d_acc1)
        a2_hist.append(d_acc2)
        # evaluate the model performance once per 'epoch'
        if (i+1) % bat_per_epo == 0:
            summarize_performance(i, g_model, latent_dim)
    plot_history(d1_hist, d2_hist, g_hist, a1_hist, a2_hist)
# --- script body: build the models, load the data and run training ---
# size of the latent space
latent_dim = 50
# create the discriminator
discriminator = define_discriminator()
# create the generator
generator = define_generator(latent_dim)
# create the gan
gan_model = define_gan(generator, discriminator)
# load image data
dataset = load_real_samples()
print(dataset.shape)
# train model (NOTE: runs at import time — there is no __main__ guard)
train(generator, discriminator, gan_model, dataset, latent_dim)
| [
"ismetberke@gmail.com"
] | ismetberke@gmail.com |
b89e80dfa0cc39a704abda127e10ede67b971951 | 9cdab664a653b70d61d4d453d5fd3e1e5f37af62 | /89.py | 3552c4a9fb3e1394952f3fea640c96e81619b31b | [] | no_license | Devikd/devi | 5ca77ad2a60470b470434cd13c1cbf87757ec0f1 | 60f0f7cdb09c43456003f47c837a494545c19e7f | refs/heads/master | 2020-12-25T15:17:40.053844 | 2019-01-23T06:32:24 | 2019-01-23T06:32:24 | 66,335,503 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | print("".join(sorted(sorted(input()), key=string.upper)))
| [
"noreply@github.com"
] | noreply@github.com |
789b54608551ecc1777ca44be7846156b06717dd | c44e80bf9d3b12f22d87dabf005eda6e76ef651e | /tango_with_django_project/rango/migrations/0003_auto_20170121_0107.py | 1b7ebdd89200944e22e7a0ad414ed984652a11e5 | [] | no_license | TasosAg/rango | ba636fd45bc0934f4f50c23c3577e8f09d5cbda1 | 65677a77e8addd3a91ba7deb3339f35718a01c0c | refs/heads/master | 2021-01-11T18:33:24.550363 | 2017-01-24T02:20:32 | 2017-01-24T02:20:32 | 79,568,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-21 01:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add integer 'likes' and 'views' counters
    (defaulting to 0) to the rango Category model."""
    # must run after the previous rango migration
    dependencies = [
        ('rango', '0002_auto_20170121_0057'),
    ]
    operations = [
        migrations.AddField(
            model_name='category',
            name='likes',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='category',
            name='views',
            field=models.IntegerField(default=0),
        ),
    ]
| [
"tasosagathokleous@gmail.com"
] | tasosagathokleous@gmail.com |
0369784a34cd085bb1a8b0d7a1105c483726a2dd | 552a29d8b2f8e4a035ae88275e02978e7c350970 | /train/views.py | 72be4944301568942767f01ef10c801f14db3e11 | [] | no_license | arunraj753/chatbot | be6906acbfd557a5a2665bafe851da3cdb9a6478 | 444a72995ae907c2e0fbc644144a051a3c5c8d6f | refs/heads/main | 2023-03-03T10:33:02.029447 | 2021-02-04T07:32:10 | 2021-02-04T07:32:10 | 333,600,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,461 | py | from django.shortcuts import render
from rest_framework.response import Response
from rest_framework.decorators import api_view
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import json
from .base import NueralNetWeb,tokenize,stem,bag_of_words
def loadCustomerJSON():
    """Load the customer-facing intents from cust_intents.json (UTF-8)."""
    # explicit encoding keeps JSON parsing independent of the platform locale
    with open('cust_intents.json', 'r', encoding='utf-8') as f:
        return json.load(f)
def loadAuthJSON():
    """Load the authentication intents from auth_intents.json (UTF-8)."""
    # explicit encoding keeps JSON parsing independent of the platform locale
    with open('auth_intents.json', 'r', encoding='utf-8') as f:
        return json.load(f)
def loadAdminJSON():
    """Load the admin intents from admin_intents.json (UTF-8)."""
    # explicit encoding keeps JSON parsing independent of the platform locale
    with open('admin_intents.json', 'r', encoding='utf-8') as f:
        return json.load(f)
@api_view(['GET'])
def train(request):
    """Train one intent classifier per intents file and save it as '<key>.pth'.

    For each of the customer / auth / admin intent sets: tokenize and stem
    every pattern, build bag-of-words training vectors, fit a NueralNetWeb
    classifier with Adam + cross-entropy, and persist the model state together
    with the vocabulary and tag list for later inference.
    """
    cust_intents = loadCustomerJSON()
    auth_intents = loadAuthJSON()
    admin_intents = loadAdminJSON()
    intents_dict = {"cust_data":cust_intents,"auth_data":auth_intents,"admin_data":admin_intents}
    for key in intents_dict:
        intents = intents_dict[key]
        all_words = []
        tags = []
        xy = []
        # collect (tokenized pattern, tag) pairs plus the raw vocabulary
        for intent in intents['intents']:
            tag = intent['tag']
            tags.append(tag)
            for pattern in intent['patterns']:
                w = tokenize(pattern)
                all_words.extend(w)
                xy.append((w, tag))
        # stem and de-duplicate the vocabulary, dropping bare punctuation tokens
        ignore_words = ['?', '.', '!']
        stemmed_unique_words = [stem(w) for w in all_words if w not in ignore_words]
        stemmed_unique_words = sorted(set(stemmed_unique_words))
        # NOTE(review): tags is sorted but not de-duplicated — assumes each tag
        # occurs once per intents file; duplicates would skew tags.index() below.
        tags=sorted(tags)
        X_train = []
        y_train = []
        for (pattern_sentence, tag) in xy:
            bag = bag_of_words(pattern_sentence, stemmed_unique_words)
            X_train.append(bag)
            label = tags.index(tag)
            y_train.append(label)
        X_train = np.array(X_train)
        y_train = np.array(y_train)
        print("Xtrain",X_train.shape,y_train.shape)
        # network and training hyper-parameters
        input_size = len(X_train[0])
        output_size = len(tags)
        hidden_size = 8
        num_epochs = 1000
        batch_size = 8
        learning_rate = 0.001
        class WebDataset(Dataset):
            """In-memory torch Dataset over the bag-of-words vectors."""
            def __init__(self):
                self.n_samples = len(X_train)
                self.x_data = X_train
                self.y_data = y_train
            def __getitem__(self,index):
                return self.x_data[index],self.y_data[index]
            def __len__(self):
                return self.n_samples
        dataset = WebDataset()
        train_loader = DataLoader(dataset=dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=0)
        basic_model = NueralNetWeb(input_size,hidden_size,output_size)
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(basic_model.parameters(),lr=learning_rate)
        # standard mini-batch training loop; progress logged every 100 epochs
        for epoch in range(num_epochs):
            for (words,labels) in train_loader:
                outputs = basic_model(words)
                loss =criterion(outputs,labels)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            if (epoch+1) % 100 == 0:
                print (f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
        print(f'final loss: {loss.item():.4f}')
        print(f"Training COmplete - {key}")
        # bundle everything inference will need alongside the weights
        basic_model_data = {
            "model_state": basic_model.state_dict(),
            "input_size": input_size,
            "hidden_size": hidden_size,
            "output_size": output_size,
            "stemmed_unique_words": stemmed_unique_words,
            "tags": tags
        }
        print(tags)
        filename = key+'.pth'
        FILE = filename
        torch.save(basic_model_data, FILE)
        print(f'Training complete. File saved to {FILE}')
    return Response({'Bot':'Training complete. File saved'})
| [
"arunraj753@gmail.com"
] | arunraj753@gmail.com |
eb6a8f7da9c4bcaff2db10a52426f6a119af66c9 | a1c9c55e1520356113a320be18e8fcb31654a944 | /archive/0.9/generated/seaborn-violinplot-1.py | 87adeb3dba3ac608c3ceeb5db1a699327d0c16bb | [] | no_license | seaborn/seaborn.github.io | bac12a9255b41c7971e9e94ea393d372ef66ef62 | f70445bc3456f0216169806c2daf03452ca1eba4 | refs/heads/master | 2023-01-06T10:50:10.789810 | 2022-12-30T19:59:55 | 2022-12-30T19:59:55 | 70,731,605 | 16 | 5 | null | 2022-06-28T00:32:07 | 2016-10-12T18:56:12 | HTML | UTF-8 | Python | false | false | 123 | py | import seaborn as sns
# Horizontal violin plot of the total_bill column of seaborn's "tips" example dataset.
sns.set(style="whitegrid")
tips = sns.load_dataset("tips")  # seaborn example dataset
ax = sns.violinplot(x=tips["total_bill"])
| [
"mwaskom@nyu.edu"
] | mwaskom@nyu.edu |
8f70fe0ad38628cf5ee741fd548122e15bd679dd | ed6591c2fe4d13c6f6f236b66f01c04ade8e717a | /rpi_sensehat_mqtt.py | f3cd620e152748a1deea45bbdd45fb6f70d89f1c | [] | no_license | mirkodcomparetti/rpi-sensehat_mqtt | 77e3113a1f13bb54dcd95cd70835e1b455668725 | d6fe4c50f287cad14f65815ec648bdfc0685b17d | refs/heads/main | 2023-02-27T12:09:52.388579 | 2021-02-06T14:29:28 | 2021-02-06T14:29:28 | 334,609,204 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,190 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# This scripts reads sensors from SenseHAT and streams them on MQTT
from sense_hat import SenseHat
import logging
import os
import paho.mqtt.client as mqtt
import uuid
import json
from rfc3986 import urlparse
import signal
from threading import Event
import socket
import time
class RpiSenseHatMqtt:
    """Read Raspberry Pi Sense HAT sensors and stream them as JSON over MQTT.

    Configuration comes from RPI_SENSEHAT_MQTT_* environment variables
    (broker URI, topic prefix, location/measurement tags, publish cycle).
    """
    def __init__(self):
        """Wire up signal handlers, validate the broker URI and set up MQTT + Sense HAT."""
        self.logger = logging.getLogger('rpi_sensehat_mqtt.RpiSenseHatMqtt')
        # flips to True only once MQTT and the Sense HAT are fully set up
        self.initialized = False
        topic_prefix = os.environ.get('RPI_SENSEHAT_MQTT_TOPIC_PREFIX', "sensehat")
        # normalise so topic concatenation below always yields "<prefix>/<suffix>"
        self.topic_prefix = topic_prefix if topic_prefix.endswith("/") else (topic_prefix + "/")
        self.logger.info("Begin initialize class RpiSenseHatMqtt")
        self.logger.debug("Capturing signals")
        # SIGINT/SIGTERM trigger cleanup() so the streaming loop exits gracefully
        signal.signal(signal.SIGINT, self.cleanup)
        signal.signal(signal.SIGTERM, self.cleanup)
        self.broker_url = None
        self.broker_port = None
        self.broker_user = None
        if not self._validate_info(
            os.environ.get('RPI_SENSEHAT_MQTT_BROKER', "mqtt://test.mosquitto.org:1883")
        ):
            self.logger.error("Broker information not valid")
        else:
            self.logger.info("Initialize MQTT")
            # random client id avoids collisions when several instances share a broker
            self.mqtt_client = mqtt.Client(client_id=str(uuid.uuid4()))
            self.mqtt_client.on_connect = self._on_connect
            self.mqtt_client.on_message = self._on_message
            self.mqtt_client.on_publish = self._on_publish
            self.hostname = socket.gethostname()
            self.location = os.environ.get('RPI_SENSEHAT_MQTT_LOCATION', "studio")
            self.measurement = os.environ.get('RPI_SENSEHAT_MQTT_MEASUREMENT', "environment")
            self.logger.info("Initialize SenseHAT")
            self.sense = SenseHat()
            self.sense.clear()
            # seconds between sensor publications
            self.streaming_cycle = int(os.environ.get('RPI_SENSEHAT_MQTT_CYCLE', 60))
            # set() on this event stops the streaming loop
            self.streaming_exit = Event()
            self.initialized = True
            self.sense.show_message(os.environ.get('RPI_SENSEHAT_MQTT_WELCOME', "Loaded!"))
            self.sense.low_light = True
            self.logger.info("Done initialize class RpiSenseHatMqtt")
    def cleanup(self, signum, frame):
        """Signal handler: stop streaming and tear down the MQTT connection."""
        self.logger.info("Cleanup")
        self.streaming_exit.set()
        # nothing else to tear down if initialisation never completed
        if not self.initialized:
            return None
        if self.mqtt_client.is_connected():
            self.mqtt_client.disconnect()
        self.mqtt_client.loop_stop()
    def _validate_info(self, broker_info):
        """Parse *broker_info* (an mqtt:// or ws:// URI) into url/port/user.

        Returns True when the scheme is supported and both host and port are
        present; the parsed pieces are stored on the instance as a side effect.
        """
        self.logger.debug("Validating " + broker_info)
        parseduri = urlparse(broker_info)
        if not (parseduri.scheme in ["mqtt", "ws"]):
            return False
        self.broker_url = parseduri.host
        self.broker_port = parseduri.port
        self.broker_user = parseduri.userinfo
        self.logger.debug("broker_user {}".format(self.broker_user))
        self.logger.debug("broker_url {}, broker_port: {}".format(self.broker_url, self.broker_port))
        if not (self.broker_url and self.broker_port):
            return False
        return True
    def _on_connect(self, client, userdata, flags, rc):
        """MQTT callback: subscribe to the commands topic once connected."""
        self.logger.info("Connected with result code " + str(rc))
        self.mqtt_client.subscribe(self.topic_prefix + "commands")
    def _on_message(self, client, userdata, msg):
        """MQTT callback: handle JSON commands (currently only 'ledwall' text display)."""
        self.logger.debug(msg.topic + " " + str(msg.payload))
        if msg.topic in [self.topic_prefix + "commands"]:
            command = json.loads(msg.payload)
            if 'ledwall' in command.keys():
                self.logger.debug("Writing message on the LedWall: {}".format(command["ledwall"]))
                self.sense.show_message(command["ledwall"])
    def _on_publish(self, client, userdata, result):
        """MQTT callback: publishing needs no follow-up action."""
        pass
    def connect(self):
        """Open the MQTT connection (30 s keepalive) when configuration is valid."""
        if self.initialized and self.broker_url and self.broker_port:
            self.logger.debug("{}:{}".format(self.broker_url, self.broker_port))
            self.mqtt_client.connect(self.broker_url, self.broker_port, 30)
    def _stream_sensors(self):
        """Publish one sensor reading every streaming_cycle seconds until stopped."""
        while not self.streaming_exit.is_set():
            js_on_message = self._read_sensors()
            js_on_message["measurement"] = self.measurement
            js_on_message["source"] = self.hostname
            js_on_message["location"] = self.location
            js_on_message = json.dumps(js_on_message)
            self.logger.debug("js_on_message {}".format(js_on_message))
            self.mqtt_client.publish(self.topic_prefix + "readings", payload=js_on_message, qos=0, retain=False)
            # wait() doubles as the cycle timer and an interruptible stop point
            self.streaming_exit.wait(self.streaming_cycle)
    def _read_sensors(self):
        """Return one timestamped sample of pressure, temperature, humidity and acceleration."""
        sensor_reading = {
            # epoch milliseconds
            "time": int(round(time.time() * 1000)),
            "pressure": round(self.sense.get_pressure(), 3),
            "temperature": {
                "01": round(self.sense.get_temperature(), 3),
                # second estimate derived from the pressure sensor
                "02": round(self.sense.get_temperature_from_pressure(), 3),
            },
            "humidity": round(self.sense.get_humidity(), 3),
            # raw accelerometer values are in g; multiply by 9.80665 for m/s^2
            "acceleration": {
                "x": round(self.sense.get_accelerometer_raw().get("x") * 9.80665, 3),
                "y": round(self.sense.get_accelerometer_raw().get("y") * 9.80665, 3),
                "z": round(self.sense.get_accelerometer_raw().get("z") * 9.80665, 3),
            }
        }
        return sensor_reading
    def start(self):
        """Start the MQTT network loop and block in the sensor-streaming loop."""
        if not self.initialized:
            return None
        self.mqtt_client.loop_start()
        self._stream_sensors()
# Log to the broadcaster's shared log file with millisecond timestamps.
logging.basicConfig(
    filename='/var/log/rpi_broadcaster/rpi_sensehat_mqtt.log',
    format='%(asctime)s.%(msecs)03d %(levelname)s\t[%(name)s] %(message)s',
    datefmt='%Y-%m-%dT%H:%M:%S'
)
logger = logging.getLogger("rpi_sensehat_mqtt")
# the env var may hold a level name such as "INFO"; setLevel accepts names and ints
logger.setLevel(os.environ.get('RPI_SENSEHAT_MQTT_LOGLEVEL', logging.DEBUG))
if __name__ == "__main__":
    # Start RpiSenseHatMqtt app
    logger.info("Starting RpiSenseHatMqtt service")
    root = RpiSenseHatMqtt()
    root.connect()
    logger.info("Run main loop - wait for stop signal")
    root.start()
    logger.info("Stopping main loop")
| [
"comparetti.mirko@gmail.com"
] | comparetti.mirko@gmail.com |
4408f2da3cc0458926f976eb6d208f94a4dbb331 | 05a2097cbc167c0d8cfde5a039600c6994a34232 | /custom/penn_state/constants.py | 74aac3cb2025c0fd4a0abd1312f7931d10a6287f | [] | no_license | shashanks/commcare-hq | 9c641a4d830cd523410be150c2d341c4edbce38a | 44c2bd56bcb746f1f6c7b624ddefbe4215fc791c | refs/heads/master | 2020-12-11T06:12:36.705418 | 2013-12-17T08:35:23 | 2013-12-17T08:35:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | DOMAIN = 'psu-legacy-together'
# XForm namespace URIs identifying each form type used by this project.
DAILY_DATA_XMLNS = 'http://openrosa.org/formdesigner/B6E92793-CB42-449C-ACE7-99B0E65FE3AE'
COACH_RESPONSE_XMLNS = 'http://openrosa.org/formdesigner/D42C8CAB-F17C-4E9C-921C-CA47E6AECE15'
WEEKLY_SCHEDULE_XMLNS = 'http://openrosa.org/formdesigner/F2F7A739-BDEF-4D14-B60F-371AFE901B71'
| [
"esoergel@gmail.com"
] | esoergel@gmail.com |
d43603f5c98e5e8ab97362f215fce6ccdbfaa8b2 | b43863a2d04e9000a411a488b42823092c65d067 | /skeleton_code/configure_and_build.py | 18d94139ada1d8c874bef3d1733221d15b8298ef | [] | no_license | elskorda/scifopyFinal | 3d4a60295361a77fa64c4e99f0db01785e3309f1 | 58ac035d555492c3e811dfc19cc19237f6e0c2ae | refs/heads/master | 2022-12-28T06:43:53.729754 | 2020-10-16T21:10:13 | 2020-10-16T21:10:13 | 296,747,966 | 0 | 0 | null | 2020-09-22T09:51:15 | 2020-09-18T23:17:02 | Fortran | UTF-8 | Python | false | false | 2,803 | py | #!/usr/bin/env python
import os, sys, subprocess, platform, shutil
# distutils config written on Windows so Python extensions build with MinGW
pydistutils_cfg_template = """[build]
compiler=mingw32"""
def cmd(cmd):
    """Run *cmd* in a shell and return its captured stdout split into lines."""
    # text=True is required: without it stdout is bytes and the str split below
    # raises TypeError the first time this function is called
    cp = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if platform.system() == "Windows":
        return cp.stdout.split("\r\n")
    else:
        return cp.stdout.split("\n")
def cmd_l(cmd):
    """Run *cmd* in a shell with its output passed straight through to the console."""
    # the CompletedProcess result was previously bound but never used
    subprocess.run(cmd, shell=True)
def create_build_dir(build_dir="build"):
    """(Re)create an empty build directory and return its absolute path."""
    if os.path.exists(build_dir):
        print("Removing existing build directory")
        # pass the path directly; the old '"./%s" % build_dir' form broke for absolute paths
        shutil.rmtree(build_dir)
    os.mkdir(build_dir)
    return os.path.abspath(build_dir)
def check_build_dir(build_dir="build"):
    """Return the absolute path of *build_dir*, or "" (after a message) when absent."""
    if os.path.exists(build_dir):
        return os.path.abspath(build_dir)
    print("No build directory. Please configure first.")
    return ""
def setup_pydistutils_win():
    """Write pydistutils.cfg into %USERPROFILE% so distutils uses MinGW (Windows only)."""
    user_profile_path = os.environ["USERPROFILE"]
    with open(os.path.join(user_profile_path, "pydistutils.cfg"), "w") as f:
        f.write(pydistutils_cfg_template)
def configure_and_build(build_dir="build"):
    """Run CMake configuration plus a full build inside *build_dir*, restoring the cwd."""
    curr_cwd = os.getcwd()
    os.chdir(build_dir)
    if platform.system() == "Windows":
        # Windows toolchain: MinGW makefiles + distutils compiler override
        setup_pydistutils_win()
        cmd_l('cmake -G"MinGW Makefiles" ..')
        cmd_l('mingw32-make')
    else:
        cmd_l('cmake ..')
        cmd_l('make')
    os.chdir(curr_cwd)
def build(build_dir="build"):
    """Re-run the native build in an already-configured *build_dir*, restoring the cwd."""
    curr_cwd = os.getcwd()
    os.chdir(build_dir)
    if platform.system() == "Windows":
        cmd_l('mingw32-make')
    else:
        cmd_l('make')
    os.chdir(curr_cwd)
def setup_run_dir(run_dir="bin"):
    """(Re)create an empty runtime directory and return its absolute path."""
    if os.path.exists(run_dir):
        print("Removing existing run directory")
        # pass the path directly; the old '"./%s" % run_dir' form broke for absolute paths
        shutil.rmtree(run_dir)
    os.mkdir(run_dir)
    return os.path.abspath(run_dir)
def copy_runtime_files(build_dir, run_dir):
    """Copy the built artefacts (extension modules, libraries, executables) into *run_dir*."""
    if platform.system() == "Windows":
        cmd_l("copy %s\\*.pyd %s" % (build_dir, run_dir))
        cmd_l("copy %s\\*.dll %s" % (build_dir, run_dir))
        cmd_l("copy %s\\*.exe %s" % (build_dir, run_dir))
    else:
        cmd_l("cp %s/*.so %s" % (build_dir, run_dir))
        cmd_l("cp %s/particles %s" % (build_dir, run_dir))
if __name__ == "__main__":
    # no argument: configure + build from scratch; "build": incremental rebuild only
    if len(sys.argv)==1:
        print("Configuring and building application...")
        build_dir = create_build_dir()
        print("Builddir:", build_dir)
        configure_and_build(build_dir)
        run_dir = setup_run_dir()
        copy_runtime_files(build_dir, run_dir)
    elif sys.argv[1]=="build":
        print("Building application...")
        build_dir = check_build_dir()
        if build_dir == "":
            # check_build_dir already printed the reason
            sys.exit(-1)
        print("Builddir:", build_dir)
        build(build_dir)
        run_dir = setup_run_dir()
        copy_runtime_files(build_dir, run_dir)
| [
"eleni.skorda@cern.ch"
] | eleni.skorda@cern.ch |
b649bb21ea563e3765210bd62d99d5b730a5b950 | 8fb2668de046fb47ffb3e0964746b400e75b7c83 | /crawl/fake_spider/tushare/kData.py | 79515c39159d08946ce04bb198cc6e7d8deaf6af | [] | no_license | reinhardtken/backtest-py | 5d8f080861851882d954f4bb944a8d374220498e | 6d14b10918c018081ab228030d2b3ac38eea267c | refs/heads/master | 2020-12-06T17:01:33.284011 | 2020-02-11T15:07:42 | 2020-02-11T15:07:42 | 232,512,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,473 | py | # -*- encoding: utf-8 -*-
# sys
import json
import datetime
# thirdpart
import pandas as pd
import tushare as ts
from pymongo import MongoClient
# this project
##########################
import util.crawl as util
import const.crawl as const
#http://tushare.org/classifying.html#id8
# code   : stock code (股票代码)
# name   : stock name (股票名称)
# date   : date (日期)
# weight : weight (权重)
def getLastK(code):
    """Fetch roughly the last week of daily bars for *code*, indexed by date.

    Returns a DataFrame indexed by 'date' with the redundant 'code' column
    dropped, or None (after printing the error) if download/parsing fails.
    """
    end = util.today().strftime('%Y-%m-%d')
    start = util.weekAgo().strftime('%Y-%m-%d')
    try:
        df = ts.get_k_data(code, start=start, end=end)
        df.loc[:, 'date'] = pd.to_datetime(df.loc[:, 'date'])
        df.set_index('date', inplace=True)
        df.drop('code', axis=1, inplace=True)
        return df
    except Exception as e:
        # NOTE(review): errors are only printed; callers receive None and must cope
        print(e)
def getKData(code, starts='2001-01-01'):
    """Fetch daily bars for *code* from *starts* onwards (tushare's default price adjustment).

    Returns a DataFrame indexed by 'date' with the 'code' column dropped, or
    None (after printing the error) on failure.
    """
    try:
        df = ts.get_k_data(code, start=starts, index=False)
        df.loc[:, 'date'] = pd.to_datetime(df.loc[:, 'date'])
        df.set_index('date', inplace=True)
        df.drop('code', axis=1, inplace=True)
        return df
    except Exception as e:
        print(e)
def getKDataRecent(code):
    """Fetch the last ~15 days of daily bars for *code*.

    Delegates to getKData, which performs the identical download/cleanup and
    error handling — this removes the previously duplicated body.
    """
    starts = (datetime.datetime.now() - datetime.timedelta(days=15)).strftime('%Y-%m-%d')
    return getKData(code, starts)
def getKDataNoneRecent(code):
    """Fetch the last ~15 days of unadjusted daily bars for *code*.

    Delegates to getKDataNone (autype=None, index=False), which performs the
    identical download/cleanup and error handling — removes duplicated code.
    """
    starts = (datetime.datetime.now() - datetime.timedelta(days=15)).strftime('%Y-%m-%d')
    return getKDataNone(code, starts)
def getKDataNone(code, starts='2001-01-01', index=False):
    """Fetch unadjusted (autype=None) daily bars for *code*; set index=True for index quotes.

    Returns a DataFrame indexed by 'date' with the 'code' column dropped, or
    None (after printing the error) on failure.
    """
    try:
        df = ts.get_k_data(code, start=starts, autype=None, index=index)
        df.loc[:, 'date'] = pd.to_datetime(df.loc[:, 'date'])
        df.set_index('date', inplace=True)
        df.drop('code', axis=1, inplace=True)
        return df
    except Exception as e:
        print(e)
def saveDB(data: pd.DataFrame, code, handler=None):
    """Upsert *data* into const.KData.DB_NAME, collection COLLECTION_D_HEAD + code."""
    def callback(result):
        # per-document callback is currently a no-op; old notification hook kept for reference:
        # handler.send_message(handler.project_name, result, self._date + '_' + result['_id'])
        pass
    re = util.updateMongoDB(data, util.genKeyCodeFunc('date'), const.KData.DB_NAME,
                            const.KData.COLLECTION_D_HEAD + code, True, callback)
    # util.everydayChange(re, 'gpfh')
# This variant stores forward-adjusted ("qfq") prices.
def RunOne(code, force=False):
    """Download the full daily history for *code* into the 'stock_all_kdata' DB.

    Skips the download when a collection named *code* already exists, unless
    *force* is True, in which case the existing collection is dropped first.
    """
    client = MongoClient()
    db = client['stock_all_kdata']
    collectionLIst = db.list_collection_names()
    # NOTE(review): saveDB2 writes to COLLECTION_D_HEAD + code while this existence
    # check uses the bare code — confirm COLLECTION_D_HEAD is "" or the check never matches.
    if not force and code in collectionLIst:
        print("exist {}".format(code))
    else:
        # when forcing an update, drop the previously stored data first
        if force and code in collectionLIst:
            db.drop_collection(code)
        re = getKData(code)
        saveDB2(re, code)
def saveDB2(data: pd.DataFrame, code, handler=None):
    """Upsert *data* into the 'stock_all_kdata' DB, collection COLLECTION_D_HEAD + code."""
    def callback(result):
        # per-document callback is currently a no-op
        pass
    util.updateMongoDB(data, util.genKeyCodeFunc('date'), "stock_all_kdata",
                       const.KData.COLLECTION_D_HEAD + code, True, callback)
# This variant stores unadjusted (raw) prices.
def RunOneNone(code):
    """Download the full unadjusted daily history for *code* into 'stock_all_kdata_none'."""
    client = MongoClient()
    db = client['stock_all_kdata_none']
    collectionList = db.list_collection_names()
    # NOTE(review): same collection-name caveat as RunOne — saveDB3 prefixes the
    # collection with COLLECTION_D_HEAD but this check uses the bare code.
    if code in collectionList:
        print("exist {}".format(code))
    else:
        re = getKDataNone(code)
        saveDB3(re, code)
# Refresh roughly the last month of data.
def RunOneNoneRecent(code):
    """Download ~31 days of unadjusted daily bars for *code* and upsert them."""
    now = datetime.datetime.now()
    starts = now - datetime.timedelta(days=31)
    # starts = datetime.datetime(now.year, now.month, 1)
    starts = starts.strftime('%Y-%m-%d')
    re = getKDataNone(code, starts)
    saveDB3(re, code)
def RunHS300IndexRecent():
    """Refresh the last ~15 days of the CSI 300 index (code 000300, unadjusted)."""
    now = datetime.datetime.now()
    starts = now - datetime.timedelta(days=15)
    # starts = datetime.datetime(now.year, now.month, 1)
    starts = starts.strftime('%Y-%m-%d')
    re = getKDataNone('000300', starts, index=True)
    saveDB3(re, '000300')
def RunHS300Index():
    """Download the full unadjusted history of the CSI 300 index (code 000300)."""
    frame = getKDataNone('000300', starts='2001-01-01', index=True)
    saveDB3(frame, '000300')
def saveDB3(data: pd.DataFrame, code, handler=None):
    """Upsert *data* into the 'stock_all_kdata_none' DB, collection COLLECTION_D_HEAD + code."""
    def callback(result):
        # per-document callback is currently a no-op
        pass
    util.updateMongoDB(data, util.genKeyCodeFunc('date'), "stock_all_kdata_none",
                       const.KData.COLLECTION_D_HEAD + code, True,
                       callback)
| [
"reinhardtken@hotmail.com"
] | reinhardtken@hotmail.com |
88e53339a9db9c2268bfe7d1a0e74a0f75ea5325 | c8b19983b149c8ac5418f745fd5c09d121542565 | /2021/Day9/main.py | a66112cb0d7696c136fa506145cc5bcaecf9ea21 | [] | no_license | nathan-castlehow/Advent-of-Code | 33eb144f54e308c6bdcd4e764c84d044c22f8821 | 9bf2c05f159d25d3c7b5477441da43265ce7de12 | refs/heads/master | 2022-12-12T16:09:00.297495 | 2022-12-03T12:35:04 | 2022-12-03T12:35:04 | 160,012,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | import os
from read_input import read_input_as_string
def part_one():
    """AoC 2021 day 9 part 1: print the summed risk levels (height + 1) of all low points."""
    abs_file_path = os.path.join(os.path.dirname(__file__), "input")
    input_data = read_input_as_string(abs_file_path)
    # grid of single-digit heights, one row per input line
    height_map = [[int(height) for height in line] for line in input_data]
    low_points = []
    for y in range(0, len(height_map)):
        for x in range(0, len(height_map[y])):
            current_val = height_map[y][x]
            # a low point must be strictly lower than all four in-grid neighbours
            if (
                is_lower_location(x, y, x, y - 1, height_map)
                and is_lower_location(x, y, x, y + 1, height_map)
                and is_lower_location(x, y, x + 1, y, height_map)
                and is_lower_location(x, y, x - 1, y, height_map)
            ):
                low_points.append(current_val)
            else:
                pass
    # risk level is height + 1 per low point, so the total is sum(heights) + count
    risk_level_total = sum(low_points) + len(low_points)
    print(f"Total risk level: {risk_level_total}")
def is_lower_location(x, y, unsafe_x, unsafe_y, height_map):
    """True when (unsafe_x, unsafe_y) is off-grid or strictly higher than (x, y)."""
    in_bounds = 0 <= unsafe_y < len(height_map) and 0 <= unsafe_x < len(height_map[unsafe_y])
    if not in_bounds:
        # off-grid neighbours never disqualify a low point
        return True
    return height_map[y][x] < height_map[unsafe_y][unsafe_x]
# Script entry point: run only when executed directly, not when imported.
if __name__ == '__main__':
    part_one()
| [
"nathan_castlehow@me.com"
] | nathan_castlehow@me.com |
e352ffcfee2d7d2105a792b34e0d2a1a53fbdbe9 | 3c7ffa1bbc840ed65e46e9c0d750d8d5aaaf98ce | /microblog/config.py | f9aef962180c7d1929268ffe307be88fee9409be | [] | no_license | Aiumi/flask_app | c304716bbd4a5b741bf0899f63667b66b3dd2940 | cb42024646d1008ce58143bfc01c4fef60e3264a | refs/heads/master | 2020-04-24T21:27:48.186963 | 2019-03-23T23:37:24 | 2019-03-23T23:37:24 | 172,278,861 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
    """Flask application settings; each value can be overridden via environment variables."""
    # session/CSRF signing key — the hard-coded fallback is for development only
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
    # defaults to a SQLite file next to this module when DATABASE_URL is unset
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'app.db')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # outgoing mail server for error reports; mail is effectively off when unset
    MAIL_SERVER = os.environ.get('MAIL_SERVER')
    MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
    # TLS is enabled by the mere presence of the variable, regardless of its value
    MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    # recipients of error notifications — placeholder address, replace in deployment
    ADMINS = ['your-email@example.com']
POSTS_PER_PAGE = 25 | [
"brandonaiumiyen@gmail.com"
] | brandonaiumiyen@gmail.com |
c0b21454b14d5c5f5a92746deb05883f597628c0 | 25f6a4f4f52ed4f20545b1568629d20688f70d3c | /measurement/hostname2ip_processor.py | 80747165e9f89452bb6103baab350c245820cc66 | [
"MIT"
] | permissive | akashlevy/CDN-Measurement | 8ee6c4010e6e63d7606093f010d2e1bb06a2e3a0 | 9d862a9b8de62ebdcfc18daf04bc39b02952f413 | refs/heads/master | 2020-03-15T23:58:26.080163 | 2018-05-07T07:08:20 | 2018-05-07T07:08:20 | 132,404,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | import json
with open('hostname2ip_clean', 'w') as clean_out, open('ipaddrs', 'w') as ip_out:
for line in open('hostname2ip'):
spl = line.split()
for ip in spl[1:]:
clean_out.write('%s,%s\n' % (spl[0], ip))
ip_out.write('%s\n' % ip)
| [
"akashlevy@gmail.com"
] | akashlevy@gmail.com |
177511eb917f0c04de3ac00852473301adffedd1 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Scraper/scrapy/tests/test_command_version.py | f8c4ac141c2766133ad886ccb9a77791d7dbb1dc | [
"BSD-3-Clause"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:73dce6f404541d9151c420cb22ff641258ce3d66e825df13aa289ff4a5c1f1ad
size 1058
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
48fbf91914aa9dc043a228725ccfaf3904c3d812 | d363ee05c8946166b4bb0cc4d74c737d4f2d7f44 | /1.Tuples and sets/7.battles_of_names.py | 270b16c7177b7b072d0c81da1479bc30027afc80 | [] | no_license | Ivan-Ivanoff/SoftUni | bf0f2b8140fb9445774996eab4a672574dc3bc69 | d2348a3269435de20c377de463c2d97e23b41e64 | refs/heads/master | 2022-10-22T12:22:33.066671 | 2020-06-16T17:58:54 | 2020-06-16T17:58:54 | 268,321,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | N = int(input())
odd_set = set()
even_set = set()
for i in range(1, N + 1):
name = input()
the_sum = sum(ord(char) for char in name) // i
if the_sum % 2 == 0:
even_set.add(the_sum)
else:
odd_set.add(the_sum)
odd_sum = sum(odd_set)
even_sum = sum(even_set)
if odd_sum == even_sum:
union_values = odd_set.union(even_set)
print(", ".join([str(x) for x in union_values]))
elif odd_sum > even_sum:
different_values = odd_set.difference(even_set)
print(", ".join([str(x) for x in different_values]))
else:
symetric_values = odd_set.symmetric_difference(even_set)
print(", ".join([str(x) for x in symetric_values])) | [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.