blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0e2f70099440b615923d57dd03854b52d18673ef | bb9896781f055b83b97251f1a6c0e87b2f025ac6 | /news/migrations/0001_initial.py | e538de1f1454d9dfe4871b4abb9201e5ed7aea0c | [] | no_license | PeteWillmott/milestone4 | 0d2d7c7df60c6cf0cad508aa8347f2940f10a45a | da2cf03297618aea25b784988f7f521dac000cff | refs/heads/master | 2022-12-04T19:30:32.600926 | 2019-10-06T07:26:24 | 2019-10-06T07:26:24 | 204,655,484 | 0 | 0 | null | 2022-11-22T04:16:31 | 2019-08-27T08:22:50 | Python | UTF-8 | Python | false | false | 1,778 | py | # Generated by Django 2.2.5 on 2019-09-30 16:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration for the news app (Django 2.2.5).

    Creates the ``Review`` and ``News`` models. Edit with care: applied
    migrations must stay consistent with the database's migration history.
    """

    # This is the first migration of the app.
    initial = True

    dependencies = [
        # Review.productID references catalogue.Catalogue, so that app's
        # initial migration must run first.
        ('catalogue', '0001_initial'),
        # Both models have an `author` FK to the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField(null=True)),
                # Defaults to the creation time; timezone.now is passed as a
                # callable so it is evaluated per-row.
                ('pub_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('productID', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='catalogue.Catalogue')),
            ],
        ),
        migrations.CreateModel(
            name='News',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField(null=True)),
                # Optional image; files are stored under MEDIA_ROOT/images/.
                ('image', models.ImageField(blank=True, upload_to='images/')),
                ('pub_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"bastior@yahoo.co.uk"
] | bastior@yahoo.co.uk |
250c4c8ab7899fc1b2ec647def496cdb675324f8 | 3c40ca2134b712506d636ac2216908d741281d8a | /mybb_online | 516db73003a87de442130541c4264cabaedd732f | [
"Unlicense"
] | permissive | Cameron-D/munin-mybb | 8f19872d04b25b28f86331fb7dbaa90b35857c6b | fa3a5b5d4030da8007685719aba1617c1ec83761 | refs/heads/master | 2021-01-25T10:43:45.623752 | 2013-10-12T10:30:49 | 2013-10-12T10:30:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | #!/usr/bin/env python
# Munin plugin: report the number of members and guests currently online on
# a MyBB forum, scraped from the board's front page.
# NOTE: this is Python 2 code (urllib2, print statement) — it will not run
# under Python 3 without porting.
import sys, urllib2, re, os
# Check for configuration
# The board URL must be supplied via the munin plugin environment.
board_url = os.environ.get('board_url')
if(board_url == None):
    exit(0)
# Munin calls the plugin with the "config" argument to discover the graph
# definition; emit it and exit without fetching the board.
if(len(sys.argv) > 1 and sys.argv[1] == "config"):
    print """
graph_title Online Users
graph_vlabel current users
graph_args -l 0
graph_category MyBB
graph_total Total
members.label Members
guests.label Guests
members.draw AREA
guests.draw STACK
"""
    exit(0)
# Load the main page
response = urllib2.urlopen(board_url)
html = response.read()
# Extract the data
# Matches MyBB's "N members, M of whom are invisible, and K guests" footer;
# thousands separators (commas) are allowed and stripped below.
re_online = re.compile(r"([\d\,]+) members?, [\d\,]+ of whom (are|is) invisible, and ([\d\,]+) guests?")
online = re_online.search(html)
# Output values
# If the pattern is not found (board layout changed), print nothing so
# munin records the values as unknown.
if(online != None):
    print ("members.value %s" % online.group(1)).replace(',', '')
    print ("guests.value %s" % online.group(3)).replace(',', '') | [
"camerondew@live.com"
] | camerondew@live.com | |
73e17f513b68e689f3237af1b2aeec2bc805442b | 6c55eb879fd547bae1a8168f1fd884791e729876 | /MultiLabel_test.py | 1fee1c911c955d7b0ba136dde39beba4db123f30 | [] | no_license | yfhanhust/multilabel | b5ac8f6ab37d75d407a62d615bef736186b19684 | 61907d25991cadd4e8df181a82cfb8ea3ffa37d2 | refs/heads/master | 2021-01-20T09:57:23.711654 | 2018-01-12T09:27:47 | 2018-01-12T09:27:47 | 90,312,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,726 | py | import numpy as np
import scipy as sp
import downhill
import theano
from sklearn.metrics import roc_auc_score
def baselinePU(Y,label_loc,alpha,vlambda,kx):
    """Rank-kx factorisation of the label matrix Y with PU-style weighting.

    The entries listed in `label_loc` are masked (set to 0) before fitting.
    All entries contribute a squared error weighted by (1 - alpha); the
    *observed* entries get an extra (2*alpha - 1) weight, so observed cells
    carry total weight alpha and masked cells weight (1 - alpha).
    L2 penalties weighted by `vlambda` are applied to both factors.
    Returns the learned numpy factors (W, H) with Y ~= W @ H.T.
    """
    #random_mat = np.random.random(Y.shape)
    #label_loc = np.where(random_mat < label_fraction) ## locate the masked entries in the label matrix
    #### print statistics
    #print np.where(Y[label_loc] > 0)[0].shape[0] / float(np.where(Y > 0)[0].shape[0]) ## the ratio of "1" entries being masked
    #print np.where(Y[label_loc] < 1)[0].shape[0] / float(np.where(Y < 1)[0].shape[0]) ## the ratio of "0" entries being masked
    # Shared variables: the two low-rank factors, randomly initialised.
    W = theano.shared(np.random.random((Y.shape[0],kx)),name='W')
    H = theano.shared(np.random.random((Y.shape[1],kx)),name='H')
    # labelmask is 1 on observed entries, 0 on the masked ones.
    labelmask = np.ones(Y.shape)
    labelmask[label_loc] = 0
    Y_masked = Y.copy()
    Y_masked[label_loc] = 0
    reconstruction = theano.tensor.dot(W, H.T)
    X_symbolic = theano.tensor.matrix(name="Y_masked", dtype=Y_masked.dtype)
    difference = theano.tensor.sqr((X_symbolic - reconstruction)) * (1 - alpha)
    positive_difference = theano.tensor.sqr((X_symbolic - reconstruction) * labelmask) * (2*alpha-1.)
    mse = difference.mean() + positive_difference.mean()
    loss = mse + vlambda * (W * W).mean() + vlambda * (H * H).mean()
    # RMSProp via downhill; updates W and H in place (shared variables).
    downhill.minimize(
        loss=loss,
        train=[Y_masked],
        patience=0,
        algo='rmsprop',
        batch_size=Y_masked.shape[0],
        max_gradient_norm=1,
        learning_rate=0.06,
        min_improvement = 0.00001)
    return W.get_value(),H.get_value()
def acc_label(Y,W,H,label_loc):
    """ROC-AUC of the low-rank label reconstruction W @ H.T, evaluated
    only on the held-out entries listed in `label_loc`."""
    scores = np.dot(W, H.T)
    truth = Y[label_loc].tolist()
    predicted = scores[label_loc].tolist()
    # roc_auc_score comes from the file-level sklearn.metrics import.
    return roc_auc_score(np.array(truth), np.array(predicted))
def acc_feature(X,U,V,fea_loc):
    """Frobenius-style reconstruction error of U @ V.T against X,
    measured only on the held-out entries listed in `fea_loc`."""
    approx = np.dot(U, V.T)
    residual = X[fea_loc] - approx[fea_loc]
    return np.linalg.norm(residual)
def completionLR(X,kx,fea_loc,lambdaU,lambdaV):
    """Low-rank matrix completion of the feature matrix X alone.

    The entries in `fea_loc` are excluded from the squared-error loss via a
    binary mask; L2 penalties (lambdaU, lambdaV) regularise the two rank-kx
    factors. Returns numpy factors (U, V) with X ~= U @ V.T.
    """
    # mask is 1 on observed entries, 0 on the held-out ones.
    mask = np.ones(X.shape)
    mask[fea_loc] = 0.
    #### Theano and downhill
    U = theano.shared(np.random.random((X.shape[0],kx)),name='U')
    V = theano.shared(np.random.random((X.shape[1],kx)),name='V')
    X_symbolic = theano.tensor.matrix(name="X", dtype=X.dtype)
    reconstruction = theano.tensor.dot(U, V.T)
    difference = X_symbolic - reconstruction
    masked_difference = difference * mask
    err = theano.tensor.sqr(masked_difference)
    mse = err.mean()
    xloss = mse + lambdaU * (U * U).mean() + lambdaV * (V * V).mean()
    #### optimisation
    # downhill infers the symbolic inputs from the loss graph here.
    downhill.minimize(
        loss= xloss,
        train = [X],
        patience=0,
        algo='rmsprop',
        batch_size=X.shape[0],
        max_gradient_norm=1,
        learning_rate=0.1,
        min_improvement = 0.0001)
    return U.get_value(),V.get_value()
def completionPUV(X,Y,fea_loc,label_loc,alpha,lambda0,lambda1,lambda2,delta,kx):
    """Joint completion of feature matrix X (~ U @ V.T) and label matrix Y
    (~ U @ H.T) with PU-weighted label loss.

    The feature loss ignores the masked `fea_loc` entries; the label loss
    uses the same alpha weighting as baselinePU over the `label_loc` mask.
    `delta` balances the two losses, lambda0/lambda1 are L2 weights, and
    lambda2 ties the row factors via ||U - W||^2.
    Returns numpy factors (U, V, W, H).

    NOTE(review): the label reconstruction uses U (not W) as U @ H.T, while
    W only appears through the lambda2 coupling term — confirm intended.
    """
    #delta = 0.3
    ### masking out some entries from feature and label matrix
    mask = np.ones(X.shape)
    mask[fea_loc] = 0.
    labelmask = np.ones(Y.shape)
    labelmask[label_loc] = 0
    #### Theano and downhill
    U = theano.shared(np.random.random((X.shape[0],kx)),name='U')
    V = theano.shared(np.random.random((X.shape[1],kx)),name='V')
    W = theano.shared(np.random.random((Y.shape[0],kx)),name='W')
    H = theano.shared(np.random.random((Y.shape[1],kx)),name='H')
    X_symbolic = theano.tensor.matrix(name="X", dtype=X.dtype)
    reconstruction = theano.tensor.dot(U, V.T)
    difference = X_symbolic - reconstruction
    masked_difference = difference * mask
    err = theano.tensor.sqr(masked_difference)
    mse = err.mean()
    xloss = mse + lambda0 * ((U * U).mean() + (V * V).mean())
    Y_symbolic = theano.tensor.matrix(name="Y", dtype=Y.dtype)
    Y_reconstruction = theano.tensor.dot(U, H.T)
    Ydifference = theano.tensor.sqr((Y_symbolic - Y_reconstruction)) * (1 - alpha)
    positive_difference = theano.tensor.sqr((Y_symbolic - Y_reconstruction) * labelmask) * (2*alpha-1.)
    Ymse = Ydifference.mean() + positive_difference.mean()
    global_loss = xloss + delta * Ymse + lambda1 * ((W * W).mean() + (H * H).mean()) + lambda2 * theano.tensor.sqr((U-W)).mean()
    #### optimisation
    downhill.minimize(
        loss=global_loss,
        train = [X,Y],
        inputs = [X_symbolic,Y_symbolic],
        patience=0,
        algo='rmsprop',
        batch_size=Y.shape[0],
        max_gradient_norm=1,
        learning_rate=0.1,
        min_improvement = 0.0001)
    return U.get_value(),V.get_value(),W.get_value(),H.get_value()
def TPAMI(X,Y,fea_loc_x,fea_loc_y,label_loc_x,label_loc_y,miu,lambda0,kx):
    """Baseline: joint matrix completion of stacked [Y; X] with a squared
    feature loss and a smoothed-hinge (logistic, sharpness gamma=15) label
    loss, factored as M ~= U @ V.T with rank kx.
    Returns numpy factors (U, V) of the transposed stacked matrix.
    """
    ### X: feature matrix
    ### Y: label matrix
    ### fea_loc_x, fea_loc_y: masked entries in feature matrix
    ### label_loc_x, label_loc_y: masked entries in label matrix
    ### miu: regularisation parameter on matrix rank
    ### lambda0: regularisation parameter on label reconstruction
    ### kx: dimensionality of latent variables used for solving nuclear norm based regularisation
    # Stack labels on top of features, then transpose: M is
    # (label_dim + fea_dim) x n_samples.
    M = np.concatenate((Y,X),axis=1)
    M = M.T
    label_dim = Y.shape[1]
    fea_dim = X.shape[1]
    gamma = 15.
    # NOTE(review): the masks are 1 on *masked* coordinates here only where
    # explicitly zeroed below; rows of the feature mask are offset by
    # label_dim because of the stacking above.
    featuremask = np.ones(M.shape)
    labelmask = np.ones(M.shape)
    for i in range(len(label_loc_x)):
        labelmask[label_loc_y[i],label_loc_x[i]] = 0.
    for i in range(len(fea_loc_x)):
        featuremask[fea_loc_y[i]+label_dim,fea_loc_x[i]] = 0.
    #### Theano and downhill
    U = theano.shared(np.random.random((M.shape[0],kx)),name='U')
    V = theano.shared(np.random.random((M.shape[1],kx)),name='V')
    #### feature loss
    M_symbolic = theano.tensor.matrix(name="M", dtype=M.dtype)
    reconstruction = theano.tensor.dot(U, V.T)
    difference = M_symbolic - reconstruction
    masked_difference = difference * featuremask
    err = theano.tensor.sqr(masked_difference)
    mse = err.mean()
    xloss = (1./float(len(fea_loc_x))) * mse + miu * ((U * U).mean() + (V * V).mean())
    #### label loss
    # Smoothed logistic surrogate: labels are mapped from {0,1} to {-1,+1}
    # via (2*M - 1) before applying the margin-based loss.
    label_reconstruction_kernel = -1 * gamma * (2 * M - 1) * (reconstruction - M)
    label_reconstruction_difference = (1./gamma) * theano.tensor.log(1 + theano.tensor.exp(label_reconstruction_kernel)) * labelmask
    label_err = (1./float(len(label_loc_x))) * label_reconstruction_difference.mean()
    global_loss = xloss + lambda0 * label_err
    #### optimisation
    downhill.minimize(
        loss=global_loss,
        train = [M],
        inputs = [M_symbolic],
        patience=0,
        algo='rmsprop',
        batch_size= M.shape[0],
        max_gradient_norm=1,
        learning_rate=0.1,
        min_improvement = 0.01)
    return U.get_value(),V.get_value()
#### generate data
#### yeast: classes 14, data: 1500+917, dimensionality: 103
# NOTE(review): the comment above describes the yeast dataset, but the code
# below actually loads Mediamill (43907 samples, 120 features, 101 labels).
train_file = open('Mediamill_data.txt','r')
train_file_lines = train_file.readlines(100000000000000000)
train_file.close()
train_fea = np.zeros((43907,120),dtype=float)
train_label = np.zeros((43907,101),dtype=int)
# Parse LIBSVM-style multilabel lines: "l1,l2,... f1:v1 f2:v2 ...".
# The first line of the file is a header and is skipped (range starts at 1).
for k in range(1,len(train_file_lines)):
    data_segs = train_file_lines[k].split(' ')
    label_line = data_segs[0]
    labels = label_line.split(',')
    if (len(labels) == 0) or (labels[0] == ''):
        # No labels on this line: the row stays all-zero.
        train_label[k-1,0] = 0
    else:
        # Labels in the file are 1-based; convert to 0-based columns.
        for i in range(len(labels)):
            train_label[k-1,int(labels[i])-1] = 1
    for i in range(1,len(data_segs)):
        fea_pair = data_segs[i].split(':')
        fea_idx = int(fea_pair[0])
        fea_val = float(fea_pair[1])
        train_fea[k-1,fea_idx] = fea_val
### test
# Accumulators: "gd_" lists hold the single-matrix baselines
# (baselinePU / completionLR), the others hold the joint model.
gd_reconstruction_error_list = []
gd_auc_score_list = []
reconstruction_error_list = []
auc_score_list = []
kx = 10
alpha = (1. + 0.5)/2
# Fraction of feature / label entries to mask out (hold out) per round.
fea_fraction = 0.8
label_fraction = 0.8
# Full grid search over the four regularisation parameters, with 10
# random masking rounds per setting.
for lambda0 in [10,1,0.1,0.01]:
    for lambda1 in [10,1,0.1,0.01]:
        for lambda2 in [10,1,0.1,0.01]:
            for delta in [10,1,0.1]:
                for iround in range(10): ### repeat for 10 times
                    fea_mask = np.random.random(train_fea.shape)
                    fea_loc = np.where(fea_mask < fea_fraction)
                    random_mat = np.random.random(train_label.shape)
                    label_loc = np.where(random_mat < label_fraction) ## locate the masked entries in the label matrix
                    W_pu,H_pu = baselinePU(train_label,label_loc,alpha,lambda1,kx)
                    auc_score = acc_label(train_label,W_pu,H_pu,label_loc)
                    gd_auc_score_list.append(auc_score)
                    U,V,W,H = completionPUV(train_fea,train_label,fea_loc,label_loc,alpha,lambda0,lambda1,lambda2,delta,kx) #(X,Y,fea_loc,label_loc,alpha,lambda0,lambda1,lambda2,delta,kx)
                    auc_score = acc_label(train_label,W,H,label_loc)
                    reconstruction_error = acc_feature(train_fea,U,V,fea_loc)
                    auc_score_list.append(auc_score)
                    reconstruction_error_list.append(reconstruction_error)
                    U_lr, V_lr = completionLR(train_fea,kx,fea_loc,lambda0,lambda0)
                    reconstruction_error = acc_feature(train_fea,U_lr,V_lr,fea_loc)
                    gd_reconstruction_error_list.append(reconstruction_error)
# Rebuild the grid (same nesting order as above) so each result index can
# be mapped back to its (lambda0, lambda1, lambda2, delta) setting.
parameters_setting = []
for lambda0 in [10,1,0.1,0.01]:
    for lambda1 in [10,1,0.1,0.01]:
        for lambda2 in [10,1,0.1,0.01]:
            for delta in [10,1,0.1]:
                parameters_setting.append((lambda0,lambda1,lambda2,delta))
import pickle
with open('results_15.pickle','wb') as f:
    pickle.dump([gd_reconstruction_error_list,gd_auc_score_list,reconstruction_error_list,auc_score_list,parameters_setting],f)
| [
"yfhan.hust@gmail.com"
] | yfhan.hust@gmail.com |
716119ca0680e969a5c9b15d2f93c196e377873b | 7b4e9342d42be2b55af5dc23a8abedd672d68e99 | /MobileApps/libs/flows/web/jweb/eventing_plugin.py | 83d4c6d1bad5eef4658ff26f41ebc08452999a87 | [] | no_license | Amal548/QAMA | af5bb335c92a90b461f1ee9a3870435d83d46802 | b5230c51d3bc7bb04b3448d1a1fe5a076d8898d5 | refs/heads/master | 2023-07-12T09:17:04.624677 | 2021-08-06T08:01:11 | 2021-08-06T08:01:11 | 389,595,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,065 | py | from MobileApps.libs.flows.web.jweb.jweb_flow import JwebFlow
import json
class EventingPlugin(JwebFlow):
    """Flow wrapper for the Jweb "eventing" plugin test page.

    Each method drives or reads one UI element through the appium-style
    ``self.driver`` object inherited from :class:`JwebFlow`.
    """
    flow_name = "eventing_plugin"

    ########################################################################################################################
    #                                                                                                                      #
    #                                                  ACTION FLOWS                                                        #
    #                                                                                                                      #
    ########################################################################################################################
    def select_eventing_dispatch_open(self):
        """
        clicks the eventing dispatch open item
        :return:
        """
        self.driver.click("eventing_dispatch_open_item")

    def select_eventing_dispatch_close(self):
        """
        clicks the eventing dispatch close item

        BUGFIX: this method was previously also named
        ``select_eventing_dispatch_open``, which shadowed the method above
        so the *open* item could never be clicked via this class.
        :return:
        """
        self.driver.click("eventing_dispatch_close_item")

    def select_eventing_plugin_test(self):
        """
        swipes up, then clicks the eventing plugin test button
        :return:
        """
        self.driver.swipe(direction="up")
        self.driver.click("eventing_test_button")

    def eventing_test_result(self):
        """
        :return: eventing test result text
        """
        return self.driver.wait_for_object("eventing_test_result_txt").text

    def add_listener_multiple_event_results(self):
        """
        :return: add multiple event result text
        """
        return self.driver.wait_for_object("multiple_event_result_text").text

    def add_listener_event_result(self):
        """
        :return: add listener test result, parsed from the element's JSON "value" attribute
        """
        return json.loads(self.driver.get_attribute(obj_name="add_listener_test_result_txt", attribute="value"))

    def add_listener_test_result(self):
        """
        swipes down, then returns the add listener test result text
        :return: add listener test result text
        """
        self.driver.swipe(direction="down")
        return self.driver.wait_for_object("add_listener_test_result_text").text

    def select_add_listener_pop_up_close_btn(self):
        """
        clicks the add listener pop up close btn
        :return:
        """
        self.driver.click("add_listener_pop_up_close_btn")

    def get_add_listener_pop_up_toast_text(self):
        """
        :return: dict with 'main_text' and 'sub_text' read from the toast
                 pop-up notification (index 0 and 1 of the same object)
        """
        pop_up_toast_text = {}
        pop_up_toast_text['main_text'] = self.driver.wait_for_object("pop_up_toast_text", index=0).text
        pop_up_toast_text['sub_text'] = self.driver.wait_for_object("pop_up_toast_text", index=1).text
        return pop_up_toast_text

    def select_add_listener_test_btn(self):
        """
        clicks the add listener test btn
        :return:
        """
        self.driver.click("eventing_add_listener_btn")

    def enter_add_listener_event(self, option):
        """
        sends name of event listener in Eventing.addListener() tab
        :param option: listener name to type
        :return:
        """
        self.driver.send_keys("eventing_native_element_listener_field", option)

    def enter_name_field(self, option):
        """
        types `option` into the event name field
        :param option: text to type
        :return:
        """
        self.driver.send_keys("eventing_name_field", option)

    def enter_data_field(self, option):
        """
        types `option` into the event data field
        :param option: text to type
        :return:
        """
        self.driver.send_keys("eventing_data_field", option)

    def select_jarvis_event_option_test(self):
        """
        clicks the send jarvis event test btn
        :return:
        """
        self.driver.click("eventing_send_jarvis_test_btn")

    def jarvis_event_option_test_result(self):
        """
        :return: text after clicking jarvis event option test btn
        """
        return self.driver.find_object("eventing_jarvis_options_test_result").text
"amal.muthiah@hp.com"
] | amal.muthiah@hp.com |
14355c1414d251292712c30f688f2025f929116d | 8f9fdc8730aa11f5f0a29b0399fa73a53d9530ca | /Assignment 7 (functions)/dæmi1.py | f98d34d264773b3243e5ac2e5b5fb5fba4aa880d | [] | no_license | ballib/Forritun1 | 0048aa41dbd4a829814232df9d653ef6e845b549 | 7f4041a5ac974d5622f005498efffcfc452b3f1a | refs/heads/master | 2020-07-20T05:21:24.022182 | 2019-12-13T19:10:08 | 2019-12-13T19:10:08 | 206,579,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | # find_min function definition goes here
def find_min(a=None, b=None):
    """Return the smaller of two numbers.

    Generalized from the original global-based version: pass the two values
    as arguments to make the function self-contained. For backward
    compatibility, calling it with no arguments (as the script below does)
    falls back to the module-level globals ``first`` and ``second``.

    :param a: first number (defaults to global ``first``)
    :param b: second number (defaults to global ``second``)
    :return: the minimum of the two values
    """
    x = first if a is None else a
    y = second if b is None else b
    # min() states the intent directly instead of the original if/else.
    return min(x, y)
# Prompt for two integers; find_min() reads these module-level globals.
first = int(input("Enter first number: "))
second = int(input("Enter second number: "))
# Call the function here
print("Minimum: ", find_min())
"baldurb2@gmail.com"
] | baldurb2@gmail.com |
f675d32ecf2cf6d0afef4b975674d4adfdfd0bb2 | 6e990147c181182ef244b0d3682f17eca1dab012 | /Python/classes.py | 0f802e8b7a1d72f4a19c2f82912d9d495837496c | [] | no_license | alisheryuldashev/playground | 3cda8df3c365d8631a87d923ae4bfce07e5cd77e | 39fa0f4016c6938cbef10373406030aee81ff90d | refs/heads/master | 2020-03-20T21:48:02.887530 | 2018-06-18T17:22:17 | 2018-06-18T17:22:17 | 137,761,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | #this snippet shows how to create a class in Python.
#this program will prompt for information, append same into a class, and output a message.
#create a class called Student
class Student:
    """A student record holding a name and a dorm assignment."""

    def __init__(self, name, dorm):
        # Expose the constructor arguments as plain public attributes.
        self.name = name
        self.dorm = dorm
#import custom functions from CS50 library used in Harvard Introduction to Computer Science course.
from cs50 import get_string
#import class Student
# NOTE(review): this import rebinds the name Student, shadowing the class
# defined just above in this file — confirm which definition is intended.
from test14class import Student
students = []
# NOTE(review): `dorms` is never used below.
dorms = []
#prompt user for information and append it to the class called Student
# Collect three student records from stdin.
for i in range(3):
    name = get_string("Name: ")
    dorm = get_string("Dorm: ")
    s = Student(name, dorm)
    students.append(s)
for student in students:
    print(f"{student.name} lives in {student.dorm}")
| [
"noreply@github.com"
] | alisheryuldashev.noreply@github.com |
342b6e3b971f495b6deaed0145cfca4d01b9b70c | dd02978fe562ddac0932025598d7983666f07c2d | /Trash/WikiScrapy/wikiscrapy/settings.py | 2508e8af685164cf2aa4889900dfd2cd10cb71c3 | [] | no_license | Bakser/PikaPika | d13378b2d5f13fa2ce89bbfca2d13a7b8807d3a9 | 158dbf8a1dd93ac83955575c13e77da2fa394872 | refs/heads/master | 2020-05-25T17:50:51.750731 | 2017-06-17T12:24:53 | 2017-06-17T12:24:53 | 84,950,857 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,166 | py | # -*- coding: utf-8 -*-
# Scrapy settings for wikiscrapy project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Name reported by the crawler (also the default USER_AGENT prefix).
BOT_NAME = 'wikiscrapy'

# Where Scrapy looks for spider classes.
SPIDER_MODULES = ['wikiscrapy.spiders']
NEWSPIDER_MODULE = 'wikiscrapy.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'wikiscrapy (+http://www.yourdomain.com)'

# Obey robots.txt rules
# NOTE: robots.txt is deliberately ignored for this crawl.
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'wikiscrapy.middlewares.WikiscrapySpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'wikiscrapy.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# Single pipeline enabled; the number (300) is its execution priority
# (lower runs first, range 0-1000).
ITEM_PIPELINES = {
    'wikiscrapy.pipelines.WikiscrapyPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"maohanyang789@163.com"
] | maohanyang789@163.com |
a9439346ef7e1b1ee5f9def2882beee1c0816dbb | 8b50a864c02507a3fb63094e055b3acf3eda157a | /code_file/models/language_model.py | ac5ccb203bc4ba2c64684ef454b5324006d9bbce | [] | no_license | lixiangpengcs/PSAC | 950ef43f76d8cf6acadac26cdf9733a4273d7fe5 | 249c36393120352bfa7af4ac1e9182bc63ee6152 | refs/heads/master | 2021-06-13T22:09:51.206860 | 2021-04-15T06:28:43 | 2021-04-15T06:28:43 | 156,211,385 | 26 | 2 | null | null | null | null | UTF-8 | Python | false | false | 12,048 | py | import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from dataset import *
import torch.nn.init as init
from .model_utils import *
from torch.nn.utils.weight_norm import weight_norm
Lv = 100  # presumably max video length; not referenced in this file — confirm
Lq = 20  # question sequence length (used to size attention axes in VQAttention)
Lc = 36  # video/context sequence length (used for V_mask in VQAttention)
ctx_dim = 2048  # raw visual context feature dimension (input to VQAttention.trans)
ctx_dim_m=512  # reduced context dimension (output of VQAttention.trans)
class WordEmbedding(nn.Module):
    """Word Embedding
    The ntoken-th dim is used for padding_idx, which agrees *implicitly*
    with the definition in Dictionary.

    Holds both a word-level and a character-level embedding table, with a
    shared dropout applied to both lookups in forward().
    """
    def __init__(self, ntoken, ntoken_c, emb_dim, c_emb_dim, dropout):
        super(WordEmbedding, self).__init__()
        # Index 0 is treated as padding in both tables.
        self.emb = nn.Embedding(ntoken, emb_dim, padding_idx=0)
        self.c_emb = nn.Embedding(ntoken_c, c_emb_dim, padding_idx=0)
        self.dropout = nn.Dropout(dropout)
        self.ntoken = ntoken
        self.ntoken_c = ntoken_c
        self.emb_dim = emb_dim
        self.c_emb_dim = c_emb_dim
    def init_embedding(self,dict, glove_file, task):
        # Build (or load a cached copy of) GloVe-initialised word weights and
        # randomly-initialised char weights for the given task. Returns the
        # two weight tensors; note the commented-out lines below show they
        # were once assigned into the embedding tables directly.
        if not os.path.exists('./data/%s_glove6b_init_300d.npy'%task):
            print('Construct initial embedding...')
            weight_init, word2emb = create_glove_embedding_init(dict.idx2word, glove_file)
            np.save(os.path.join('./data','%s_glove6b_init_300d.npy'% task), weight_init)
            weight_init = torch.from_numpy(weight_init)
            # NOTE(review): weight_init_char is already a torch tensor when
            # np.save is called on it below — confirm the cached file loads
            # back correctly with np.load.
            weight_init_char = torch.from_numpy(np.random.normal(loc=0.0, scale=1, size=(self.ntoken_c, self.c_emb_dim)))
            np.save(os.path.join('./data','%s_char_glove6b_init_300d.npy'% task), weight_init_char)
        else:
            print('loading glove from ./data/%s_glove6b_init_300d.npy'%task)
            weight_init = torch.from_numpy(np.load('./data/%s_glove6b_init_300d.npy'%task))
            weight_init_char = torch.from_numpy(np.load('./data/%s_char_glove6b_init_300d.npy'%task))
        assert weight_init.shape == (self.ntoken, self.emb_dim)
        assert weight_init_char.shape == (self.ntoken_c, self.c_emb_dim)
        # self.emb.weight.data[:self.ntoken] = weight_init
        # self.c_emb.weight.data[:self.ntoken_c] = weight_init_char
        return weight_init, weight_init_char
    def forward(self, x, x_c):
        # Look up word and char indices, applying dropout to each embedding.
        emb = self.emb(x)
        emb = self.dropout(emb)
        emb_c = self.c_emb(x_c)
        emb_c = self.dropout(emb_c)
        return emb, emb_c
class QuestionEmbedding(nn.Module):
    """RNN (LSTM or GRU) encoder over a sequence of word embeddings.

    With ``bidirect=False`` and ``out='last_layer'``, :meth:`forward`
    returns the last timestep's hidden state [batch, num_hid]; otherwise it
    returns the full output sequence [batch, seq, num_hid * ndirections].
    """
    def __init__(self, in_dim, num_hid, nlayers, bidirect, dropout, out = 'last_layer', rnn_type='LSTM'):
        """Module for question embedding

        :param in_dim: input embedding dimension
        :param num_hid: hidden size per direction
        :param nlayers: number of stacked RNN layers
        :param bidirect: whether the RNN is bidirectional
        :param dropout: inter-layer dropout passed to the RNN
        :param out: 'last_layer' to return only the final timestep (unidirectional case)
        :param rnn_type: 'LSTM' or 'GRU'
        """
        super(QuestionEmbedding, self).__init__()
        assert rnn_type == 'LSTM' or rnn_type == 'GRU'
        rnn_cls = nn.LSTM if rnn_type == 'LSTM' else nn.GRU
        self.rnn = rnn_cls(
            in_dim, num_hid, nlayers,
            bidirectional=bidirect,
            dropout=dropout,
            batch_first=True)
        self.in_dim = in_dim
        self.num_hid = num_hid
        self.nlayers = nlayers
        self.out = out
        self.rnn_type = rnn_type
        self.ndirections = 1 + int(bidirect)

    def init_hidden(self, batch):
        # just to get the type/device of this module's parameters
        weight = next(self.parameters()).data
        hid_shape = (self.nlayers * self.ndirections, batch, self.num_hid)
        if self.rnn_type == 'LSTM':
            # LSTM needs both hidden and cell state.
            return (Variable(weight.new(*hid_shape).zero_()),
                    Variable(weight.new(*hid_shape).zero_()))
        else:
            return Variable(weight.new(*hid_shape).zero_())

    def forward(self, x):
        # x: [batch, sequence, in_dim]
        batch = x.size(0)
        hidden = self.init_hidden(batch)
        self.rnn.flatten_parameters()
        output, hidden = self.rnn(x, hidden)
        if self.ndirections == 1 and self.out == 'last_layer':
            return output[:, -1]
        # FIX: removed unreachable dead code that followed this return in the
        # original (forward/backward last-state concatenation); behavior is
        # unchanged — the full sequence is returned here.
        return output

    def forward_all(self, x):
        """Return the full RNN output sequence for input x: [batch, sequence, in_dim]."""
        batch = x.size(0)
        hidden = self.init_hidden(batch)
        self.rnn.flatten_parameters()
        output, hidden = self.rnn(x, hidden)
        return output
class EncoderLayer(nn.Module):
    """One Transformer-style encoder layer: multi-head self-attention
    followed by a position-wise feed-forward network."""

    def __init__(self, d_model, d_inner_hid, n_head, d_k, d_v, dropout=0.1):
        super(EncoderLayer, self).__init__()
        # Multi-head self-attention over the input sequence.
        self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
        # Position-wise feed-forward applied to every timestep.
        self.pos_ffn = PositionwiseFeedForward(d_model, d_inner_hid, dropout=dropout)

    def forward(self, enc_input, slf_attn_mask=None):
        # Self-attend (query = key = value = the layer input), then feed-forward.
        attended, attn_weights = self.slf_attn(enc_input, enc_input, enc_input, attn_mask=slf_attn_mask)
        transformed = self.pos_ffn(attended)
        return transformed, attn_weights
class Encoder(nn.Module):
    """Stack of EncoderLayer blocks over a sequence of visual features,
    with a learned positional embedding added to the input.

    NOTE(review): requires CUDA (the position mask is moved with .cuda()).
    """
    def __init__(self, n_layer=6, n_head=8, d_k=64, d_v=64, v_len=36, v_emb_dim=300,
            d_model=2048, d_inner_hid=512, dropout=0.1):
        super(Encoder, self).__init__()
        self.d_model= d_model
        # Positional table initialised with (sinusoidal) encodings from
        # position_encoding_init, then projected 300 -> 2048 so it can be
        # added to the visual features.
        self.position_enc = nn.Embedding(v_len, v_emb_dim)
        self.position_enc.weight.data = position_encoding_init(v_len, v_emb_dim)
        self.layer_stack = nn.ModuleList([EncoderLayer(d_model, d_inner_hid, n_head, d_k, d_v, dropout)
                                          for _ in range(n_layer)])
        # NOTE(review): hard-coded 300 -> 2048 must match v_emb_dim / d_model.
        self.pos_linear = nn.Linear(300, 2048)
    def forward(self, src_seq, return_attns=False): # src_seq: batch_size x steps x ctx_dim
        # visual info
        # step 1: position embedding
        seq_batch_size, seq_len, v_feat_dim = src_seq.size() # batch_size:128 steps:35 ctx_dim:2048
        seq_mask = get_v_mask(seq_batch_size, seq_len).cuda() # batch_size x steps : position mask
        pos_emb = self.position_enc(seq_mask) # batch_size x v_len x v_emb_dim
        # print('ok')
        # print(pos_emb)
        # print(pos_emb.shape)
        pos_emb = self.pos_linear(pos_emb)
        enc_input = src_seq + pos_emb # position embedding error
        # enc_input = src_seq # no position embedding
        if return_attns:
            enc_slf_attns = []
        enc_output = enc_input
        enc_slf_attn_mask = get_attn_padding_mask(src_seq, src_seq) # batch_size x v_len
        # Run the input through every encoder layer in sequence, optionally
        # collecting each layer's attention weights.
        for enc_layer in self.layer_stack:
            enc_output, enc_slf_attn = enc_layer( # batch_size x v_len x d_v
                enc_output, slf_attn_mask=enc_slf_attn_mask)
            if return_attns:
                enc_slf_attns += [enc_slf_attn]
        if return_attns:
            return enc_output, enc_slf_attns
        else:
            return enc_output
class DepthwiseSeperableConv(nn.Module):
    """Depthwise-separable convolution: a depthwise conv (groups = channels)
    followed by a 1x1 pointwise conv, in 1D or 2D.

    Padding of k//2 keeps the spatial size unchanged for odd kernel sizes.
    Weights are Kaiming-initialised; biases (when present) start at zero.
    """

    def __init__(self, in_word, out_word, k, dim=1, bias=True):
        super(DepthwiseSeperableConv, self).__init__()
        if dim == 1:
            self.depthwise_conv = nn.Conv1d(in_channels=in_word, out_channels=in_word, kernel_size=k, groups=in_word, padding=k//2, bias=bias)
            self.pointwise_conv = nn.Conv1d(in_channels=in_word, out_channels=out_word, kernel_size=1, padding=0, bias=bias)
        elif dim == 2:
            self.depthwise_conv = nn.Conv2d(in_channels=in_word, out_channels=in_word, kernel_size=k, groups=in_word, padding=k//2, bias=bias)
            self.pointwise_conv = nn.Conv2d(in_channels=in_word, out_channels=out_word, kernel_size=1, padding=0,
                                            bias=bias)
        else:
            # ValueError (a subclass of Exception) is the conventional type
            # for a bad argument value.
            raise ValueError("Wrong dimension for Depthwise Separable Convolution!")
        nn.init.kaiming_normal_(self.depthwise_conv.weight)
        nn.init.kaiming_normal_(self.pointwise_conv.weight)
        if bias:
            # BUGFIX: the original zeroed .bias unconditionally, which raised
            # when bias=False (conv.bias is None in that case).
            nn.init.constant_(self.depthwise_conv.bias, 0.0)
            nn.init.constant_(self.pointwise_conv.bias, 0.0)

    def forward(self, x):
        """Apply the depthwise conv, then the pointwise (channel-mixing) conv."""
        return self.pointwise_conv(self.depthwise_conv(x))
class VQAttention(nn.Module):
    """Video-question co-attention (BiDAF-style trilinear attention).

    Projects the raw video features (ctx_dim -> ctx_dim_m), computes a
    video-by-question similarity matrix, and returns the concatenation
    [V, A, V*A, V*B] transposed to channels-first.

    NOTE(review): `D`, `dropout`, `F` and `mask_logits` come from the
    file's star imports (dataset / model_utils) — confirm their values.
    """
    def __init__(self):
        super(VQAttention, self).__init__()
        # Trilinear similarity parameters: one weight per modality plus a
        # multiplicative interaction term and a scalar bias.
        w4V = torch.empty(ctx_dim_m, 1)
        w4Q = torch.empty(D, 1)
        w4mlu = torch.empty(1, 1, ctx_dim_m)
        nn.init.xavier_uniform_(w4V)
        nn.init.xavier_uniform_(w4Q)
        nn.init.xavier_uniform_(w4mlu)
        self.w4V = nn.Parameter(w4V)
        self.w4Q = nn.Parameter(w4Q)
        self.w4mlu = nn.Parameter(w4mlu)
        # Weight-normalised linear projection of raw video features.
        self.trans = weight_norm(nn.Linear(ctx_dim, ctx_dim_m))
        # self.trans = Initialized_Conv1d(ctx_dim, ctx_dim_m)
        bias = torch.empty(1)
        nn.init.constant_(bias, 0)
        self.bias = nn.Parameter(bias)
    def forward(self, Vid_enc, Ques_enc, V_mask, Q_mask):
        # Vid_enc = self.trans(Vid_enc.transpose(1, 2))
        Vid_enc = self.trans(Vid_enc)
        Ques_enc = Ques_enc.transpose(1, 2)
        batch_size = Vid_enc.size()[0]
        # Vid_enc = Vid_enc.transpose(1,2)
        # S: batch x Lc x Lq similarity scores between video and question steps.
        S = self.trilinear_for_attention(Vid_enc, Ques_enc)
        V_mask = V_mask.view(batch_size, Lc, 1)
        Q_mask = Q_mask.view(batch_size, 1, Lq)
        # S1: attention over question steps; S2: attention over video steps.
        S1 = F.softmax(mask_logits(S, Q_mask), dim=2)
        S2 = F.softmax(mask_logits(S, V_mask), dim=1)
        # A: question-aware video context; B: video-aware (second-hop) context.
        A = torch.bmm(S1, Ques_enc)
        B = torch.bmm(torch.bmm(S1, S2.transpose(1,2)), Vid_enc)
        out = torch.cat([Vid_enc, A, torch.mul(Vid_enc, A), torch.mul(Vid_enc, B)], dim=2)
        return out.transpose(1, 2)
    def trilinear_for_attention(self, Vid_enc, Ques_enc):
        # Trilinear score: w4V.V + w4Q.Q + (V*w4mlu).Q^T + bias, broadcast
        # to a batch x Lc x Lq matrix.
        V = F.dropout(Vid_enc, p=dropout, training=self.training)
        Q = F.dropout(Ques_enc, p=dropout, training=self.training)
        subres0 = torch.matmul(V, self.w4V).expand([-1, -1, Lq])
        subres1 = torch.matmul(Q, self.w4Q).transpose(1, 2).expand([-1, Lc, -1 ])
        subres2 = torch.matmul(V * self.w4mlu, Q.transpose(1,2))
        res = subres0 + subres1 + subres2
        res += self.bias
        return res
class Pointer(nn.Module):
    """Per-position scorer: projects the concatenation of three encoder
    outputs down to one masked logit per timestep."""

    def __init__(self):
        super().__init__()
        # 1x1 conv acting as a linear projection from 3*D channels to 1.
        self.w1 = Initialized_Conv1d(D * 3, 1)

    def forward(self, M1, M2, M3, mask):
        stacked = torch.cat([M1, M2, M3], dim=1)
        scores = self.w1(stacked).squeeze()
        return mask_logits(scores, mask)
class Ques_Encoder(nn.Module):
    """Question encoder fused with video features via VQAttention.

    Embeds word and char indices (pretrained matrices), encodes the
    question with an EncoderBlock, co-attends with the video encoding, and
    returns the mean over timesteps of the resized fused representation.

    NOTE(review): `Embedding`, `EncoderBlock`, `Initialized_Conv1d` and `D`
    come from the file's star imports; requires CUDA (.cuda() below).
    """
    def __init__(self, word_mat, char_mat, pretrained_char=False):
        super(Ques_Encoder, self).__init__()
        # add embedding matric for word and char
        if pretrained_char:
            # Frozen pretrained character embeddings.
            self.char_emb = nn.Embedding.from_pretrained(torch.Tensor(char_mat))
        else:
            # Trainable character embeddings initialised from char_mat.
            char_mat = char_mat.float()
            char_mat = torch.Tensor(char_mat)
            self.char_emb = nn.Embedding.from_pretrained(char_mat, freeze=False)
        # Word embeddings are always frozen.
        self.word_emb = nn.Embedding.from_pretrained(torch.Tensor(word_mat), freeze=True)
        self.emb = Embedding()
        self.emb_enc = EncoderBlock(conv_num=4, ch_num=D, k=7)
        self.vqatt = VQAttention()
        # Projects the 4*D-channel VQAttention output back to D channels.
        self.vq_resizer = Initialized_Conv1d(D*4, D)
        self.model_enc_blks = nn.ModuleList(EncoderBlock(conv_num=2, ch_num=D, k=5) for _ in range(7))
        self.out = Pointer()
    def forward(self, vid_enc, q_w, q_c):
        # Non-zero indices mark real tokens (index 0 is padding).
        mask = ((torch.ones_like(q_w)* 0)!=q_w).float()
        mask_c = (torch.ones_like(q_c)*0 != q_c).float()
        q_w_emb = self.word_emb(q_w) # batch_size x q_len x w_dim
        q_c_emb = self.char_emb(q_c) # batch_size x q_len x c_len x c_dim
        Q = self.emb(q_c_emb, q_w_emb, Lq) # batch_size x D x q_len
        Cq = self.emb_enc(Q, mask, 1, 1)
        # All video positions are treated as valid.
        maskV = torch.ones(vid_enc.shape[0], vid_enc.shape[1]).cuda()
        X = self.vqatt(vid_enc, Cq, maskV, mask)
        M0 = self.vq_resizer(X)
        # Mean-pool over timesteps; the modelling encoder / pointer stages
        # below are disabled (commented out), so self.model_enc_blks and
        # self.out are currently unused in forward.
        out = M0.mean(-1)
        # for i, blk in enumerate(self.model_enc_blks):
        #     M0 = blk(M0, mask, i*(2+2)+1, 7)
        # M1 = M0
        # for i, blk in enumerate(self.model_enc_blks):
        #     M0 = blk(M0, mask, i*(2+2)+1, 7)
        # M2 = M0
        # M0 = F.dropout(M0, p=dropout, training=self.training)
        # for i, blk in enumerate(self.model_enc_blks):
        #     M0 = blk(M0, mask, i*(2+2)+1, 7)
        # M3 = M0
        # out = self.out(M1, M2, M3, mask)
        return out
| [
"noreply@github.com"
] | lixiangpengcs.noreply@github.com |
2436b31efc26c12e07608f1340aa81ef216cd897 | 6550140daf76d430f13ff6cc3ca4e71db8ebd7da | /test_sort.py | 745fa7dd389d54e95abd84b7cb01e8db02a57289 | [] | no_license | NilE2503/project | 19c3ddc34593b266d1e337f8216f155b0a045a42 | da1452a08da7c020d7a882337d2781a182873ff8 | refs/heads/main | 2023-02-27T05:25:55.768019 | 2021-02-08T15:08:17 | 2021-02-08T15:08:17 | 329,330,532 | 0 | 0 | null | 2021-01-13T14:32:36 | 2021-01-13T14:19:10 | null | UTF-8 | Python | false | false | 912 | py | '''
Тесты.
'''
import sorting
import pytest
from random import randint
RANGE = 1000
def rand_gen(amount: int) -> list:
return[randint(-RANGE, RANGE) for _ in range(amount)]
DEFAULT_LIST = rand_gen(10)
EMPTY_LIST = []
NEGATIVE_LIST = [3, -1, 4, 5, -2]
NONVALID_LIST = [3, 'one', 4, 5, '-2']
@pytest.mark.parametrize('test_list', [DEFAULT_LIST, NEGATIVE_LIST])
@pytest.mark.parametrize('test_func', [sorting.bubble_sort, sorting.selection_sort, sorting.insert_sort])
def test_all(test_list, test_func):
result = test_func(test_list)
assert result == sorted(test_list)
@pytest.mark.parametrize('test_list', [NONVALID_LIST, EMPTY_LIST])
@pytest.mark.parametrize('test_func', [sorting.bubble_sort, sorting.selection_sort, sorting.insert_sort])
def test_selection_not_integer(test_list, test_func):
test_list = test_list
with pytest.raises(RuntimeError):
test_func(test_list) | [
"gvb@gmail.com"
] | gvb@gmail.com |
cb2be266247d1a7439dd9738a44eda334951f271 | 6b163125b7d2f3ea5c2b107e6451e423ac7f1f3a | /app/forms/login_form.py | 1b499f6e83b990f3fdf45d98b52e3f2496815194 | [] | no_license | guny12/Capstone-Mise-En | a1d6e689230ad2e49cce7a09bad52d6243808d15 | b45d510adc04a69c73cf738a97c3a68d7166eebd | refs/heads/main | 2023-06-14T02:13:24.280617 | 2021-07-15T06:30:39 | 2021-07-15T06:30:39 | 363,795,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,171 | py | from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired, ValidationError
from app.models import User
def user_exists(form, field):
print("Checking if user exists", field.data)
credential = field.data
user = User.query.filter(User.email == credential).first()
if not user:
user = User.query.filter(User.username == credential).first()
if not user:
raise ValidationError("Invalid Credentials.")
def password_matches(form, field):
print("Checking if password matches")
password = field.data
credential = form.data["credential"]
user = User.query.filter(User.email == credential).first()
if not user:
user = User.query.filter(User.username == credential).first()
if not user:
raise ValidationError("Invalid Credentials.")
if not user.check_password(password):
raise ValidationError("Invalid Credentials.")
class LoginForm(FlaskForm):
credential = StringField("Email / Username", validators=[DataRequired(), user_exists])
password = StringField("Password", validators=[DataRequired(), password_matches])
| [
"Jimjnguy@gmail.com"
] | Jimjnguy@gmail.com |
5effb4f8168c2ae2b22c3d5bdf47fbc2371234a7 | 08c7f146d82da572731f6ad0fd7d96bd4553f3d8 | /backend/wispy_bread_26347/settings.py | 440dca6d8ada9cc66236256b5fe96e07ed38d97b | [] | no_license | crowdbotics-apps/wispy-bread-26347 | 9c7b081b280e709f6eb5dccd3d38e7be306c18a8 | 04532cb6c4ac227bd104c2210e9997cdc5ff530d | refs/heads/master | 2023-05-01T09:20:01.995863 | 2021-05-07T19:06:03 | 2021-05-07T19:06:03 | 365,329,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,117 | py | """
Django settings for wispy_bread_26347 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'wispy_bread_26347.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wispy_bread_26347.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
ddff5be5033ac45571c5aeff80f944fcc3cdbfb1 | f14f11929dfa7a5b2dacaf330719507d20975c3f | /ml/dataset/features.py | f991f6cdf745de3f8a18670608ac1fdf49c68039 | [] | no_license | mani3/ml-shogi | ba7d7b9e53cbc31066272f1887ab8e6128cbe3da | 7369f1ff2af60ee37ca15a2bf39fc498f09d289d | refs/heads/main | 2023-01-19T20:22:50.828060 | 2020-11-26T15:49:56 | 2020-11-26T15:49:56 | 304,924,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,411 | py | import numpy as np
import shogi
import copy
import ml.dataset.common as common
from ml.dataset.common import MOVE_DIRECTION, MOVE_DIRECTION_PROMOTED
from ml.dataset.common import (
UP, UP2_LEFT, UP2_RIGHT, UP_LEFT, UP_RIGHT,
LEFT, RIGHT, DOWN, DOWN_LEFT, DOWN_RIGHT
)
def make_input_features(piece_bb, occupied, pieces_in_hand):
features = []
for color in shogi.COLORS:
# pieces on board
for piece_type in shogi.PIECE_TYPES_WITH_NONE[1:]:
bb = piece_bb[piece_type] & occupied[color]
feature = np.zeros(9 * 9, dtype=np.uint8)
for pos in shogi.SQUARES:
if bb & shogi.BB_SQUARES[pos] > 0:
feature[pos] = 1
features.append(feature.reshape((9, 9)))
# pieces in hand
for piece_type in range(1, 8):
for n in range(shogi.MAX_PIECES_IN_HAND[piece_type]):
if piece_type in pieces_in_hand[color] and n < pieces_in_hand[color][piece_type]: # noqa: E501
feature = np.ones(9 * 9, dtype=np.uint8)
else:
feature = np.zeros(9 * 9, dtype=np.uint8)
features.append(feature.reshape((9, 9)))
return np.array(features).transpose([1, 2, 0])
def make_input_features_from_board(board):
if board.turn == shogi.BLACK:
piece_bb = copy.deepcopy(board.piece_bb)
occupied = copy.deepcopy(
(board.occupied[shogi.BLACK],
board.occupied[shogi.WHITE])
)
pieces_in_hand = copy.deepcopy(
(board.pieces_in_hand[shogi.BLACK],
board.pieces_in_hand[shogi.WHITE])
)
else:
piece_bb = [common.bb_rotate_180(bb) for bb in board.piece_bb]
occupied = (
common.bb_rotate_180(board.occupied[shogi.WHITE]),
common.bb_rotate_180(board.occupied[shogi.BLACK])
)
pieces_in_hand = (
board.pieces_in_hand[shogi.WHITE],
board.pieces_in_hand[shogi.BLACK]
)
return make_input_features(piece_bb, occupied, pieces_in_hand)
def make_output_label(move, color):
move_to = move.to_square
move_from = move.from_square
if color == shogi.WHITE:
move_to = common.SQUARES_R180[move_to]
if move_from is not None:
move_from = common.SQUARES_R180[move_from]
move_direction = None
if move_from is not None:
to_y, to_x = divmod(move_to, 9)
from_y, from_x = divmod(move_from, 9)
dir_x = to_x - from_x
dir_y = to_y - from_y
if dir_y < 0 and dir_x == 0:
move_direction = UP
elif dir_y == -2 and dir_x == -1:
move_direction = UP2_LEFT
elif dir_y == -2 and dir_x == 1:
move_direction = UP2_RIGHT
elif dir_y < 0 and dir_x < 0:
move_direction = UP_LEFT
elif dir_y < 0 and dir_x > 0:
move_direction = UP_RIGHT
elif dir_y == 0 and dir_x < 0:
move_direction = LEFT
elif dir_y == 0 and dir_x > 0:
move_direction = RIGHT
elif dir_y > 0 and dir_x == 0:
move_direction = DOWN
elif dir_y > 0 and dir_x < 0:
move_direction = DOWN_LEFT
elif dir_y > 0 and dir_x > 0:
move_direction = DOWN_RIGHT
if move.promotion:
move_direction = MOVE_DIRECTION_PROMOTED[move_direction]
else:
# 持ち駒
move_direction = len(MOVE_DIRECTION) + move.drop_piece_type - 1
move_label = 9 * 9 * move_direction + move_to
return move_label
def make_features(position):
piece_bb, occupied, pieces_in_hand, move, win = position
features = make_input_features(piece_bb, occupied, pieces_in_hand)
return (features, move, win)
| [
"kazuya.4da@gmail.com"
] | kazuya.4da@gmail.com |
9902ebd2e00cc805ec5bdc9703e6ca797ea372dc | 41ede4fd3bfba1bff0166bca7aee80dcf21434c6 | /suvari/gtk2chain/reverses/xcb-util/actions.py | 25adb86a956a71e443321f8a2ef6661d3e2d6833 | [] | no_license | pisilinux/playground | a7db4b42559a21cc72fd4c8649e0231ab6a3eb3c | e4e12fff8a847ba210befc8db7e2af8556c3adf7 | refs/heads/master | 2022-08-12T23:03:27.609506 | 2022-08-11T18:28:19 | 2022-08-11T18:28:19 | 8,429,459 | 16 | 22 | null | 2022-08-11T18:28:20 | 2013-02-26T09:37:11 | Python | UTF-8 | Python | false | false | 572 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import shelltools
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
autotools.autoreconf("-vif")
autotools.configure("--disable-static \
--with-pic")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("README")
| [
"suvarice@gmail.com"
] | suvarice@gmail.com |
ffcba6143e262725a508c37f6c97afb7bce54205 | 797ef824d1d60b55ea132b7a65df09ec8d20119e | /viscode-api-server/app/api/users.py | 234a2654b7a855fc91c73bd8ff781b916bdef47f | [] | no_license | ncu-csie-kslab/VisCode | 8c661141ef346bffe3bb59a986d08b8a3e84a6ed | ceb5f7ab7b72b64a592f075fc7cecbdfb83d8d08 | refs/heads/master | 2021-07-13T05:50:12.568619 | 2020-09-27T16:58:43 | 2020-09-27T16:58:43 | 204,051,431 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,344 | py | from flask import Blueprint, jsonify, request
from app.db import get_pg_pool
import psycopg2
users = Blueprint('users', __name__,
template_folder='templates')
pg_pool = get_pg_pool()
@users.route('/users', methods=['POST'])
def handle_users():
res_data = {}
conn = pg_pool.getconn()
try:
if conn is None:
return jsonify({
'msg': 'Database connection error!',
'isError': True
})
if request.method == 'POST':
post_data = request.get_json()
account = post_data['account']
password = post_data['password']
session_id = post_data['sessionId']
try:
cur = conn.cursor()
# cur.execute('INSERT INTO viscode.public.system_announcements(type, content) VALUES (%s, %s) ON CONFLICT (type)', ('system'))
cur.execute(
'SELECT admin FROM oauth_access_tokens AS a, users AS b WHERE a.user_id = b.id AND a.session_id = %s AND b.admin = true', (session_id,))
is_admin = cur.fetchone()
if is_admin:
cur.execute('SELECT * FROM user_passwords WHERE name = %s', (account,))
is_existed = cur.fetchone()
if is_existed is None:
cur.execute('INSERT INTO user_passwords(name, password) VALUES (%s, %s)',
(account, password))
conn.commit()
count = cur.rowcount
res_data = {
'msg': 'Add account success.',
'isError': False,
'count': count
}
else:
res_data = {
'msg': 'Account exsited',
'isError': True,
}
else:
res_data = {
'msg': 'Permission denied',
'isError': True,
}
cur.close()
except (Exception, psycopg2.Error) as error:
print(error)
res_data = {
'msg': error,
'isError': True
}
finally:
pg_pool.putconn(conn)
return jsonify(res_data)
@users.route('/users/<string:account>', methods=['PATCH'])
def patch_user(account):
res_data = {}
conn = pg_pool.getconn()
try:
if conn is None:
return jsonify({
'msg': 'Database connection error!',
'isError': True
})
post_data = request.get_json()
password = post_data['password']
session_id = post_data['sessionId']
cur = conn.cursor()
# cur.execute('INSERT INTO viscode.public.system_announcements(type, content) VALUES (%s, %s) ON CONFLICT (type)', ('system'))
cur.execute(
'SELECT admin FROM oauth_access_tokens AS a, users AS b WHERE a.user_id = b.id AND a.session_id = %s AND b.admin = true', (session_id,))
is_admin = cur.fetchone()
if is_admin:
cur.execute('SELECT * FROM user_passwords WHERE name = %s', (account,))
is_existed = cur.fetchone()
if is_existed:
cur.execute('UPDATE user_passwords SET password = %s WHERE name = %s',
(password, account))
conn.commit()
count = cur.rowcount
res_data = {
'msg': 'Update account success.',
'isError': False,
'count': count
}
else:
res_data = {
'msg': 'Account do not exsited',
'isError': True,
}
else:
res_data = {
'msg': 'Permission denied',
'isError': True,
}
cur.close()
except (Exception, psycopg2.Error) as error:
print(error)
res_data = {
'msg': error,
'isError': True
}
finally:
pg_pool.putconn(conn)
return jsonify(res_data) | [
"p12355668@gmail.com"
] | p12355668@gmail.com |
75ae39b4872390d4f7033db10dda678dda4d2daf | 4daab5ba90185bae65169ebb8183c635385ab3f8 | /autode/path/__init__.py | d90d12446b2816f5c49dee17a412f0ff5c3d899f | [
"MIT"
] | permissive | duartegroup/autodE | bcf69440bd04411f97d39df0df0ae1f2bf6feb8c | 4d6667592f083dfcf38de6b75c4222c0a0e7b60b | refs/heads/master | 2023-09-01T15:08:16.028378 | 2023-07-25T08:09:05 | 2023-07-25T08:09:05 | 196,085,570 | 132 | 42 | MIT | 2023-09-12T15:20:54 | 2019-07-09T21:20:27 | Python | UTF-8 | Python | false | false | 117 | py | from autode.path.path import Path
from autode.path.adaptive import AdaptivePath
__all__ = ["Path", "AdaptivePath"]
| [
"noreply@github.com"
] | duartegroup.noreply@github.com |
23e5b76816566cd052be9a482db95b33fa9bcaf6 | 431249e033aacb911e8e5d553affd0432a4e5d3c | /blog/urls.py | f7ed9ab3a961cc3e626077414a6610613dd3173c | [] | no_license | Shivam0403/blogging_web_app | 1935da5174db7591e15a20cd75bf54e5dbc797c3 | d35c1ae55373baf4f83e091c443857e151850470 | refs/heads/master | 2020-07-31T16:34:55.579424 | 2019-09-24T19:02:46 | 2019-09-24T19:02:46 | 210,676,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | from django.urls import path
from .views import (PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView
)
from . import views
urlpatterns = [
path('', PostListView.as_view(),name='blog-home'),
path('post/<int:pk>/', PostDetailView.as_view(),name='post-detail'),
path('post/new/', PostCreateView.as_view(),name='post-create'),
path('post/<int:pk>/update', PostUpdateView.as_view(),name='post-update'),
path('post/<int:pk>/delete', PostDeleteView.as_view(),name='post-delete'),
path('about/', views.about,name='blog-about'),
] | [
"shivammahto108@gmail.com"
] | shivammahto108@gmail.com |
edfdaa5a38f3e0881df1da7afca07026b1feefcb | 967e5eb9a6b3d417392b37b8a33b1e717cfac830 | /tkinter103.py | 970204c2971047143572dc5a3622f724874b527d | [] | no_license | masterpy/warm-up-py | bf0ea34f78da9cb3fd5b145e62451b9d72d1701b | 8e2a3fc569748cd578f299fc30e9305a7517fc9b | refs/heads/master | 2021-01-10T07:18:43.985564 | 2016-01-29T01:59:44 | 2016-01-29T01:59:44 | 50,627,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | from tkinter import *
from tkinter.messagebox import showinfo
def reply(name):
showinfo(title='Reply', message='Hello %s' % name)
top = Tk()
top.title('Echo')
# 图标与平台相关,下面这个图标为windows平台上图标
# top.iconbitmap('py-blue-trans-out.ico')
Label(top, text='Enter your name:').pack(side=TOP)
ent = Entry(top)
ent.pack(side=TOP)
btn = Button(top, text="submit", command=(lambda: reply(ent.get())))
btn.pack(side=LEFT)
top.mainloop() | [
"hutaishi@gmail.com"
] | hutaishi@gmail.com |
bddd9d900949bd5cb7f5fa96d34c40eee96a5b63 | 878b721121d04ff22ad716e03b4f484c05b31d33 | /silab/login/migrations/0003_auto_20200605_1412.py | 112f9454a0a6f25ca37587e7280f6a26f0d1002c | [] | no_license | andrs99/Silab | c62118cfab6d74f77944fdc95cf640f56d335991 | 29b9c0f02afd8e856bb73b3c8e367dcc1c206c9c | refs/heads/master | 2022-10-03T11:22:47.242241 | 2020-06-05T20:17:17 | 2020-06-05T20:17:17 | 268,733,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | # Generated by Django 3.0.4 on 2020-06-05 19:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0002_auto_20200527_0449'),
]
operations = [
migrations.AlterField(
model_name='usuarios',
name='area',
field=models.CharField(choices=[('biblioteca', 'Biblioteca'), ('laboratorio de computo', 'Laboratorio de computo'), ('laboratorio de quimica', 'Laboratorio de quimica'), ('laller de electronica', 'Taller de electronica'), ('taller de industrial', 'Taller de industrial')], max_length=50, verbose_name='Area'),
),
migrations.AlterField(
model_name='usuarios',
name='tipo',
field=models.CharField(choices=[('escolares', 'Control Escolar'), ('laboratorios', 'Laboratorios')], max_length=50, verbose_name='Tipo'),
),
]
| [
"darklink2901@gmail.com"
] | darklink2901@gmail.com |
044880f3f6aca9724958c7447782e1dcd7da6819 | 45572ad2cad79d2f8dd97c16593ea15ebde69d57 | /edge_detector.py | a4a7d3a35037f14739f43fa17836c446bf1c96ee | [] | no_license | okkhoury/Computer-Vision | 500d55b0fc634b4e7ca8c3786a700f04924242b2 | 27283bc83513d84abd805a6112b4e2b56393b885 | refs/heads/master | 2021-01-21T10:05:12.944179 | 2017-05-01T03:18:11 | 2017-05-01T03:18:11 | 83,376,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,774 | py | import skimage
from skimage import io
from scipy.ndimage.filters import gaussian_filter
import numpy as np
import math
from itertools import product, starmap
from scipy import signal
import os
import matplotlib.pyplot as plt
# Read in image and convert it from uint8 to float64
file = input( "Enter the name of the file ")
building = io.imread(file)
building = skimage.img_as_float(building)
# Remove the 3 channels. Convert channel to only have one channel: Luminance
building = skimage.color.rgb2grey(building)
# Smooth image by convolving it with 7x7 gaussian kernel
gaussian_kernel = np.array([[0.003765, 0.015019, 0.023792, 0.015019, 0.003765],
[0.015019, 0.059912, 0.094907, 0.059912, 0.015019],
[0.023792, 0.094907, 0.150342, 0.094907, 0.023792],
[0.015019, 0.059912, 0.094907, 0.059912, 0.015019],
[0.003765, 0.015019, 0.023792, 0.015019, 0.003765]], dtype=np.float)
filtered_image = signal.convolve2d(building, gaussian_kernel)
# Find the gradient of the smoothed image
x_gradient, y_gradient = np.gradient(filtered_image)
# At each pixel, compute the edge strength and edge orientation
edge_strengths = np.zeros(building.shape, dtype=float)
edge_orientations = np.zeros(building.shape, dtype=float)
pi = 3.1415926
for point, val in np.ndenumerate(building):
# Formula for magnitue -> sqrt(a^2 + b^2)
magnitude = np.sqrt(x_gradient[point]**2 + y_gradient[point]**2)
edge_strengths[point] = magnitude
# Formula for orientation -> arctan(y_gradient / x_gradient) ""CHECK IF I NEED TO WORRY ABOUT DIVIDE BY 0
# plots points between -pi/2 and pi/2
orientation = np.arctan(y_gradient[point] / x_gradient[point])
edge_orientations[point] = orientation
print("magnitude and orientation calculated")
# Determine the D* matrix, check each value in edge_orientations and store the angle it's closest to (0, pi/4, pi/2, 3pi/4)
angles = [0, np.divide(pi, 4), np.divide(pi, 2), -1 * np.divide(pi,4), -1 *np.divide(pi,2)]
minIndex = 0
minDiff = 10
for point, val in np.ndenumerate(edge_orientations):
# Iterate through the 4 options, choose the one that has the least angle difference. Assign the index to the edge_orientations array
for angle in angles:
if np.absolute(val - angle) < minDiff:
minIndex = angles.index(angle)
minDiff = np.absolute(val - angle)
edge_orientations[point] = minIndex
#print(edge_orientations[point])
minDiff = 10
minIndex=0
print("angle assignment done")
# Don't modify edge_strengths directly. Make a copy.
edge_strengths_copy = np.copy(edge_strengths)
# Thin the edges by doing non-maximum supression
# If the strength of neighboring points along the current pixels
for row in range(1, edge_orientations.shape[0]-1): # -----> Vertical edge
for col in range(1, edge_orientations.shape[1]-1):
if edge_orientations[(row,col)] == 2 or edge_orientations[(row,col)] == 4: # 0
if (edge_strengths_copy[(row, col+1)] < edge_strengths_copy[(row,col)]):
edge_strengths[(row, col+1)] = 0
if (edge_strengths_copy[(row, col-1)] < edge_strengths_copy[(row,col)]):
edge_strengths[(row, col-1)] = 0
elif edge_orientations[(row,col)] == 3: # pi / 4
if (edge_strengths_copy[(row-1, col+1)] < edge_strengths_copy[(row,col)]):
edge_strengths[(row-1, col+1)] = 0
if (edge_strengths_copy[(row+1, col-1)] < edge_strengths_copy[(row,col)]):
edge_strengths[(row+1, col-1)] = 0
elif edge_orientations[(row,col)] == 0: # or edge_orientations[(row, col)] == 4: pi /2
if (edge_strengths_copy[(row+1, col)] < edge_strengths_copy[(row,col)]):
edge_strengths[(row+1, col)] = 0
if (edge_strengths_copy[(row-1, col)] < edge_strengths_copy[(row,col)]):
edge_strengths[(row-1, col)] = 0
elif edge_orientations[(row,col)] == 1: # -pi/4
if (edge_strengths_copy[(row-1, col-1)] < edge_strengths_copy[(row,col)]):
edge_strengths[(row-1, col-1)] = 0
if (edge_strengths_copy[(row+1, col+1)] < edge_strengths_copy[(row,col)]):
edge_strengths[(row+1, col+1)] = 0
# check that the current pixel I am looking at is within the image array
def in_bounds(x, y):
lower_bound = 0
upper_x_bound = building.shape[0]
upper_y_bound = building.shape[1]
if (x < 0 or y < 0 or x >= upper_x_bound or y >= upper_y_bound):
return False
else:
return True
# Thresholds determine how many edges will be detected. Weak edges are chained to strong edges
marked_points = np.zeros(building.shape) #Flower -> .015, .008
strong_edge_thresh = .02
weak_edge_thresh = .012
# Iterative dfs to chain weak edges pixels to strong edge pixels
stack = []
for x in range(building.shape[0]):
for y in range(building.shape[1]):
if (edge_strengths[(x,y)] >= strong_edge_thresh):
stack.append((x,y))
elif (edge_strengths[(x,y)] < weak_edge_thresh):
marked_points[(x,y)] = 1
building[(x,y)] = 0
while len(stack) != 0:
current_point = stack.pop()
marked_points[current_point] = 1 # mark this point so that we don't come back to it
# Some code I found to quickly get the neighbors of any point in a matrix
cells = starmap(lambda a,b: (current_point[0]+a, current_point[1]+b), product((0,-1,+1), (0,-1,+1)))
for point in cells:
if in_bounds(point[0], point[1]) and edge_strengths[point] >= weak_edge_thresh and marked_points[point] == 0:
building[point] = 1
stack.append(point)
# If a point has not yet been marked, then it must be a weak edge that does not chain to a strong edge. Remove it.
for x in range(building.shape[0]):
for y in range(building.shape[1]):
point = (x,y)
if marked_points[point] == 0:
building[point] = 0
plt.imshow(building, cmap='gray')
plt.show()
| [
"noreply@github.com"
] | okkhoury.noreply@github.com |
a5b980ef7e57ec36f4eb154a34c94d54a8c68995 | 44dc0f91d3f8df9e18f79584369b6a057e1fac78 | /sharma_lakshay_a1/master_a1.py | a793bf5f6d576a8751f03116a35bf04680882765 | [] | no_license | sharmalakshay93/nyu-cv | 879dff99058771157bd0987ff252aa9e48539b8f | 2858a4777a474a71a8f8e9efb652ec48bb4c58b2 | refs/heads/master | 2021-05-12T10:44:19.458792 | 2018-01-13T17:12:29 | 2018-01-13T17:12:29 | 117,361,121 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,273 | py | import numpy as np
import torch
import torch.nn as nn
import scipy.signal
import scipy.io
import matplotlib.pyplot as plt
import cv2
from mpl_toolkits.mplot3d import Axes3D
def apply_gauss(img, width):
# check if width is odd number
if (width%2 == 0):
raise ValueError('width parameter must be an odd number')
# given filter
g1 = np.array([[1,2,1]])
g1 = g1 / np.sum(g1)
filt = np.copy(img)
# row-filtering, followed by column filtering
for i in range(width):
filt = np.apply_along_axis(np.convolve, 1, filt, np.ravel(g1), mode="same")
filt = np.apply_along_axis(np.convolve, 0, filt, np.ravel(g1), mode="same")
return filt
def image_blurring(filename, width):
    """Load an image from disk, display it, then display a blurred copy.

    Fix: the original called ``scipy.ndimage.imread``, which was deprecated
    in SciPy 1.0 and removed in SciPy 1.2 (and ``scipy.ndimage`` is never
    imported in this module).  Use ``skimage.io.imread`` instead, which is
    already imported at the top of this file as ``io``.

    Args:
        filename: path to the image file.
        width: odd blur strength, forwarded to :func:`apply_gauss`.
    """
    img = io.imread(filename)
    plt.imshow(img, cmap='gray')
    plt.title('original')
    plt.show()
    filtered = apply_gauss(img, width)
    plt.imshow(filtered, cmap='gray')
    plt.title('filtered')
    plt.show()
def getDescriptors(filename1, filename2):
    """Detect SIFT keypoints and descriptors in two images and display them.

    Both images are loaded, converted to greyscale, run through SIFT, and
    shown with their keypoints drawn.  Basic statistics (region counts and
    descriptor shape) are printed.

    Returns:
        Tuple ``(kp1, des1, kp2, des2, img1, img2, gray1, gray2)`` where
        ``img1``/``img2`` are the keypoint visualisations and
        ``gray1``/``gray2`` the greyscale inputs.
    """
    color_a = cv2.imread(filename1)
    color_b = cv2.imread(filename2)
    gray1 = cv2.cvtColor(color_a, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(color_b, cv2.COLOR_BGR2GRAY)
    detector = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = detector.detectAndCompute(gray1, None)
    kp2, des2 = detector.detectAndCompute(gray2, None)
    img1 = cv2.drawKeypoints(gray1, kp1, None)
    img2 = cv2.drawKeypoints(gray2, kp2, None)
    print("number of regions in book.pgm: ", len(des1))
    print("number of regions in scene.pgm: ", len(des2))
    print("shape of each descriptor vector: ", des1[0].shape)
    plt.imshow(img1)
    plt.show()
    plt.imshow(img2)
    plt.show()
    return (kp1, des1, kp2, des2, img1, img2, gray1, gray2)
def getMatches(des1, des2, kp1, kp2, img1, img2):
    """Brute-force match SIFT descriptors and keep Lowe-ratio matches.

    Each descriptor in ``des1`` is matched to its two nearest neighbours in
    ``des2``; a match is accepted when the best distance is below 0.9 times
    the second-best (ratio test).  The accepted matches are drawn over the
    two images and displayed.

    Returns:
        List of unique ``((x1, y1), (x2, y2))`` coordinate pairs, one per
        accepted match (image-1 point first, image-2 point second).
    """
    matcher = cv2.BFMatcher()
    knn_pairs = matcher.knnMatch(des1, des2, k=2)
    coords = []
    accepted = []
    for best, runner_up in knn_pairs:
        # Lowe's ratio test with threshold 0.9.
        if best.distance < 0.9 * runner_up.distance:
            accepted.append([best])
            pt1 = kp1[best.queryIdx].pt
            pt2 = kp2[best.trainIdx].pt
            if (pt1, pt2) not in coords:
                coords.append((pt1, pt2))
    # cv2.drawMatchesKnn expects list of lists as matches.
    canvas = cv2.drawMatchesKnn(img1, kp1, img2, kp2, accepted, flags=2, outImg=None)
    plt.imshow(canvas)
    plt.show()
    return coords
def get3RandPoints(coords):
    """Return three distinct correspondences sampled uniformly at random.

    Fix: the original drew indices with rejection sampling in a ``while``
    loop, which never terminates when ``len(coords) < 3``.  Sample without
    replacement instead, and fail fast on undersized input.

    Args:
        coords: sequence of point correspondences (anything indexable).

    Returns:
        List of 3 distinct elements of ``coords``.

    Raises:
        ValueError: if fewer than 3 correspondences are available.
    """
    if len(coords) < 3:
        raise ValueError("need at least 3 point correspondences to fit an affine model")
    indices = np.random.choice(len(coords), size=3, replace=False)
    return [coords[i] for i in indices]
def constructMatrices(randCoords):
    """Build the 6x6 linear system for an affine transform from 3 pairs.

    randCoords -- 3 entries of ((x, y), (x', y')) source/target points.
    Returns (x, x_prime) such that x . params = x_prime with params =
    [a, b, c, d, tx, ty], where x' = a*x + b*y + tx and y' = c*x + d*y + ty.
    """
    coeff = np.zeros((6, 6))
    rhs = np.zeros((6, 1))
    for point in range(3):
        (src_x, src_y) = randCoords[point][0]
        (dst_x, dst_y) = randCoords[point][1]
        row_x = 2 * point
        row_y = row_x + 1
        # Row for the x' equation: a*x + b*y + tx = x'
        coeff[row_x][0] = src_x
        coeff[row_x][1] = src_y
        coeff[row_x][4] = 1
        rhs[row_x] = dst_x
        # Row for the y' equation: c*x + d*y + ty = y'
        coeff[row_y][2] = src_x
        coeff[row_y][3] = src_y
        coeff[row_y][5] = 1
        rhs[row_y] = dst_y
    return (coeff, rhs)
def getAffTrans(x, x_prime):
    """Solve the 6x6 system for the affine parameters.

    Returns (m, t): the 2x2 linear part and the 2x1 translation column.
    Raises numpy.linalg.LinAlgError when x is singular.
    """
    params = np.linalg.solve(x, x_prime)
    a, b = params[0][0], params[1][0]
    c, d = params[2][0], params[3][0]
    m = np.asarray([[a, b], [c, d]])
    t = np.asarray([params[4], params[5]])
    return (m, t)
def getBestM(coords):
    """RANSAC: estimate the affine transform with the most inliers.

    coords -- matched ((x, y), (x', y')) pairs from getMatches.
    Runs 100 random 3-point hypotheses and keeps the 2x3 matrix [m | t]
    whose forward mapping lands within max_radius pixels of the matched
    target point for the largest number of pairs.
    """
    max_radius = 10.0
    max_inliers = 0
    best_M = np.zeros((2,3))
    for n in range(100):
        randCoords = get3RandPoints(coords)
        x, x_prime = constructMatrices(randCoords)
        # A degenerate (e.g. collinear) sample makes the 6x6 system
        # singular; skip it instead of letting np.linalg.solve raise
        # LinAlgError and crash the whole estimation.
        try:
            m, t = getAffTrans(x, x_prime)
        except np.linalg.LinAlgError:
            continue
        this_inliers = 0
        for item in coords:
            x_p = item[1][0]
            y_p = item[1][1]
            x_t = item[0][0]
            y_t = item[0][1]
            actual_pos = np.asarray((x_p, y_p))
            # Map the source point through the hypothesis and compare.
            new_pos = (np.dot(m, np.asarray([ [x_t], [y_t]])) + t).T
            if (np.absolute(np.linalg.norm(new_pos - actual_pos)) < max_radius):
                this_inliers += 1
        if (this_inliers > max_inliers):
            best_M = np.hstack((m, t))
            max_inliers = this_inliers
    print("max_inliers", max_inliers)
    print("best_M: \n", best_M)
    return best_M
def affineTrans(best_M, gray1, gray2):
    """Warp gray1 with the estimated affine matrix and display the result.

    best_M -- 2x3 affine matrix [m | t] from getBestM
    gray1  -- source grayscale image to warp
    gray2  -- target grayscale image, shown for visual comparison
    """
    rows,cols = gray1.shape
    dst = cv2.warpAffine(gray1,best_M,(cols,rows))
    plt.imshow(gray2, cmap='gray')
    plt.title("Actual")
    plt.show()
    plt.subplot(121),plt.imshow(gray1, cmap='gray'),plt.title('Input')
    plt.subplot(122),plt.imshow(dst, cmap='gray'),plt.title('Output')
    plt.show()
def image_alignment(filename1, filename2):
    """End-to-end SIFT + RANSAC alignment of two images (problem 2 driver)."""
    kp1, des1, kp2, des2, img1, img2, gray1, gray2 = getDescriptors(filename1, filename2)
    coords = getMatches(des1, des2, kp1, kp2, img1, img2)
    best_M = getBestM(coords)
    affineTrans(best_M, gray1, gray2)
def homogeneousCoords(image, world):
    """Append a row of ones to the image and world point arrays.

    image -- 2xN image points; world -- 3xN world points.
    Returns the homogeneous 3xN and 4xN versions.
    """
    def to_hom(points):
        ones_row = np.ones((1, points.shape[1]))
        return np.concatenate((points, ones_row), axis=0)
    return (to_hom(image), to_hom(world))
def getA(image_h, world_h):
    """Build the DLT design matrix A for camera-matrix estimation.

    image_h -- 3xN homogeneous image points
    world_h -- 4xN homogeneous world points
    Returns a 2N x 12 matrix; each correspondence contributes two rows of
    the standard x cross (P X) = 0 constraints.  (Previously the shape was
    hard-coded to 20 x 12, silently assuming exactly 10 points; for 10
    points the output is identical.)
    """
    num_points = image_h.shape[1]
    A = np.zeros((2 * num_points, 12))
    for i in range(num_points):
        x = image_h.T[i][0]
        y = image_h.T[i][1]
        w = image_h.T[i][2]
        x_world = x * world_h.T[i]
        y_world = y * world_h.T[i]
        w_world = w * world_h.T[i]
        A[i*2][4:8] = -w_world
        A[i*2][8:12] = y_world
        A[i*2 + 1][0:4] = w_world
        A[i*2 + 1][8:12] = -x_world
    return A
def getAndVerifyP(A, world_h, image):
    """Solve A.p = 0 for the 3x4 camera matrix P and report residuals.

    P is the right singular vector of A with the smallest singular value,
    reshaped to 3x4.  Prints the A.p residual and the average
    world-to-image reprojection error, then returns P.
    """
    flat_p = np.linalg.svd(A)[2][-1]
    P = flat_p.reshape((3,4))
    print("P: \n", P)
    residual = np.dot(A, flat_p)
    avg_zero_prod_error = np.average(residual - np.zeros(residual.shape))
    print("average error in A.p calculation:", avg_zero_prod_error)
    projected = np.dot(P, world_h)
    # Convert the projected homogeneous points back to Cartesian.
    projected_cart = np.asarray([projected[0] / projected[2],
                                 projected[1] / projected[2]])
    avg_projection_error = np.average(np.abs(projected_cart - image))
    print("average world-to-image projection error:", avg_projection_error)
    return P
def getAndVerifyC(P):
    """Recover the camera centre C (the null vector of P) and report PC=0.

    Returns the inhomogeneous 3-vector C (homogeneous C divided by its
    last component).
    """
    null_vec = np.linalg.svd(P)[2][-1]
    residual = np.dot(P, null_vec)
    print("average error in PC=0: ", np.average(np.absolute(residual)))
    print("C_homogenous: \n", null_vec)
    return null_vec[:3] / null_vec[3]
def camParams(filename1, filename2):
    """Estimate camera matrix P and camera centre C (problem 3 driver).

    filename1 -- text file of 2xN image points (np.loadtxt layout)
    filename2 -- text file of 3xN world points
    """
    image = np.loadtxt(filename1)
    world = np.loadtxt(filename2)
    image_h, world_h = homogeneousCoords(image, world)
    A = getA(image_h, world_h)
    P = getAndVerifyP(A, world_h, image)
    C = getAndVerifyC(P)
    print("C: \n", C)
def getCenters(image_points):
    """Stack the axis-0 means of the x (image_points[0]) and y
    (image_points[1]) coordinate arrays into a 2-row array."""
    return np.vstack((np.mean(image_points[0], axis=0),
                      np.mean(image_points[1], axis=0)))
def getW(image_points, centers):
    """Build the centred measurement matrix W for factorization.

    Subtracts the per-point centroids from a copy of the tracked x/y
    coordinates, then stacks the transposed x block on top of the
    transposed y block.
    """
    centered = np.copy(image_points)
    num_rows = len(image_points[0])
    for row in range(num_rows):
        centered[0][row] -= centers[0]
        centered[1][row] -= centers[1]
    return np.vstack((centered[0].T, centered[1].T))
def showStructMotResults(W, centers):
    """Factor the centred measurement matrix and display the 3-D structure.

    Uses the rank-3 truncation of the SVD of W: U*D gives the motion
    (camera) part, V gives the recovered 3-D points, which are scatter
    plotted.
    """
    U, D, V = np.linalg.svd(W)
    # Motion matrix: first three left singular vectors scaled by their
    # singular values.
    M_i_matrix = np.dot(U[:,0:3], np.diag(D[0:3]))
    print("M1: \n", M_i_matrix[0:2,:])
    print("t1: \n", centers[:,0])
    print("3d coords of first 10 world points: \n", V.T[0:10,0:3])
    x_s = V.T[:,0]
    y_s = V.T[:,1]
    z_s = V.T[:,2]
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(xs=x_s, ys=y_s, zs=z_s)
    plt.show()
def structFromMot(filename1):
    """Structure-from-motion driver (problem 4): load tracked points from a
    .mat file and recover/display the 3-D structure."""
    sfm_points = scipy.io.loadmat(filename1)
    image_points = sfm_points['image_points']
    centers = getCenters(image_points)
    W = getW(image_points, centers)
    showStructMotResults(W, centers)
# Assignment driver: runs every problem in sequence when the module is
# executed (or imported).  Input files are expected under ./assignment1/.
print("problem 1: image filtering")
image_blurring("./assignment1/parrot_grey.png", 3)
print("problem 2: image alignment")
image_alignment("./assignment1/book.pgm", "./assignment1/scene.pgm")
print("problem 3: estimating camera parameters")
camParams("./assignment1/image.txt", "./assignment1/world.txt")
print("problem 4: structure from motion")
structFromMot("./assignment1/sfm_points.mat")
| [
"noreply@github.com"
] | sharmalakshay93.noreply@github.com |
cc21f39dcbd925fbf3d3f63670883ab6349ac2a5 | 6d7c7f0240dd31a032ee7ff729874ea74642a5c1 | /data_structures/queue/circular_queue.py | 55396bfc569765e4212c7ca61657edd4677e8843 | [] | no_license | gandoufu/algorithms-and-datastructures | 0494efa6fa41eaa21574fda2d6fbe43d922e5e01 | 0369b7ca82623eb8ecba27b15e882cdf6183582b | refs/heads/master | 2020-06-10T15:51:24.001221 | 2019-08-16T02:42:55 | 2019-08-16T02:42:55 | 193,664,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,913 | py | """
循环队列代码实现,特点:
1. 入队/出队时间复杂度 O(1)
2. 支持动态扩容缩容
"""
class CircularQueue(object):
    """Ring-buffer FIFO queue backed by a plain list.

    Enqueue/dequeue are amortized O(1); the backing store doubles when
    full and halves when occupancy drops below a quarter of the capacity.
    One spare slot is kept in the backing list so that "full" and "empty"
    can be told apart by the head/tail indices alone.
    """
    def __init__(self, capacity=10):
        self.capacity = capacity
        # One extra slot distinguishes a full queue from an empty one.
        self.entries = [None] * (capacity + 1)
        self.head = 0
        self.tail = 0
        self.size = 0
    def get_size(self):
        """Number of elements currently stored."""
        return self.size
    def get_capacity(self):
        """Number of elements the queue can hold before growing."""
        return self.capacity
    def enqueue(self, item):
        """Append item at the tail, doubling the capacity when full."""
        if (self.tail + 1) % len(self.entries) == self.head:
            self.resize(self.capacity * 2)
        self.entries[self.tail] = item
        self.tail = (self.tail + 1) % len(self.entries)
        self.size += 1
    def dequeue(self):
        """Pop and return the head element; None (with a message) if empty."""
        if self.head == self.tail:
            print("Can't dequeue from an empty queue")
            return
        item = self.entries[self.head]
        self.entries[self.head] = None
        self.head = (self.head + 1) % len(self.entries)
        self.size -= 1
        # Shrink when occupancy falls below a quarter of the capacity,
        # but never shrink an already-empty queue.
        if self.size and self.size < self.capacity // 4:
            self.resize(self.capacity // 2)
        return item
    def resize(self, new_capacity):
        """Re-pack the live elements into a fresh buffer of new_capacity."""
        repacked = [None] * (new_capacity + 1)
        old_slots = len(self.entries)
        for offset in range(self.size):
            repacked[offset] = self.entries[(self.head + offset) % old_slots]
        self.capacity = new_capacity
        self.entries = repacked
        self.head = 0
        self.tail = self.size
    def traversal(self):
        """Print the queued elements from head to tail on one line."""
        slots = len(self.entries)
        for offset in range(self.size):
            print(self.entries[(self.head + offset) % slots], end=' ')
        print()
| [
"tangcugandoufu@163.com"
] | tangcugandoufu@163.com |
878a8d6f13a4d962da19b20180204a0a90f19306 | 74c368b2511fd62cb4f71db64bd728d0354d7191 | /refinenet/datasets.py | 6ea166c7a35a6e2ea5c30236b9881e9fa3bc3e65 | [] | no_license | nocotan/RefineNet | 318e8867eca263127e573323f0225934adcf77b8 | 05e5a465807016b913f1f2d58a14c0fdad72beed | refs/heads/master | 2021-04-03T06:07:40.295234 | 2018-03-20T14:48:44 | 2018-03-20T14:48:44 | 124,654,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,754 | py | # -*- coding: utf-8 -*-
import os
import random
import cv2
import numpy as np
import PIL.Image
from chainer.dataset import dataset_mixin
class ImageDataset(dataset_mixin.DatasetMixin):
    """Chainer dataset yielding augmented (image, label) pairs for
    semantic segmentation.

    Expects "<data_dir>/images/<id>.jpg" and "<data_dir>/labels/<id>.png"
    for each id listed (one per line) in the data_list file.  Every access
    applies random rescaling, padding, random cropping and a random
    horizontal flip.
    """
    def __init__(self, data_dir, data_list, crop_size=(300, 300)):
        # data_dir  -- dataset root directory
        # data_list -- list file (relative to data_dir) of sample ids
        # crop_size -- (height, width) of the crop returned by get_example
        self.data_dir = data_dir
        self.data_list = os.path.join(self.data_dir, data_list)
        self.crop_size = crop_size
        self.crop_h = self.crop_size[0]
        self.crop_w = self.crop_size[1]
        self.img_ids = [i_id.strip() for i_id in open(self.data_list)]
        self.files = []
        for name in self.img_ids:
            img_file = os.path.join(self.data_dir, "images/%s.jpg" % name)
            label_file = os.path.join(self.data_dir, "labels/%s.png" % name)
            self.files.append({
                "image": img_file,
                "label": label_file,
                "name": name,
            })
    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.files)
    def generate_scale_label(self, image, label):
        """Rescale image and label by a random factor in [0.5, 1.6].

        The label uses nearest-neighbour interpolation so class ids are
        never blended.
        """
        f_scale = 0.5 + random.randint(0, 11) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale,
                           interpolation=cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale,
                           interpolation=cv2.INTER_NEAREST)
        return image, label
    def get_example(self, i):
        """Load, augment and return sample i as a (C, H, W) float32 image
        and an (H, W) float32 label map."""
        datafiles = self.files[i]
        image = cv2.imread(datafiles["image"], cv2.IMREAD_COLOR)
        label = np.asarray(PIL.Image.open(datafiles["label"]), dtype=np.int32)
        image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.int32)
        # Rough per-channel mean subtraction (128 for each BGR channel).
        image -= (128, 128, 128)
        img_h, img_w = label.shape
        pad_h = max(self.crop_size[0] - img_h, 0)
        pad_w = max(self.crop_size[1] - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            # Pad the image with zeros and the label with 255 -- presumably
            # the "ignore" class id; TODO confirm against the loss function.
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
                                         pad_w, cv2.BORDER_CONSTANT,
                                         value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
                                           pad_w, cv2.BORDER_CONSTANT,
                                           value=(255,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        # Random crop origin; random.randint bounds are inclusive.
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        # HWC -> CHW for the network input.
        image = image.transpose((2, 0, 1))
        # flip is -1 (mirror) or 1 (identity), applied along the width axis.
        flip = np.random.choice(2) * 2 - 1
        image = image[:, :, ::flip]
        label = label[:, ::flip]
        return image.copy(), label.copy()
| [
"noconoco.lib@gmail.com"
] | noconoco.lib@gmail.com |
6fa115f7117e7df092818d0f955e4f3afd3608ee | 42a48a3c2ea681e1330a77bb8723543177dcbc7e | /week 10/inClass8.py | 2d1f64c055fb127e5163a7db92c0c46437272378 | [] | no_license | tarasewiczregan/EM-224-Informatics-and-Software-Development | e1da2497180877340022c25bf95cdbd3b21c6bca | 2f3fa06e9bd45102c2f03502c5050bf059ba6637 | refs/heads/master | 2021-01-07T00:05:10.439756 | 2020-10-03T15:26:27 | 2020-10-03T15:26:27 | 241,520,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | #Regan Tarasewicz
#"I pledge my honor that I have abided by the Stevens Honor System."
#In Class 8 - Due 04-09-2020
#stackoverflow showed me how to strip things using apply, and the Counter
import matplotlib.pyplot as plt
import pandas as pd
from nltk.tokenize import word_tokenize
import string
from collections import Counter
df = pd.read_csv('hoboken_tweets.csv')
file = open('stopwords_en.txt', 'r')
stopw = file.read()
file.close()
stops = word_tokenize(stopw)
df['text'] = df['text'].str.lower() #makes words lowercase in text column
df['text1'] = df['text'].apply(lambda x: ' '.join([w for w in str(x).split() if not w in stops]))
df['text'] = df['text1'].apply(lambda x: ' '.join([w for w in str(x).split() if w.isalpha()]))
topWords = Counter(' '.join(df['text']).split()).most_common(10) #finds most common words from dataframe
print('\nThe following words were tweeted the most, with counts shown:')
for a, b in topWords:
print(a, b)
print('\nThe following five screen names tweeted the most, with number of tweets shown:')
print(df['screen_name'].value_counts().nlargest(10))
print('\nGraph for most common words:')
plt.bar(range(len(topWords)), [val[1] for val in topWords], align = 'center')
plt.xticks(range(len(topWords)), [val[0] for val in topWords])
plt.xticks(rotation = 70)
plt.show()
temp1 = df['screen_name'].value_counts().keys().tolist() #long way around getting top users into list
temp2 = df['screen_name'].value_counts().tolist()
topUsers = [[temp1[0], temp2[0]] , [temp1[1], temp2[1]] , [temp1[2], temp2[2]] , [temp1[3], temp2[3]] , [temp1[4], temp2[4]] , [temp1[5], temp2[5]] , [temp1[6], temp2[6]] , [temp1[7], temp2[7]] , [temp1[8], temp2[8]] , [temp1[9], temp2[9]]]
print('\nGraph for most common users:')
plt.bar(range(len(topUsers)), [val[1] for val in topUsers], align = 'center')
plt.xticks(range(len(topUsers)), [val[0] for val in topUsers])
plt.xticks(rotation = 70)
plt.show()
| [
"rtarasew@stevens.edu"
] | rtarasew@stevens.edu |
a99e3b4e2e14fbaf1cd838c3675c7067f8b11917 | 2d36cdee8ab997aef4dd15138f84fc03d99a12c3 | /codata.py | ec86df8c6521dc567a23ed0030aedd8f517d79b4 | [
"MIT"
] | permissive | vincentdavis/Colorado-Property-Data | 53524902cc9cea3958769303411edd27406338d5 | c2df135a152aa0a9392c5e0738ff22b2f9b9da3c | refs/heads/master | 2020-04-11T16:06:29.867479 | 2016-02-03T15:10:12 | 2016-02-03T15:10:12 | 48,894,032 | 0 | 1 | null | 2016-02-03T15:10:12 | 2016-01-02T00:40:37 | Python | UTF-8 | Python | false | false | 1,221 | py | from flask import Flask, render_template, g, request
from db import DB, Parcel, Account, LienAuction
#from flask_table import Table, Col
#DB.connect()
app = Flask(__name__)
@app.before_request
def before_request():
    """Open the shared peewee database connection before each request."""
    g.db = DB
    g.db.connect()
@app.after_request
def after_request(response):
    """Close the database connection once the response is ready."""
    g.db.close()
    return response
# # Declare your table
# class ItemTable(Table):
# name = Col('Name')
# description = Col('Description')
@app.route('/')
def index():
    """Landing page: return a plain-text greeting."""
    greeting = "Hello World"
    return greeting
@app.route('/search')
def search():
    """List lien auctions for the tax year given by the ?q= parameter.

    Falls back to tax year 2013 when no query is supplied.  The Parcel
    model is passed to the template so entries can resolve parcel details.
    (Dead commented-out exploratory code removed.)
    """
    search_query = request.args.get('q')
    if search_query:
        entries = LienAuction.select().where(LienAuction.Tax_Year == search_query)
    else:
        entries = LienAuction.select().where(LienAuction.Tax_Year == 2013)
    return render_template('accounts.html', entries=entries, Parcel=Parcel)
if __name__ == '__main__':
    #app.run()
    # Debug mode: auto-reload plus in-browser tracebacks (not for production).
    app.run(debug=True)
| [
"vincent@vincentdavis.net"
] | vincent@vincentdavis.net |
dc4674f803794f7e51eeb77fef6368cc650bb9d5 | 8d783c8b9b054ef4bad484b476587eaca36465fd | /venv/lib/python3.6/site-packages/pip/req/req_install.py | 522735c7a758aeb1301acd3425219beaf69cb37d | [] | no_license | AndyHide/microblog | d7e2d0cc8022abda6e48b1c09643e01b2f3aeced | 7d78621530eb2403204815dacb1c63d28a2124b9 | refs/heads/master | 2022-12-23T22:21:55.405525 | 2019-12-11T17:46:44 | 2019-12-11T17:46:44 | 191,319,739 | 0 | 0 | null | 2022-12-08T05:16:04 | 2019-06-11T07:41:34 | Python | UTF-8 | Python | false | false | 46,537 | py | from __future__ import absolute_import
import logging
import os
import pip.wheel
import re
import shutil
import sys
import tempfile
import traceback
import warnings
import zipfile
from distutils import sysconfig
from distutils.util import change_root
from email.parser import FeedParser
from pip._vendor import pkg_resources, six
from pip._vendor.packaging import specifiers
from pip._vendor.packaging.markers import Marker
from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging.version import Version, parse as parse_version
from pip._vendor.six.moves import configparser
from pip.compat import native_str, get_stdlib, WINDOWS
from pip.download import is_url, url_to_path, path_to_url, is_archive_file
from pip.exceptions import (
InstallationError, UninstallationError,
)
from pip.locations import (
bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME, bin_user,
)
from pip.req.req_uninstall import UninstallPathSet
from pip.utils import (
display_path, rmtree, ask_path_exists, backup_dir, is_installable_dir,
dist_in_usersite, dist_in_site_packages, egg_link_path,
call_subprocess, read_text_file, FakeFile, _make_build_dir, ensure_dir,
get_installed_version, normalize_path, dist_is_local,
)
from pip.utils.deprecation import RemovedInPip10Warning
from pip.utils.hashes import Hashes
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip.utils.ui import open_spinner
from pip.vcs import vcs
from pip.wheel import move_wheel_files, Wheel
logger = logging.getLogger(__name__)

# All comparison operators understood by PEP 440 specifiers (==, !=, <=, ...),
# used to produce a better hint for malformed requirement strings.
operators = specifiers.Specifier._operators.keys()
def _strip_extras(path):
m = re.match(r'^(.+)(\[[^\]]+\])$', path)
extras = None
if m:
path_no_extras = m.group(1)
extras = m.group(2)
else:
path_no_extras = path
return path_no_extras, extras
def _safe_extras(extras):
    """Normalise every extra name via pkg_resources.safe_extra, deduplicated."""
    return {pkg_resources.safe_extra(extra) for extra in extras}
class InstallRequirement(object):
    def __init__(self, req, comes_from, source_dir=None, editable=False,
                 link=None, as_egg=False, update=True,
                 pycompile=True, markers=None, isolated=False, options=None,
                 wheel_cache=None, constraint=False):
        """Build the full requirement state.

        req        -- a packaging Requirement, a PEP 508 string, or None
                      for an as-yet unnamed requirement (e.g. a bare URL)
        comes_from -- parent requirement / requirements file (or a string)
                      that pulled this one in; used in messages only
        source_dir -- where the source is / will be unpacked
        editable   -- True for ``pip install -e`` style requirements
        link       -- pip.index.Link to the artifact, if already known
        markers    -- PEP 508 environment markers (defaults to req.marker)
        constraint -- True when declared in a constraints file
        """
        self.extras = ()
        if isinstance(req, six.string_types):
            try:
                req = Requirement(req)
            except InvalidRequirement:
                # Build a targeted hint for the two most common mistakes
                # (a filesystem path, or '=' instead of '==').
                if os.path.sep in req:
                    add_msg = "It looks like a path. Does it exist ?"
                elif '=' in req and not any(op in req for op in operators):
                    add_msg = "= is not a valid operator. Did you mean == ?"
                else:
                    add_msg = traceback.format_exc()
                raise InstallationError(
                    "Invalid requirement: '%s'\n%s" % (req, add_msg))
            self.extras = _safe_extras(req.extras)
        self.req = req
        self.comes_from = comes_from
        self.constraint = constraint
        self.source_dir = source_dir
        self.editable = editable
        self._wheel_cache = wheel_cache
        # original_link keeps the pre-wheel-cache link for later reference.
        self.link = self.original_link = link
        self.as_egg = as_egg
        if markers is not None:
            self.markers = markers
        else:
            self.markers = req and req.marker
        self._egg_info_path = None
        # This holds the pkg_resources.Distribution object if this requirement
        # is already available:
        self.satisfied_by = None
        # This hold the pkg_resources.Distribution object if this requirement
        # conflicts with another installed distribution:
        self.conflicts_with = None
        # Temporary build location
        self._temp_build_dir = None
        # Used to store the global directory where the _temp_build_dir should
        # have been created. Cf _correct_build_location method.
        self._ideal_build_dir = None
        # True if the editable should be updated:
        self.update = update
        # Set to True after successful installation
        self.install_succeeded = None
        # UninstallPathSet of uninstalled distribution (for possible rollback)
        self.uninstalled = None
        # Set True if a legitimate do-nothing-on-uninstall has happened - e.g.
        # system site packages, stdlib packages.
        self.nothing_to_uninstall = False
        self.use_user_site = False
        self.target_dir = None
        self.options = options if options else {}
        self.pycompile = pycompile
        # Set to True after successful preparation of this requirement
        self.prepared = False
        self.isolated = isolated
    @classmethod
    def from_editable(cls, editable_req, comes_from=None, default_vcs=None,
                      isolated=False, options=None, wheel_cache=None,
                      constraint=False):
        """Build an editable (``-e``) requirement from its spec string.

        editable_req -- e.g. "git+https://host/repo#egg=name" or a local
        path; parse_editable splits it into (name, url, extras).
        """
        from pip.index import Link
        name, url, extras_override = parse_editable(
            editable_req, default_vcs)
        if url.startswith('file:'):
            source_dir = url_to_path(url)
        else:
            source_dir = None
        res = cls(name, comes_from, source_dir=source_dir,
                  editable=True,
                  link=Link(url),
                  constraint=constraint,
                  isolated=isolated,
                  options=options if options else {},
                  wheel_cache=wheel_cache)
        # Extras from the "#egg=name[extra]" fragment override the ones
        # derived from the requirement itself.
        if extras_override is not None:
            res.extras = _safe_extras(extras_override)
        return res
    @classmethod
    def from_line(
            cls, name, comes_from=None, isolated=False, options=None,
            wheel_cache=None, constraint=False):
        """Creates an InstallRequirement from a name, which might be a
        requirement, directory containing 'setup.py', filename, or URL.
        """
        from pip.index import Link
        # URLs may legitimately contain ';', so require '; ' before an
        # environment marker there; plain specifiers only need ';'.
        if is_url(name):
            marker_sep = '; '
        else:
            marker_sep = ';'
        if marker_sep in name:
            name, markers = name.split(marker_sep, 1)
            markers = markers.strip()
            if not markers:
                markers = None
            else:
                markers = Marker(markers)
        else:
            markers = None
        name = name.strip()
        req = None
        path = os.path.normpath(os.path.abspath(name))
        link = None
        extras = None
        if is_url(name):
            link = Link(name)
        else:
            p, extras = _strip_extras(path)
            # Treat as a local source directory only when it is spelled
            # like a path (contains a separator or starts with '.').
            if (os.path.isdir(p) and
                    (os.path.sep in name or name.startswith('.'))):
                if not is_installable_dir(p):
                    raise InstallationError(
                        "Directory %r is not installable. File 'setup.py' "
                        "not found." % name
                    )
                link = Link(path_to_url(p))
            elif is_archive_file(p):
                if not os.path.isfile(p):
                    logger.warning(
                        'Requirement %r looks like a filename, but the '
                        'file does not exist',
                        name
                    )
                link = Link(path_to_url(p))
        # it's a local file, dir, or url
        if link:
            # Handle relative file URLs
            if link.scheme == 'file' and re.search(r'\.\./', link.url):
                link = Link(
                    path_to_url(os.path.normpath(os.path.abspath(link.path))))
            # wheel file
            if link.is_wheel:
                wheel = Wheel(link.filename)  # can raise InvalidWheelFilename
                req = "%s==%s" % (wheel.name, wheel.version)
            else:
                # set the req to the egg fragment. when it's not there, this
                # will become an 'unnamed' requirement
                req = link.egg_fragment
        # a requirement specifier
        else:
            req = name
        options = options if options else {}
        res = cls(req, comes_from, link=link, markers=markers,
                  isolated=isolated, options=options,
                  wheel_cache=wheel_cache, constraint=constraint)
        if extras:
            # Parse "[a,b]" via a placeholder requirement to normalise them.
            res.extras = _safe_extras(
                Requirement('placeholder' + extras).extras)
        return res
def __str__(self):
if self.req:
s = str(self.req)
if self.link:
s += ' from %s' % self.link.url
else:
s = self.link.url if self.link else None
if self.satisfied_by is not None:
s += ' in %s' % display_path(self.satisfied_by.location)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += ' (from %s)' % comes_from
return s
def __repr__(self):
return '<%s object: %s editable=%r>' % (
self.__class__.__name__, str(self), self.editable)
    def populate_link(self, finder, upgrade, require_hashes):
        """Ensure that if a link can be found for this, that it is found.

        Note that self.link may still be None - if Upgrade is False and the
        requirement is already installed.

        If require_hashes is True, don't use the wheel cache, because cached
        wheels, always built locally, have different hashes than the files
        downloaded from the index server and thus throw false hash mismatches.
        Furthermore, cached wheels at present have undeterministic contents due
        to file modification times.
        """
        if self.link is None:
            self.link = finder.find_requirement(self, upgrade)
        if self._wheel_cache is not None and not require_hashes:
            old_link = self.link
            # Prefer a locally cached wheel for this link, when one exists.
            self.link = self._wheel_cache.cached_wheel(self.link, self.name)
            if old_link != self.link:
                logger.debug('Using cached wheel link: %s', self.link)
    @property
    def specifier(self):
        """The PEP 440 specifier set of the underlying requirement."""
        return self.req.specifier
@property
def is_pinned(self):
"""Return whether I am pinned to an exact version.
For example, some-package==1.2 is pinned; some-package>1.2 is not.
"""
specifiers = self.specifier
return (len(specifiers) == 1 and
next(iter(specifiers)).operator in ('==', '==='))
def from_path(self):
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += '->' + comes_from
return s
    def build_location(self, build_dir):
        """Return (creating it if needed) the directory to unpack/build in.

        Unnamed requirements get a fresh temporary directory (relocated
        later by _correct_build_location once egg_info reveals the name);
        named ones get "<build_dir>/<name>".
        """
        if self._temp_build_dir is not None:
            return self._temp_build_dir
        if self.req is None:
            # for requirement via a path to a directory: the name of the
            # package is not available yet so we create a temp directory
            # Once run_egg_info will have run, we'll be able
            # to fix it via _correct_build_location
            # Some systems have /tmp as a symlink which confuses custom
            # builds (such as numpy). Thus, we ensure that the real path
            # is returned.
            self._temp_build_dir = os.path.realpath(
                tempfile.mkdtemp('-build', 'pip-')
            )
            self._ideal_build_dir = build_dir
            return self._temp_build_dir
        if self.editable:
            name = self.name.lower()
        else:
            name = self.name
        # FIXME: Is there a better place to create the build_dir? (hg and bzr
        # need this)
        if not os.path.exists(build_dir):
            logger.debug('Creating directory %s', build_dir)
            _make_build_dir(build_dir)
        return os.path.join(build_dir, name)
    def _correct_build_location(self):
        """Move self._temp_build_dir to self._ideal_build_dir/self.req.name

        For some requirements (e.g. a path to a directory), the name of the
        package is not available until we run egg_info, so the build_location
        will return a temporary directory and store the _ideal_build_dir.

        This is only called by self.run_egg_info, after the requirement's
        name has become known, to fix the temporary build directory.
        """
        if self.source_dir is not None:
            return
        assert self.req is not None
        assert self._temp_build_dir
        assert self._ideal_build_dir
        old_location = self._temp_build_dir
        # Clearing _temp_build_dir makes build_location() compute the
        # final, name-based path instead of returning the temp dir.
        self._temp_build_dir = None
        new_location = self.build_location(self._ideal_build_dir)
        if os.path.exists(new_location):
            raise InstallationError(
                'A package already exists in %s; please remove it to continue'
                % display_path(new_location))
        logger.debug(
            'Moving package %s from %s to new location %s',
            self, display_path(old_location), display_path(new_location),
        )
        shutil.move(old_location, new_location)
        self._temp_build_dir = new_location
        self._ideal_build_dir = None
        self.source_dir = new_location
        # Force egg_info_path() to be recomputed for the new location.
        self._egg_info_path = None
    @property
    def name(self):
        """The sanitised project name, or None for unnamed requirements."""
        if self.req is None:
            return None
        return native_str(pkg_resources.safe_name(self.req.name))
    @property
    def setup_py_dir(self):
        """Directory holding setup.py: the source dir joined with any
        "subdirectory=" fragment of the link."""
        return os.path.join(
            self.source_dir,
            self.link and self.link.subdirectory_fragment or '')
    @property
    def setup_py(self):
        """Absolute path to the source's setup.py.

        Raises InstallationError when setuptools cannot be imported, since
        setup.py cannot be executed without it.
        """
        assert self.source_dir, "No source dir for %s" % self
        try:
            import setuptools  # noqa
        except ImportError:
            if get_installed_version('setuptools') is None:
                add_msg = "Please install setuptools."
            else:
                add_msg = traceback.format_exc()
            # Setuptools is not available
            raise InstallationError(
                "Could not import setuptools which is required to "
                "install from a source distribution.\n%s" % add_msg
            )
        setup_py = os.path.join(self.setup_py_dir, 'setup.py')
        # Python2 __file__ should not be unicode
        if six.PY2 and isinstance(setup_py, six.text_type):
            setup_py = setup_py.encode(sys.getfilesystemencoding())
        return setup_py
    def run_egg_info(self):
        """Run ``setup.py egg_info`` in the unpacked source.

        Generates the metadata consumed by pkg_info()/egg_info_data().
        For still-unnamed requirements this also fills in self.req from
        the generated metadata and relocates the temporary build dir.
        """
        assert self.source_dir
        if self.name:
            logger.debug(
                'Running setup.py (path:%s) egg_info for package %s',
                self.setup_py, self.name,
            )
        else:
            logger.debug(
                'Running setup.py (path:%s) egg_info for package from %s',
                self.setup_py, self.link,
            )
        with indent_log():
            script = SETUPTOOLS_SHIM % self.setup_py
            base_cmd = [sys.executable, '-c', script]
            if self.isolated:
                base_cmd += ["--no-user-cfg"]
            egg_info_cmd = base_cmd + ['egg_info']
            # We can't put the .egg-info files at the root, because then the
            # source code will be mistaken for an installed egg, causing
            # problems
            if self.editable:
                egg_base_option = []
            else:
                egg_info_dir = os.path.join(self.setup_py_dir, 'pip-egg-info')
                ensure_dir(egg_info_dir)
                egg_base_option = ['--egg-base', 'pip-egg-info']
            call_subprocess(
                egg_info_cmd + egg_base_option,
                cwd=self.setup_py_dir,
                show_stdout=False,
                command_desc='python setup.py egg_info')
        if not self.req:
            # Pin the requirement to the version egg_info produced; '==='
            # (arbitrary equality) is needed for non-PEP-440 versions.
            if isinstance(parse_version(self.pkg_info()["Version"]), Version):
                op = "=="
            else:
                op = "==="
            self.req = Requirement(
                "".join([
                    self.pkg_info()["Name"],
                    op,
                    self.pkg_info()["Version"],
                ])
            )
            self._correct_build_location()
        else:
            metadata_name = canonicalize_name(self.pkg_info()["Name"])
            if canonicalize_name(self.req.name) != metadata_name:
                logger.warning(
                    'Running setup.py (path:%s) egg_info for package %s '
                    'produced metadata for project name %s. Fix your '
                    '#egg=%s fragments.',
                    self.setup_py, self.name, metadata_name, self.name
                )
                # Trust the generated metadata over the user-supplied name.
                self.req = Requirement(metadata_name)
    def egg_info_data(self, filename):
        """Return the text of *filename* from the egg metadata, or None.

        Prefers the metadata of an already-installed satisfying dist;
        otherwise reads from the egg-info directory of the unpacked source.
        """
        if self.satisfied_by is not None:
            if not self.satisfied_by.has_metadata(filename):
                return None
            return self.satisfied_by.get_metadata(filename)
        assert self.source_dir
        filename = self.egg_info_path(filename)
        if not os.path.exists(filename):
            return None
        data = read_text_file(filename)
        return data
    def egg_info_path(self, filename):
        """Return the absolute path of *filename* inside the .egg-info dir.

        Locates (and caches in self._egg_info_path) the .egg-info
        directory: searched recursively under the source tree for
        editables, otherwise under pip-egg-info/.
        """
        if self._egg_info_path is None:
            if self.editable:
                base = self.source_dir
            else:
                base = os.path.join(self.setup_py_dir, 'pip-egg-info')
            filenames = os.listdir(base)
            if self.editable:
                filenames = []
                for root, dirs, files in os.walk(base):
                    # Skip VCS metadata directories entirely.
                    for dir in vcs.dirnames:
                        if dir in dirs:
                            dirs.remove(dir)
                    # Iterate over a copy of ``dirs``, since mutating
                    # a list while iterating over it can cause trouble.
                    # (See https://github.com/pypa/pip/pull/462.)
                    for dir in list(dirs):
                        # Don't search in anything that looks like a virtualenv
                        # environment
                        if (
                                os.path.lexists(
                                    os.path.join(root, dir, 'bin', 'python')
                                ) or
                                os.path.exists(
                                    os.path.join(
                                        root, dir, 'Scripts', 'Python.exe'
                                    )
                                )):
                            dirs.remove(dir)
                        # Also don't search through tests
                        elif dir == 'test' or dir == 'tests':
                            dirs.remove(dir)
                    filenames.extend([os.path.join(root, dir)
                                     for dir in dirs])
                filenames = [f for f in filenames if f.endswith('.egg-info')]
            if not filenames:
                raise InstallationError(
                    'No files/directories in %s (from %s)' % (base, filename)
                )
            assert filenames, \
                "No files/directories in %s (from %s)" % (base, filename)
            # if we have more than one match, we pick the toplevel one. This
            # can easily be the case if there is a dist folder which contains
            # an extracted tarball for testing purposes.
            if len(filenames) > 1:
                filenames.sort(
                    key=lambda x: x.count(os.path.sep) +
                    (os.path.altsep and x.count(os.path.altsep) or 0)
                )
            self._egg_info_path = os.path.join(base, filenames[0])
        return os.path.join(self._egg_info_path, filename)
    def pkg_info(self):
        """Parse this distribution's PKG-INFO into an email.message object.

        Missing PKG-INFO only logs a warning; the parser is then fed an
        empty string and returns an empty message.
        """
        p = FeedParser()
        data = self.egg_info_data('PKG-INFO')
        if not data:
            logger.warning(
                'No PKG-INFO file found in %s',
                display_path(self.egg_info_path('PKG-INFO')),
            )
        p.feed(data or '')
        return p.close()
_requirements_section_re = re.compile(r'\[(.*?)\]')
    @property
    def installed_version(self):
        """Version string of the currently installed dist, or None."""
        return get_installed_version(self.name)
def assert_source_matches_version(self):
assert self.source_dir
version = self.pkg_info()['version']
if self.req.specifier and version not in self.req.specifier:
logger.warning(
'Requested %s, but installing version %s',
self,
self.installed_version,
)
else:
logger.debug(
'Source in %s has version %s, which satisfies requirement %s',
display_path(self.source_dir),
version,
self,
)
    def update_editable(self, obtain=True):
        """Sync the editable checkout at self.source_dir with its VCS URL.

        obtain -- when True run the VCS obtain (clone/update) step,
                  otherwise only export the sources.
        No-ops for file: URLs and when self.update is False.
        """
        if not self.link:
            logger.debug(
                "Cannot update repository at %s; repository location is "
                "unknown",
                self.source_dir,
            )
            return
        assert self.editable
        assert self.source_dir
        if self.link.scheme == 'file':
            # Static paths don't get updated
            return
        assert '+' in self.link.url, "bad url: %r" % self.link.url
        if not self.update:
            return
        # Editable VCS URLs look like "git+https://...": the part before
        # the '+' selects the backend.
        vc_type, url = self.link.url.split('+', 1)
        backend = vcs.get_backend(vc_type)
        if backend:
            vcs_backend = backend(self.link.url)
            if obtain:
                vcs_backend.obtain(self.source_dir)
            else:
                vcs_backend.export(self.source_dir)
        else:
            assert 0, (
                'Unexpected version control type (in %s): %s'
                % (self.link, vc_type))
    def uninstall(self, auto_confirm=False):
        """
        Uninstall the distribution currently satisfying this requirement.
        Prompts before removing or modifying files unless
        ``auto_confirm`` is True.
        Refuses to delete or modify files outside of ``sys.prefix`` -
        thus uninstallation within a virtual environment can only
        modify that virtual environment, even if the virtualenv is
        linked to global site-packages.

        Builds an ``UninstallPathSet`` of every file/pth entry belonging
        to the installed distribution, then removes them and records the
        set in ``self.uninstalled`` so it can be rolled back or committed.
        """
        if not self.check_if_exists():
            raise UninstallationError(
                "Cannot uninstall requirement %s, not installed" % (self.name,)
            )
        dist = self.satisfied_by or self.conflicts_with
        dist_path = normalize_path(dist.location)
        # Refuse to touch distributions outside this environment.
        if not dist_is_local(dist):
            logger.info(
                "Not uninstalling %s at %s, outside environment %s",
                dist.key,
                dist_path,
                sys.prefix,
            )
            self.nothing_to_uninstall = True
            return
        if dist_path in get_stdlib():
            logger.info(
                "Not uninstalling %s at %s, as it is in the standard library.",
                dist.key,
                dist_path,
            )
            self.nothing_to_uninstall = True
            return
        paths_to_remove = UninstallPathSet(dist)
        develop_egg_link = egg_link_path(dist)
        develop_egg_link_egg_info = '{0}.egg-info'.format(
            pkg_resources.to_filename(dist.project_name))
        egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info)
        # Special case for distutils installed package
        distutils_egg_info = getattr(dist._provider, 'path', None)
        # Uninstall cases order do matter as in the case of 2 installs of the
        # same package, pip needs to uninstall the currently detected version
        if (egg_info_exists and dist.egg_info.endswith('.egg-info') and
                not dist.egg_info.endswith(develop_egg_link_egg_info)):
            # if dist.egg_info.endswith(develop_egg_link_egg_info), we
            # are in fact in the develop_egg_link case
            paths_to_remove.add(dist.egg_info)
            if dist.has_metadata('installed-files.txt'):
                # Paths in installed-files.txt are relative to egg-info dir.
                for installed_file in dist.get_metadata(
                        'installed-files.txt').splitlines():
                    path = os.path.normpath(
                        os.path.join(dist.egg_info, installed_file)
                    )
                    paths_to_remove.add(path)
            # FIXME: need a test for this elif block
            # occurs with --single-version-externally-managed/--record outside
            # of pip
            elif dist.has_metadata('top_level.txt'):
                if dist.has_metadata('namespace_packages.txt'):
                    namespaces = dist.get_metadata('namespace_packages.txt')
                else:
                    namespaces = []
                for top_level_pkg in [
                        p for p
                        in dist.get_metadata('top_level.txt').splitlines()
                        if p and p not in namespaces]:
                    path = os.path.join(dist.location, top_level_pkg)
                    paths_to_remove.add(path)
                    paths_to_remove.add(path + '.py')
                    paths_to_remove.add(path + '.pyc')
                    paths_to_remove.add(path + '.pyo')
        elif distutils_egg_info:
            warnings.warn(
                "Uninstalling a distutils installed project ({0}) has been "
                "deprecated and will be removed in a future version. This is "
                "due to the fact that uninstalling a distutils project will "
                "only partially uninstall the project.".format(self.name),
                RemovedInPip10Warning,
            )
            paths_to_remove.add(distutils_egg_info)
        elif dist.location.endswith('.egg'):
            # package installed by easy_install
            # We cannot match on dist.egg_name because it can slightly vary
            # i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
            paths_to_remove.add(dist.location)
            easy_install_egg = os.path.split(dist.location)[1]
            easy_install_pth = os.path.join(os.path.dirname(dist.location),
                                            'easy-install.pth')
            paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
        elif egg_info_exists and dist.egg_info.endswith('.dist-info'):
            # Wheel-installed distribution: use RECORD-based paths.
            for path in pip.wheel.uninstallation_paths(dist):
                paths_to_remove.add(path)
        elif develop_egg_link:
            # develop egg
            with open(develop_egg_link, 'r') as fh:
                link_pointer = os.path.normcase(fh.readline().strip())
            assert (link_pointer == dist.location), (
                'Egg-link %s does not match installed location of %s '
                '(at %s)' % (link_pointer, self.name, dist.location)
            )
            paths_to_remove.add(develop_egg_link)
            easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
                                            'easy-install.pth')
            paths_to_remove.add_pth(easy_install_pth, dist.location)
        else:
            logger.debug(
                'Not sure how to uninstall: %s - Check: %s',
                dist, dist.location)
        # find distutils scripts= scripts
        if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
            for script in dist.metadata_listdir('scripts'):
                if dist_in_usersite(dist):
                    bin_dir = bin_user
                else:
                    bin_dir = bin_py
                paths_to_remove.add(os.path.join(bin_dir, script))
                if WINDOWS:
                    paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
        # find console_scripts
        if dist.has_metadata('entry_points.txt'):
            if six.PY2:
                options = {}
            else:
                options = {"delimiters": ('=',)}
            config = configparser.SafeConfigParser(**options)
            config.readfp(
                FakeFile(dist.get_metadata_lines('entry_points.txt'))
            )
            if config.has_section('console_scripts'):
                for name, value in config.items('console_scripts'):
                    if dist_in_usersite(dist):
                        bin_dir = bin_user
                    else:
                        bin_dir = bin_py
                    paths_to_remove.add(os.path.join(bin_dir, name))
                    if WINDOWS:
                        # Wrapper artifacts generated for console scripts.
                        paths_to_remove.add(
                            os.path.join(bin_dir, name) + '.exe'
                        )
                        paths_to_remove.add(
                            os.path.join(bin_dir, name) + '.exe.manifest'
                        )
                        paths_to_remove.add(
                            os.path.join(bin_dir, name) + '-script.py'
                        )
        paths_to_remove.remove(auto_confirm)
        self.uninstalled = paths_to_remove
def rollback_uninstall(self):
if self.uninstalled:
self.uninstalled.rollback()
else:
logger.error(
"Can't rollback %s, nothing uninstalled.", self.name,
)
def commit_uninstall(self):
if self.uninstalled:
self.uninstalled.commit()
elif not self.nothing_to_uninstall:
logger.error(
"Can't commit %s, nothing uninstalled.", self.name,
)
    def archive(self, build_dir):
        """Create a zip archive of this requirement's source tree in
        *build_dir*.

        If the target archive already exists the user is prompted to
        (i)gnore, (w)ipe, (b)ackup or (a)bort.  The ``pip-egg-info``
        directory and the pip delete-marker file are excluded.
        """
        assert self.source_dir
        create_archive = True
        archive_name = '%s-%s.zip' % (self.name, self.pkg_info()["version"])
        archive_path = os.path.join(build_dir, archive_name)
        if os.path.exists(archive_path):
            response = ask_path_exists(
                'The file %s exists. (i)gnore, (w)ipe, (b)ackup, (a)bort ' %
                display_path(archive_path), ('i', 'w', 'b', 'a'))
            if response == 'i':
                create_archive = False
            elif response == 'w':
                logger.warning('Deleting %s', display_path(archive_path))
                os.remove(archive_path)
            elif response == 'b':
                dest_file = backup_dir(archive_path)
                logger.warning(
                    'Backing up %s to %s',
                    display_path(archive_path),
                    display_path(dest_file),
                )
                shutil.move(archive_path, dest_file)
            elif response == 'a':
                sys.exit(-1)
        if create_archive:
            zip = zipfile.ZipFile(
                archive_path, 'w', zipfile.ZIP_DEFLATED,
                allowZip64=True
            )
            dir = os.path.normcase(os.path.abspath(self.setup_py_dir))
            for dirpath, dirnames, filenames in os.walk(dir):
                if 'pip-egg-info' in dirnames:
                    dirnames.remove('pip-egg-info')
                for dirname in dirnames:
                    # Store an explicit (empty) directory entry so empty
                    # directories survive the round-trip.
                    dirname = os.path.join(dirpath, dirname)
                    name = self._clean_zip_name(dirname, dir)
                    zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
                    zipdir.external_attr = 0x1ED << 16  # 0o755
                    zip.writestr(zipdir, '')
                for filename in filenames:
                    if filename == PIP_DELETE_MARKER_FILENAME:
                        continue
                    filename = os.path.join(dirpath, filename)
                    name = self._clean_zip_name(filename, dir)
                    zip.write(filename, self.name + '/' + name)
            zip.close()
            logger.info('Saved %s', display_path(archive_path))
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix + os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix)
)
name = name[len(prefix) + 1:]
name = name.replace(os.path.sep, '/')
return name
def match_markers(self, extras_requested=None):
if not extras_requested:
# Provide an extra to safely evaluate the markers
# without matching any extra
extras_requested = ('',)
if self.markers is not None:
return any(
self.markers.evaluate({'extra': extra})
for extra in extras_requested)
else:
return True
def install(self, install_options, global_options=[], root=None,
prefix=None):
if self.editable:
self.install_editable(
install_options, global_options, prefix=prefix)
return
if self.is_wheel:
version = pip.wheel.wheel_version(self.source_dir)
pip.wheel.check_compatibility(version, self.name)
self.move_wheel_files(self.source_dir, root=root, prefix=prefix)
self.install_succeeded = True
return
# Extend the list of global and install options passed on to
# the setup.py call with the ones from the requirements file.
# Options specified in requirements file override those
# specified on the command line, since the last option given
# to setup.py is the one that is used.
global_options += self.options.get('global_options', [])
install_options += self.options.get('install_options', [])
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
temp_location = tempfile.mkdtemp('-record', 'pip-')
record_filename = os.path.join(temp_location, 'install-record.txt')
try:
install_args = self.get_install_args(
global_options, record_filename, root, prefix)
msg = 'Running setup.py install for %s' % (self.name,)
with open_spinner(msg) as spinner:
with indent_log():
call_subprocess(
install_args + install_options,
cwd=self.setup_py_dir,
show_stdout=False,
spinner=spinner,
)
if not os.path.exists(record_filename):
logger.debug('Record file %s not found', record_filename)
return
self.install_succeeded = True
if self.as_egg:
# there's no --always-unzip option we can pass to install
# command so we unable to save the installed-files.txt
return
def prepend_root(path):
if root is None or not os.path.isabs(path):
return path
else:
return change_root(root, path)
with open(record_filename) as f:
for line in f:
directory = os.path.dirname(line)
if directory.endswith('.egg-info'):
egg_info_dir = prepend_root(directory)
break
else:
logger.warning(
'Could not find .egg-info directory in install record'
' for %s',
self,
)
# FIXME: put the record somewhere
# FIXME: should this be an error?
return
new_lines = []
with open(record_filename) as f:
for line in f:
filename = line.strip()
if os.path.isdir(filename):
filename += os.path.sep
new_lines.append(
os.path.relpath(
prepend_root(filename), egg_info_dir)
)
inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt')
with open(inst_files_path, 'w') as f:
f.write('\n'.join(new_lines) + '\n')
finally:
if os.path.exists(record_filename):
os.remove(record_filename)
rmtree(temp_location)
def ensure_has_source_dir(self, parent_dir):
"""Ensure that a source_dir is set.
This will create a temporary build dir if the name of the requirement
isn't known yet.
:param parent_dir: The ideal pip parent_dir for the source_dir.
Generally src_dir for editables and build_dir for sdists.
:return: self.source_dir
"""
if self.source_dir is None:
self.source_dir = self.build_location(parent_dir)
return self.source_dir
    def get_install_args(self, global_options, record_filename, root, prefix):
        """Build the argv list for the ``setup.py install`` subprocess.

        :param global_options: options placed before the ``install`` command.
        :param record_filename: path for the ``--record`` output file.
        :param root: optional ``--root`` directory.
        :param prefix: optional ``--prefix`` directory.
        :return: list of command-line arguments (starting with the current
            interpreter, unbuffered, running the setuptools shim).
        """
        install_args = [sys.executable, "-u"]
        install_args.append('-c')
        install_args.append(SETUPTOOLS_SHIM % self.setup_py)
        install_args += list(global_options) + \
            ['install', '--record', record_filename]
        if not self.as_egg:
            install_args += ['--single-version-externally-managed']
        if root is not None:
            install_args += ['--root', root]
        if prefix is not None:
            install_args += ['--prefix', prefix]
        if self.pycompile:
            install_args += ["--compile"]
        else:
            install_args += ["--no-compile"]
        if running_under_virtualenv():
            # Headers go into a per-package site include dir inside the venv.
            py_ver_str = 'python' + sysconfig.get_python_version()
            install_args += ['--install-headers',
                             os.path.join(sys.prefix, 'include', 'site',
                                          py_ver_str, self.name)]
        return install_args
def remove_temporary_source(self):
"""Remove the source files from this requirement, if they are marked
for deletion"""
if self.source_dir and os.path.exists(
os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)):
logger.debug('Removing source in %s', self.source_dir)
rmtree(self.source_dir)
self.source_dir = None
if self._temp_build_dir and os.path.exists(self._temp_build_dir):
rmtree(self._temp_build_dir)
self._temp_build_dir = None
    def install_editable(self, install_options,
                         global_options=(), prefix=None):
        """Install this requirement in editable mode via
        ``setup.py develop --no-deps``.

        :param install_options: extra options for the develop command.
        :param global_options: global setup.py options.
        :param prefix: optional installation prefix (passed as --prefix).
        """
        logger.info('Running setup.py develop for %s', self.name)
        if self.isolated:
            global_options = list(global_options) + ["--no-user-cfg"]
        if prefix:
            prefix_param = ['--prefix={0}'.format(prefix)]
            install_options = list(install_options) + prefix_param
        with indent_log():
            # FIXME: should we do --install-headers here too?
            call_subprocess(
                [
                    sys.executable,
                    '-c',
                    SETUPTOOLS_SHIM % self.setup_py
                ] +
                list(global_options) +
                ['develop', '--no-deps'] +
                list(install_options),
                cwd=self.setup_py_dir,
                show_stdout=False)
        self.install_succeeded = True
    def check_if_exists(self):
        """Find an installed distribution that satisfies or conflicts
        with this requirement, and set self.satisfied_by or
        self.conflicts_with appropriately.

        :return: True when an installed distribution was found (either
            satisfying or conflicting), False otherwise.
        """
        if self.req is None:
            return False
        try:
            # get_distribution() will resolve the entire list of requirements
            # anyway, and we've already determined that we need the requirement
            # in question, so strip the marker so that we don't try to
            # evaluate it.
            no_marker = Requirement(str(self.req))
            no_marker.marker = None
            self.satisfied_by = pkg_resources.get_distribution(str(no_marker))
            if self.editable and self.satisfied_by:
                self.conflicts_with = self.satisfied_by
                # when installing editables, nothing pre-existing should ever
                # satisfy
                self.satisfied_by = None
            return True
        except pkg_resources.DistributionNotFound:
            return False
        except pkg_resources.VersionConflict:
            # A different version is installed; record it as a conflict
            # (subject to user-site / virtualenv precedence rules below).
            existing_dist = pkg_resources.get_distribution(
                self.req.name
            )
            if self.use_user_site:
                if dist_in_usersite(existing_dist):
                    self.conflicts_with = existing_dist
                elif (running_under_virtualenv() and
                        dist_in_site_packages(existing_dist)):
                    raise InstallationError(
                        "Will not install to the user site because it will "
                        "lack sys.path precedence to %s in %s" %
                        (existing_dist.project_name, existing_dist.location)
                    )
            else:
                self.conflicts_with = existing_dist
            return True
    @property
    def is_wheel(self):
        # True when this requirement's link points at a wheel file;
        # falsy (None) when there is no link at all.
        return self.link and self.link.is_wheel
    def move_wheel_files(self, wheeldir, root=None, prefix=None):
        """Install this requirement's unpacked wheel contents from
        *wheeldir*, delegating to the module-level ``move_wheel_files``
        helper with this requirement's install settings."""
        move_wheel_files(
            self.name, self.req, wheeldir,
            user=self.use_user_site,
            home=self.target_dir,
            root=root,
            prefix=prefix,
            pycompile=self.pycompile,
            isolated=self.isolated,
        )
def get_dist(self):
"""Return a pkg_resources.Distribution built from self.egg_info_path"""
egg_info = self.egg_info_path('').rstrip('/')
base_dir = os.path.dirname(egg_info)
metadata = pkg_resources.PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
return pkg_resources.Distribution(
os.path.dirname(egg_info),
project_name=dist_name,
metadata=metadata)
    @property
    def has_hash_options(self):
        """Return whether any known-good hashes are specified as options.
        These activate --require-hashes mode; hashes specified as part of a
        URL do not.
        """
        # Empty dict default keeps this safe when no hashes were given.
        return bool(self.options.get('hashes', {}))
def hashes(self, trust_internet=True):
"""Return a hash-comparer that considers my option- and URL-based
hashes to be known-good.
Hashes in URLs--ones embedded in the requirements file, not ones
downloaded from an index server--are almost peers with ones from
flags. They satisfy --require-hashes (whether it was implicitly or
explicitly activated) but do not activate it. md5 and sha224 are not
allowed in flags, which should nudge people toward good algos. We
always OR all hashes together, even ones from URLs.
:param trust_internet: Whether to trust URL-based (#md5=...) hashes
downloaded from the internet, as by populate_link()
"""
good_hashes = self.options.get('hashes', {}).copy()
link = self.link if trust_internet else self.original_link
if link and link.hash:
good_hashes.setdefault(link.hash_name, []).append(link.hash)
return Hashes(good_hashes)
def _strip_postfix(req):
"""
Strip req postfix ( -dev, 0.2, etc )
"""
# FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req
def parse_editable(editable_req, default_vcs=None):
    """Parses an editable requirement into:
        - a requirement name
        - an URL
        - extras
        - editable options
    Accepted requirements:
        svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
        .[some_extra]
    """
    from pip.index import Link

    url = editable_req
    extras = None

    # If a file path is specified with extras, strip off the extras.
    m = re.match(r'^(.+)(\[[^\]]+\])$', url)
    if m:
        url_no_extras = m.group(1)
        extras = m.group(2)
    else:
        url_no_extras = url

    if os.path.isdir(url_no_extras):
        if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
            raise InstallationError(
                "Directory %r is not installable. File 'setup.py' not found." %
                url_no_extras
            )
        # Treating it as code that has already been checked out
        url_no_extras = path_to_url(url_no_extras)

    if url_no_extras.lower().startswith('file:'):
        package_name = Link(url_no_extras).egg_fragment
        if extras:
            return (
                package_name,
                url_no_extras,
                Requirement("placeholder" + extras.lower()).extras,
            )
        else:
            return package_name, url_no_extras, None

    # Prefix a bare VCS URL ("git:...") with its scheme ("git+git:...").
    for version_control in vcs:
        if url.lower().startswith('%s:' % version_control):
            url = '%s+%s' % (version_control, url)
            break

    if '+' not in url:
        if default_vcs:
            warnings.warn(
                "--default-vcs has been deprecated and will be removed in "
                "the future.",
                RemovedInPip10Warning,
            )
            url = default_vcs + '+' + url
        else:
            raise InstallationError(
                '%s should either be a path to a local project or a VCS url '
                'beginning with svn+, git+, hg+, or bzr+' %
                editable_req
            )

    vc_type = url.split('+', 1)[0].lower()

    if not vcs.get_backend(vc_type):
        error_message = 'For --editable=%s only ' % editable_req + \
            ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
            ' is currently supported'
        raise InstallationError(error_message)

    package_name = Link(url).egg_fragment
    if not package_name:
        # A VCS editable URL must carry an #egg= fragment so the
        # requirement name can be determined.  (The original code had a
        # second, identical-and-unreachable "if not package_name" raise
        # immediately after this one; it has been removed.)
        raise InstallationError(
            "Could not detect requirement name, please specify one with #egg="
        )
    return _strip_postfix(package_name), url, None
| [
"andyhidesds@gmail.com"
] | andyhidesds@gmail.com |
3a5c4082a2528983782135ceb2a79d92981409e9 | 369e7b1d96ae70a6aea75cdce577ce6091a95672 | /MessageSubscriber.py | 98a80005f2ef1b80ba2fbb74d7b34b7f61f5d979 | [] | no_license | vladmosin/RabbitMQChat | 5ca756ee1d917db0b8923a2f9160ac98a9f51fa3 | 9814b3f6a318af2137053cf516d5e10b0424a458 | refs/heads/master | 2022-04-24T22:58:58.117423 | 2020-04-30T12:15:14 | 2020-04-30T12:15:14 | 259,902,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from abc import abstractmethod
class MessageSubscriber:
    """Interface for objects that receive chat messages.

    NOTE(review): the class does not inherit from ``abc.ABC`` (nor use
    ``ABCMeta``), so ``@abstractmethod`` is not enforced at instantiation
    time -- subclasses are merely expected to override the method.
    """
    @abstractmethod
    def receive_message(self, text, channel):
        """Handle an incoming message *text* delivered on *channel*."""
        pass
| [
"surkovmax007@mail.ru"
] | surkovmax007@mail.ru |
9935830816782ca4bbe14f5537a51ca72ff16bc6 | b109001ec3ca8aa4b2cfc4d4520d8644c58ad5e0 | /navigation/Mappers.py | e6b134df0a24b3ea97c7ed69c07d70c972f65cf3 | [] | no_license | Chandanpanda/navigation-benchmark | b3e25e3672150413299a3d2566ad601156317acf | d83431d6648ac1147f53056ed32ce2caae4f702d | refs/heads/master | 2021-10-24T04:42:56.436909 | 2019-01-31T12:43:48 | 2019-01-31T12:43:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,626 | py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import ceil,floor
import math
from .Reprojection import getMapSizeInCells, project2dPClIntoWorldMap, ReprojectLocal2Global
def DepthToLocal3D(depth, fx, fy, cx, cy):
    r"""Projects depth map to 3d point cloud
    with origin in the camera focus

    :param depth: (h, w) depth tensor (singleton dims are squeezed away).
    :param fx, fy: focal lengths in pixels.
    :param cx, cy: principal point coordinates in pixels.
    :return: (h*w, 3) tensor of [x, y, z] camera-frame points, ordered
        to match ``depth.t().flatten()`` (column-major over the image).
    """
    device = depth.device
    h, w = depth.squeeze().size()
    # (Removed unused local ``npts = h*w`` from the original.)
    x = torch.linspace(0, w - 1, w).to(device)
    y = torch.linspace(0, h - 1, h).to(device)
    # NOTE: relies on meshgrid's default 'ij' indexing; xv varies along
    # the first axis (image columns after the transpose below).
    xv, yv = torch.meshgrid([x, y])
    dfl = depth.t().flatten()
    return torch.cat([(dfl * (xv.flatten() - cx) / fx).unsqueeze(-1),  # x
                      (dfl * (yv.flatten() - cy) / fy).unsqueeze(-1),  # y
                      dfl.unsqueeze(-1)], dim=1)  # z
def pointCloud2ObstaclesNonDifferentiable(pts3D,
                                          map_size = 40,
                                          cell_size = 0.2):
    r"""Counts number of 3d points in 2d map cell
    height is sum-pooled.

    :param pts3D: (N, 3) tensor of world-frame points.
    :param map_size: map side length in meters.
    :param cell_size: cell side length in meters.
    :return: (cells, cells) float tensor of per-cell point counts.
    """
    device = pts3D.device
    map_size_in_cells = getMapSizeInCells(map_size,cell_size) - 1
    init_map = torch.zeros((map_size_in_cells,map_size_in_cells), device = device)
    # Too few points to form any obstacle evidence: return the empty map.
    if len(pts3D) <= 1:
        return init_map
    num_pts,dim = pts3D.size()
    # Keep only the (z, x) ground-plane coordinates for the 2d map.
    pts2D = torch.cat([pts3D[:,2:3],pts3D[:,0:1]], dim = 1)
    data_idxs = torch.round(project2dPClIntoWorldMap(pts2D, map_size, cell_size))
    if len(data_idxs) > 10:
        # Non-differentiable counting via numpy unique; each unique cell
        # index gets its point count written into the map.
        u, counts = np.unique(data_idxs.detach().cpu().numpy(), axis=0, return_counts = True)
        init_map[u[:,0],u[:,1] ] = torch.from_numpy(counts).to(dtype=torch.float32, device=device)
    return init_map
class DirectDepthMapper(nn.Module):
    r"""Estimates obstacle map given the depth image
    ToDo: replace numpy histogram counting with differentiable
    pytorch soft count like in
    https://papers.nips.cc/paper/7545-unsupervised-learning-of-shape-and-pose-with-differentiable-point-clouds.pdf
    """
    def __init__(self,
                 #fx = 0,
                 #fy = 0,
                 #cx = 0,
                 #cy = 0,
                 camera_height = 0,
                 near_th = 0.1, far_th = 4.0, h_min = 0.0, h_max = 1.0,
                 map_size = 40, map_cell_size = 0.1,
                 device = torch.device('cpu'),
                 **kwargs):
        # near_th/far_th: depth range (meters) kept for mapping.
        # h_min/h_max: world-height band (meters) treated as obstacles.
        # map_size / map_cell_size: map extent and resolution in meters.
        super(DirectDepthMapper, self).__init__()
        self.device = device
        #self.fx = fx
        #self.fy = fy
        #self.cx = cx
        #self.cy = cy
        self.near_th = near_th
        self.far_th = far_th
        self.h_min_th = h_min
        self.h_max_th = h_max
        self.camera_height = camera_height
        self.map_size_meters = map_size
        self.map_cell_size = map_cell_size
        return
    def forward(self, depth, pose = torch.eye(4).float()):
        """Project *depth* (h, w) through camera pose *pose* (4x4) into a
        2d obstacle-count map.

        NOTE(review): the default ``pose`` tensor is created once at
        definition time and shared across calls -- confirm callers never
        mutate it in place.
        """
        self.device = depth.device
        #Works for FOV = 45 degrees in minos/sensors.yml. Should be adjusted, if FOV changed
        self.fx = float(depth.size(1))# / 2.0
        self.fy = float(depth.size(0))# / 2.0
        self.cx = int(self.fx)//2 - 1
        self.cy = int(self.fy)//2 - 1
        pose = pose.to(self.device)
        local_3d_pcl = DepthToLocal3D(depth, self.fx, self.fy, self.cx, self.cy)
        # Keep points whose depth lies within [near_th, far_th).
        idxs = (torch.abs(local_3d_pcl[:,2]) < self.far_th) * (torch.abs(local_3d_pcl[:,2]) >= self.near_th)
        survived_points = local_3d_pcl[idxs]
        if len(survived_points) < 20:
            # Too few valid points: return an empty map.
            map_size_in_cells = getMapSizeInCells(self.map_size_meters,self.map_cell_size) - 1
            init_map = torch.zeros((map_size_in_cells,map_size_in_cells), device = self.device)
            return init_map
        global_3d_pcl = ReprojectLocal2Global(survived_points, pose)[:,:3]
        #Because originally y looks down and from agent camera height
        global_3d_pcl[:,1] = -global_3d_pcl[:,1] + self.camera_height
        # Keep only points within the obstacle height band.
        idxs = (global_3d_pcl[:,1] > self.h_min_th) * (global_3d_pcl[:,1] < self.h_max_th)
        global_3d_pcl = global_3d_pcl[idxs]
        obstacle_map = pointCloud2ObstaclesNonDifferentiable(
            global_3d_pcl,
            self.map_size_meters,
            self.map_cell_size)
        return obstacle_map
class SparseDepthMapper(nn.Module):
    r"""Estimates obstacle map given the 3d points from ORBSLAM
    Does not work well.
    """
    def __init__(self,
                 fx = 0,
                 fy = 0,
                 cx = 0,
                 cy = 0,
                 camera_height = 0,
                 near_th = 0.1, far_th = 4.0, h_min = 0.0, h_max = 1.0,
                 map_size = 40, map_cell_size = 0.1,
                 device = torch.device('cpu'),
                 **kwargs):
        # NOTE(review): fx/fy/cx/cy, near_th/far_th and camera_height are
        # stored but never used in forward() below -- likely vestigial.
        super(SparseDepthMapper, self).__init__()
        self.device = device
        self.fx = fx
        self.fy = fy
        self.cx = cx
        self.cy = cy
        self.near_th = near_th
        self.far_th = far_th
        self.h_min_th = h_min
        self.h_max_th = h_max
        self.camera_height = camera_height
        self.map_size_meters = map_size
        self.map_cell_size = map_cell_size
        return
    def forward(self, sparse_depth, pose = torch.eye(4).float()):
        # sparse_depth: (N, 3) world-frame points from SLAM.
        # NOTE(review): ``pose`` is accepted but unused, and the input
        # tensor's y column is negated *in place* -- callers' tensors are
        # mutated; confirm this is intentional.
        global_3d_pcl = sparse_depth
        #Because originally y looks down and from agent camera height
        global_3d_pcl[:,1] = -global_3d_pcl[:,1]# + self.camera_height
        # Keep only points within the obstacle height band.
        idxs = (global_3d_pcl[:,1] > self.h_min_th) * (global_3d_pcl[:,1] < self.h_max_th)
        global_3d_pcl = global_3d_pcl[idxs]
        obstacle_map = pointCloud2ObstaclesNonDifferentiable(
            global_3d_pcl,
            self.map_size_meters,
            self.map_cell_size)
return obstacle_map | [
"ducha.aiki@gmail.com"
] | ducha.aiki@gmail.com |
9c6b3aec40fc686e3dfec87ca54b17cfe5471915 | 16378afe654be057bb039159eba01f93f3bfb19e | /gui.py | 36b3887bdf0a9ea319f61855611c8a2162be696c | [] | no_license | NataliyaDemyanenko/Goldenapp-10-02-2021 | 3d75af84dfc8ea3ec7dad81c8c6e1c3eabbeae71 | eede268331ab0ecde5025af8cd73abec434b0e39 | refs/heads/main | 2023-03-03T18:55:17.386850 | 2021-02-10T17:22:43 | 2021-02-10T17:22:43 | 337,798,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,255 | py | #!/usr/bin/env python3
import tkinter as tk
from tkinter import filedialog
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import asksaveasfilename
from tkinter import *
import pickle
from goldenapp3 import *
entries={}
entry={}
entry_var={}
errmsg = 'Error!'
def load_file():
    """Ask the user for a saved entries file and load it into the global
    ``entries`` dict; missing cells default to the empty string."""
    filename = askopenfilename()
    try:
        # Use a context manager so the file handle is always closed
        # (the original leaked the handle from pickle.load(open(...))).
        with open(filename, "rb") as f:
            ent = pickle.load(f)
    except (OSError, pickle.UnpicklingError, EOFError):
        # Cancelled dialog, unreadable file, or corrupt pickle: start blank.
        # (Narrowed from a bare ``except:`` that hid programming errors.)
        ent = {}
    if not isinstance(ent, dict):
        ent = {}
    for k in range(0, 27):
        for j in range(0, 5):
            entries[k, j] = ent.get((k, j), '')
def save_file():
    """Prompt for a destination and pickle the current grid values to it,
    also writing a backup copy to ``save.p`` in the working directory."""
    filename = asksaveasfilename()
    for k in range(0, 27):
        for j in range(0, 5):
            entries[k, j] = entry_var[k, j].get()
    # Context managers close the handles deterministically (the original
    # ``pickle.dump(entries, open(...))`` leaked both file objects).
    with open(filename, "wb") as f:
        pickle.dump(entries, f)
    with open('save.p', "wb") as f:
        pickle.dump(entries, f)
def populate():
    """Load saved entries and (re)build the grid of Entry widgets bound
    to ``entry_var`` StringVars.

    NOTE(review): ``.grid()`` returns None, so ``entry[...]`` stores None
    rather than the widget -- confirm the widgets never need retrieval.
    """
    load_file()
    # Row 0/1 (country and poll) share entry_var[0, j] slots.
    for j in range(0,5):
        entry_var[0,j] = tk.StringVar(root, entries[0,j])
    entry[0,0] = tk.Entry(root, width=10, textvariable=entry_var[0,0]).grid(row=0,column=1)
    entry[1,0] = tk.Entry(root, width=10, textvariable=entry_var[0,1]).grid(row=1,column=1)
    # Rows 3..28 hold the 26 party rows (A..Z), 5 columns each.
    for k in range(1,27):
        for j in range(0,5):
            entry_var[k,j] = tk.StringVar(root, entries[k,j])
            entry[k,j] = tk.Entry(root, width=10, textvariable=entry_var[k,j]).grid(row=k+2,column=j+1)
# set the WM_CLASS
root = Tk(className="Goldenapp")
# set the window title
root.wm_title("GoldenApp Political Analytics Tool")
# Header labels: first two rows plus the five data-column headings.
tk.Label(root, text="Country").grid(row=0)
tk.Label(root, text="Poll").grid(row=1)
tk.Label(root, text="Party Name").grid(row=2, column=1)
tk.Label(root, text="Party Label").grid(row=2, column=2)
tk.Label(root, text="Party Color").grid(row=2, column=3)
tk.Label(root, text="Seats Proportion").grid(row=2, column=4)
tk.Label(root, text="Distance").grid(row=2, column=5)
# Row labels A..Z for the 26 party rows (grid rows 3..28).
tk.Label(root, text="A").grid(row=3)
tk.Label(root, text="B").grid(row=4)
tk.Label(root, text="C").grid(row=5)
tk.Label(root, text="D").grid(row=6)
tk.Label(root, text="E").grid(row=7)
tk.Label(root, text="F").grid(row=8)
tk.Label(root, text="G").grid(row=9)
tk.Label(root, text="H").grid(row=10)
tk.Label(root, text="I").grid(row=11)
tk.Label(root, text="J").grid(row=12)
tk.Label(root, text="K").grid(row=13)
tk.Label(root, text="L").grid(row=14)
tk.Label(root, text="M").grid(row=15)
tk.Label(root, text="N").grid(row=16)
tk.Label(root, text="O").grid(row=17)
tk.Label(root, text="P").grid(row=18)
tk.Label(root, text="Q").grid(row=19)
tk.Label(root, text="R").grid(row=20)
tk.Label(root, text="S").grid(row=21)
tk.Label(root, text="T").grid(row=22)
tk.Label(root, text="U").grid(row=23)
tk.Label(root, text="V").grid(row=24)
tk.Label(root, text="W").grid(row=25)
tk.Label(root, text="X").grid(row=26)
tk.Label(root, text="Y").grid(row=27)
tk.Label(root, text="Z").grid(row=28)
# Build the entry grid from the last-saved data, then the action buttons.
populate()
entry[28] = tk.Button(root, text='Load', command= lambda:populate()).grid(row=30,column=0)
entry[29] = tk.Button(root, text='Save', command= lambda:save_file()).grid(row=30,column=1)
entry[30] = tk.Button(root, text='Run', command= lambda:goldenapp()).grid(row=30,column=2)
entry[31] = tk.Button(root, text='Quit', command= root.quit).grid(row=30,column=3)
root.mainloop()
| [
"noreply@github.com"
] | NataliyaDemyanenko.noreply@github.com |
f38fe04087f81b2c9fb2f420c7a6a4e634999f91 | 46595ed0e3943d30a149c3599f11f8e2e604f968 | /ver1_0/openassembly/pirate_messages/tasks.py | f7feb6c5633e4346afbd8bd79b8fd28b214d48ea | [] | no_license | W3SS/Open-Assembly | 7242f823c33849883168d1963c8dcb6901cb76c0 | e9679ff5e7ae9881fa5781d763288ed2f40b014d | refs/heads/master | 2021-05-08T15:34:32.943969 | 2012-12-11T20:37:32 | 2012-12-11T20:37:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | from celery.task import task
@task(ignore_results=True)
def set_to_read(notes):
    """Mark every notification in *notes* as read and persist each one."""
    for note in notes:
        note.is_read = True
        note.save()
| [
"fragro@fragro-computa.(none)"
] | fragro@fragro-computa.(none) |
d399b2d3a8ff12446dacbf96a4e46f7b8f5d2e92 | 52555a17cdb6058565696585c978c9012b0bfad7 | /examples/synthetic/park2_4/park2_4_mf.py | b8d64549b8c4f770d2f4fd70d7fcabdc1ba4bee4 | [
"MIT"
] | permissive | kirthevasank/dragonfly | 8685d6aff272bd262d9b47c455fc1f1dc77a42aa | 8e09d5ba602d14922455bf09bdd4ca0fa09ef3ee | refs/heads/master | 2020-05-02T00:38:35.252889 | 2019-05-17T03:40:23 | 2019-05-17T03:40:23 | 177,675,339 | 3 | 0 | MIT | 2019-05-06T04:07:41 | 2019-03-25T22:39:37 | Python | UTF-8 | Python | false | false | 489 | py | """
Parkd function with multi-fidelity.
-- kandasamy@cs.cmu.edu
"""
# pylint: disable=invalid-name
from park2_4 import park2_4_z
# Write a function like this called 'obj'.
def park2_4_mf(z, x):
    """ Computes the Parkd function. """
    # Delegates to the single-fidelity park2_4_z with the scalar
    # fidelity value z[0].
    return park2_4_z(z[0], x)
def objective(z, x):
    """ Objective. """
    # Thin alias over the multi-fidelity Parkd function.
    return park2_4_mf(z, x)
def cost(z):
    """ Cost function: a 0.05 base cost plus a term growing as the
    1.5th power of the fidelity value z[0]. """
    fidelity = z[0]
    return 0.05 + 0.95 * fidelity ** 1.5
def main(z, x):
    """ main function. """
    # Returns (objective value, evaluation cost) for fidelity z, point x.
    return park2_4_mf(z, x), cost(z)
| [
"kandasamy@cs.cmu.edu"
] | kandasamy@cs.cmu.edu |
1f5cf0f82724f2a88bf1b3f3fb976a08820ae79d | 0324b2869c0bb89ae294040a97d811e6bea938e9 | /code/Mail_to_multi_receiver/Csv2Email.py | 1b57da3914e05f3353db5eb4a33bf09a797d67c7 | [] | no_license | yohee2015/ibszjgsu.github.io | 240dc88f4353685705be998eb3e108c691b0c71d | 4b6dfc57149aa438794aa49aef21ffb2e94649ac | refs/heads/master | 2020-03-19T11:15:18.432254 | 2018-06-04T14:42:37 | 2018-06-04T14:42:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,024 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 30 23:15:52 2018
@author: NI He
"""
import smtplib
from email.mime.text import MIMEText
import os
import re
import mysql.connector
import time
import csv
def table_exist(tab_name):
    """Return True if a table named *tab_name* exists in the current
    database (uses the module-level cursor ``cur``)."""
    cur.execute('show tables')  # list all tables in the current database
    # Each fetched row is a 1-tuple holding a table name; compare against
    # those names directly.  The original regex-scanned str(fetchall())
    # with r'\w*\w*', which matched arbitrary word fragments (and empty
    # strings), not just table names.
    tabnames = [row[0] for row in cur.fetchall()]
    return tab_name in tabnames
#==============================================================================
#mail_host="smtp.163.com" #使用的邮箱的smtp服务器地址,这里是163的smtp地址
#mail_user="xinihe" #用户名
#mail_pass=input('Please enter the password of the sending mailbox:') #密码
#mail_postfix="163.com" #邮箱的后缀,网易就是163.com
#==============================================================================
mail_host="mail.zjgsu.edu.cn" #使用的邮箱的smtp服务器地址,这里是163的smtp地址
mail_user="recruit.ibs" #用户名
mail_pass="$Ibs11031103" #密码
mail_postfix="zjgsu.edu.cn" #邮箱的后缀,网易就是163.com
#==============================================================================
#
def send_mail(to_list, sub, content):
    """Send a plain-text mail with subject *sub* and body *content* to
    *to_list* via the module-configured SMTP account.

    :return: True on success, False on any SMTP/network failure.
    """
    me = ("International Business School in Zhejiang Gongshang Univ "
          "<" + mail_user + "@" + mail_postfix + ">")
    msg = MIMEText(content, _subtype='plain')
    msg['Subject'] = sub
    msg['From'] = me
    msg['To'] = to_list  # recipient list, ';'-separated
    try:
        server = smtplib.SMTP()
        server.connect(mail_host, 25)  # connect to the server
        server.login(mail_user, mail_pass)  # log in
        try:
            server.sendmail(me, to_list, msg.as_string())
        finally:
            # Close the connection even when sendmail raises.
            server.close()
        return True
    except (smtplib.SMTPException, OSError):
        # Narrowed from a bare ``except:``: only network/SMTP errors mean
        # "sending failed"; programming errors now propagate.
        return False
#==============================================================================
f = open(os.getcwd()+'\\phdcontent.txt','r') # 读取正文内容
mailcontent = f.read()
f.close()
f = open(os.getcwd()+'\\sub.txt','r') # 读取邮件主题
mailsub = f.read()
f.close()
# Log
flog = open(os.getcwd() + '\\log.txt', 'a+') # 读取日志内容
flog.writelines('\n \n Date Updating Log on ' + time.strftime('%Y-%m-%d',time.localtime(time.time())) + '\n')
flog.writelines('Start from: '+ time.strftime('%H:%M:%S',time.localtime(time.time())) + '\n')
#
#==============================================================================
'''
Use information from Database and update another table
'''
# NOTE(review): root credentials hard-coded in source -- move to env/config.
conn = mysql.connector.connect(host="10.23.0.2",port=3306,user="root",\
    password= '11031103',database="journalcontact",charset="utf8")
cur = conn.cursor()
# Create the tracking table on first run only.
if not table_exist('rec_email_univ'):
    #build a new table named by the journal title
    sql_new = "create table rec_email_univ (id int not null unique auto_increment, name varchar(100) Null,"
    sql_new+="email varchar(100) Null,"
    sql_new+="response int Null,"
    sql_new+="country varchar(500) Null,"
    sql_new+="university varchar(500) Null,"
    sql_new+="major varchar(1000) Null,"
    sql_new+="year varchar(100) Null,"
    sql_new+="attempt varchar(1000) Null,"
    sql_new+="primary key(id))"
    cur.execute(sql_new)
    conn.commit()
#==============================================================================
'''
Load information from CSV files
'''
mailto_list = []
rec_name = []
rec_univ = []
rec_major = []
f = csv.reader(open(os.getcwd()+'\\phdlist.csv')) # recipient list: name/e-mail/affiliation columns
for rows in f:
    mailto_list.append(rows[1])
    rec_name.append(rows[0])
    rec_univ.append(rows[5])
    rec_major.append(rows[6])
#
mailto_list.pop(0) # drop the first (header/index) row
rec_name.pop(0)
rec_univ.pop(0)
rec_major.pop(0)
# For each recipient, check below whether mail was already sent.
#sql_select = "select * from email_jour_auth3"
#cur.execute(sql_select)
#info = cur.fetchall()
#=======================
suc = 0   # count of successful sends
fails = 0 # count of failed sends
# Main loop: mail every CSV recipient not already present in rec_email_univ.
for i in range(len(rec_name)):
    # Personalise the template: the text around the 'XXX' placeholder wraps
    # the recipient's surname (last whitespace-separated token of the name).
    content = mailcontent.split('XXX')[0] + rec_name[i].split(' ')[-1] + mailcontent.split('XXX')[1]
#    content = 'Dear Dr. ' + rec_name[i].split(' ')[-1]+'\n' + mailcontent # alternative greeting variant
    # Check if the author has been in touched (No. 2)
    # Lookup key in the sent table: "Lastname,Firstname".
    # NOTE(review): for a single-token name, split(' ')[:-1] is empty and the
    # trailing [0] raises IndexError -- confirm all names have >= 2 tokens.
    receiver = rec_name[i].split(' ')[-1] + ',' + rec_name[i].split(' ')[:-1][0]
#    now_name = info[i][1].split('\'')[1]
#    sql_select = "select * from rec_email2"
#    cur.execute(sql_select)
#    rec_info = cur.fetchall()
    # NOTE(review): SQL built by string interpolation -- breaks (and is
    # injectable) if a name contains quotes; prefer a parameterised query.
    sql_find = 'select * from rec_email_univ where rec_email_univ.name=\"%s\"'%receiver
#    sql_find = 'select * from rec_email3, rec_email_univ where rec_email3.author= %s or rec_email_univ.name= %s'
#    cur.execute(sql_find, (receiver, receiver))
    cur.execute(sql_find)
    cnt = cur.fetchone()
    time.sleep(10) # throttle: wait 10 seconds between recipients
    if(cnt == None): # no mail was sent to this recipient before
        # send, then record this recipient in the sent table
        if send_mail(mailto_list[i],mailsub,content):
            print("Mail sent to "+mailto_list[i]+' successfully!')
            suc = suc + 1
            # update the table No. 3
            sql_add = "insert into rec_email_univ(name,email,university,attempt)values("
            sql_add+="\"%s\","%receiver # author name
            sql_add+="\"%s\","%mailto_list[i] # email address
#            sql_add+="\"1\"," # Num of attempts
            sql_add+="\"%s\","%rec_univ[i] #university
#            sql_add+="\"%s\","%rec_major[i] # major
#            sql_add+="\"%s\","%info[i][6].split('\'')[1] # country
#            sql_add+="\"%s\","%info[i][7].split('\'')[1] # journal
#            sql_add+="\"%s\","%info[i][8].split('\'')[1] # citation
#            sql_add+="\"%s\","%info[i][9].split('\'')[1] # volume
#            sql_add+="\"%s\","%info[i][10].split('\'')[1] # year
#            sql_add+="\"%s\","%info[i][11].split('\'')[1] # title
            sql_add+="\"%s\")"%time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())) # last attempt date
            cur.execute(sql_add)
            conn.commit()
        else:
            print("failed to be received by "+mailto_list[i]+'!')
            fails = fails + 1
            flog.writelines('failed to be received by '+mailto_list[i]+'!' + '\n')
# #==============================================================================
# Final summary written to the run log.
flog.writelines('In total, there is '+ str(suc) + ' messages has been sent successfully while ' +str(fails)+ ' messages can not be sent. \n')
flog.writelines('End at: '+ time.strftime('%H:%M:%S',time.localtime(time.time())) + '\n')
#send_mail('ibs@zjgsu.edu.cn','Mail Log', flog.read())
flog.close()
#==============================================================================
| [
"ni.he@qq.com"
] | ni.he@qq.com |
50b6850399802b4c26d8204b660c997e56c67b3b | b4e4cd7eae81f27b006fc28f79631db3e894572f | /Budgetsystem/Budgetsystem/urls (2021_07_03 17_35_37 UTC).py | fa7ccfa6a1aed7eb645c59bc59ef56cb1e81632c | [] | no_license | mariachacko93/recipe-budget-bill | effe369c7a873b7e59e4e22cacb7e247fb44bfa7 | 6ad231febe9f3c837536067a9ddd096a2ae6a2bf | refs/heads/master | 2023-06-18T06:41:02.536377 | 2021-07-14T05:54:39 | 2021-07-14T05:54:39 | 385,829,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | """Budgetsystem URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
# URL routing table: the built-in admin site plus everything under /budget/
# delegated to the budget app's own URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path("budget/",include("budget.urls")),
]
| [
"you@example.com"
] | you@example.com |
bc778745a1b3a5fff9ef3615ce55a5d22f8d8c91 | d6a617eae82fa5a663ce2f514438a1e654880dde | /EPS-main/Model/migrations/0021_qqqqq.py | 377f072562435ecbe6704240017a617119e165f7 | [] | no_license | AbdoAbosamra/EPS-main | 25f9077ffdc2c0b98587bd8058999ed1a03c36e6 | eebb14215cd796886a0c552f6ae6f8058d43a16f | refs/heads/master | 2023-06-22T14:52:02.038821 | 2021-07-09T18:35:00 | 2021-07-09T18:35:00 | 369,046,913 | 0 | 0 | null | 2021-07-09T03:55:52 | 2021-05-20T01:37:18 | CSS | UTF-8 | Python | false | false | 765 | py | # Generated by Django 3.2 on 2021-04-19 21:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: recreates the QQQQQ model that migration
    # 0020 ('0020_delete_qqqqq') removed.

    dependencies = [
        ('Model', '0020_delete_qqqqq'),
    ]

    operations = [
        migrations.CreateModel(
            name='QQQQQ',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                # NOTE(review): 'Bady' looks like a typo for 'Body' -- kept as-is
                # because renaming a migration field would break migration history.
                ('Bady', models.CharField(max_length=1000, null=True)),
                ('Correct_Answer', models.CharField(max_length=20, null=True)),
                ('User_Answer', models.CharField(max_length=20, null=True)),
                ('Status', models.CharField(choices=[('Indoor', 'Indoor'), ('Out Door', 'Out Door')], max_length=200, null=True)),
            ],
        ),
    ]
| [
"67236632+AbdoAbosamra@users.noreply.github.com"
] | 67236632+AbdoAbosamra@users.noreply.github.com |
8028c10ee0079c4df8f7287c79b49d862b226f32 | 9e9a6bf6ac1912c03b20afb3a8712194b439a92f | /ex06.py | 720f73d71e971f077958a61f66a76ac52ca9624a | [] | no_license | seanflannery10/learn_python3_the_hard_way | 11a709872f8bedc3383541951269f250c680d893 | d64830882fd29250fdb9c7dfdf93f4d11578ce53 | refs/heads/master | 2020-05-16T05:53:53.670365 | 2019-05-08T22:31:23 | 2019-05-08T22:31:23 | 182,828,987 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | types_of_people = 10
# Learn Python the Hard Way, exercise 6: f-strings, str.format and
# plain string concatenation.
x = f"There are {types_of_people} type of people."
binary = "binary"
do_not = "don't"
y = f"Those who know {binary} and those who {do_not}."
print(x)
print(y)
# f-strings can embed previously formatted strings too.
print(f"I said: {x}")
print(f"I also said: '{y}'")
hilarious = False
# str.format fills the {} placeholder with str(hilarious).
joke_evaluation = "Isn't that joke so funny! {}"
print(joke_evaluation.format(hilarious))
# Concatenating two halves of a sentence with +.
w = "This is the left side of..."
e = "a string with a right side."
print (w + e)
| [
"seanflannery10@gmail.com"
] | seanflannery10@gmail.com |
1a2a244f5a7ffd2c4a3c4534e593dc75e9823e55 | 49b827bb587d50c5092837749a7d5b88c024e854 | /experiments/ACOSlite/HDF5_to_GeoJSON.py | 722867db0ee2e86786f2b64806e22f0365deda70 | [] | no_license | SpaceAppsXploration/oco-2-data-network | 7d836bf77cf79a5aac1cd22b02c75af316432b56 | 7d1fd709c7c219c83b7ea9f8075f7df46b460f23 | refs/heads/master | 2020-12-11T05:43:45.979066 | 2015-07-18T08:56:29 | 2015-07-18T08:56:29 | 34,137,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 15:23:47 2015
@author: jacopo
"""
import json
from pprint import pprint
import h5py
#
# TO DOs
#
# 1. Add the reference to Sensors ontology
# ACOS LITE file in the same directory
f = h5py.File('ACOSv3.4r02_L3_20100101_000000_20130515_000000.h5', libver='earliest')
# L3-gridded datasets: XCO2 values plus cell-centre and cell-boundary
# coordinates (layout assumed from the indexing below -- TODO confirm against
# the ACOS product documentation).
xco2 = f['xco2']
lon = f['lon']
lat = f['lat']
lon_bnds = f['lon_bnds']
lat_bnds = f['lat_bnds']
xco2_set = xco2[0,0,0,:]
# Build a GeoJSON FeatureCollection (RFC 7946). Fixes vs. the original:
#  * positions are [longitude, latitude] -- they were swapped;
#  * the polygon type name is "Polygon" (type names are case-sensitive);
#  * Polygon coordinates are a list of linear rings, each ring a closed
#    sequence of positions (first == last) -- it was a bare point list;
#  * values are cast to float() so json.dumps can serialise the HDF5 scalars.
geo = {
    "type": "FeatureCollection",
    "features": [
        {
            "type": "Feature",
            "geometry": {
                "type": "Point",
                "coordinates": [float(lon[0]), float(lat[0])]
            },
            "properties": {}
        },
        {
            "type": "Feature",
            "geometry": {
                "type": "Polygon",
                "coordinates": [[
                    [float(lon_bnds[0, 0]), float(lat_bnds[0, 0])],
                    [float(lon_bnds[0, 0]), float(lat_bnds[0, 1])],
                    [float(lon_bnds[0, 1]), float(lat_bnds[0, 1])],
                    [float(lon_bnds[0, 1]), float(lat_bnds[0, 0])],
                    [float(lon_bnds[0, 0]), float(lat_bnds[0, 0])]
                ]]
            },
            "properties": {
                "xco2": float(xco2_set[12])
            }
        }
    ]
}
#with open('geo.json', 'w') as outfile:
    #json.dump(geo, outfile)
# print a JSON with the quantity of xco2 for the given geometry
print(json.dumps(geo, indent=4))
| [
"tunedconsulting@gmail.com"
] | tunedconsulting@gmail.com |
de6f06a997f2d5589ece90ce63789b0322f7e068 | d290201b2897d0e2c65334c385b15b30281856d6 | /main/migrations/0004_auto_20191031_1607.py | 524d53104aef9ca9f3576e6f3c8c868e11f75424 | [] | no_license | atosorigin/fixture-scheduler | d7c52f9d632833dacec4148a21481aff3d3c507b | 9fad692d17a4e8811a5e7798b425b6877ffaf812 | refs/heads/dev | 2023-04-27T16:54:21.312664 | 2019-12-11T16:01:32 | 2019-12-11T16:01:32 | 227,348,826 | 0 | 0 | null | 2023-04-21T20:42:16 | 2019-12-11T11:22:55 | Python | UTF-8 | Python | false | false | 492 | py | # Generated by Django 2.2.6 on 2019-10-31 16:07
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for the `main` app: alters the
    # `tutorial_published` field of the `tutorial` model.

    dependencies = [
        ('main', '0003_auto_20191031_1601'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tutorial',
            name='tutorial_published',
            # NOTE(review): the default is a *fixed* timestamp, frozen at
            # makemigrations time -- this usually means the model passed a
            # called value (e.g. timezone.now()) instead of the callable
            # itself; every new row will default to this 2019 instant.
            field=models.DateTimeField(default=datetime.datetime(2019, 10, 31, 16, 7, 22, 44433), verbose_name='date published'),
        ),
    ]
| [
"terryn.booth@atos.net"
] | terryn.booth@atos.net |
696019e0846f9b0756fa1d3d7d8afce4fe155348 | 918d9d94935cdf91f15bd6b59afcac46d605d0c5 | /codecs/transformer-xl/pytorch/utils/adaptive_softmax.py | e5da0869e98c4c67370e4b2d835dbd7807d66a38 | [
"Apache-2.0"
] | permissive | TrellixVulnTeam/nlp_ME8J | 34b4154c6b0080b3458568f51c5fc8be9c42eaf5 | 339b41e25c925026f94ec4b284c035e81db62ba2 | refs/heads/master | 2023-03-16T12:33:54.535282 | 2020-08-22T15:02:15 | 2020-08-22T15:02:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,402 | py | from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class AdaptiveLogSoftmax(nn.Module):
    """Adaptive log-softmax criterion.

    The `n_classes` vocabulary is split at `cutoffs` into a head "shortlist"
    (ids < cutoffs[0]) plus tail clusters. The head scores shortlist tokens
    and one logit per cluster; each tail cluster's tokens are only scored for
    the examples whose target falls in that cluster. `forward` returns the
    per-example negative log-likelihood.
    """
    def __init__(self, in_features, n_classes, cutoffs, keep_order=False):
        # in_features: width of the incoming hidden vectors.
        # cutoffs: strictly increasing, unique integers in (0, n_classes-1).
        # keep_order: if True, NLLs are written back in input order.
        super(AdaptiveLogSoftmax, self).__init__()
        cutoffs = list(cutoffs)
        # Validate cutoffs: sorted, positive, unique, integral, below n_classes-1.
        if (cutoffs != sorted(cutoffs)) \
                or (min(cutoffs) <= 0) \
                or (max(cutoffs) >= (n_classes - 1)) \
                or (len(set(cutoffs)) != len(cutoffs)) \
                or any([int(c) != c for c in cutoffs]):
            raise ValueError("cutoffs should be a sequence of unique, positive "
                             "integers sorted in an increasing order, where "
                             "each value is between 1 and n_classes-1")
        self.in_features = in_features
        self.n_classes = n_classes
        self.cutoffs = cutoffs + [n_classes]
        self.shortlist_size = self.cutoffs[0]    # tokens scored directly by the head
        self.n_clusters = len(self.cutoffs) - 1  # number of tail clusters
        self.head_size = self.shortlist_size + self.n_clusters
        # One learned (weight, bias) row per tail cluster, concatenated onto
        # the head projection in forward().
        self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.in_features))
        self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.keep_order = keep_order
    def forward(self, hidden, target, weight, bias, keep_order=False):
        """Return the NLL of `target` for each row of `hidden`.

        weight/bias: the full output embedding (one row per class), sliced
        here into the shortlist part and the per-cluster tail parts.
        """
        if hidden.size(0) != target.size(0):
            raise RuntimeError('Input and target should have the same size '
                               'in the batch dimension.')
        # Head projection = shortlist rows + one row per cluster.
        head_weight = torch.cat(
            [weight[:self.shortlist_size], self.cluster_weight], dim=0)
        head_bias = torch.cat(
            [bias[:self.shortlist_size], self.cluster_bias], dim=0)
        head_logit = F.linear(hidden, head_weight, bias=head_bias)
        head_logprob = F.log_softmax(head_logit, dim=1)
        nll = torch.zeros_like(target,
                               dtype=hidden.dtype, device=hidden.device)
        offset = 0
        cutoff_values = [0] + self.cutoffs
        for i in range(len(cutoff_values) - 1):
            l_idx, h_idx = cutoff_values[i], cutoff_values[i + 1]
            # Select the examples whose target lies in this id range.
            mask_i = (target >= l_idx) & (target < h_idx)
            indices_i = mask_i.nonzero().squeeze()
            if indices_i.numel() == 0:
                continue
            target_i = target.index_select(0, indices_i) - l_idx
            head_logprob_i = head_logprob.index_select(0, indices_i)
            if i == 0:
                # Shortlist tokens: scored directly by the head.
                logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
            else:
                # Tail tokens: log P(cluster) + log P(token | cluster).
                weight_i = weight[l_idx:h_idx]
                bias_i = bias[l_idx:h_idx]
                hidden_i = hidden.index_select(0, indices_i)
                tail_logit_i = F.linear(hidden_i, weight_i, bias=bias_i)
                tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
                # NOTE(review): `-i` picks the cluster column counted from the
                # *end* of the head (segment i uses column n_clusters - i, not
                # i - 1); verify this matches the cluster_weight row layout
                # before reusing this module elsewhere.
                logprob_i = head_logprob_i[:, -i] \
                    + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
            if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
                # Preserve the input ordering of the NLL values.
                nll.index_copy_(0, indices_i, -logprob_i)
            else:
                # Pack the results contiguously, grouped by cluster.
                nll[offset:offset + logprob_i.size(0)].copy_(-logprob_i)
            offset += logprob_i.size(0)
        return nll
| [
"hjm15718800930@163.com"
] | hjm15718800930@163.com |
1481d8d1055944438faae5311e149a22bb41fc6a | 68fc65f2d27495ef251629c351018dfb9c67d2c5 | /janome/version.py | a60ed74569ae6bc1324171de1889dccfd7c2b2e8 | [
"NAIST-2003",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mocobeta/janome | 5f1b7ec2046d3916069ebeefce35ce98102c4205 | 9d82248e0c0815e367b9604d83ef0de198e017bc | refs/heads/master | 2023-07-12T22:48:47.528908 | 2023-07-01T11:31:23 | 2023-07-01T11:31:23 | 30,792,770 | 837 | 69 | Apache-2.0 | 2023-07-01T11:25:28 | 2015-02-14T09:47:00 | Python | UTF-8 | Python | false | false | 29 | py | JANOME_VERSION = '0.5.1-dev'
| [
"tomoko.uchida.1111@gmail.com"
] | tomoko.uchida.1111@gmail.com |
98d14c528f3d8a2f41a89520b83d1b12c47f106c | 4fb0bbd08babd2a4dd139dedcb31a0b2a1ddb79d | /setup.py | 126e921c18f22156681dc3e0259e061492553ef5 | [] | no_license | Abhijeet-Patil-GH/A-Simple-Files-Organizer | cec772de95525883e82f3b819c06d0fb4ecbe5f1 | 93a9a8be004b7c5876391fc72e60822d8d058c7d | refs/heads/main | 2023-02-28T12:29:31.894869 | 2021-02-04T09:51:53 | 2021-02-04T09:51:53 | 333,042,580 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | import cx_Freeze
import sys
import os
base = None
# sys.platform is "win32" on every Windows build -- including 64-bit ones --
# so the original `== 'win64'` comparison could never match and this whole
# branch (GUI base + Tcl/Tk paths) was silently skipped.
if sys.platform == 'win32':
    # "Win32GUI" is cx_Freeze's console-less base for GUI apps and is used
    # for 64-bit builds as well; "Win64GUI" is not a recognised base name.
    base = "Win32GUI"
    os.environ['TCL_LIBRARY'] = r"C:\Users\Abhijeet\AppData\Local\Programs\Python\Python38\tcl\tcl8.6"
    os.environ['TK_LIBRARY'] = r"C:\Users\Abhijeet\AppData\Local\Programs\Python\Python38\tcl\tk8.6"
executables = [cx_Freeze.Executable("simple_files_organizer.py", base=base, icon="icon.ico")]
cx_Freeze.setup(
name = "Simple Files Organizer",
options = {"build_exe": {"packages":["tkinter","os"], "include_files":["icon.ico",'tcl86t.dll','tk86t.dll']}},
version = "0.01",
description = "Tkinter Application",
executables = executables
)
| [
"shadyrick20@gmail.com"
] | shadyrick20@gmail.com |
b273893e978e13abeafced16eeeb0af79e1528b5 | 4eee9a7c01d0ed7499dfc69336155569b0768738 | /opencv-test.py | e2c1c3d6d57a2116ee544eed742b82a97b79c306 | [] | no_license | mfkiwl/stereo-camera-security | 54f7bc6e343c70ae4e51b8655b8814b79f7d97e2 | bb5d6a178b3bfc0998a5b4e916eb22ab309e44f7 | refs/heads/master | 2023-01-05T03:56:07.909301 | 2020-11-05T04:35:32 | 2020-11-05T04:35:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py | import argparse
import datetime
import imutils
import time
import cv2
import numpy as np
# Pre-recorded left/right stereo streams read from disk.
vLeft = cv2.VideoCapture("./left.mpeg")
vRight = cv2.VideoCapture("./right.mpeg")
def processFrame():
    """Grab one frame from each stereo stream, match SIFT features between
    them, and display the two grayscale frames.

    Returns False when either stream is exhausted, True otherwise.
    """
    frameLeft = vLeft.read()[1]
    frameRight = vRight.read()[1]
    if frameLeft is None or frameRight is None:
        return False
    # Work in grayscale: feature detection does not need colour.
    frameLeft = cv2.cvtColor(frameLeft, cv2.COLOR_BGR2GRAY)
    frameRight = cv2.cvtColor(frameRight, cv2.COLOR_BGR2GRAY)
    # SIFT keypoints + descriptors for both views. (cv2.SIFT() is the
    # OpenCV 2.x API; newer builds expose cv2.SIFT_create() instead.)
    sift = cv2.SIFT()
    kp1, des1 = sift.detectAndCompute(frameLeft, None)
    kp2, des2 = sift.detectAndCompute(frameRight, None)
    # FLANN matching with a KD-tree index; k=2 neighbours per descriptor so
    # Lowe's ratio test can be applied later.
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k = 2)
    # TODO: apply the ratio test to `matches` and collect surviving point
    # pairs; these lists are placeholders for that unfinished step.
    good = []
    pts1 = []
    pts2 = []
    cv2.imshow("Left", frameLeft)
    cv2.imshow("Right", frameRight)
    # Bug fix: the original also called cv2.imshow("Disparity", disp), but
    # `disp` was never defined anywhere, so every invocation raised NameError
    # before returning. Restore that window once a disparity map is computed.
    return True
# Original continuous-playback loop, left disabled in favour of processing a
# single frame pair below (pressing 'q' would have stopped it).
#while True:
# if not processFrame():
# break
# key = cv2.waitKey(16) & 0xFF
# if key == ord("q"):
# break
# Process one frame pair, then block until any key is pressed.
# NOTE(review): no cv2.destroyAllWindows()/VideoCapture.release() cleanup here.
processFrame()
cv2.waitKey(0)
| [
"whupdup@github.com"
] | whupdup@github.com |
8e08de6864ffcc30fe91ddd5cbed2453a66142e1 | e2e482acc7bc7c6539aa0f8f00168fab0bd9110f | /timer.py | edbf8b113dd7086059ff7c637b96e9ade22d3b8d | [] | no_license | mtizhoush/pacman_portals | 884a9b0b87862a553dd33ce13ecfbf095dbb688a | 52bdb127ce8de865cd7ba2dc8b5d9f852b5e9d97 | refs/heads/master | 2020-04-03T13:23:04.722847 | 2018-10-29T21:25:47 | 2018-10-29T21:25:47 | 155,282,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | import pygame
class Timer:
    """Steps through a sequence of animation frames, advancing at most one
    frame every `wait` milliseconds (measured with pygame's tick clock).

    Bug fix: the original __init__ declared parameters named frames1/frames2/
    wai1t/wait2/wait_switch_timers/... but its body assigned from the
    undefined names `frames`, `wait`, `frameindex` and `looponce`, so
    constructing a Timer always raised NameError. The signature now matches
    the attributes the class actually uses.
    """

    def __init__(self, frames, wait=100, frameindex=0, looponce=False):
        self.frames = frames          # sequence of frames (e.g. image rects)
        self.wait = wait              # milliseconds between frame advances
        self.frameindex = frameindex  # current position within `frames`
        self.looponce = looponce      # stop on the last frame instead of cycling
        self.finished = False         # True once a loop-once run hits the end
        self.lastframe = len(frames) - 1
        self.last = None              # tick of the previous advance (None = not started)

    def frame_index(self):
        """Return the current frame index, advancing it when `wait` ms elapsed."""
        now = pygame.time.get_ticks()
        if self.last is None:
            # First call: start the clock at frame 0.
            self.last = now
            self.frameindex = 0
            return 0
        elif not self.finished and now - self.last > self.wait:
            self.frameindex += 1
            if self.looponce and self.frameindex == self.lastframe:
                self.finished = True
            else:
                self.frameindex %= len(self.frames)
            self.last = now
        return self.frameindex

    def reset(self):
        """Restart the animation from frame 0 on the next frame_index() call."""
        self.last = None
        self.finished = False

    def __str__(self):
        # Bug fix: the original concatenated the frames sequence directly onto
        # a str (TypeError); convert it explicitly instead.
        return ('Timer(frames=' + str(self.frames) +
                ', wait=' + str(self.wait) + ', index=' + str(self.frameindex) + ')')

    def imagerect(self):
        """Return the frame for the current animation position."""
        return self.frames[self.frame_index()]
| [
"noreply@github.com"
] | mtizhoush.noreply@github.com |
56a1ae931a0725e26b1a73860c6543e66b71ca66 | 6e5c625136c36d845c72f7a4fdea482f05384590 | /flaskr/resources/Arena.py | 80f75710073b63af3ddb2c10ff8ceaf1b89b255c | [] | no_license | DvdCp/BattleWebApp | 26fe9e714c090add337d951a1672ef7747a1db33 | f8eeeccdb0e73bd4bcc9529adfe74436d8cf5c13 | refs/heads/master | 2023-07-08T09:53:10.640382 | 2021-08-08T18:09:13 | 2021-08-08T18:09:13 | 379,716,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,829 | py | from os import stat
from flaskr.challangers.Challenger import Challenger
from flaskr.challangers.Hero import Hero
from flaskr.challangers.Monster import Monster
from flaskr.challangers.lobbyparser.LobbyParser import LobbyParser
from flask import request, render_template, make_response
from flask_restful import Resource
from random import randint
from flaskr import app
from flaskr import battleLogger
import flaskr.utils.DatabaseHelper as DBHelper
class Arena(Resource):
    """REST resource hosting the battle arena pages and endpoints."""

    print("-----> INIZIALIZZAZIONE DELL'ARENA")

    def get(self):
        # Serve the landing page.
        return make_response(render_template("index.html").encode())

    # ----- REDIRECT METHODS ----- #

    @staticmethod
    @app.route("/Arena/fight", methods=["POST"])
    def fight():
        # Run the battle described by the posted lobby and return the results.
        lobbyRecieved = request.data
        lobby = LobbyParser.parseJSONtoLobby(lobbyRecieved)
        return Arena.startBattle(lobby)

    @staticmethod
    @app.route("/Arena/results")
    def results():
        # Fetch every recorded battle from the database.
        results = DBHelper.Battle.query.all()
        return make_response(render_template('results.html', results=results)), 200

    @staticmethod
    @app.route("/Arena/about")
    def goToAbout():
        return make_response(render_template('about.html')), 200

    @staticmethod
    @app.route("/Arena/instruction")
    def goToInstruction():
        return make_response(render_template('instruction.html')), 200

    @staticmethod
    @app.route("/Arena/createLobby")
    def create_lobby():
        return make_response(render_template('createlobby.html'))

    @staticmethod
    @app.route("/Arena/createChallanger")
    def create_challanger():
        return make_response(render_template('createchallanger.html'))

    # ----- BUSINESS METHODS ----- #

    @classmethod
    def startBattle(cls, challangerLobby: tuple[list[Hero], list[Monster]]):
        # Run a battle between heroes and monsters until one side is wiped out.
        # Reset the battle log.
        battleLogger.clearBattleLog()
        # Unpack the (heroes, monsters) tuple.
        _heroes, _monsters = challangerLobby
        while True:
            # HEROES' TURN: only while both squads still have a living member...
            if cls.checkIfSquadIsAlive(_heroes) and cls.checkIfSquadIsAlive(_monsters):
                # A random living hero attacks a random living monster.
                randomHero = cls.getRandomHero(_heroes)
                randomMonster = cls.getRandomMonster(_monsters)
                randomHero.attack(randomMonster)
            # MONSTERS' TURN: only while both squads still have a living member...
            if cls.checkIfSquadIsAlive(_heroes) and cls.checkIfSquadIsAlive(_monsters):
                # A random living monster attacks a random living hero.
                randomHero = cls.getRandomHero(_heroes)
                randomMonster = cls.getRandomMonster(_monsters)
                randomMonster.attack(randomHero)
            if not cls.checkIfSquadIsAlive(_heroes):
                # No hero left alive at the end of the round: the monsters win.
                outcome = "La battaglia è conclusa.\n------------------------ HANNO VINTO I MOSTRI !!! ------------------------"
                battleLogger.recordEvent(outcome)
                DBHelper.Battle.insertBattle(battleLogger.getBattleLog())
                return battleLogger.getBattleLog()
            elif not cls.checkIfSquadIsAlive(_monsters):
                # No monster left alive at the end of the round: the heroes win.
                outcome = "La battaglia è conclusa.\n------------------------ HANNO VINTO GLI EROI !!! ------------------------"
                battleLogger.recordEvent(outcome)
                DBHelper.Battle.insertBattle(battleLogger.getBattleLog())
                return battleLogger.getBattleLog()

    def getRandomHero(heroes: list[Hero]) -> Hero:
        # Return a random *living* hero from the list.
        # NOTE(review): loops forever if no hero is alive; startBattle guards
        # every call with checkIfSquadIsAlive first.
        while True:
            randomHeroIndex = randint(0, len(heroes) - 1)
            aHero = heroes[randomHeroIndex]
            if aHero.isAlive():
                return aHero

    def getRandomMonster(monsters: list[Monster]) -> Monster:
        # Return a random *living* monster from the list (same guard caveat).
        while True:
            randomMonsterIndex = randint(0, len(monsters) - 1)
            aMonster = monsters[randomMonsterIndex]
            if aMonster.isAlive():
                return aMonster

    def checkIfSquadIsAlive(squad: list[Challenger]) -> bool:
        # True if at least one member of the squad is still alive.
        for _challanger in squad:
            if _challanger.isAlive():
                return True
        return False
| [
"davidecap00@hotmail.it"
] | davidecap00@hotmail.it |
c47c138fbfc71660b3e48e1c7bd7b7396dd4fea6 | b8fa5d2da0145e849b42c39fc8c81697c3a9e187 | /for.py | 27e5eef7649f234273f03c15388121bd3946efba | [] | no_license | ryanblahnik/Automate-the-Boring-Stuff | 68c67f09ea59252e41ddd0167de0cb33134b1ed2 | 466ecf5f5c128c58cf95a87e15837229e35f9aa9 | refs/heads/master | 2021-05-31T17:40:14.019847 | 2016-06-04T15:23:45 | 2016-06-04T15:23:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | print('My name is')
# Count down from 20 to -8 in steps of two (range's stop of -10 is exclusive).
for i in range (20, -10, -2):
    print('Jimmy Five Times ' + str(i))
| [
"ryanblahnik@gmail.com"
] | ryanblahnik@gmail.com |
ea6605ef676901cdaa2b7ecd171b34dbb404925c | bad0c65e6d2b70e54e7493203313889c51b131bd | /lesson_45/Booking.py | a1dd459bbe307d3d997b74b44feae4dfc40a43e4 | [] | no_license | kwiatkowski1981/Inheritance_in_python | 32e66c56aaf96558c8180e7d30409e0493462694 | 3b2912659e6ca26c24e2eaba69c6391622c92eb9 | refs/heads/master | 2023-05-24T18:29:02.437260 | 2021-06-09T20:18:14 | 2021-06-09T20:18:14 | 371,510,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | from datetime import datetime
class Booking:
    """A reservation covering an inclusive range of dates."""

    def __init__(self, start_date: datetime, end_date: datetime):
        self.start_date = start_date
        self.end_date = end_date

    def get_difference(self):
        """Number of booked days, counting both the first and the last day."""
        booked_span = self.end_date - self.start_date
        return booked_span.days + 1
| [
"jakub.kwiatkowski1981@gmail.com"
] | jakub.kwiatkowski1981@gmail.com |
6b44d42ee68fd6a11886d0d5ed8184f10026f9ce | 56d84916b48a70bf4b23bc012ef1f48a8cb35834 | /architectures/DANet/utils/constant.py | a7bc8c942c85c77cf58ccf38c82a924ca205cc4e | [] | no_license | medical-projects/Deep-Learning-on-medical-Datasets | 3a100ba690fc2753f61ad72996fc216bf7f26bf9 | e5983785eacfb1ba022c81b5d375d0709dafb256 | refs/heads/master | 2023-02-11T14:52:12.507711 | 2021-01-06T03:49:43 | 2021-01-06T03:49:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | MASK_BG = 0
# Grey-level encodings for the organ classes in the segmentation masks,
# normalized to [0, 1]; raw labels are spaced CLASS_INCREMENT (63) apart.
MASK_LIVER = 63 / 255
MASK_KR = 126 / 255   # presumably kidney (right) -- confirm against dataset docs
MASK_KL = 189 / 255   # presumably kidney (left) -- confirm against dataset docs
MASK_SPLEEN = 252 / 255
# Model input resolution.
IMG_WIDTH = 256
IMG_HEIGTH = 256      # NOTE: name is a typo for IMG_HEIGHT; kept for compatibility
CLASS_INCREMENT = 63  # raw grey-level step between consecutive class labels
NUMBER_CLASS = 4      # matches the four organ mask values defined above
| [
"jadasmar97@gmail.com"
] | jadasmar97@gmail.com |
5d22b4797ca4a4d3f1e60a8d26ac60137495612b | 2557d02e93a6b47e462d72ae31b3bd51af4f5759 | /rssr/settings.py | 9cd566b8d9842749f0294b74f29b580bb7e1811c | [] | no_license | davilima6/rssr | ffef9eed412f0610c7a33a582aab317192003a27 | cfa4693e549cdb67c9fae57393c7670f33651f99 | refs/heads/master | 2021-01-01T19:11:23.635962 | 2014-09-24T13:44:38 | 2014-09-26T14:00:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,910 | py | """
Django settings for rssr project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control -- rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '*m)r91#9qh4kfjrwm@4d_r3hnb9b$2e=6u44ntsfsnb-i-*m&+'
# Development defaults: never run production with DEBUG enabled.
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
ADMINS = (
('Davi Lima', 'davilima6@gmail.com'),
)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd-party
'bootstrapform',
# ours
'feedlyr',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'rssr.urls'
WSGI_APPLICATION = 'rssr.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'rssr.db'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# STATICFILES_FINDERS = (
# 'django.contrib.staticfiles.finders.FileSystemFinder',
# 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
# 'dajaxice.finders.DajaxiceFinder',
# )
# List of callables that know how to import templates from various sources.
# TEMPLATE_LOADERS = (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
# )
# TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
# AUTHENTICATION_BACKENDS = (
# # Needed to login by username in Django admin, regardless of `allauth`
# "django.contrib.auth.backends.ModelBackend",
# # `allauth` specific authentication methods, such as login by e-mail
# "allauth.account.auth_backends.AuthenticationBackend",
# )
| [
"davilima6@gmail.com"
] | davilima6@gmail.com |
ff20f97e522dad036e7df019b8c4e0a5caae626a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_unguents.py | 87d4634aa61496578132ed4c4606ab4ff28ddf79 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.nouns._unguent import _UNGUENT
#calss header
class _UNGUENTS(_UNGUENT, ):
def __init__(self,):
_UNGUENT.__init__(self)
self.name = "UNGUENTS"
self.specie = 'nouns'
self.basic = "unguent"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
903d89061c0763490c5d32fede65e3efd0580966 | 2b1ecde355731929993e5da98c6157e3d20d84ff | /src/count_vectorizer_using_scikit.py | 39623a2a8d67d4fcbd34c6268af24d796b0b5626 | [] | no_license | Parvez-Khan-1/text-vectorization-techniques | e13c892025c3f5686ec61b2b433ea4f7e91af8d0 | bf953cb5136c4efbcd2e1ba31e15fe8211f85e8d | refs/heads/master | 2020-04-16T10:41:50.016401 | 2019-01-14T12:54:48 | 2019-01-14T12:54:48 | 165,513,696 | 1 | 0 | null | 2019-01-14T12:54:49 | 2019-01-13T14:17:06 | Python | UTF-8 | Python | false | false | 395 | py | from sklearn.feature_extraction.text import CountVectorizer
# Toy corpus for demonstrating scikit-learn's CountVectorizer.
text = ['An apple a day keeps doctor away.',
        'Parvez likes to eat apples.',
        'Natural Language Processing is Fun.']
# Learn a vocabulary of unigrams and bigrams from the corpus.
vectorizer = CountVectorizer(ngram_range=(1, 2))
vectorizer.fit(text)
print(vectorizer.vocabulary_)
# Encode an unseen sentence as term counts over the learned vocabulary;
# tokens never seen during fit() are simply ignored.
test_example = ['I like apples']
vector = vectorizer.transform(test_example)
print(vector.toarray())
"ppathan@digitalpharmacist.com"
] | ppathan@digitalpharmacist.com |
4196a9a7104fb3f05d55626be64abebd451b20f1 | 9d3280739c5fa3c58eb927f40ab5f82173f2831f | /src/pybind/fstext/fstext_pybind_test.py | 66b70fc20382b5666df316d5ef9a6c251fa55da8 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | aadps/kaldi | 00c2db99331cdf80fe376cb1ea8bce70d3ea05d5 | cd351bb31c98f9d540c409478cbf2c5fef1853ca | refs/heads/aadps | 2020-12-09T15:56:00.679658 | 2020-04-13T10:58:03 | 2020-04-13T10:58:03 | 233,353,064 | 0 | 0 | NOASSERTION | 2020-01-28T02:28:52 | 2020-01-12T07:11:30 | Shell | UTF-8 | Python | false | false | 1,693 | py | #!/usr/bin/env python3
# Copyright 2020 Mobvoi AI Lab, Beijing, China (author: Fangjun Kuang)
# Apache 2.0
import math # for math.isnan
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
import unittest
from kaldi import fst
class TestLatticeWeight(unittest.TestCase):
    """Exercises the pybind wrappers for Kaldi's lattice weight types."""

    def test_lattice_weight(self):
        # A LatticeWeight holds two float costs (Value1/Value2).
        w = fst.LatticeWeight()
        self.assertEqual(w.Value1(), 0)  # lm cost
        self.assertEqual(w.Value2(), 0)  # acoustic cost
        w.SetValue1(1)
        w.SetValue2(2)
        self.assertEqual(w.Value1(), 1)
        self.assertEqual(w.Value2(), 2)
        w = fst.LatticeWeight(10, 20)
        self.assertEqual(w.Value1(), 10)
        self.assertEqual(w.Value2(), 20)
        # One(): both costs are zero.
        w = fst.LatticeWeight.One()
        self.assertEqual(w.Value1(), 0)
        self.assertEqual(w.Value2(), 0)
        # Zero(): both costs are +infinity.
        w = fst.LatticeWeight.Zero()
        self.assertEqual(w.Value1(), float('inf'))
        self.assertEqual(w.Value2(), float('inf'))
        self.assertEqual(w.Type(), 'lattice4')
        # NoWeight(): both components are NaN.
        w = fst.LatticeWeight.NoWeight()
        self.assertTrue(math.isnan(w.Value1()))
        self.assertTrue(math.isnan(w.Value2()))

    def test_compact_lattice_weight(self):
        # A CompactLatticeWeight pairs a LatticeWeight with an int sequence,
        # rendered as "v1,v2,i1_i2_..." by str().
        lat_w = fst.LatticeWeight(10, 20)
        s = [1, 2, 3, 4, 5]
        w = fst.CompactLatticeWeight(lat_w, s)
        self.assertEqual(w.Weight(), lat_w)
        self.assertEqual(w.String(), s)
        self.assertEqual(str(w), '10,20,1_2_3_4_5')
        # compactlattice44: the first 4 is for sizeof(float)
        # and the second is for sizeof(int)
        self.assertEqual(w.Type(), 'compactlattice44')
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | aadps.noreply@github.com |
639ac060faa73c4d26eef615c19c9364d3db8ecb | 2e5b5f51d5f7c42752dd4304dd264b84fb41a91a | /vegetativo/views.py | 4be5b74410f2a5c8e8f0c817f09908499d3cfe55 | [] | no_license | 22022013/13072015 | d05594321dfd9613f318f55d2fc33b3ae32d6f52 | 02d3cacc98ac8dae359eaed2dada9c5e52bb88b0 | refs/heads/master | 2020-04-05T23:05:33.670255 | 2015-09-11T00:24:36 | 2015-09-11T00:24:36 | 39,043,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,947 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.contrib.auth.models import Group
from django.core.urlresolvers import reverse as r
from django.contrib.auth.decorators import login_required
from vegetativo.models import OrdemServico
from core.views import group_required,verifica_membro
from vegetativo.forms import OrdemServicoForm
def ordens_servico(request):
    """List every service order (ordem de servico) registered in the system."""
    queryset = OrdemServico.objects.all()
    context = {'ordens_servico': queryset}
    return render(request, 'ordens_servico.html', context)
def os_nova(request):
    """Create a new service order: GET renders an empty form, POST saves it."""
    if request.method != 'POST':
        # Plain GET: show a blank creation form.
        return render(request, 'os_cad.html', {'form': OrdemServicoForm(), 'status': "Nova"})
    form = OrdemServicoForm(request.POST)
    if not form.is_valid():
        # Re-render the bound form so validation errors are displayed.
        return render(request, 'os_cad.html', {'form': form, 'status': "Nova"})
    ordem = form.save(commit=False)
    ordem.save()
    return HttpResponseRedirect(r('vegetativo:ordens_servico'))
def os_editar(request, os_id):
    """Edit the service order identified by ``os_id``."""
    ordem = OrdemServico.objects.get(id=os_id)
    if request.method != 'POST':
        # GET: show the form pre-filled with the existing order.
        return render(request, 'os_cad.html', {'form': OrdemServicoForm(instance=ordem), 'status': "Editar"})
    form = OrdemServicoForm(request.POST, instance=ordem)
    if not form.is_valid():
        # Invalid submission: re-render with errors.
        return render(request, 'os_cad.html', {'form': form, 'status': "Editar"})
    atualizada = form.save(commit=False)
    atualizada.save()
    return HttpResponseRedirect(r('vegetativo:ordens_servico'))
| [
"beh.mno@hotmail.com"
] | beh.mno@hotmail.com |
f4bb3586cb062de7a8efa4f03401542b346f3f2a | 66caa4ea68759ad897d739a0d0abade0b17388e2 | /tracker/site/views.py | 8427fd416cd771d612441d24fa475032b4cb61da | [
"Apache-2.0"
] | permissive | XOyarz/potato-app | 0e5ff6b1733bb050294529ba4df311b843b85173 | 845e69da5bf82e2ebacd0d5488f8a3ce7041751d | refs/heads/master | 2021-09-03T10:54:24.843198 | 2018-01-08T13:30:52 | 2018-01-08T13:30:52 | 114,784,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,928 | py | from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.views.generic import TemplateView, CreateView, UpdateView, ListView, DeleteView
from .forms import ProjectForm, TicketForm
from .models import Project, Ticket
class ProjectContextMixin(object):
    """Mixin for views addressed under a project URL.

    Resolves the Project from the ``project_id`` URL kwarg (404 when missing)
    and exposes it to templates as ``current_project``.
    """
    # Cached Project; class-level default None, replaced per instance on
    # first get_project() call so repeated calls hit the DB only once.
    project = None
    def get_project(self):
        if not self.project:
            self.project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        return self.project
    def get_context_data(self, **kwargs):
        context = super(ProjectContextMixin, self).get_context_data(**kwargs)
        context['current_project'] = self.get_project()
        return context
class MyTicketsView(TemplateView):
    """Page listing the tickets assigned to the logged-in user, newest first."""
    template_name = "site/my_tickets.html"
    def get_context_data(self):
        user = self.request.user
        tickets = []
        if user.is_authenticated():
            tickets = Ticket.objects.filter(assignees=user.pk).order_by('-modified')
        return {'tickets': tickets}
my_tickets_view = MyTicketsView.as_view()
class ProjectListView(ListView):
    """Overview splitting projects into 'mine' (via my tickets) and the rest."""
    model = Project
    template_name = "site/project_list.html"

    def get_context_data(self):
        """Return ``my_projects`` and ``other_projects`` sets for the template.

        Fixes over the original: removes a leftover debug ``print`` that
        dumped querysets to stdout on every request, replaces the meaningless
        local name ``yolo``, and builds the set directly instead of filling a
        list only to convert it.
        """
        # Projects reachable through the tickets assigned to this user.
        my_projects = {ticket.project for ticket in self.request.user.tickets.all()}
        # Every remaining project.
        other_projects = set(Project.objects.all()) - my_projects
        return {'other_projects': other_projects, 'my_projects': my_projects}
project_list_view = ProjectListView.as_view()
class CreateProjectView(CreateView):
    """Form view for creating a new Project (login required)."""
    model = Project
    form_class = ProjectForm
    template_name = "site/project_form.html"
    def get_success_url(self):
        # Back to the project overview after a successful save.
        return reverse("project-list")
    def get_form_kwargs(self):
        kwargs = super(CreateProjectView, self).get_form_kwargs()
        # Pass the requesting user and a page title through to ProjectForm;
        # the form decides how they are used.
        kwargs['user'] = self.request.user
        kwargs['title'] = 'Create project'
        return kwargs
create_project_view = login_required(CreateProjectView.as_view())
class UpdateProjectView(ProjectContextMixin, UpdateView):
    """Edit an existing Project, looked up via the ``project_id`` URL kwarg."""
    model = Project
    form_class = ProjectForm
    pk_url_kwarg = 'project_id'
    template_name = "site/project_form.html"
    def get_success_url(self):
        # Return to the project overview after the edit.
        return reverse("project-list")
    def get_form_kwargs(self):
        kwargs = super(UpdateProjectView, self).get_form_kwargs()
        # self.object is the Project being edited (resolved by UpdateView).
        kwargs['user'] = self.request.user
        kwargs['title'] = "Edit {0}".format(self.object.title)
        return kwargs
update_project_view = login_required(UpdateProjectView.as_view())
class ProjectView(ProjectContextMixin, TemplateView):
    """Detail page showing one project together with all of its tickets."""
    template_name = "site/project_detail.html"
    def get_context_data(self, **kwargs):
        ctx = super(ProjectView, self).get_context_data(**kwargs)
        current = self.get_project()
        ctx["project"] = current
        ctx["tickets"] = current.tickets.all()
        return ctx
project_view = ProjectView.as_view()
class CreateTicketView(ProjectContextMixin, CreateView):
    """Create a ticket inside the project given by the ``project_id`` URL kwarg."""
    model = Ticket
    form_class = TicketForm
    template_name = "site/ticket_form.html"
    def get_success_url(self):
        # Back to the detail page of the project the ticket was created in.
        return reverse("project-detail", kwargs={"project_id": self.kwargs['project_id']})
    def get_form_kwargs(self):
        kwargs = super(CreateTicketView, self).get_form_kwargs()
        # Scope the form to the current project; also hand it the user and
        # the heading shown by the shared ticket_form template.
        kwargs['project'] = self.get_project()
        kwargs['user'] = self.request.user
        kwargs['title'] = 'Create ticket'
        return kwargs
create_ticket_view = login_required(CreateTicketView.as_view())
class UpdateTicketView(ProjectContextMixin, UpdateView):
    """Edit an existing ticket; the form is scoped to the ticket's own project."""
    model = Ticket
    form_class = TicketForm
    pk_url_kwarg = 'ticket_id'
    template_name = "site/ticket_form.html"

    def get_success_url(self):
        # Back to the owning project's detail page.
        return reverse("project-detail", kwargs={"project_id": self.kwargs['project_id']})

    def get_form_kwargs(self):
        kwargs = super(UpdateTicketView, self).get_form_kwargs()
        # UpdateView has already resolved self.object from pk_url_kwarg
        # ('ticket_id'), so use its project directly instead of issuing a
        # second, redundant Ticket.objects.get() query (which also raised a
        # bare DoesNotExist instead of the usual 404 path).
        kwargs['project'] = self.object.project
        kwargs['user'] = self.request.user
        kwargs['title'] = "Edit {0}".format(self.object.title)
        return kwargs
update_ticket_view = login_required(UpdateTicketView.as_view())
# Added Delete View, together with new delete-ticket template and URL
class DeleteTicketView(ProjectContextMixin, DeleteView):
    """Confirm-and-delete view for a single ticket (login required)."""
    model = Ticket
    pk_url_kwarg = 'ticket_id'
    def get_success_url(self):
        # Return to the owning project's detail page after deletion.
        return reverse("project-detail", kwargs={"project_id": self.kwargs['project_id']})
delete_ticket_view = login_required(DeleteTicketView.as_view())
"xavier982@hotmail.com"
] | xavier982@hotmail.com |
d0ceb01df76cc0d262d8c8bced2b54029ec1c256 | 9f5ea89c729a52a44cc363f62ceccd744771b061 | /user/migrations/0007_auto_20190520_2034.py | 1a24b0f5a52c22e9fe67a2a3c03f6f5c01746ed2 | [] | no_license | Leandrorferreira/DEMODAY | 4a13b676a524aba09257ceaa71bb51907283f378 | 14d8bf00796927abb9e2126505f0408bca50fcd8 | refs/heads/master | 2023-05-05T02:46:16.386684 | 2019-05-28T12:35:59 | 2019-05-28T12:35:59 | 188,088,187 | 3 | 0 | null | 2023-04-21T20:32:11 | 2019-05-22T17:54:17 | CSS | UTF-8 | Python | false | false | 520 | py | # Generated by Django 2.2.1 on 2019-05-20 23:34
from django.db import migrations
class Migration(migrations.Migration):
    """Rename two Profile columns: user_birthday -> birthday, user_cellphone -> cellphone."""
    dependencies = [
        ('user', '0006_auto_20190520_2022'),
    ]
    operations = [
        migrations.RenameField(
            model_name='profile',
            old_name='user_birthday',
            new_name='birthday',
        ),
        migrations.RenameField(
            model_name='profile',
            old_name='user_cellphone',
            new_name='cellphone',
        ),
    ]
| [
"leandrorferreira95@gmail.com"
] | leandrorferreira95@gmail.com |
58b2baef07663c5e82c8e96e9e9e199a40108943 | af685f9625dc3fc1892171df396ed46155caa092 | /WORC/resources/fastr_tools/worc/bin/FeatureConverter_tool.py | 84635983ccc0a62c9a1aa63c19be4a548ed16b53 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | MStarmans91/WORC | b66d7de70e2f3acab5100a3431855216b31bd7b1 | f267b3d05c8193939aa4f43e47c6e24f9307864e | refs/heads/master | 2023-08-17T14:02:29.566811 | 2023-08-15T08:58:42 | 2023-08-15T08:58:42 | 92,295,542 | 65 | 20 | NOASSERTION | 2023-08-15T08:58:44 | 2017-05-24T13:31:31 | Python | UTF-8 | Python | false | false | 2,404 | py | #!/usr/bin/env python
# Copyright 2017-2020 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from WORC.featureprocessing.FeatureConverter import FeatureConverter
def main():
    """CLI wrapper around WORC's FeatureConverter.

    Parses the tool arguments, normalises fastr's list-valued string inputs
    to plain strings, and runs the feature conversion. Replaces four
    copy-pasted ``if type(...) is list`` blocks with one loop.
    """
    parser = argparse.ArgumentParser(description='Radiomics classification')
    parser.add_argument('-feat_in', '--feat_in', metavar='feat_in',
                        nargs='+', dest='feat_in', type=str, required=True,
                        help='Patient features input of first modality (HDF)')
    parser.add_argument('-toolbox', '--toolbox', metavar='toolbox', nargs='+',
                        dest='toolbox', type=str, required=True,
                        help='Toolbox used for feature calculation')
    parser.add_argument('-cf', '--conf', metavar='config', nargs='+',
                        dest='cf', type=str, required=True,
                        help='Configuration')
    parser.add_argument('-feat_out', '--feat_out', metavar='feat_out',
                        nargs='+', dest='feat_out', type=str, required=True,
                        default=None,
                        help='Patient features input of second modality (HDF)')
    args = parser.parse_args()

    # fastr hands every argument over as a list of strings: flatten each
    # one to a single string before use.
    for attr in ('feat_in', 'toolbox', 'cf', 'feat_out'):
        value = getattr(args, attr)
        if type(value) is list:
            setattr(args, attr, ''.join(value))

    # Run converter
    FeatureConverter(feat_in=args.feat_in,
                     toolbox=args.toolbox,
                     config=args.cf,
                     feat_out=args.feat_out)
| [
"m.starmans@erasmusmc.nl"
] | m.starmans@erasmusmc.nl |
821517e5ea6fac918e2c0b7b96dc5c4dbd163249 | ac86b8b2ace578e71d125054d0cf4a36a8b1ffe1 | /main/dataCollection.py | 6586ebce6bae95d02ed08af8ec6ee1afa6263bfe | [
"MIT"
] | permissive | KubeKing/Butterknife-Matchmaking | e22d293b4d85ee531406579a38d43f7c7c5c56e3 | 1ebb261441f547d960e478a0283637babec49b90 | refs/heads/master | 2021-06-18T15:44:05.753021 | 2021-02-05T15:52:50 | 2021-02-05T15:52:50 | 168,908,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,149 | py | #Developed by Trey Walker for The Butterknife
from __future__ import print_function #Google
from apiclient import discovery #Google API
from oauth2client import client #Google API
from oauth2client import tools #Google API
from oauth2client.file import Storage #Google API
import httplib2 #For HTTP Usage
import requests #For HTTP Usage
import os #For Local File Usage
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
CLIENT_SECRET_FILE = 'client_secret.json' #Goto Google's API Dev
APPLICATION_NAME = 'Butterknife Matchmaking Survey' #Name of Application
def get_credentials():
    """Return valid OAuth2 credentials for the Sheets API.

    Loads cached credentials from ~/.credentials (creating the directory on
    first use); when missing or invalid, runs the browser OAuth flow using
    CLIENT_SECRET_FILE and stores the result for next time.
    """
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir, 'sheets.googleapis.com-python-quickstart.json')
    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        # No cached token (or it expired/was revoked): run the consent flow.
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        credentials = tools.run_flow(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
def getSheet(cell_range):
    """Fetch the given A1-notation range from the survey spreadsheet.

    Args:
        cell_range: A1 range string, e.g. 'B2:AC'. (Renamed from ``range``,
            which shadowed the builtin; the only caller passes positionally.)

    Returns:
        The list of rows on success, or ``False`` when the range is empty or
        unreadable — callers must test for the boolean sentinel.
    """
    print("Collecting " + cell_range + "... ", end="", flush=True)
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
                    'version=v4')
    service = discovery.build('sheets', 'v4', http=http,
                              discoveryServiceUrl=discoveryUrl)
    spreadsheetId = '1WVXeSRS6Q8D52LIlMPGg2O-jDL91_lXGsWQ0YNVMo3U'  # Google Sheet ID
    result = service.spreadsheets().values().get(
        spreadsheetId=spreadsheetId, range=cell_range).execute()
    values = result.get('values', [])
    if not values:
        print('FAILED!')
        return False
    print('DONE!')
    return values
if __name__ == '__main__':
    print(getSheet('B2:AC'))
| [
"techking802@gmail.com"
] | techking802@gmail.com |
862ab8f5ecaa1f4ee3719e2a020c680e26816952 | ccf00a0a8cd1ccc724c2899fa572e5731453cef7 | /Assignment 1/cs7641-master/supervised/boosting/plot_adaboost_regression.py | 4a6e1e4ae866158220d25a7d65c4349ff64e574f | [] | no_license | kuanchao/MachineLearningProjects | 2641924ba75ab56720367fb2846951b007040daa | 40e7d8d31358e265afb015dbad800646e87a5d2d | refs/heads/master | 2021-01-01T06:06:50.945397 | 2017-07-16T03:29:26 | 2017-07-16T03:29:26 | 97,357,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,725 | py | print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import csv
import pdb
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
#Image Class
class Images:
    """A CSV training set: rows of string-valued features plus a label column."""

    def __init__(self):
        self.data = []             # feature rows (strings, as read from the CSV)
        self.target = []           # first column of every row: the class label
        self.feature_names = np.array([])  # header names, label column excluded

    def loadData(self, filename):
        """Load ``filename`` (CSV with a header row) into data/target/feature_names.

        Fixes two defects of the original implementation:
        * it opened the module-level global ``datafile`` and silently ignored
          its ``filename`` parameter;
        * it used ``file_reader.next()``, the Python-2 iterator protocol,
          which raises AttributeError on Python 3 (now ``next(file_reader)``).
        """
        with open(filename) as csvfile:
            file_reader = csv.reader(csvfile)
            # Header row: every column after the label becomes a feature name.
            features = next(file_reader)
            self.feature_names = np.append(self.feature_names, features[1:])
            # NOTE(review): the original skipped one more row here ("skip
            # header line") even though the header was already consumed above,
            # so the first data row is dropped. Behaviour kept for
            # compatibility -- confirm whether the CSV has two header rows.
            next(file_reader, None)
            for row in file_reader:
                self.data.append(row[1:])
                self.target.append(row[0])
###############################################################################
# Load data
#boston = datasets.load_boston()
#X, y = shuffle(boston.data, boston.target, random_state=13)
#X = X.astype(np.float32)
#offset = int(X.shape[0] * 0.9)
#X_train, y_train = X[:offset], y[:offset]
#X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Load data: read the CSV training set and split 90/10 into train/test.
datafile = '../data/trainsmall.csv'
images = Images()
images.loadData(datafile)
# Convert to ndarrays before shuffling: sklearn's shuffle returns plain
# lists for list input, and a list has no .astype() (the original raised
# AttributeError on the next line).
X, y = shuffle(np.array(images.data), np.array(images.target), random_state=13)
X = X.astype(np.uint16)
y = y.astype(np.uint16)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
max_learners = np.arange(2, 400, 20)
correctPredictions = []
for i, l in enumerate(max_learners):
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
#params[i] = {'n_estimators': i, 'max_depth': 4, 'min_samples_split': 2, 'learning_rate': 0.1}
clf = ensemble.GradientBoostingClassifier(n_estimators=l, max_depth=4)
#clf = ensemble.AdaBoostClassifier(n_estimators=l, learning_rate=0.1)
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
mse = mean_squared_error(y_test, pred)
print("MSE: %.4f" % mse)
correct = 0
for j in range(0, len(y_test)):
if y_test[j] == round(pred[j]):
correct = correct + 1
correctPredictions.append((float(correct)/len(y_test)) * 100.0)
print("Prediction Correct Rate: " + str(((float(correct)/len(y_test)) * 100.0)))
###############################################################################
# Plot training deviance
plt.figure()
plt.title('Boosting: Performace vs Number of Learners')
plt.plot(max_learners, correctPredictions, lw=2, label = 'Prediction Correctness')
plt.legend()
plt.xlabel('Number of Learners')
plt.ylabel('Correct Prediction Percentage')
plt.show()
###############################################################################
# Plot feature importance
#feature_importance = clf.feature_importances_
# make importances relative to max importance
#feature_importance = 100.0 * (feature_importance / feature_importance.max())
#sorted_idx = np.argsort(feature_importance)
#pos = np.arange(sorted_idx.shape[0]) + .5
#plt.subplot(1, 2, 2)
#plt.barh(pos, feature_importance[sorted_idx], align='center')
#pdb.set_trace()
#plt.yticks(pos, images.feature_names[sorted_idx])
#plt.xlabel('Relative Importance')
#plt.title('Variable Importance')
plt.show() | [
"kuanchao@gmail.com"
] | kuanchao@gmail.com |
7bb0f2efce838cff29cd7b40abcf7651c2dee37c | 130d2040cc9f216b552d363fde6c913e301574bb | /node_modules/mongodb/node_modules/bson/build/config.gypi | c7b44e426dda164eab7267419f3388460bef6877 | [
"Apache-2.0"
] | permissive | kevin-prox/quoteit | 59a8e8c70c86723a5f0a0a690f3101218a4ec933 | 9d157d2d88bcecd7437cec710899e980cce4d5ba | refs/heads/master | 2020-04-06T04:58:35.763377 | 2014-09-15T20:22:02 | 2014-09-15T20:22:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,171 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 46,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "/usr",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/home/developer/.node-gyp/0.10.31",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/1.4.23 node/v0.10.31 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"user": "",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "Infinity",
"userconfig": "/home/developer/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"https_proxy": "http://172.26.97.251:3128/",
"cafile": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/developer/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "2",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "0.10.31",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"strict_ssl": "",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"username": "",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/home/developer/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"email": "",
"json": "",
"spin": "true"
}
}
| [
"Kevin.Prox@wnco.com"
] | Kevin.Prox@wnco.com |
2c91f717896fc3f5eac18c5d46fd83a24d3fc38e | a8c5737cd18bef86bedfe4528c05aab785007505 | /backend/myconcretecasting/models.py | 78abc11478297ec15db419431c373f838d7b1117 | [] | no_license | CSTC-WTCB-BBRI/myConcreteCasting | 5ed5984c59bc81fac234409852dcea82c05a4d41 | 9e7414c76ba7abafefafbaba1313cf695d8bf616 | refs/heads/master | 2023-07-05T19:12:50.337158 | 2021-09-02T18:06:40 | 2021-09-02T18:06:40 | 368,918,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | from django.contrib.auth.models import AbstractUser
from django.utils.translation import ugettext_lazy as _
from django.db import models
from djongo import models
from .managers import UserManager
import uuid
class User(AbstractUser):
    """Custom user model that logs in with an e-mail address instead of a username."""
    # Drop the inherited username column entirely.
    username = None
    # E-mail becomes the unique login identifier.
    email = models.EmailField(_("email address"), unique=True)
    USERNAME_FIELD = "email"
    REQUIRED_FIELDS = []
    # Custom manager that knows how to create users without a username.
    objects = UserManager()
    def __str__(self):
        return self.email
class Casting(models.Model):
    """One concrete casting: mix parameters plus schedule fields.

    Embedded inside Jobsite via djongo's ArrayField rather than referenced by
    a foreign key. Domain semantics of the ratio fields (presumably concrete
    strength development ratios) are not defined here -- see the frontend.
    """
    # Random UUID primary key, fixed at creation.
    id = models.UUIDField(
        primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=50)
    description = models.TextField(blank=True)
    isClassEI = models.BooleanField()
    fcm2_fcm28_ratio = models.FloatField(null=True)
    type2_addition = models.BooleanField()
    rc2_rc28_ratio = models.FloatField(null=True)
    cement_type = models.CharField(max_length=15)
    strength_class = models.CharField(max_length=10)
    # Schedule values; units not specified in this model -- confirm in callers.
    target_strength = models.IntegerField(default=None, null=True)
    casting_start = models.IntegerField(default=None, null=True)
    curing_duration = models.IntegerField(default=None, null=True)
    hardening_duration = models.IntegerField(default=None, null=True)
class Jobsite(models.Model):
    """A construction site owned by a user, embedding its castings as documents."""
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=255)
    address = models.CharField(max_length=255)
    # Arbitrary JSON; presumably a lat/lon pair -- confirm against the frontend.
    coordinates = models.JSONField()
    description = models.TextField(blank=True)
    # djongo ArrayField: Casting documents stored inline in the Jobsite record.
    castings = models.ArrayField(
        model_container=Casting
    )
| [
"automeedwin@gmail.com"
] | automeedwin@gmail.com |
b165896c2271d3ae0926d185ad15cbd8e3d9f4cc | 9e852bd873f25dd836ad29ab16a1d90e068fdf0c | /mysite/blog/admin.py | c76937d5e1dcb8c43a416ea69340e34b3dbae65a | [] | no_license | todd-san/mysite | 8f0fbb883102e848c0325804dcce7092baef6dae | f19631caf9bebde1762118fbf06c833614ddc335 | refs/heads/master | 2021-09-20T06:18:33.032383 | 2018-08-05T16:55:08 | 2018-08-05T16:55:08 | 114,711,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | from django.contrib import admin
from .models import Post, AboutMe, Service, Contact, Project
# Register your models here.
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Changelist for blog posts: id and title columns."""
    list_display = ['id', 'title']
@admin.register(AboutMe)
class AboutMeAdmin(admin.ModelAdmin):
    """Changelist for AboutMe entries."""
    list_display = ['id', 'title']
@admin.register(Service)
class ServiceAdmin(admin.ModelAdmin):
    """Changelist for Service entries."""
    list_display = ['id', 'title']
@admin.register(Contact)
class ContactAdmin(admin.ModelAdmin):
    """Changelist for Contact entries: address / e-mail / phone columns."""
    list_display = ['id', 'address', 'email', 'phone_number']
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
    """Changelist for Project entries, including the last-modified date."""
    list_display = ['id', 'name', 'date_modified']
"mille2tm@gmail.com"
] | mille2tm@gmail.com |
90f4696555e174ef011d383417a37633f1b0867b | 54d2887e3c910f68366bd0aab3c692d54245e22a | /abc/abc_042_125/abc089/a.py | 0311aa625dd7cd2c28dad38a37460155d16513a9 | [] | no_license | Kevinrobot34/atcoder | 7aec367fd2c6b589e9d583dae7b3c7520ce9fa12 | 482ea508f098f81e4f19522fe518dd22c781aca9 | refs/heads/master | 2022-07-10T23:44:45.290022 | 2022-06-29T11:30:26 | 2022-06-29T11:30:26 | 158,081,477 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | n = int(input())
print(n // 3)
| [
"kevinrobot34@yahoo.co.jp"
] | kevinrobot34@yahoo.co.jp |
f63cc5c3a8d57406402d3ee8c99b0c3f4c70154e | 5f98660f60923710c7873640c74cfa5ff3b26a86 | /portal/views.py | 06f165eb235f8621079bd6cf81b4e84a2ba6f225 | [] | no_license | yingle/land_war | 1b5745c26375e37f090418a8e7b0425db86ed629 | 439a756ad406c3aca99e1997e4c4e1958af1b0b6 | refs/heads/master | 2021-01-17T18:30:34.194762 | 2016-07-27T11:13:41 | 2016-07-27T11:13:41 | 64,011,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from django.shortcuts import render
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required()
def personal_space(request):
    """Render the logged-in user's personal space page.

    Switched from the long-deprecated ``render_to_response`` (removed in
    Django 3.0) to ``render`` -- already imported at the top of this module --
    so the template gets a proper RequestContext.
    """
    return render(request, 'portal/personal_space.html')
"linyingle@gmail.com"
] | linyingle@gmail.com |
287b3db42bf56ee7ebd9037f090b0b4c6d7780a6 | e91b4ceb117f4bed0baa8985fb42bce72f0a6262 | /.ipynb_checkpoints/covid-checkpoint.py | d1553e49effd6ee79ebb935a2cd42a67308d9d08 | [] | no_license | ChiCodes2020/tkh_project | 5c0693ba0cb72a5146b9efbe7a306ed470d7f138 | cd498d162e5df5e22e9c98f283d2489a64e367af | refs/heads/main | 2023-02-15T17:30:42.025032 | 2021-01-12T20:08:00 | 2021-01-12T20:08:00 | 313,191,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,291 | py | import requests
from bs4 import BeautifulSoup
import csv
#we want to create a script in python
#we want to create a function that scrapes from the specified website
covid_data = [["Country", "Cases", "Deaths", "Recoveries", "Death_rate", "Recovery_rate" ]]
def scrape():
    """Scrape per-country COVID figures from the Wikipedia pandemic-data table.

    Appends one [name, cases, deaths, recoveries] row per country to the
    module-level ``covid_data`` list and returns it. Cells whose text starts
    with "No dat" (i.e. "No data") are stored as None.

    Fixes: names the HTML parser explicitly (a bare BeautifulSoup(text) emits
    a warning and picks whichever parser is installed) and narrows the bare
    ``except`` -- which silently swallowed every error -- to IndexError, the
    condition the ``break`` actually relies on to detect the end of the table.
    """
    req = requests.get('https://en.wikipedia.org/wiki/Template:COVID-19_pandemic_data')
    soup = BeautifulSoup(req.text, 'html.parser')
    # Skip the two header rows of the table.
    for i in soup.select("tr")[2:]:
        try:
            country_name = list(i.select("th")[1].strings)[0]
            country_cases = list(i.select("td")[0].strings)[0][:-1]
            if country_cases == "No dat":
                country_cases = None
            country_deaths = list(i.select("td")[1].strings)[0][:-1]
            if country_deaths == "No dat":
                country_deaths = None
            country_recoveries = list(i.select("td")[2].strings)[0][:-1]
            if country_recoveries == "No dat":
                country_recoveries = None
            covid_data.append([country_name, country_cases, country_deaths, country_recoveries])
        except IndexError:
            # A row without the expected cells marks the end of the
            # per-country section of the table.
            break
    return covid_data
"chiona@chionasmacbook2.home"
] | chiona@chionasmacbook2.home |
67d3f1296255d2887b0386a00853c9a2edc086cd | 313db12a459ee17d1fe354978ecc9fe9a0359b98 | /python/CreateClass.py | 44b6c2334a35c087f7c87df5a73541b3e420d7f5 | [] | no_license | siknight/hadooppratice | f6bfa0e4409b4b7ae81c82610de4030c78ef782d | e0878f732bded0c05b6beaef6e4167da42ac22c5 | refs/heads/master | 2023-02-11T08:53:50.046639 | 2021-01-14T03:45:55 | 2021-01-14T03:45:55 | 316,176,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | class Cat(object):
# 吃
def eat(self):
print('猫在吃鱼....')
self.age =18
# 喝东西
def drink(self):
print("猫在喝东西...")
# 创建一个对象,并用变量tom来保存它的引用
if __name__ == '__main__':
tom = Cat()
tom.eat()
print(tom.age)
tom.name="erhuo"
print(tom.name)
| [
"1786678583@qq.com"
] | 1786678583@qq.com |
678801de7d60da4e7d3edb4cb0592205b6e881d4 | 6ee89e8bdb4e3270d0fd89023c5992f671d99a95 | /hisense/util/Python2DataBase.py | 6bf3465b9a589e67ac1fa0aadec2e4ff12a95db3 | [] | no_license | maxiao227/passengerFlowForecast | cb711a2233bacfbbbe89f5b569ee7cf9d8b55ef1 | a7e58c33d026efca27f6290e39b4917abd566bae | refs/heads/master | 2020-03-24T00:33:09.648423 | 2018-07-25T06:37:13 | 2018-07-25T06:37:13 | 142,296,081 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,252 | py | # -*- coding: utf-8 -*-
import configparser
import uuid
import datetime
class Python2DataBase(object):
    """Builds INSERT statements for the REC_SENSEAREAFORCECASTDATA forecast table.

    Connection settings are read once, at class-definition time, from
    ../model/dbconfig.conf (section DATABASE).

    Fixes over the original:
    * ``uuid.uuid1()`` returned a UUID object that was concatenated to str,
      which raises TypeError at runtime -- it is now wrapped in ``str()``;
    * the multi-row methods reused a single UUID for every row even though
      ID is the first (primary-key) column -- a fresh UUID is generated per
      statement;
    * the generated SQL was assigned to a local and silently discarded -- the
      methods now return it so callers can execute or log it (previous return
      value was None, so existing callers are unaffected).
    """
    Config = configparser.ConfigParser()
    Config.read('../model/dbconfig.conf')
    dirver = Config.get('DATABASE', 'dirver')
    url = Config.get('DATABASE', 'url')
    user = Config.get('DATABASE', 'user')
    password = Config.get('DATABASE', 'password')
    jarFile = Config.get('DATABASE', 'jarFile')
    sql = 'INSERT INTO REC_SENSEAREAFORCECASTDATA ( ID, PFDATATYPE, ACTTIME, RECTIME, PFDATA2 ) VALUES '

    def _build_insert(self, model, predict_time, value):
        """Return one INSERT statement for a single forecast row.

        NOTE(review): ACTTIME is interpolated unquoted, exactly as in the
        original -- confirm the target DB accepts that before executing.
        """
        return (self.sql + "( '" + str(uuid.uuid1()) + "', '" + str(model)
                + "', " + predict_time + ", SYSDATE,'" + str(value) + "')")

    def set2DataBaseNextHour(self, result, model):
        """Build the INSERT for the single next-hour forecast; returns the SQL string."""
        predict_time = (datetime.datetime.now() + datetime.timedelta(hours=1)).strftime("%Y/%m/%d %H:%M:%S")
        return self._build_insert(model, predict_time, result)

    def set2DataBaseCurrentDay(self, result, model):
        """Build INSERTs for hours +2..+23 of today; returns a list of SQL strings."""
        statements = []
        for i in range(22):
            predict_time = (datetime.datetime.now() + datetime.timedelta(hours=(2 + i))).strftime("%Y/%m/%d %H:%M:%S")
            statements.append(self._build_insert(model, predict_time, result[i]))
        return statements

    def set2DataBaseCurrentWeek(self, result, model):
        """Build INSERTs covering the rest of this week plus three more weeks.

        Returns a list of SQL strings. ``result`` is a 168-hour weekly curve;
        hours +25..+168 use result[24:], and the three following weeks replay
        the whole profile.
        """
        statements = []
        for i in range(144):
            predict_time = (datetime.datetime.now() + datetime.timedelta(hours=(25 + i))).strftime("%Y/%m/%d %H:%M:%S")
            statements.append(self._build_insert(model, predict_time, result[24 + i]))
        for week in range(3):
            for hour in range(168):
                offset = (week + 1) * 7 * 24 + hour
                predict_time = (datetime.datetime.now() + datetime.timedelta(hours=offset)).strftime("%Y/%m/%d %H:%M:%S")
                statements.append(self._build_insert(model, predict_time, result[hour]))
        return statements
| [
"maxiao1@hisense.com"
] | maxiao1@hisense.com |
4b68733a5da1facd4daa9d36b3eafb06d1b7bea2 | 79a484e91a8df432a0ded93806a1e8237df7c253 | /umibukela/migrations/0020_auto_20170124_1443.py | 03d19703ba05730c59fd74bd2588eed73576e207 | [
"MIT"
] | permissive | OpenUpSA/umibukela | 7ba14397ad543154d3a32ebfd84e89aa07f7011e | 34c1a29a429b88c2f574e9120cfe93ba524633da | refs/heads/master | 2023-07-26T19:45:12.531887 | 2023-07-10T15:53:07 | 2023-07-10T15:53:07 | 47,106,932 | 0 | 0 | MIT | 2023-02-02T01:36:59 | 2015-11-30T09:03:27 | Python | UTF-8 | Python | false | false | 618 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter CycleResultSet.monitors: set help_text and blank=True on the M2M field."""
    dependencies = [
        ('umibukela', '0019_auto_20170124_1252'),
    ]
    operations = [
        migrations.AlterField(
            model_name='cycleresultset',
            name='monitors',
            field=models.ManyToManyField(help_text=b"Only monitors for the current partner are shown. If you update the Partner you'll have to save and edit this Cycle Result Set again to see the available monitors.", to='umibukela.Monitor', blank=True),
        ),
    ]
| [
"jbothma@gmail.com"
] | jbothma@gmail.com |
0646ba4d33ba2598127f9032251950afa8f84983 | a4469f87d13c4edef3eba384f6c994d097ecdb7f | /testing.py | a9c8b822e1df5655f66000db1e2738f0e82b3d4c | [] | no_license | joseluisvaz/conv-net | e03b184eab83335b9ce1d09c734e34af61aa55b6 | 53b3a3d344010d8ee7a6f8bd92ca3c2d8c87958d | refs/heads/master | 2021-08-24T03:48:59.923532 | 2017-12-01T17:52:07 | 2017-12-01T17:52:07 | 112,429,625 | 0 | 0 | null | 2017-12-01T17:52:08 | 2017-11-29T05:10:16 | Python | UTF-8 | Python | false | false | 2,932 | py | import numpy as np
from utils import load_dataset
from utils import convert_to_one_hot
from utils import accuracy
from utils import random_mini_batches
from layers.convolutional_layer import Conv
from layers.fullyconnected import FullyConnected
from layers.flatten import Flatten
from layers.max_pool import MaxPool
from activations import relu, lkrelu, linear, sigmoid, cross_entropy
from neural_network import Network
# --- Data preparation --------------------------------------------------------
# Pixel values are scaled to [0, 1]; labels are one-hot encoded over 6 classes.
(X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes) = load_dataset()
X_train = X_train_orig / 255.
X_test = X_test_orig / 255.
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T

print("number of training examples = " + str(X_train.shape[0]))
print("number of test examples = " + str(X_test.shape[0]))

# --- Model definition ---------------------------------------------------------
# Two conv/max-pool stages, then flatten and two fully connected layers.
# Weight initialisation scales follow a fan-in style normalisation.
layers = [
    Conv((5, 5, 3, 8), strides=1, pad=2, activation=relu, filter_init=lambda shp: np.random.normal(size=shp) * 1.0 / (5 * 5 * 3)),
    MaxPool(f=8, strides=8, channels=8),
    Conv((3, 3, 8, 16), strides=1, pad=1, activation=relu, filter_init=lambda shp: np.random.normal(size=shp) * 1.0 / (3 * 3 * 8)),
    MaxPool(f=4, strides=4, channels=16),
    Flatten((2, 2, 16)),
    FullyConnected((2 * 2 * 16, 20), activation=sigmoid, weight_init=lambda shp: np.random.normal(size=shp) * np.sqrt(1.0 / (2 * 2 * 16 + 20))),
    FullyConnected((20, 6), activation=linear, weight_init=lambda shp: np.random.normal(size=shp) * np.sqrt(1.0 / (20 + 6))),
]

# --- Training ------------------------------------------------------------------
# (Removed unused leftovers from earlier experiments: `k = 2000` and the
# per-epoch `minibatch_cost = 0.` accumulator that was never read.)
minibatch_size = 20
lr = 0.009
net = Network(layers, lr=lr, loss=cross_entropy)
num_epochs = 10
costs = []  # per-epoch average minibatch loss, kept for later inspection
m = X_train.shape[0]

for epoch in range(num_epochs):
    num_minibatches = int(m / minibatch_size)  # minibatches per epoch
    minibatches = random_mini_batches(X_train, Y_train, minibatch_size)
    epoch_cost = 0
    for minibatch in minibatches:
        (minibatch_X, minibatch_Y) = minibatch
        net.train_step((minibatch_X, minibatch_Y))
        loss = np.sum(cross_entropy.compute((net.forward(minibatch_X), minibatch_Y)))
        print("cost minibatch %f" % loss)
        epoch_cost += loss / num_minibatches
    if epoch % 5 == 0:
        print("Cost after epoch %i: %f" % (epoch, epoch_cost))
    # Record every epoch (the original guarded this with the always-true
    # `if epoch % 1 == 0`).
    costs.append(epoch_cost)

# Legacy Python 2 training loop, kept commented out for reference:
#for epoch in xrange(100):
#    shuffled_index = np.random.permutation(X_train.shape[0])
#    batch_train_X = X_train[shuffled_index[:batch_size]]
#    batch_train_Y = Y_train[shuffled_index[:batch_size]]
#    net.train_step((batch_train_X, batch_train_Y))
#    loss = np.sum(cross_entropy.compute((net.forward(batch_train_X), batch_train_Y)))
#    print 'Epoch: %d loss : %f' % (epoch, loss)
#    if epoch % 1000 == 1:
#        print 'Accuracy on first 50 test set\'s batch : %f' % accuracy(net, X_test[:50], Y_test[:50])
#    if epoch % 5000 == 5000 - 1:
# print 'Accuracy over all test set %f' % accuracy(net, X_test, Y_test) | [
"noreply@github.com"
] | joseluisvaz.noreply@github.com |
4b0f0c943e827fd54a1f6f76f1ec7914cb9b0560 | 0a3f857d5d5035b6e9fa4841b9ecae700a9c808b | /example/test8/test72.py | 7b5bf9b602c85a74c4c49d930a0c8d97a415a380 | [] | no_license | liceyo/liceyo-study-python | d98031a7d49f69c817506db792a0fce4c65910a9 | cbabe4cdf84b28783865e37b0e3d0e41593d509b | refs/heads/master | 2020-03-31T20:49:24.323303 | 2018-10-23T06:32:40 | 2018-10-23T06:32:40 | 152,556,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | # 创建一个链表
# Read five numbers from the user and collect them into a list.
if __name__ == '__main__':
    # Renamed ambiguous variable `l` (easily confused with 1/I, PEP 8 E741);
    # the unused loop index is conventionally `_`.
    numbers = [input("please input a number:\n") for _ in range(5)]
    print(numbers)
"lewislichengyong@gmail.com"
] | lewislichengyong@gmail.com |
e49c6db96c82229c075a0553fd4af97cc9f3a4a0 | 4290a698a7192fd8b4f25544d656509706439025 | /Kata_HW/HW4_3_Banjo.py | be728c76fbb11ddcbb72ffa3dcf37a9185689dc6 | [] | no_license | Row35/Homework.SSAcademy | 6401b869da99f5b963b4ec320eb229e03a1e9927 | a577f5ee17ab3cb11a80e6beabfd06bf6f8fc289 | refs/heads/master | 2020-12-26T10:18:51.381457 | 2020-03-03T10:20:58 | 2020-03-03T10:20:58 | 237,479,109 | 0 | 0 | null | 2020-02-06T21:19:17 | 2020-01-31T17:17:19 | Python | UTF-8 | Python | false | false | 163 | py | def areYouPlayingBanjo(name):
if name[0] == 'R' or name[0] == 'r':
return name + " plays banjo"
else:
return name + " does not play banjo" | [
"ivan.shtoyko@gmail.com"
] | ivan.shtoyko@gmail.com |
d44ba106ea8aff1d8cf7dd57c7ddf30bbbeb3023 | aebacedc43afabf8ce54bb25f4cbe040441dcba4 | /appscripts/appscripts-acer-120311/prefcns13.py | 18829a38e63f5364d6b331c5b7b1cc4b9e340e4e | [] | no_license | swanandgore/rappertk | 84e968447597494645ac0c9868358fc6a194197a | d1a5d5e0d096dfc23237e29bfd983183ca1e2fbd | refs/heads/master | 2020-05-17T07:59:43.613762 | 2014-08-20T12:13:56 | 2014-08-20T12:13:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,302 | py |
import os, shutil, re
import geometry
from xray import cif2mtz, uniqueify, sfall, mtz2hkl, cns_generate, cns_anneal, sgCCP4toCNS, fft, omitmap, mapman
from procrun import proc_run_exitOnError as execCmd
from xcheck import XrayScorer, XrayRanker
from data import sgtable
from evalCAtrace import comparePhiPsiOmegaChi
from pdbr import protein, isAAres
import prot2res
from pref import removeZeroLines
from pref import fixCNSop
from data import sgtable , long2shortHM
from scplacement import SCplacement
from loopbuild import Multiloop
import prepareChain
from stump import getCRYST , getRESO
# Per-cycle CCP4 refinement settings, keyed by refinement cycle number.
# Each value is a list of refinement runs to perform in that cycle.
# "assignBfac" resets B-factors to the given pair of values before refining
# -- TODO(review): confirm the pair's exact meaning against the consumer.
ccp4args = {
    0: [{"reftype":"restrained", "wa":0.20, "breftype":"ISOTROPIC", "ncyc":20}], #on native
    1: [{"reftype":"unrestrained", "wa":0.75, "breftype":"OVER", "ncyc":20, "assignBfac":[20,30]}, #on catrace
        {"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":40}], #on catrace
    2: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[60,90]}],
    3: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[60,90]}],
    4: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[60,90]}],
    5: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[60,90]}],
    6: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[60,90]}],
    7: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[15,25]}],
    8: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[15,25]}],
    9: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[15,25]}],
    10: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[15,25]}],
    11: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[15,25]}],
    12: [{"reftype":"restrained", "wa":0.50, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[15,25]}],
    13: [{"reftype":"restrained", "wa":0.20, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[15,25]}],
    14: [{"reftype":"restrained", "wa":0.20, "breftype":"ISOTROPIC", "ncyc":40, "assignBfac":[ 5, 6]}],
}

# CNS simulated-annealing settings: every cycle uses num_cycles=2 and
# temperature=5000 (the commented lines show earlier per-cycle overrides).
cnsArgs = {}
for cycle in range(20) : cnsArgs[cycle] = {} ; cnsArgs[cycle]["num_cycles"] = 2 ; cnsArgs[cycle]["temperature"] = 5000
#cnsArgs[0]["wa"] = -1 ; cnsArgs[0]["num_cycles"] = 1 ; cnsArgs[0]["temperature"] = 50
#cnsArgs[1]["wa"] = -1 ; cnsArgs[1]["num_cycles"] = 1 ; cnsArgs[1]["temperature"] = 50
def cnsRefinement(mtzin,pdbin, mtzout,pdbout, a,b,c,alpha,beta,gamma,sg,reso, cnsArgs,cycle, extraTOPfile=None, extraPARfile=None) :
    """Run one cycle of CNS simulated-annealing refinement.

    Converts the input MTZ to CNS .hkl format, generates the CNS topology
    files (generate.mtf / generate.pdb), anneals the model against the data,
    renames the annealed model to `pdbout`, re-phases it with sfall and
    converts the resulting 2Fo-Fc and Fc maps with mapman.

    NOTE(review): uses dict.has_key(), so this function is Python 2 only.
    """
    mtz2hkl(mtzin, "cns.hkl")
    cns_generate(pdbin, "generate.mtf", "generate.pdb", extraTOPfile, extraPARfile, "generate.log")
    # NOTE(review): removeZeroLines' purpose is not visible here; presumably
    # it strips blank/zero records from CNS output -- confirm in pref module.
    removeZeroLines("generate.pdb") ## ???
    # wa = -1 presumably lets CNS choose the X-ray weight (TODO confirm);
    # harmCA enables harmonic CA restraints only when set for this cycle.
    wa = -1 ; harmCA = None
    if cnsArgs[cycle].has_key("harmCA") and cnsArgs[cycle]["harmCA"] != None : harmCA = 1
    cns_anneal(a, b, c, alpha, beta, gamma, sgCCP4toCNS[sg], reso,
        "cns.hkl", "generate.mtf", "generate.pdb", extraPARfile, "anneal%d.log"%cycle, wa, cnsArgs[cycle]["num_cycles"], cnsArgs[cycle]["temperature"], harmCA)
    removeZeroLines("anneal.pdb") ## ???
    fixCNSop("anneal.pdb")
    os.rename("anneal.pdb", pdbout)
    # Re-phase the refined model and export maps for the next round.
    sfall(pdbout, "rfree.mtz", mtzout, reso)
    mapman("anneal_2fofc.map", mtzout+"2fofc.map")
    mapman("anneal_fc.map", mtzout+"fc.map")
    #moleman(pdbout)
def main() :
import optparse ; parser = optparse.OptionParser()
parser.add_option("--dir-xyzout", action='store', type='string', dest='dir_xyzout', help='to create all the files during refinement. it shdnt be already present.')
parser.add_option("--xyzin", action='store', type='string', dest='pdbfile', help='starting pdb containing a model of pdb-ligand complex')
parser.add_option("--hklin", action='store', type='string', dest='sf', help='structure factors file')
parser.add_option("--a", action='store', type='float', dest='a', help='cell dimension a')
parser.add_option("--b", action='store', type='float', dest='b', help='cell dimension b')
parser.add_option("--c", action='store', type='float', dest='c', help='cell dimension c')
parser.add_option("--alpha", action='store', type='float', dest='alpha', help='cell angle alpha')
parser.add_option("--beta", action='store', type='float', dest='beta', help='cell angle beta')
parser.add_option("--gamma", action='store', type='float', dest='gamma', help='cell angle gamma')
parser.add_option("--sg", action='store', type='string', dest='sg', help='cell spacegroup, in CCP4 notation')
parser.add_option("--resolution", action='store', type='float', dest='resolution', help='resolution of the data')
parser.add_option("--use-ca-restraints", action='store', dest='caRes', help='[True/False], Apply positional restraints on the C-alpha atoms',default="True")
parser.add_option("--use-sc-restraints", action='store', dest='scRes',type= 'string', help='[True/False], Apply positional restraints on the centroid of the sidechain atoms',default="True",)
parser.add_option("--ca-restraint-radius", action='store', type='float', dest='caRad', help='radius of spherical restraint on CA position', default=1)
parser.add_option("--sc-centroid-restraint-radius", action='store', type='float', dest='scRad', help='radius of spherical restraint on sidechain centroid', default=2)
parser.add_option("--sidechain-vdw-reduction", action='store', type='float', dest='scReduction', help='factor to reduce effective vdw dist in case of sidechains', default= 0.75)
parser.add_option("--population-size", action='store', type='int', dest='popsize', help='population size for PopulationStrategy', default=100)
parser.add_option("--verbose", action='store', type='int', dest='verbose', help='0 means least verbosity etc.', default=0)
parser.add_option("--backtrack", action='store', type='string', dest='backtrack', help='use backtracking version of PopulationStrategy. eg 4X5 will set backtrack numsteps and stepsize to 4,5 respectively. not used by default.', default=None)
parser.add_option("--rotamerlib", action='store', type='string', dest='rotLib', help='[PRL/SCL1.0/SCL0.5/SCL0.2] Name of rotamer library to use when building side chains ', default='SCL1.0')
parser.add_option("--add-sidechains", action='store', type='string', dest='addsc', help='Build missing side chains ', default='False')
parser.add_option("--use-given-rotamer", action='store', type='string', dest='userot', help='Use given rotamer', default='False')
parser.add_option("--randomize", action='store', type='int', dest='randomize', help='seed for randomizing', default=None)
parser.add_option("--mconly", action='store', type='string', dest='mconly', help='[True/False] Build mainchain only', default="False")
parser.add_option("--sconly", action='store', type='string', dest='sconly', help='[True/False] Build side chains only, can only be used when MAP/MTZ file is given. See web page for further details', default="False")
parser.add_option("--opsax", action='store', type='string', dest='opsax', help='[True/False] Reassign side chains with OPSAX, will only be used when MTZ or MAP file is given', default="True")
parser.add_option("--attempts", action='store', type='int', dest='natt', help='Number of attempts made to build section', default=5)
parser.add_option("--cacaCutoff", action='store', type='float', dest='cacaCutoff', help='Minimum distance ( angstrom ) between adjacent Calpha atoms in order to detect a chain-break', default=5.)
################# Electron density parameters ####################################
parser.add_option("--FP", action='store', type='string', dest='f1label', help='Column label for FP in MTZ file', default=None)
parser.add_option("--SIGFP", action='store', type='string', dest='sigf1label', help='Column label for sigFP in MTZ file', default=None)
parser.add_option("--FC", action='store', type='string', dest='f2label', help='Column label for FC in MTZ file', default=None)
parser.add_option("--PHIC", action='store', type='string', dest='phiclabel', help='Column label for PHIC in MTZ file', default=None)
parser.add_option("--use-FreeR", action='store', type='string', dest='usefreer', help='[True/False] Use FreeR set ? ', default="False")
parser.add_option("--FreeR", action='store', type='string', dest='freeRlabel', help='Column label for FreeR in MTZ file', default=None)
parser.add_option("--n", action='store', type='int', dest='n', help='Value of n for difference map calculations nFo-(n-1)Fc', default=2)
############# Residues to be modelled ####################################
parser.add_option("--rebuild-poor-regions-only", action='store', type='string', dest='poorOnly', help='[True/False] Rebuild regions ofinput structure with poor fit to an electron density map. Residues to be rebuilt are identified using a real space correlation coefficientscore, the cut-off for which is set using --poor-fit-threshold.', default="False")
parser.add_option("--poor-fit-threshold", action='store', type='float', dest='poorThreshold', help='Correlation coefficient threshold to identify poor fitting regions', default=0.9)
parser.add_option("--loopseq", action='store', type='string', dest='loopres', help='Amino acid sequence for loop to be built', default=None)
parser.add_option("--use-loopclosure-restraints", action='store', type='string', dest='closure', help='Use geometric restraints to ensure closure of loop with anchor residues', default= "True")
parser.add_option("--start", action='store', type='int', dest='start', help='Residue number to start building from ', default=None)
parser.add_option("--stop", action='store', type='int', dest='stop', help='Residue number to stop building at', default=None)
parser.add_option("--chainid", action='store', type='string', dest='chainid', help='Chain ID of section to be built.', default=None)
parser.add_option("--modelN2C", action='store', type='string', dest='modelN2C', help='[True/False] Model fragment without loop closure restraints. Used in conjunction with --start, --stop, --chainid. Requires --use-ca-restraints True ', default="False")
######### Ouptut parameters #############################################
parser.add_option("--models-get-native-bfactors", action='store', type='string', dest='nativeBfac', help='[True/False] Assign B-factors of remodelled atoms to original values', default="False")
parser.add_option("--default-mainchain-b-factor", action='store', type='float', dest='mcBfac', help='The value of B-factor assigned to the newly built main chain atoms', default=20.)
parser.add_option("--default-sidechain-b-factor", action='store', type='float', dest='scBfac', help='The value of B-factor assigned to the newly built side chain atoms', default=30.)
### Electron density parametets #########################################
parser.add_option("--minimum-sig", action='store', type='float', dest='minXSig', help='Minimum sigma ', default=0.25)
parser.add_option("--maximum-sig", action='store', type='float', dest='maxXSig', help='Maximum sigma ', default=2.0)
########## Optional restraints ##########################################
parser.add_option("--make-ed-optional", action='store', type='string', dest='edOpt', help='[True/False] If False, then the mainchain will be unconditionally forced to lie in positive density. If True then positive density restraint on the mainchain will be made optional.This is useful when tracing through a structure with regions in very poor (non-existent) density', default= "False")
parser.add_option("--make-all-restraints-optional", action='store', type='string', dest='allOpt', help='[True / False ] If True, then all restraints will be made optional', default="False")
(options, args) = parser.parse_args()
if not os.path.isdir(options.dir_xyzout) : os.mkdir(options.dir_xyzout)
shutil.copyfile(options.pdbfile, "%s/0.model0.pdb" % options.dir_xyzout)
shutil.copyfile(options.sf, "%s/rfree.mtz" % options.dir_xyzout)
os.chdir(options.dir_xyzout)
if (options.a == None or options.b == None or options.c == None or options.alpha == None or options.beta == None or options.gamma == None) :
options.a,options.b,options.c,options.alpha , options.beta , options.gamma,d1 = getCRYST(options.pdbfile)
if (options.a == None or options.b == None or options.c == None or options.alpha== None or options.beta==None or options.gamma == None ):
print "CRYST card cannot be read from coordinate file. Please input cell paramater a, b , c , alpha, beta , gamma = ",options.a , options.b , options.c , options.alpha , options.beta , options.gamma
import sys ; sys.exit()
if options.sg == None :
d1,d2,d3,d4 , d5 , d6, options.sg = getCRYST(options.pdbfile)
if options.sg == None :
print "Please input space group " , options.sg ; import sys ; sys.exit()
ss = ""
for sg1 in options.sg:
if sg1 in ["\n","\t","\s"]:
continue
else :
ss = ss+sg1
options.sg = ss
if options.sg in long2shortHM.keys():
shortsg = long2shortHM[options.sg]
options.sg = shortsg
if options.sg not in sgtable.keys():
print "Check --sg , Not recognised [%s][%d]"%( options.sg, len(options.sg))
import sys ; sys.exit()
if options.resolution == None :
options.resolution = getRESO(options.pdbfile)
if (options.resolution == None):
print "Please input resolution " , options.resolution
import sys ; sys.exit()
numRefCycles = 20 ; startCycle = 0
for cycle in range(startCycle, numRefCycles) :
if cycle > 5 : userot = 1
else : userot = 0
xscorecutoff = options.poorThreshold
if options.sconly != 'True':
if cycle == 15 : options.scRad *= 2
#if cycle < 10 : xscorecutoff = 0.8
#else : xscorecutoff = 0.9
#if cycle == 0 :
# scvdwr = .75 ; options.popsize = 500
modelIn = "0.model%d.pdb" % cycle
cnsout = "cns%d.pdb" % cycle
rtkmodel = "model%d.pdb" % (cycle+1) # rappertk model to be generated in this cycle
if options.f2label != None and options.phiclabel != None and cycle == 0 :
shutil.copyfile("rfree.mtz", "phased.mtz")
else :
sfall(modelIn, "rfree.mtz", "phased.mtz")
phasedmtz = "phased%d.mtz" % cycle # phase the str factors with current model
#cnsphasedmtz = "phased%d.mtz" % cycle # phase the str factors with current model
if not os.path.isfile(cnsout) :
cnsRefinement("phased.mtz", modelIn, phasedmtz, cnsout,
options.a, options.b, options.c, options.alpha, options.beta, options.gamma, options.sg, options.resolution,
cnsArgs, cycle)
from pref13 import main as prefRapperMain
#sfall(cnsout, phasedmtz , cnsphasedmtz)
prefRapperMain(cnsout,rtkmodel,options.dir_xyzout,None,phasedmtz,options.caRes,options.scRes,options.caRad,options.scRad,options.scReduction,options.popsize,options.verbose,options.backtrack,options.rotLib,1,options.mconly,options.sconly,options.opsax,options.natt,options.cacaCutoff,options.a,options.b,options.c,options.alpha,options.beta,options.gamma,options.sg,options.resolution,options.f1label,options.sigf1label,"FC","PHIC",options.usefreer,options.freeRlabel,options.n,options.poorOnly,xscorecutoff,options.loopres,options.start,options.stop,options.chainid,options.modelN2C,options.nativeBfac,options.mcBfac,options.scBfac,options.minXSig,options.maxXSig,options.edOpt,options.allOpt,options.closure,options.addsc,options.userot,"cns")
# prefRapperMain(cnsout,rtkmodel,options.dir_xyzout,None,phasedmtz,options.caRes,options.scRes,options.caRad,options.scRad,scvdwr,popsize,options.verbose,options.backtrack,rotlib, 1 , "False", "False" , "True" , 5 , 5.0 ,options.a,options.b,options.c,options.alpha,options.beta,options.gamma,options.sg,options.resolution,"FP","SIGFP",None,None,"True","FreeR_flag",2,"True",xscoreCutoff,None,None,None,None,"False","False",20.0,30.0,0.25,2.0,"False","False")
if __name__ == "__main__":
    main()

# NOTE(review): the code that used to follow this guard was removed. The
# module previously executed `import sys ; sys.exit(0)` unconditionally at
# the top level, which terminated the interpreter even when this file was
# merely imported, and made the SCplacement / replaceWaters scratch code
# after it unreachable. Recover it from version control if it is needed.
| [
"swanand@ebi-001.ebi.ac.uk"
] | swanand@ebi-001.ebi.ac.uk |
30a8a841f6e80bbe20e32a81e4e4f4a2a72c08e2 | 1edb43554ede707ce18fe33b009402b91a022c99 | /test.py | 2c5b126982624bc36018331deaebb9d0799059a0 | [] | no_license | Techainer/mnist-mlchain-examples | 09d3a1a0fe72c7cc7a3cec6e6e9e80371735b639 | 053d2bc3a6d0db8182cb1e64b19ca0670b38805a | refs/heads/master | 2022-12-04T15:17:49.866910 | 2020-08-12T13:32:50 | 2020-08-12T13:32:50 | 286,946,915 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | import glob
import time
import cv2
from mlchain.client import Client
from mlchain.workflows import Parallel, Task
from PIL import Image
from tqdm import tqdm
model = Client(api_address='127.0.0.1:9001').model()  # remote mlchain model client

# Each sample is repeated 10x to make the benchmark long enough to measure.
all_samples = glob.glob('data/*.jpg')*10

def predict_single_image(sample):
    """Read one image from disk and run it through the remote model."""
    image = cv2.imread(sample)
    return model.predict(image=image)

# Sequential: one blocking request per sample.
# Sequential
start_time = time.time()
for sample in tqdm(all_samples):
    res = predict_single_image(sample)
print('Sequentail prediction tooks:', time.time() - start_time)

# Parallel: the same predictions fanned out over 4 worker threads via
# mlchain workflows.
# Parallel
start_time = time.time()
tasks = [Task(predict_single_image, sample) for sample in all_samples]
res = Parallel(tasks, max_threads=4).run(progress_bar=True)
print('Parallel prediction tooks:', time.time() - start_time)
| [
"lamhoangtung.vz@gmail.com"
] | lamhoangtung.vz@gmail.com |
5fd0fc6232e320bcc15760a732c4cc9b643b3674 | 42528f5dcd3e2d4adbbb0e370a8298ff62e6c679 | /memento/Editor.py | fef3915c23f67483654361ec4b78e62aa26b9ecf | [] | no_license | mohamedelashhab/design-pattern | 5b36d54ed7a141220c86ddff92dea6622f8c3b7e | da5b7d06b4f93a427a7499d98a9e970b61ce6b97 | refs/heads/master | 2022-11-27T16:10:24.825512 | 2020-08-08T15:13:23 | 2020-08-08T15:13:23 | 276,177,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | from memento.EditorState import EditorState
class Editor:
    """Originator for the memento pattern: owns the editable text content."""

    def __init__(self):
        self.__content = ''

    @property
    def content(self) -> str:
        """The editor's current text."""
        return self.__content

    @content.setter
    def content(self, value: str) -> None:
        self.__content = value

    def createState(self) -> "EditorState":
        """Snapshot the current content into a new memento."""
        return EditorState(self.__content)

    def restore(self, state: "EditorState") -> None:
        """Replace the current content with the memento's saved content.

        Fixed: the original signature declared ``-> EditorState`` but the
        method only mutates self and returns None.
        """
        self.content = state.content
| [
"elashhab_fcih@yahoo.com"
] | elashhab_fcih@yahoo.com |
2087f66359a6383aadf0b06ec31295815bc2ae13 | 2c8ed67a9e54b98a9b432f5a66287e4523497d65 | /python/hsreplay/elements.py | 26ca5f8871e7e77da22c26d41bcde04d629b64d6 | [
"MIT",
"Python-2.0",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | EvilNuff/HSReplay | 79915a87df182d3af3c4a7ed8fb3f9e84135e106 | 26fd02cbfbff7f5a6fec0573d227d3e1aff417bd | refs/heads/master | 2021-01-12T08:00:48.065962 | 2016-10-14T22:03:48 | 2016-10-14T22:03:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,857 | py | from dateutil.parser import parse as parse_timestamp
from hearthstone.hslog import packets
from .utils import ElementTree
def node_for_tagname(tag):
    """Return the module-level ``*Node`` class whose ``tagname`` equals *tag*.

    Raises ValueError when no such class is defined in this module.
    """
    candidates = (
        obj for name, obj in globals().items()
        if name.endswith("Node") and obj.tagname == tag
    )
    found = next(candidates, None)
    if found is None:
        raise ValueError("No matching node for tag %r" % (tag))
    return found
class Node(object):
    """Base class for HSReplay XML elements.

    Subclasses declare ``tagname`` (the XML tag), ``attributes`` (ordered XML
    attribute names) and ``timestamp`` (whether a "ts" attribute is emitted).
    """

    attributes = ()
    tagname = None

    def __init__(self, *args):
        # Positional args map onto ("ts", *attributes) in declaration order;
        # trailing declared attributes simply stay unset when fewer args are
        # given.
        self._attributes = {}  # ad-hoc attributes not declared in `attributes`
        self.nodes = []  # child nodes, in document order
        for k, arg in zip(("ts", ) + self.attributes, args):
            setattr(self, k, arg)

    def __repr__(self):
        return "<%s>" % (self.__class__.__name__)

    @classmethod
    def from_xml(cls, xml):
        """Build a node tree from an ElementTree element with this tagname."""
        if xml.tag != cls.tagname:
            raise ValueError("%s.from_xml() called with %r, not %r" % (
                cls.__name__, xml.tag, cls.tagname
            ))
        ts = xml.attrib.get("ts")
        if ts:
            ts = parse_timestamp(ts)
        ret = cls(ts)
        for element in xml:
            # Recurse into children, then copy their declared XML attributes
            # onto the child (children are constructed with only a timestamp).
            ecls = node_for_tagname(element.tag)
            node = ecls.from_xml(element)
            for attrname in ecls.attributes:
                setattr(node, attrname, element.attrib.get(attrname))
            ret.nodes.append(node)
        return ret

    def append(self, node):
        """Append a child node."""
        self.nodes.append(node)

    def xml(self):
        """Serialize this node (and all children) to an ElementTree element."""
        element = ElementTree.Element(self.tagname)
        for node in self.nodes:
            element.append(node.xml())
        for attr in self.attributes:
            attrib = getattr(self, attr, None)
            if attrib is not None:
                if isinstance(attrib, bool):
                    # booleans serialize as "true"/"false" (bool is checked
                    # before int because bool subclasses int)
                    attrib = str(attrib).lower()
                elif isinstance(attrib, int):
                    # Check for enums: int() collapses IntEnum members to
                    # their plain numeric value before stringifying.
                    attrib = str(int(attrib))
                element.attrib[attr] = attrib
        if self.timestamp and self.ts:
            element.attrib["ts"] = self.ts.isoformat()
        for k, v in self._attributes.items():
            element.attrib[k] = v
        return element
class GameNode(Node):
    """Root <Game> element; exports back to a hslog PacketTree."""

    tagname = "Game"
    attributes = ("id", "reconnecting")
    timestamp = True
    packet_class = packets.PacketTree

    @property
    def players(self):
        # Children are expected to be laid out as
        # [GameEntity, Player, Player, ...packets] -- TODO confirm with the
        # code that builds this tree.
        return self.nodes[1:3]

    def export(self):
        """Convert this XML tree into a packets.PacketTree."""
        tree = self.packet_class(self.ts)
        create_game = self.nodes[0].export()  # the CreateGame packet
        for player in self.players:
            create_game.players.append(player.export())
        tree.packets.append(create_game)
        # Remaining children are regular game packets.
        for node in self.nodes[3:]:
            tree.packets.append(node.export())
        return tree
class GameEntityNode(Node):
tagname = "GameEntity"
attributes = ("id", )
timestamp = False
packet_class = packets.CreateGame
def export(self):
packet = self.packet_class(self.ts, int(self.id))
for node in self.nodes:
packet.tags.append(node.export())
return packet
class PlayerNode(Node):
    """<Player> element carrying account/identity attributes.

    May also carry a ``deck`` attribute (iterable of card IDs) which is
    serialized as a nested <Deck><Card id=.../></Deck> child element.
    """

    tagname = "Player"
    attributes = (
        "id", "playerID", "accountHi", "accountLo", "name",
        "rank", "legendRank", "cardback"
    )
    timestamp = False
    packet_class = packets.CreateGame.Player

    def export(self):
        """Convert to a CreateGame.Player packet, keeping only Tag children."""
        packet = self.packet_class(
            self.ts, int(self.id), int(self.playerID),
            int(self.accountHi), int(self.accountLo)
        )
        packet.name = self.name
        for node in self.nodes:
            if node.tagname == "Tag":
                packet.tags.append(node.export())
        return packet

    def xml(self):
        ret = super(PlayerNode, self).xml()
        # `deck` is not part of `attributes`, so it is emitted here as a child
        # element rather than as an XML attribute.
        deck = getattr(self, "deck", None)
        if deck is not None:
            element = ElementTree.Element("Deck")
            ret.append(element)
            for card in deck:
                e = ElementTree.Element("Card")
                e.attrib["id"] = card
                element.append(e)
        return ret
class DeckNode(Node):
tagname = "Deck"
attributes = ()
timestamp = False
packet_class = None
class CardNode(Node):
tagname = "Card"
attributes = ("id", "premium")
timestamp = False
packet_class = None
class FullEntityNode(Node):
tagname = "FullEntity"
attributes = ("id", "cardID")
timestamp = False
packet_class = packets.FullEntity
def export(self):
packet = self.packet_class(self.ts, int(self.id), self.cardID)
for node in self.nodes:
packet.tags.append(node.export())
return packet
class ShowEntityNode(Node):
tagname = "ShowEntity"
attributes = ("entity", "cardID")
timestamp = False
packet_class = packets.ShowEntity
def export(self):
packet = self.packet_class(self.ts, int(self.entity), self.cardID)
for node in self.nodes:
packet.tags.append(node.export())
return packet
class BlockNode(Node):
    """<Block> element: a packet that itself contains nested packets."""

    tagname = "Block"
    attributes = ("entity", "type", "index", "target")
    timestamp = True
    packet_class = packets.Block

    def export(self):
        # index defaults to -1 when no index attribute was present
        index = int(self.index) if self.index is not None else -1
        packet = self.packet_class(
            self.ts, int(self.entity or 0), int(self.type), index,
            None, None, int(self.target or 0)
        )
        for node in self.nodes:
            packet.packets.append(node.export())
        # Blocks reconstructed from XML are always marked complete.
        packet.ended = True
        return packet
class MetaDataNode(Node):
tagname = "MetaData"
attributes = ("meta", "data", "info")
timestamp = False
packet_class = packets.MetaData
def export(self):
packet = self.packet_class(
self.ts, int(self.meta), int(self.data or 0), int(self.info)
)
for node in self.nodes:
packet.info.append(node.export())
return packet
class MetaDataInfoNode(Node):
tagname = "Info"
attributes = ("index", "entity")
timestamp = False
def export(self):
return int(self.entity)
class TagNode(Node):
tagname = "Tag"
attributes = ("tag", "value")
timestamp = False
def export(self):
return (int(self.tag), int(self.value))
class TagChangeNode(Node):
tagname = "TagChange"
attributes = ("entity", "tag", "value")
timestamp = False
packet_class = packets.TagChange
def export(self):
return self.packet_class(self.ts, int(self.entity), int(self.tag), int(self.value))
class HideEntityNode(Node):
tagname = "HideEntity"
attributes = ("entity", "zone")
timestamp = True
packet_class = packets.HideEntity
def export(self):
return self.packet_class(self.ts, int(self.entity), int(self.zone))
class ChangeEntityNode(Node):
tagname = "ChangeEntity"
attributes = ("entity", "cardID")
timestamp = True
packet_class = packets.ChangeEntity
def export(self):
packet = self.packet_class(self.ts, int(self.entity), self.cardID)
for node in self.nodes:
packet.tags.append(node.export())
return packet
##
# Choices
class ChoicesNode(Node):
tagname = "Choices"
attributes = ("entity", "id", "taskList", "type", "min", "max", "source")
timestamp = True
packet_class = packets.Choices
def export(self):
taskList = int(self.taskList) if self.taskList else None
packet = self.packet_class(
self.ts, int(self.entity or 0), int(self.id), taskList,
int(self.type), int(self.min), int(self.max)
)
packet.source = self.source
for node in self.nodes:
packet.choices.append(node.export())
return packet
class ChoiceNode(Node):
tagname = "Choice"
attributes = ("index", "entity")
timestamp = False
def export(self):
return int(self.entity)
class ChosenEntitiesNode(Node):
tagname = "ChosenEntities"
attributes = ("entity", "id")
timestamp = True
packet_class = packets.ChosenEntities
def export(self):
packet = self.packet_class(self.ts, int(self.entity), int(self.id))
for node in self.nodes:
packet.choices.append(node.export())
return packet
class SendChoicesNode(Node):
tagname = "SendChoices"
attributes = ("id", "type")
timestamp = True
packet_class = packets.SendChoices
def export(self):
packet = self.packet_class(self.ts, int(self.id), int(self.type))
for node in self.nodes:
packet.choices.append(node.export())
return packet
##
# Options
class OptionsNode(Node):
tagname = "Options"
attributes = ("id", )
timestamp = True
packet_class = packets.Options
def export(self):
packet = self.packet_class(self.ts, int(self.id))
for i, node in enumerate(self.nodes):
packet.options.append(node.export(i))
return packet
class OptionNode(Node):
tagname = "Option"
attributes = ("index", "entity", "type")
timestamp = False
packet_class = packets.Option
def export(self, id):
optype = "option"
packet = self.packet_class(self.ts, int(self.entity or 0), id, int(self.type), optype)
for i, node in enumerate(self.nodes):
packet.options.append(node.export(i))
return packet
class SubOptionNode(Node):
tagname = "SubOption"
attributes = ("index", "entity")
timestamp = False
packet_class = packets.Option
def export(self, id):
optype = "subOption"
type = None
packet = self.packet_class(self.ts, int(self.entity), id, type, optype)
for i, node in enumerate(self.nodes):
packet.options.append(node.export(i))
return packet
class OptionTargetNode(Node):
tagname = "Target"
attributes = ("index", "entity")
timestamp = False
packet_class = packets.Option
def export(self, id):
optype = "target"
type = None
return self.packet_class(self.ts, int(self.entity), id, type, optype)
class SendOptionNode(Node):
tagname = "SendOption"
attributes = ("option", "subOption", "target", "position")
timestamp = True
packet_class = packets.SendOption
def export(self):
return self.packet_class(
self.ts, int(self.option), int(self.subOption), int(self.target), int(self.position)
)
| [
"jerome@leclan.ch"
] | jerome@leclan.ch |
ed51baa8059fc8e7048379aed5191ae1448b2e24 | 534124255a7f0a249daaa250ff2f3d0d0f5413e4 | /poethepoet/executor/base.py | efa729d18b707c2900c81ea4d49f35d1058209e2 | [
"MIT"
] | permissive | AnarchyCrew/poethepoet | 41102b0989221dc550a3e443b59623ac81fc2270 | e36ee136f3c4c0f016fa87069fac7f83af583aae | refs/heads/master | 2022-12-25T12:02:29.646396 | 2020-09-27T18:00:32 | 2020-09-27T18:00:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,018 | py | from subprocess import Popen, PIPE
import sys
from typing import Any, MutableMapping, Optional, Sequence, TYPE_CHECKING
if TYPE_CHECKING:
from pathlib import Path
from ..context import RunContext
class PoeExecutor:
    """
    A base class for poe task executors.

    Subclasses implement :meth:`execute`; the shared subprocess plumbing
    lives in :meth:`_exec_via_subproc`.
    """

    working_dir: Optional["Path"]

    # TODO: maybe receive a reference to the PoeConfig, or invert control
    # so the executor is handed a task to run.
    def __init__(
        self,
        context: "RunContext",
        env: MutableMapping[str, str],
        working_dir: Optional["Path"] = None,
        dry: bool = False,
    ):
        self.context = context
        self.env = env
        self.working_dir = working_dir
        self.dry = dry

    def execute(self, cmd: Sequence[str], input: Optional[bytes] = None,) -> int:
        raise NotImplementedError

    def _exec_via_subproc(
        self,
        cmd: Sequence[str],
        *,
        input: Optional[bytes] = None,
        env: Optional[MutableMapping[str, str]] = None,
        shell: bool = False
    ) -> int:
        """Run *cmd* via subprocess and return its exit code.

        A dry run short-circuits to 0 without spawning anything.
        """
        if self.dry:
            return 0
        popen_kwargs: MutableMapping[str, Any] = {
            "shell": shell,
            # Explicit env overrides the executor-wide one.
            "env": self.env if env is None else env,
        }
        if input is not None:
            popen_kwargs["stdin"] = PIPE
        if self.working_dir is not None:
            popen_kwargs["cwd"] = self.working_dir
        # TODO: exclude the subprocess from coverage more gracefully
        _stop_coverage()
        child = Popen(cmd, **popen_kwargs)
        child.communicate(input)
        return child.returncode
def _stop_coverage():
"""
Running coverage around subprocesses seems to be problematic, esp. on windows.
There's probably a more elegant solution that this.
"""
if "coverage" in sys.modules:
# If Coverage is running then it ends here
from coverage import Coverage
cov = Coverage.current()
if cov:
cov.stop()
cov.save()
| [
"n@natn.me"
] | n@natn.me |
ccfe4d93e43740333528c22f4c98234b6a43ece9 | b20e387ab0cde80669c85dc1c257ca7a799148d3 | /manage.py | 8401a071decceb58a266c970e171d13b0601864d | [] | no_license | luomaohao/file_upload_sys | d352aeb6ac8578b4b37594681da5b65b52ed9c55 | 73c2e850b99533512cdccd1a09a77749f61a9bd3 | refs/heads/master | 2021-02-22T10:03:32.361120 | 2020-03-08T05:18:06 | 2020-03-08T05:18:06 | 245,375,004 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module before importing
    # the management machinery, which reads it at import time.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "file_upload.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"1293680370@qq.com"
] | 1293680370@qq.com |
d21050a17e15ff92bccfbce4604ba90af3d3d95f | 56818903f60b5e7b88645f88badc92bfa5d2c65f | /automlcli/settings.py | 05d100770da7b6b2f4c87b22a2dd400e38345549 | [
"MIT"
] | permissive | altescy/automlcli | 23e82ad957ac8cbeb43d734741dd8dfb9b24b0ff | ec57ac57df5d9d9f8a7ef79bb7a96a86801f32f4 | refs/heads/main | 2023-04-29T03:57:06.181052 | 2021-05-23T12:19:34 | 2021-05-23T12:19:34 | 341,651,976 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | from pathlib import Path
# colt settings
DEFAULT_COLT_SETTING = {
"typekey": "type",
}
# automlcli directory settings
AUTOMLCLI_ROOT = Path.home() / ".automlcli"
# plugin settings
LOCAL_PLUGINS_FILENAME = ".automlcli_plugins"
GLOBAL_PLUGINS_FILENAME = AUTOMLCLI_ROOT / "plugins"
| [
"altescy@fastmail.com"
] | altescy@fastmail.com |
0f904e64473e0a25754c0b977e1599a61fcaaa7b | 660e35c822423685aea19d038daa8356722dc744 | /account_statement_ofx/tests/__init__.py | eef3074bc7837bf7d59e074cce70d4916358feba | [] | no_license | saifkazi/tryton_modules | a05cb4a90ae2c46ba39d60d2005ffc18ce5e44bb | 94bd3a4e3fd86556725cdff33b314274dcb20afd | refs/heads/main | 2023-05-05T12:20:02.059236 | 2021-05-19T10:46:37 | 2021-05-19T10:46:37 | 368,768,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | # This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
# Prefer the absolute import used when the module is installed under
# trytond; fall back to a relative import when running from a source tree.
try:
    from trytond.modules.account_statement_ofx.tests.test_account_statement_ofx import suite  # noqa: E501
except ImportError:
    from .test_account_statement_ofx import suite

__all__ = ['suite']
| [
"saif.kazi76@gmail.com"
] | saif.kazi76@gmail.com |
1f205b501f856e9272614f1464fb7bd772afb52b | 555c398a8a5af5d9d8a47926b2501109bf424f0e | /stonks.py | edfc183bf9545aef36e0d64c7279740b290ba2cc | [] | no_license | brianjohnpolasek/Stonks | 83b124872b9e843cfacab3120109fdb9dfd17585 | 32cc2365c2efef9aa309dc349ba0da8d8c98b6cc | refs/heads/master | 2021-01-06T07:03:28.912030 | 2020-03-30T21:33:47 | 2020-03-30T21:33:47 | 241,240,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,947 | py | import sys
import os
import re
from datetime import datetime
import plotly.graph_objects as pgo
from pandas_datareader import data as pdr
import pandas as pd
class tcolors:
    """ANSI escape codes used for coloured terminal output."""
    GREEN = '\033[92m'
    WARNING = '\033[93m'
    BLUE = '\033[94m'
    END = '\033[0m'


# 1-5 uppercase letters; 1-4 letter tickers may carry a ".X" or "-X"
# class suffix (e.g. BRK.B).
_TICKER_RE = r"(?:[A-Z]{5}|[A-Z]{1,4}(?:[.-][A-Z])?)"
_DATE_RE = r"[0-9]{4}-[0-9]{2}-[0-9]{2}"


def validate_input(stock_name, stock_start_date, stock_end_date):
    """Return True if the ticker and both dates are well-formed.

    Bug fix: the original used ``re.search``, which accepted any string
    *containing* a valid-looking fragment (e.g. a 6-letter ticker, or a
    date embedded in garbage).  ``re.fullmatch`` requires the whole
    string to match.
    """
    try:
        if re.fullmatch(_TICKER_RE, stock_name) is None:
            print('Failed stock name.\n')
            return False
        if re.fullmatch(_DATE_RE, stock_start_date) is None:
            print('Failed start date.\n')
            return False
        if re.fullmatch(_DATE_RE, stock_end_date) is None:
            print('Failed end date.\n')
            return False
    except TypeError:
        # A non-string argument reached the regex engine.
        print(tcolors.WARNING + 'Invalid data.\n' + tcolors.END)
        return False
    else:
        print(tcolors.GREEN + 'Data is Valid.\n' + tcolors.END)
        return True
def get_user_input():
    """Prompt for ticker and date range until validate_input accepts them.

    Returns ``[stock_name, start_date, end_date]``.
    """
    while True:
        print(tcolors.BLUE + 'Enter data manually.\n' + tcolors.END)
        stock_name = input('Enter stock name (Ex. XYZ): ')
        stock_start_date = input('Enter start date (Ex. 0000-00-00): ')
        stock_end_date = input('Enter end date (Ex. 0000-00-00): ')
        if validate_input(stock_name, stock_start_date, stock_end_date):
            return [stock_name, stock_start_date, stock_end_date]
# Initialized variables
valid_input = False
user_input = []
stock_name = ""
stock_start_date = ""
stock_end_date = ""
curr_date = datetime.now().strftime('%Y-%m-%d')

print('\nNumber of arguments: ' + str(len(sys.argv)))
print('Today\'s date: ' + curr_date + '\n')

# Validate command line arguments; expected: stonks.py TICKER START END
# where END may be the literal word "today".
if len(sys.argv) == 4:
    stock_name = str(sys.argv[1]).upper()
    stock_start_date = str(sys.argv[2])
    stock_end_date = str(sys.argv[3])
    if stock_end_date == "today":
        stock_end_date = curr_date
    valid_input = validate_input(stock_name, stock_start_date, stock_end_date)

# Acquire user data if args are not given or invalid
if not valid_input:
    user_input = get_user_input()
else:
    user_input = [stock_name, stock_start_date, stock_end_date]

# BUG FIX: the file names below previously used `stock_name`, which is still
# "" when the ticker came from interactive input (the prompt stores it only
# in user_input), producing files like "output__<date>.csv".  Keep it in
# sync with the validated values actually used for the download.
stock_name = user_input[0]

# Import stock data and save to file
print('Acquiring data...')
csv_path = 'data/output_' + stock_name + '_' + curr_date + '.csv'
pdr.DataReader(user_input[0], 'yahoo', user_input[1], user_input[2]).to_csv(csv_path)
print('Data acquired.\n')

# Example input
# pdr.DataReader('TSLA', 'yahoo', '2017-01-01', '2018-01-01').to_csv('data/output2.csv')

# Read saved csv file
print('Reading csv data...')
stock_csv = pd.read_csv(csv_path)
print('Data read success.\n')

# Graph stock data using Plotly
fig = pgo.Figure(data=[pgo.Candlestick(x=stock_csv['Date'],
                                       open=stock_csv['Open'],
                                       high=stock_csv['High'],
                                       low=stock_csv['Low'],
                                       close=stock_csv['Close'])
                       ])

# Bollinger Calculations (work in progress, intentionally disabled)
# rolling_avg = stock_csv['Close'].rolling(window=20).mean()
# std_dev = stock_csv['Close'].rolling(window=20).std()
# upper_band = rolling_avg + (2 * std_dev)
# lower_band = rolling_avg - (2 * std_dev)
'''
fig.add_trace(
pgo.Figure(x=upper_band, name='Upper Band')
)
fig.update_layout(
title="Stock Data for " + stock_name,
xaxis_title="Date",
yaxis_title="Value"
)
'''

print('Launching graph...')
fig.show()
print('Graph launch success.\n')

# Save graph as png
print('Saving graph to file \'images/graph.png\'...')
fig.write_image('images/graph_' + stock_name + '_' + curr_date + '.png')
print('Graph saved to file.\n')

print('Done.')
| [
"brianjohnpolasek@gmail.com"
] | brianjohnpolasek@gmail.com |
fd8ac21a8d9b8432a25e4625bc8ff3e90e64da60 | 64cad428fb95a4815f83a90ee44144e1b4b44766 | /env/bin/django-admin.py | 3a80150dc43fbf285f554927972b5e4eddee0a13 | [] | no_license | virginiah894/Api | 5ddcd0eca325d2967d9bbb634ff5bc89d68f6e24 | 96392c7c20d0e25dc2b751a44a3cd379531fafc4 | refs/heads/master | 2022-11-11T10:14:41.153391 | 2020-07-04T14:40:58 | 2020-07-04T14:40:58 | 277,127,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | #!/home/access/Documents/perry projects/Django-APIs/env/bin/python3
from django.core import management

if __name__ == "__main__":
    # Thin wrapper installed by the virtualenv: dispatch directly to
    # Django's command-line entry point (reads argv itself).
    management.execute_from_command_line()
| [
"virgyperry@gmail.com"
] | virgyperry@gmail.com |
3a7cafd0b8495001f094ed73028cc04915094f23 | b96f7c01df9417aaf4408e794b1edcc501921c6f | /pirates/coderedemption/CodeRedemptionUD.py | ebf302c6eaa53f6a706c9ea2ff6aa52f277ff69f | [] | no_license | Puggyblue999/PiratesOfTheCarribeanOnline | 492b5feec3dace921026ab1ec64603c208869a62 | 5c7eff12c3821d337404be0face368a5a899fff1 | refs/heads/master | 2021-01-22T15:10:54.858772 | 2015-06-25T20:30:11 | 2015-06-25T20:30:11 | 38,146,060 | 4 | 4 | null | 2015-07-01T18:58:11 | 2015-06-27T04:01:44 | Python | UTF-8 | Python | false | false | 126 | py | from direct.distributed.DistributedObjectUD import DistributedObjectUD
class CodeRedemptionUD(DistributedObjectUD):
    """Server-side (UD) distributed object for code redemption.

    Placeholder: no behaviour implemented yet beyond the base class.
    """
    pass
| [
"bryanmuschter@hotmail.com"
] | bryanmuschter@hotmail.com |
5f7ebe9c145d89cf84f6b4697ee7cd8fa43e1a4f | 6292d9b85c357a5e7752e8f58e9518d319254877 | /behavioral_QC_scripts/cued_task_switching_single_task.py | 7135e583fb9e750afd57dce5e46da48a7ca0a12a | [] | no_license | jkl071/network-attack-analysis | a4142aef44abb01f7163cc25dd7cca35eb94ee53 | 5e8cf93243cb5a7dab21a823577f0c9db36fd15d | refs/heads/master | 2020-06-01T22:19:49.525072 | 2019-06-10T20:05:48 | 2019-06-10T20:05:48 | 190,948,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,632 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 4 10:10:39 2018
@author: jamie
two by two single task for network grant
4 blocks of 48 trials, 192 total
task switch: stay vs switch
cue switch: stay vs switch
full counterbalancing
"""
import pandas as pd
# Hard-coded single-subject input; see module docstring for the design
# (4 blocks x 48 trials, fully counterbalanced 2x2 task/cue switching).
input_path = "/Users/jamie/Desktop/network_output/final/A3NNB4LWIKA3BQ/modified_for_analysis/"
task = 'cued_task_switching_single_task_network_A3NNB4LWIKA3BQ.csv'
df = pd.read_csv(input_path + task)

test_trials = df[(df.trial_id == "test_trial")] #practice_trial for practice

# Tally each cell of the 2x2 (task stay/switch x cue stay/switch) design
# to verify counterbalancing across the test trials.
task_stay__cue_stay = 0
task_stay__cue_switch = 0
task_switch__cue_stay = 0
task_switch__cue_switch = 0
for row in range(0, len(test_trials)):
    if test_trials.iloc[row].task_condition == "stay" and test_trials.iloc[row].cue_condition == "stay":
        task_stay__cue_stay += 1
    elif test_trials.iloc[row].task_condition == "stay" and test_trials.iloc[row].cue_condition == "switch":
        task_stay__cue_switch += 1
    elif test_trials.iloc[row].task_condition == "switch" and test_trials.iloc[row].cue_condition == "stay":
        task_switch__cue_stay += 1
    elif test_trials.iloc[row].task_condition == "switch" and test_trials.iloc[row].cue_condition == "switch":
        task_switch__cue_switch += 1

print("task_stay__cue_stay = " + str(task_stay__cue_stay) + " / " + str(len(test_trials)))
print("task_stay__cue_switch = " + str(task_stay__cue_switch) + " / " + str(len(test_trials)))
print("task_switch__cue_stay = " + str(task_switch__cue_stay) + " / " + str(len(test_trials)))
print("task_switch__cue_switch = " + str(task_switch__cue_switch) + " / " + str(len(test_trials)))

# Flag consecutive trials whose measured inter-trial duration deviates from
# the scripted duration by more than 50 ms.
suspect_trial_timing = []
for row in range(0, len(df) - 1):
    actual_duration = df.iloc[row + 1].time_elapsed - df.iloc[row].time_elapsed
    expected_duration = df.iloc[row + 1].block_duration + df.iloc[row].timing_post_trial
    if df.iloc[row + 1].trial_type == 'poldrack-categorize':
        # Categorize trials add extra time — presumably a fixed 500 ms
        # feedback window; TODO confirm against the task script.
        expected_duration += 500
    if abs(expected_duration - actual_duration) > 50:
        # Record an underscore-joined summary for manual inspection.
        suspect_trial_timing.append(str(df.iloc[row + 1].trial_index) + '_' +
                                    task + '_' +
                                    str(abs(expected_duration - actual_duration)) + '_' +
                                    str(actual_duration) + '_' +
                                    df.iloc[row + 1].trial_id + '_' +
                                    df.iloc[row + 1].trial_type)

if len(suspect_trial_timing) == 0:
    print('no suspect timing issues')
else:
    print('check suspect_trial_timing array')
"jamie@jamies-mbp-2.attlocal.net"
] | jamie@jamies-mbp-2.attlocal.net |
f36d9c33e85490d677887205bfdcc78f7c7c80d0 | b9eaba237cf73ba25b6dd6ced2a55f2a60a5b159 | /oauth_meetup/apps.py | b972ca550e733bab0bff97b7ae1c6d4f7c670f33 | [] | no_license | py-yyc/oauth_meetup | 06453c771290adc1802c4a828eaccb61454aeb81 | 77ef1802011097b86b56d526a29b74195f214d20 | refs/heads/master | 2020-04-17T21:00:44.159436 | 2019-02-26T14:45:28 | 2019-02-26T14:45:28 | 166,930,510 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | from django.apps import AppConfig
class OauthMeetupConfig(AppConfig):
    """Django app configuration for the oauth_meetup app."""
    name = 'oauth_meetup'
| [
"andrew@neitsch.ca"
] | andrew@neitsch.ca |
fd0769634efd56515d94fd4ea9f4eb462529f871 | f79dec3c4033ca3cbb55d8a51a748cc7b8b6fbab | /mail/thunderbird24/patches/patch-mozilla_media_webrtc_signaling_signaling.gyp | 41966d28b7816a3796cb2349cea04aecf86e6dd2 | [] | no_license | jsonn/pkgsrc | fb34c4a6a2d350e8e415f3c4955d4989fcd86881 | c1514b5f4a3726d90e30aa16b0c209adbc276d17 | refs/heads/trunk | 2021-01-24T09:10:01.038867 | 2017-07-07T15:49:43 | 2017-07-07T15:49:43 | 2,095,004 | 106 | 47 | null | 2016-09-19T09:26:01 | 2011-07-23T23:49:04 | Makefile | UTF-8 | Python | false | false | 1,285 | gyp | $NetBSD: patch-mozilla_media_webrtc_signaling_signaling.gyp,v 1.1 2014/07/27 05:36:07 ryoon Exp $
--- mozilla/media/webrtc/signaling/signaling.gyp.orig 2013-10-23 22:09:11.000000000 +0000
+++ mozilla/media/webrtc/signaling/signaling.gyp
@@ -228,6 +228,19 @@
'cflags_mozilla': [
],
}],
+ ['os_bsd==1', {
+ 'include_dirs': [
+ ],
+ 'defines': [
+ # avoiding pointless ifdef churn
+ 'SIP_OS_OSX',
+ 'OSX',
+ 'SECLIB_OPENSSL',
+ ],
+
+ 'cflags_mozilla': [
+ ],
+ }],
['OS=="mac"', {
'include_dirs': [
],
@@ -760,7 +773,7 @@
],
}],
- ['OS=="mac"', {
+ ['OS=="mac" or os_bsd==1', {
'include_dirs': [
],
@@ -803,14 +816,13 @@
'defines' : [
'SIP_OS_OSX',
- '_POSIX_SOURCE',
+ # using BSD extensions, leave _POSIX_SOURCE undefined
'CPR_MEMORY_LITTLE_ENDIAN',
'NO_SOCKET_POLLING',
'USE_TIMER_SELECT_BASED',
'FULL_BUILD',
'STUBBED_OUT',
'USE_PRINTF',
- '_DARWIN_C_SOURCE',
'NO_NSPR_10_SUPPORT',
],
| [
"ryoon"
] | ryoon |
66e96c5a2a6fa3d3a12e692d3d109ee166009903 | 8d19a7ce4deaa5f59acb8048e886ba7f05d87180 | /quick/add_silanol_stannanol.py | 78b50bd1c4e4b72ded2ed13e9de4d8bce73b0127 | [
"MIT"
] | permissive | dejac001/MCFlow | bbc66529716e92de8b9507cc74b3de37da3d7143 | 19d1ee21318b49102842d75493a2fb830ec116f0 | refs/heads/master | 2022-12-17T15:08:46.648626 | 2020-07-26T18:24:53 | 2020-07-26T18:24:53 | 202,601,205 | 1 | 1 | MIT | 2022-12-08T10:45:35 | 2019-08-15T19:39:25 | Python | UTF-8 | Python | false | false | 3,400 | py | def add2fort77(old_file, coords, box):
'''
coords: {'mol#':[],...
where mol# is an integer. The indexes of list
contain printable coordinates of all bead of this molecule in
order of fort.4 file.
'''
f = open('fort.77.new','w')
molec_types = []
box_types = []
FinishDispl = False
molecType = False
boxType = False
Start = True
for line in open(old_file):
if Start and (len(line.split()) == 1) and (int(line.split()[0]) < 10000):
nchain_old = int(line.split()[0])
FinishDispl = True
Start = False
f.write(line.replace('%i'%nchain_old, '%i'%(sum([nchain_old] +
[len(coords[i]) for i in coords.keys()]))))
elif FinishDispl:
if line.split()[0] == '1':
molecType = True
FinishDispl = False
molec_types += line.split()
else:
f.write(line)
elif molecType:
molec_types += line.split()
if len(molec_types) == nchain_old:
# write everything at once
my_line = ''
for molec in molec_types:
my_line += ' %s'%molec
for molNum in sorted(coords.keys()):
for i in range(len(coords[molNum])):
my_line += ' %s'%molNum
f.write(my_line + '\n')
boxType = True
molecType = False
elif boxType:
box_types += line.split()
if len(box_types) == nchain_old:
my_line = ''
for ibox in box_types:
my_line += ' %s'%ibox
for molNum in sorted(coords.keys()):
for i in range(len(coords[molNum])):
my_line += ' %i'%box
f.write(my_line + '\n')
boxType = False
else:
f.write(line)
for molNum in sorted(coords.keys()):
for myPos in coords[molNum]:
f.write(myPos)
f.close()
import os
if __name__ == '__main__':
    # Parse each fort77_mol* structure file into a dict of bead -> "x y z\n".
    molecules = []
    path_to_struc = '../../../structures/'
    for file in [i for i in os.listdir(path_to_struc) if 'fort77_mol' in i]:
        mols = {}
        print(file)
        for line in open(path_to_struc + file):
            if len(line.split()) == 4:
                bead, x, y, z = line.split()
                # Disambiguate repeated bead labels (a second "Cr" becomes
                # "Cr1", etc.).  NOTE(review): the substring test means a
                # bare "C" bead would also count "Cr*" keys — confirm bead
                # names cannot collide this way.
                if bead in mols.keys():
                    bead = bead + str(len([i for i in mols.keys() if bead in i]))
                mols[bead] = '%s %s %s\n' % (x, y, z)
        molecules.append(mols)

    # Build printable coordinate+charge blocks for molecule types 2
    # (silanol, Si) and 3 (stannanol, Sn).
    str_coordinates = {'2': [], '3': []}  # mols 2 and 3
    for mol in molecules:
        my_mol_str = ''
        # BUG FIX: this previously iterated `mols.keys()` — the dict left
        # over from the *last* file in the loop above — instead of the
        # current molecule `mol`, so every molecule was written with the
        # final molecule's Cr bead labels (KeyError or wrong coordinates).
        for bead in [i for i in mol.keys() if 'Cr' in i]:
            my_mol_str += mol[bead] + '-0.3750000\n'
        if 'C' in mol.keys():  # 'C' stands for Si here
            my_mol_str += mol['C'] + '1.4290000\n'
            qO, qH = -0.739, 0.435
            nmoltype = '2'
        elif 'Sn' in mol.keys():
            my_mol_str += mol['Sn'] + '1.5550000\n'
            qO, qH = -0.887, 0.457
            nmoltype = '3'
        # NOTE(review): a molecule with neither 'C' nor 'Sn' would leave
        # qO/qH/nmoltype unset and raise NameError below, as before.
        my_mol_str += mol['Os'] + '%e\n' % qO
        my_mol_str += mol['H'] + '%e\n' % qH
        str_coordinates[nmoltype].append(my_mol_str)

    add2fort77('fort.77', str_coordinates, 1)
| [
"dejac001@umn.edu"
] | dejac001@umn.edu |
fd2be1813f88a15643cb145e2e3dffcc04551149 | d9adc2009506a7a6958b087c4bd6f132e00fc3c0 | /spectrum_api_service/constants/spectrum_constants.py | ce3680a26487a018b5e4fa6c70bd272de8c4f700 | [] | no_license | bitcubico/SpectrumApiServicePy | b8ced339fd8527e28d347b09aff2271b5c213cf0 | 39caf96384982cb4666cf0ef08703f338809d362 | refs/heads/main | 2023-03-14T12:08:22.304792 | 2021-03-03T22:12:46 | 2021-03-03T22:12:46 | 342,911,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | # ALARM_ATTRIBUTES
ALARM_ID_ATTR = '0x11f9c'
ALARM_SOURCE_ATTR = '0x11fc4'
ALARM_TITLE_ATTR = '0x12b4c'
ALARM_STATUS_ATTR = '0x11f4f'
ALARM_MODIFIED_TIME_ATTR = '0x13345'
ACKNOWLEDGED_ATTR = '0x11f4d'
CAUSE_COUNT = '0x12a07'
CAUSE_CODE_ATTR = '0x11f50'
CAUSE_LIST_ATTR = '0x12a05'
CREATED_BY_ATTR = '0x11fb9'
CREATION_DATE_ATTR = '0x11f4e'
CONDITION_ATTR = '0x1000a'
EVENT_ATTR = '0x4820007'
EVENT_ID_LIST_ATTR = '0x11f52'
EVENT_TYPE_ATTR = '0x11fb8'
EVENT_SYMPTOM_COUNT_ATTR = '0x12a70'
EVENT_SYMPTOM_LIST_ATTR = '0x12a6f'
IP_TO_DOMAIN_MAP_ATTR = '0x12a82'
LAST_OCCURRENCE_DATE_ATTR = '0x1321a'
LANDSCAPE_NAME_ATTR = '0x11d42'
MODEL_CLASS_ATTR = '0x11ee8'
MODEL_HANDLE_ATTR = '0x129fa'
MODEL_HANDLE_OF_ALARMED_MODEL_ATTR = '0x11f53'
MODEL_NAME_ATTR = '0x1006e'
MODEL_TYPE_NAME_ATTR = '0x10000'
MODEL_TYPE_OF_ALARMED_MODEL_ATTR = '0x10001'
NETWORK_ADDRESS_ATTR = '0x12d7f'
OCCURRENCES_ATTR = '0x11fc5'
ORIGINATING_EVENT_ATTR = '0x1296e'
PRIMARY_ALARM_ATTR = '0x11f54'
SECURE_DOMAIN_ADDRESS_ATTR = '0x12d83'
SECURE_DOMAIN_DISPLAY_ATTR = '0x12c05'
SECURITY_STRING_ATTR = '0x10009'
SEVERITY_ATTR = '0x11f56'
SIGNIFICANT_MODEL_ID_ATTR = '0x12a56'
SYMPTOM_LIST_ATTR = '0x12a04'
SYMPTOM_COUNT_ATTR = '0x12a06'
TROUBLE_TICKET_ID_ATTR = '0x12022'
TROUBLESHOOTER_ATTR = '0x11f57'
TROUBLESHOOTER_MODEL_HANDLE_ATTR = '0x11fc6'
USER_CLEARABLE_ATTR = '0x11f9b'
WEB_CONTEXT_URL_ATTR = '0x12a63'
| [
"bitcubico@gmail.com"
] | bitcubico@gmail.com |
a2e11f23a8530e4c3bae2082a0884de996760d94 | 2d0f59426a87d0b3328f976bf7a1b42344159daf | /api/models/__init__.py | 4720200c86fd9139593ee398b2193616fbd81e33 | [] | no_license | naveenailawadi/GUSIF | 454a2e402fedb4d7e1e3991e481d40b09aacc368 | 00b12cce56150cc4fbcf3dd3792437e1f2d121e8 | refs/heads/master | 2023-07-15T22:12:05.297583 | 2021-09-01T19:07:16 | 2021-09-01T19:07:16 | 297,208,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,035 | py | from api import db
from api import bcrypt
from api.secrets import ADMIN_PROFILE
from datetime import datetime as dt
from sqlalchemy.exc import NoInspectionAvailable
# create a user model
# create a user model
class UserModel(db.Model):
    """SQLAlchemy model for an application user account."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    # 320 = max legal e-mail length; must be unique per account
    email = db.Column(db.String(320), nullable=False, unique=True)
    # stores the bcrypt hash, not the plaintext password
    password = db.Column(db.String(100), nullable=False)
    # set by the database at insert time
    creation_date = db.Column(
        db.TIMESTAMP, server_default=db.func.current_timestamp())
# create a function to validate users
def validate_user(email, password):
    """Check credentials against the users table.

    Returns a ``(ok, payload, status)`` triple: on success the payload is
    the user model (201); on failure it is a message dict (404 unknown
    e-mail, 401 wrong password).
    """
    account = UserModel.query.filter_by(email=email).first()
    if account is None:
        return False, {'message': f"no account associated with {email}"}, 404
    if not bcrypt.check_password_hash(account.password, password):
        return False, {'message': f"incorrect password for {email}"}, 401
    return True, account, 201
def validate_admin(email, password):
    """Return True only if both credentials match the configured admin profile.

    SECURITY FIX: the previous implementation printed the admin e-mail and
    plaintext password to stdout on every call, leaking the credentials
    into logs; the debug print has been removed.
    """
    return (email == ADMIN_PROFILE['email']
            and password == ADMIN_PROFILE['password'])
# check an attribute
def get_and_check_attribute(obj, c):
    """Return obj's value for the column attribute ``c.key``, normalised
    for serialisation; None if SQLAlchemy inspection is unavailable.

    datetime values are converted to an epoch-seconds string.
    """
    try:
        value = getattr(obj, c.key)
    except NoInspectionAvailable:
        return None
    # isinstance (was `type(value) is dt`) so datetime subclasses are
    # normalised too.
    # NOTE(review): '%s' is a glibc-specific strftime extension and is not
    # portable (e.g. Windows); consider str(int(value.timestamp())).
    if isinstance(value, dt):
        value = value.strftime('%s')
    return value
def object_as_dict(obj):
    """Serialise a SQLAlchemy model's column attributes into a plain dict.

    A falsy *obj* (e.g. ``None``) yields an empty dict.  Values pass
    through ``get_and_check_attribute`` for datetime normalisation.
    """
    if not obj:
        return {}
    columns = db.inspect(obj).mapper.column_attrs
    return {col.key: get_and_check_attribute(obj, col) for col in columns}
# make a function that copies models
def copy_model(model):
    """Clone a persisted SQLAlchemy row by re-inserting the same object.

    Detaches *model* from its session, clears its primary key, and re-adds
    it so the database assigns a fresh id.  Note: the *same* Python
    instance is returned — after this call it represents the copy, not
    the original row.
    """
    # Detach from the session and strip the persisted identity so
    # SQLAlchemy treats the object as brand new.
    db.session.expunge(model)
    db.make_transient(model)
    model.id = None
    # add the model back to the session and refresh the id
    db.session.add(model)
    db.session.flush()
    db.session.refresh(model)
    print(f"New id: {model.id}")  # NOTE(review): debug output; consider logging
    return model
| [
"naveen.ailawadi91@gmail.com"
] | naveen.ailawadi91@gmail.com |
062d01992b4ff6403439725111428e675235023b | ca12492b8fe66e34d7152a5118a573175b0a176f | /backend/wallet/migrations/0001_initial.py | 06c04d1d09b8fdac41184f9f6cca8bc684953e59 | [] | no_license | crowdbotics-apps/asile-mobile-22968 | 3d02c0de123ba1b13d79a098ea7eb543658d5f8f | c5005ad17c262f87bdd8eefb89145ee75fdca168 | refs/heads/master | 2023-01-24T17:16:53.239439 | 2020-11-25T08:42:33 | 2020-11-25T08:42:33 | 315,842,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,944 | py | # Generated by Django 2.2.17 on 2020-11-25 05:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('task_profile', '0001_initial'),
('task', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CustomerWallet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('balance', models.FloatField()),
('expiration_date', models.DateTimeField()),
('last_transaction', models.DateTimeField()),
('customer', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customerwallet_customer', to='task_profile.CustomerProfile')),
],
),
migrations.CreateModel(
name='PaymentMethod',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('account_token', models.CharField(max_length=255)),
('payment_account', models.CharField(max_length=10)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('wallet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='paymentmethod_wallet', to='wallet.CustomerWallet')),
],
),
migrations.CreateModel(
name='TaskerWallet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('balance', models.FloatField(max_length=254)),
('expiration_date', models.DateTimeField()),
('last_transaction', models.DateTimeField()),
('tasker', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='taskerwallet_tasker', to='task_profile.TaskerProfile')),
],
),
migrations.CreateModel(
name='TaskerPaymentAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('account_token', models.CharField(max_length=255)),
('payment_account', models.CharField(max_length=10)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('wallet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='taskerpaymentaccount_wallet', to='wallet.TaskerWallet')),
],
),
migrations.CreateModel(
name='PaymentTransaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('price', models.FloatField()),
('tip', models.FloatField()),
('tracking_id', models.CharField(max_length=50)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='paymenttransaction_customer', to='task_profile.CustomerProfile')),
('payment_method', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='paymenttransaction_payment_method', to='wallet.PaymentMethod')),
('tasker', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='paymenttransaction_tasker', to='task_profile.TaskerProfile')),
('transaction', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='paymenttransaction_transaction', to='task.TaskTransaction')),
],
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
1119c8ecfa54f48992efb04df39a06b72654cc4b | 2a15ca7e8a62631d760585e524d3b1cf8c089403 | /pieces_data.py | 84d53a3e16f61930db22b5ed6d6f5cc77d554b11 | [
"MIT"
] | permissive | akhyn/masstris | 9e1524480f15e1f58b4028a965e711909cab30bd | 9a48390f4793d00669a0d13b212173c37aae30b4 | refs/heads/master | 2020-03-17T02:06:09.194655 | 2018-07-16T18:38:41 | 2018-07-16T18:38:41 | 133,178,224 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,896 | py | # Must be square
# Each line represents a piece orientation
pieces_data = [
# type: 'I'
{'color': 1,
'positions': [[[0, 0, 0, 0], [1, 1, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [0, 0, 0, 0]],
[[0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0]]]},
# type: 'J'
{'color': 2,
'positions': [[[2, 0, 0, 0], [2, 2, 2, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 2, 2, 0], [0, 2, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [2, 2, 2, 0], [0, 0, 2, 0], [0, 0, 0, 0]],
[[0, 2, 0, 0], [0, 2, 0, 0], [2, 2, 0, 0], [0, 0, 0, 0]]]},
# type: 'L'
{'color': 3,
'positions': [[[0, 0, 3, 0], [3, 3, 3, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 3, 0, 0], [0, 3, 0, 0], [0, 3, 3, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [3, 3, 3, 0], [3, 0, 0, 0], [0, 0, 0, 0]],
[[3, 3, 0, 0], [0, 3, 0, 0], [0, 3, 0, 0], [0, 0, 0, 0]]]},
# type: 'O'
{'color': 4,
'positions': [[[0, 4, 4, 0], [0, 4, 4, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 4, 4, 0], [0, 4, 4, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 4, 4, 0], [0, 4, 4, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 4, 4, 0], [0, 4, 4, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]},
# type: 'S'
{'color': 5,
'positions': [[[0, 5, 5, 0], [5, 5, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 5, 0, 0], [0, 5, 5, 0], [0, 0, 5, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [0, 5, 5, 0], [5, 5, 0, 0], [0, 0, 0, 0]],
[[5, 0, 0, 0], [5, 5, 0, 0], [0, 5, 0, 0], [0, 0, 0, 0]]]},
# type: 'T'
{'color': 6,
'positions': [[[0, 6, 0, 0], [6, 6, 6, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 6, 0, 0], [0, 6, 6, 0], [0, 6, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [6, 6, 6, 0], [0, 6, 0, 0], [0, 0, 0, 0]],
[[0, 6, 0, 0], [6, 6, 0, 0], [0, 6, 0, 0], [0, 0, 0, 0]]]},
# type: 'Z'
{'color': 7,
'positions': [[[7, 7, 0, 0], [0, 7, 7, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 7, 0], [0, 7, 7, 0], [0, 7, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [7, 7, 0, 0], [0, 7, 7, 0], [0, 0, 0, 0]],
[[0, 7, 0, 0], [7, 7, 0, 0], [7, 0, 0, 0], [0, 0, 0, 0]]]}] | [
"noreply@github.com"
] | akhyn.noreply@github.com |
db6c5d94640a859c089ee996a59d9c1c484bfd6b | 1f32096af05da776c59a11b74a424637aa718113 | /move_file/move_bam.py | 9fd73946bf90b89fd42983fa40e1ddda260a9a36 | [] | no_license | ohsu-comp-bio/compbio-galaxy-wrappers | a222dbef5d4d4101f1705c6101f2e212435d1ea8 | 6162bc6d8ee37401de8dffec545935953028bed7 | refs/heads/master | 2023-08-31T05:32:22.305366 | 2023-08-29T18:24:59 | 2023-08-29T18:24:59 | 32,424,561 | 6 | 13 | null | 2023-09-14T19:28:18 | 2015-03-17T22:40:39 | Python | UTF-8 | Python | false | false | 5,980 | py | #!/usr/bin/env python
from bioblend import galaxy
import getpass
import argparse
import json
import shutil
import os
import grp
import errno
import pysam
import requests.packages.urllib3
import subprocess
requests.packages.urllib3.disable_warnings()
VERSION='0.2.1'
def mkdir_p(path):
    """Create *path* and any missing parents (like ``mkdir -p``);
    an already-existing directory is not an error."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # Only swallow "already exists as a directory"; re-raise the rest.
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
def get_access_groups():
    """
    Find all access groups associated with the current user.

    Returns a list of ``(group_name, gid)`` tuples.
    """
    return [(grp.getgrgid(gid).gr_name, int(gid)) for gid in os.getgroups()]
def set_new_group(my_groups, group):
    """
    Set a new current group for the process to run under.

    *my_groups* is a list of ``(name, gid)`` tuples as produced by
    ``get_access_groups``; every entry whose name equals *group* triggers
    an ``os.setgid`` call.
    """
    for name, gid in my_groups:
        if name == group:
            os.setgid(gid)
def index_bam(bai_origin, dest_bai, dest_bam):
    """Place a BAM index at *dest_bai*.

    If Galaxy already produced an index (*bai_origin* is a path), copy it
    and mark it read-only for owner/group; otherwise rebuild the index
    from *dest_bam* with pysam.
    """
    if bai_origin is not None:
        print("Copying " + bai_origin + " to " + dest_bai + ".")
        shutil.copyfile(bai_origin, dest_bai)
        # 0o440: the original used the Python-2-only literal 0440, which is
        # a syntax error on Python 3; 0o440 is valid on both 2.6+ and 3.
        os.chmod(dest_bai, 0o440)
    else:
        print("BAI not found in Galaxy, reindexing...")
        pysam.index(dest_bam, dest_bai)
# These functions run commands using sg and subprocess. For users that
# have more than 16 groups.
def run_sg_copy_cmd(origin, dest, group):
    """
    Build the command, using sg, that will run a copy operation as a specific group.
    Don't know of python libs for this, will use subprocess.
    -a = -rlptgoD in rsync
    """
    # SECURITY NOTE(review): origin/dest/group are interpolated into a
    # shell=True command line; this is only safe while callers never pass
    # untrusted or whitespace/quote-containing values.
    # cmd = "sg %s \"cp %s %s\"" % (group, origin, dest)
    cmd = "sg %s \"rsync --chmod=u+rw,g+rw,o-rwx %s %s\"" % (group, origin, dest)
    print(cmd)
    # subprocess.call blocks until the copy finishes; stdout is discarded.
    process = subprocess.call(args=cmd, shell=True, stdout=subprocess.PIPE)
    # print(subprocess.check_output(["pidof", cmd]))
def run_sg_index_cmd(filename, group):
    """
    Perform an indexing step under sg utility.
    """
    # SECURITY NOTE(review): filename/group are interpolated into a
    # shell=True command line — same caveat as run_sg_copy_cmd.
    cmd = "sg %s \"samtools index %s\"" % (group, filename)
    print(cmd)
    # Blocks until samtools finishes; stdout is discarded.
    process = subprocess.call(args=cmd, shell=True, stdout=subprocess.PIPE)
def main():
    """Copy a selected "Print Reads" BAM (and its .bai index) from a Galaxy
    history into warm storage at ``<bam_path><run_id>/<sample_id>.bam``.

    The per-user Galaxy API key is read from
    ``/home/users/<user>/.galaxy_api_key.json`` (a JSON map username -> key).
    When the user belongs to more than 16 groups, copy/index are delegated
    to the sg-based helpers above (NFS group-count workaround); otherwise
    shutil/pysam are used directly.
    """
    ### Store current user, so we can connect them to a Galaxy API key.
    curr_user = getpass.getuser()
    ### Set argparse options.
    parser = argparse.ArgumentParser(description='Move BAM files from Galaxy to warm storage.')
    # parser.add_argument('--dummy_input', help="Dummy input file.")
    parser.add_argument('--galaxy_url', default='https://exaclinical.ohsu.edu/galaxy', help='URL of the Galaxy instance.')
    ### Temporarily set a default history id to test with.
    parser.add_argument('--history_id', default='8d4d7622a593869c', help='Galaxy history id, defined after a Galaxy history is created.')
    parser.add_argument('--sample_id', default='DNA-15-01448-1', help='Illumina SampleSheet sample id. This will be used to create the BAM and BAI files.')
    parser.add_argument('--bam_path', help='Path where BAM files will be deposited.')
    parser.add_argument('--run_id', help='A subdirectory with the same name as the run_id will be created in the bam_path directory.')
    parser.add_argument('--input', help="Input file to be moved.")
    parser.add_argument('--output', default='/tmp/default.log', help='Outfile')
    args = parser.parse_args()
    # Per-user API keys live outside the repo, keyed by username.
    api_file = '/home/users/' + curr_user + '/.galaxy_api_key.json'
    with open(api_file, 'r') as f:
        api_key = json.load(f)
    # Code to find access groups and set a default access group based on where the BAM files are going.
    my_groups = get_access_groups()
    # Choose your path, based NFS groups number limitations.
    if len(my_groups) > 16:
        use_sg = True
    else:
        use_sg = False
    # set_new_group(my_groups, "CorlessLab")
    print(my_groups)
    print(len(my_groups))
    gi = galaxy.GalaxyInstance(url=args.galaxy_url, key=api_key[curr_user])
    this_hist = gi.histories.show_history(args.history_id, contents=True)
    for entry in this_hist:
        # Make the name an argument, or do something else, so we can move other BAM files that Print Reads.
        if "Print Reads" in entry['name'] and "BAM" in entry['name'] and entry['deleted'] != True:
            dataset_id = entry['id']
            bam_origin = gi.datasets.show_dataset(dataset_id)['file_path']
            bai_origin = gi.datasets.show_dataset(dataset_id)['metadata_bam_index']
            ### Change this behavior to automatically create an index with Samtools if there is none.
            if bam_origin == args.input:
                new_path = args.bam_path + args.run_id + '/'
                dest_bam = new_path + args.sample_id + '.bam'
                dest_bai = new_path + args.sample_id + '.bai'
                print("Copying " + bam_origin + " to " + dest_bam + ".")
                if use_sg == False:
                    mkdir_p(new_path)
                    if not os.path.isfile(dest_bam):
                        shutil.copyfile(bam_origin, dest_bam)
                        # NOTE(review): 0440 is a Python-2-only octal literal
                        # (0o440 is the portable spelling).
                        os.chmod(dest_bam, 0440)
                        ### Check to see if the index file was found in Galaxy, if not, make one.
                        index_bam(bai_origin, dest_bai, dest_bam)
                    else:
                        if not os.path.isfile(dest_bai):
                            print("BAM file has been copied, but there is no index.")
                            index_bam(bai_origin, dest_bai, dest_bam)
                else:
                    # Convert to argument.
                    mkdir_p(new_path)
                    run_sg_copy_cmd(bam_origin, dest_bam, "CorlessLab")
                    run_sg_index_cmd(dest_bam, "CorlessLab")
            elif bam_origin == None:
                # NOTE(review): bam_origin is None in this branch, so the string
                # concatenation below raises TypeError before the Exception is raised.
                raise Exception("No BAM filepath found in Galaxy for " + bam_origin)
    # Touch the --output file; presumably consumed as an (empty) log by the
    # Galaxy tool wrapper — confirm against the wrapper XML.
    handle_out = open(args.output, 'w')
    handle_out.close()
if __name__ == "__main__":
main()
| [
"letaw@ohsu.edu"
] | letaw@ohsu.edu |
7eaa9fc85a30db26a3829c8841b094e18cc215c9 | 86c6aa15312058a3ecda5ae46681c28817bd9636 | /forms.py | 7b00cc6e273405300cb882250934d74370529912 | [] | no_license | ld-amaya/warbler | 707eeba8a81308f844a64ef1c82187176c2c6073 | c5fbd60ac74add77d36be51090a58da1857d3e19 | refs/heads/main | 2023-02-16T19:34:16.539805 | 2021-01-11T00:57:52 | 2021-01-11T00:57:52 | 326,639,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,601 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, TextAreaField
from wtforms.validators import DataRequired, Email, Length
class MessageForm(FlaskForm):
    """Form for adding/editing messages."""
    # Free-text body of the message; submission is rejected when empty.
    text = TextAreaField('text', validators=[DataRequired()])
class UserAddForm(FlaskForm):
    """Form for adding users (sign-up)."""
    # Username and e-mail are required; e-mail must be well-formed.
    username = StringField('Username', validators=[DataRequired()])
    email = StringField('E-mail', validators=[DataRequired(), Email()])
    # Only a minimum length of 6 is enforced here.
    password = PasswordField('Password', validators=[Length(min=6)])
    # Profile image URL; optional (no validators).
    image_url = StringField('(Optional) Image URL')
class UserProfileForm(FlaskForm):
    """Form for editing an existing user's profile.

    Username and e-mail are required; the display fields (location, images,
    bio) are optional.  The password field only enforces a minimum length
    here — presumably it re-confirms the user's identity on save (verify
    against the view that consumes this form).
    """
    username = StringField('Username', validators=[DataRequired()])
    email = StringField('E-mail', validators=[DataRequired(), Email()])
    # Label typo fixed: the original read 'Location (Optional' (missing ')').
    location = StringField('Location (Optional)')
    image_url = StringField('Image URL (Optional)')
    header_image_url = StringField('Header URL (Optional)')
    bio = StringField('Bio (Optional)')
    password = PasswordField('Password', validators=[Length(min=6)])
class LoginForm(FlaskForm):
    """Login form: username plus password (min length 6)."""
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[Length(min=6)])
class PasswordForm(FlaskForm):
    """Change-password form: current password plus new password twice.

    NOTE(review): no validator checks that new_password == confirm_password;
    that comparison must happen in the view if it happens at all.
    """
    password = PasswordField('Existing Password', validators=[Length(min=6)])
    new_password = PasswordField('New Password', validators=[Length(min=6)])
    confirm_password = PasswordField(
        'Confirm Password', validators=[Length(min=6)])
| [
"louamaya@me.com"
] | louamaya@me.com |
86aad0348b322a2f956b6383ab4d9264b7a71afd | 0ebec1e899789ae2597c01bae7ca2c3382c4266d | /session5/a_customising_plots.py | 0a6ea8df7506befcbf9f9e859b1a2d01d340e160 | [
"Apache-2.0"
] | permissive | TugdualSarazin/MACT20.21_Digital_tools_Big_Data_part_1 | 02fda6b401bcdad2a240de00960ff0dbc61fc94d | b43b9f50ec42bb413c2c3a090cf11f9886676c58 | refs/heads/main | 2023-01-13T20:51:44.000981 | 2020-11-09T12:25:11 | 2020-11-09T12:25:11 | 313,076,622 | 0 | 0 | Apache-2.0 | 2020-11-15T16:44:29 | 2020-11-15T16:44:28 | null | UTF-8 | Python | false | false | 2,890 | py | # encoding: utf-8
##################################################
# This script shows uses the pandas and matplotlib libraries to produce different kind of plots
# It also combines data from two sources and create multiple plots
# Find extra documentation about data frame here:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.scatter.html
##################################################
#
##################################################
# Author: Diego Pajarito
# Copyright: Copyright 2020, IAAC
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# License: Apache License Version 2.0
# Version: 1.0.0
# Maintainer: Diego Pajarito
# Email: diego.pajarito@iaac.net
# Status: development
##################################################
# We need to import pandas library as well as the plot library matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# We read the file for population data and gross domestic product
amb_mplts = pd.read_csv('../data/catalunya/AMB_municipalities_min.csv')
lu_mplts = pd.read_csv('../data/luxembourg/population.csv', skiprows=[2,3])
# First, we filter data for a single country, mind the way to select only columns having numeric data
pop_cat = amb_mplts['population']
area_cat = amb_mplts['area']
pop_lu = lu_mplts[['Year', '2020']]
pop_lu.columns = ['canton', 'population']
pop_lu_1821 = lu_mplts[['Year', '1821']]
pop_lu_1821.columns = ['canton', 'population']
# Plots allow basic configuration of visual features. Here some of the most common
colors = np.random.rand(len(pop_cat))
plt.scatter(x=pop_cat, y=area_cat, c=colors)
plt.show()
# Charts can also use lines to represent patterns from different subsets
for value in lu_mplts['Year']:
a_pop = lu_mplts[lu_mplts['Year'] == value]
a_pop = a_pop.iloc[0, 1:15]
plt.plot(a_pop)
plt.show()
# try to customise axis
#plt.xticks(np.arange(0, 2020, 100))
plt.yticks(np.arange(0,175000, 50000))
# There are different ways to represent data density,
# this 2d histogram shows population and area distribution
plt.hist2d(pop_cat, area_cat)
plt.show()
# We can create the arrangement for multiple plots and compare the differences in patterns
fig, axs = plt.subplots(2, 2, sharex=False, sharey=False)
axs[0, 0].scatter(x=pop_cat, y=area_cat, c=colors)
axs[1, 0].hist2d(pop_cat, area_cat, bins=20)
axs[0, 1].scatter(x=pop_lu['population'], y=pop_lu_1821['population'])
axs[1, 1].hist2d(x=pop_lu['population'], y=pop_lu_1821['population'], bins=20)
plt.show()
# We can create the arrangement for multiple plots and compare the differences in patterns
fig, axs = plt.subplots(1, 2, sharex=True, sharey=True)
axs[0].scatter(x=pop_lu['population'], y=pop_lu_1821['population'])
axs[1].hist2d(x=pop_lu['population'], y=pop_lu_1821['population'], bins=20)
plt.show()
| [
"diegopajarito@gmail.com"
] | diegopajarito@gmail.com |
5205855f7f68c9287ecc194b57aaba8531b85d53 | 2b9acf5805447f2085420f946b4d3cdf788b7940 | /setup.py | 11f4ba9b9bb48e0ef8b838970b6fc22264920da3 | [
"BSD-3-Clause"
] | permissive | spidezad/filesdownloader | 4549133df6813b9c937bfdaaa5e6b564034e1ad5 | e3e20cfea7fa67e7485de1b8519dd2312ac8c8e1 | refs/heads/master | 2020-03-23T21:34:17.178345 | 2018-07-24T07:07:24 | 2018-07-24T07:07:24 | 142,116,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,307 | py | from setuptools import setup, find_packages
from codecs import open
from os import path
__version__ = '0.0.1'
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# get the dependencies and installs
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
all_reqs = f.read().split('\n')
install_requires = [x.strip() for x in all_reqs if 'git+' not in x]
dependency_links = [x.strip().replace('git+', '') for x in all_reqs if x.startswith('git+')]
# Package metadata for PyPI/pip.  long_description, install_requires and
# dependency_links are assembled above from README.md / requirements.txt.
setup(
    name='filesdownloader',
    version=__version__,
    # Typo fix: "mulitple" -> "multiple" in the short description.
    description='Fast download multiple files from web',
    long_description=long_description,
    url='https://github.com/spidezad/filesdownloader',
    download_url='https://github.com/spidezad/filesdownloader/tarball/' + __version__,
    license='BSD',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 3',
    ],
    keywords='',
    packages=find_packages(exclude=['docs', 'tests*']),
    include_package_data=True,
    author='Tan Kok Hua',
    install_requires=install_requires,
    dependency_links=dependency_links,
    author_email=''
)
| [
"kokhua81@gmail.com"
] | kokhua81@gmail.com |
84b3fd060fdbfce79cd61e4f0152ece8169803a1 | faacdf7022efe25624ca37e5e7d7ec8692158474 | /DeepMS_model.py | 4c6b5c10b77c27e12194e3f39051a19b408ba66c | [] | no_license | bsml320/DeepMS | 0ffcea38398430255e397877a75c52a1af853031 | 6f514732351c42a13f5810846e67fad70b14b87e | refs/heads/master | 2022-12-02T02:05:36.474988 | 2020-08-13T21:46:59 | 2020-08-13T21:46:59 | 179,566,364 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,222 | py | # -*- coding: utf-8 -*-
import sys
import subprocess
import os
import numpy as np
import pandas as pd
np.random.seed(56) # for reproducibility
from keras.models import Model
from keras.layers import Dense, Input, Dropout
from keras import regularizers
#============================================
output_file = sys.argv[1]
latent_dim = int(sys.argv[2])
batch_size_n = int(sys.argv[3])
learning_rate = float(sys.argv[4])
noise_factor = float(sys.argv[5])
#============================================
#output_file = "matrix_top10k_markers"
#os.mkdir("AE_"+output_file)
mf_file = os.path.join('', output_file)
mf_df = pd.read_table(mf_file, index_col=0)
print(mf_df.shape)
output_file = (str(output_file)+"_"+str(latent_dim)+"_"+str(batch_size_n)+"_"+str(learning_rate)+"_"+str(noise_factor))
if os.path.exists("DeepMS_"+output_file):
cmd = 'rm -r '+"DeepMS_"+output_file
print(cmd)
subprocess.call(cmd, shell=True)
os.mkdir("DeepMS_"+output_file)
np.random.seed(56)
test_set_percent = 0.2
x_test = mf_df.sample(frac=test_set_percent)
#x_test = mf_df
x_train = mf_df.drop(x_test.index)
#x_train = mf_df
x_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size = x_train.shape)
x_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size = x_test.shape)
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)
original_dim = mf_df.shape[1]
epochs_n = 50
# Compress to 100 dim
encoding_dim = latent_dim
# this is our input placeholder
input_dim = Input(shape=(original_dim,))
# encode
encoder_output = Dense(encoding_dim, activation = "relu", activity_regularizer = regularizers.l1(1e-12))(input_dim)
# decode
decoded = Dense(original_dim, activation = "softmax")(encoder_output)
# autoencoder model
autoencoder = Model(inputs = input_dim, outputs = decoded)
# compile autoencoder
autoencoder.compile(optimizer='adam', loss='mse')
# training
hist = autoencoder.fit(x_train_noisy, x_train, epochs=epochs_n, batch_size=batch_size_n, shuffle=True, validation_data=(x_test_noisy, x_test))
history_df = pd.DataFrame(hist.history)
loss_file = os.path.join("DeepMS_"+output_file, 'Model_evaluation_'+output_file+'.txt')
history_df.to_csv(loss_file, sep="\t")
# encoder model
encoder = Model(inputs = input_dim, outputs = encoder_output)
encoded_df = encoder.predict_on_batch(mf_df)
encoded_df = pd.DataFrame(encoded_df, index = mf_df.index)
encoded_df.index.name = 'sample_id'
encoded_df.columns.name = 'sample_id'
encoded_df.columns = encoded_df.columns + 1
encoded_file = os.path.join("DeepMS_"+output_file, 'Latents_'+output_file+'.tsv')
encoded_df.to_csv(encoded_file, sep='\t')
# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model( encoded_input, decoder_layer(encoded_input))
#autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
weights = []
for layer in encoder.layers:
weights.append(layer.get_weights())
#weight_layer_df = pd.DataFrame(weights[1][0], columns=mf_df.columns, index=range(1, latent_dim+1))
weight_layer_df = pd.DataFrame(np.transpose(weights[1][0]), columns=mf_df.columns, index=range(1, latent_dim+1))
weight_layer_df.index.name = 'encodings'
weight_file = os.path.join("DeepMS_"+output_file, 'Weights_encoder_'+output_file+'.tsv')
weight_layer_df.to_csv(weight_file, sep='\t')
#========================
weights = []
for layer in decoder.layers:
weights.append(layer.get_weights())
#weight_layer_df = pd.DataFrame(weights[1][0], columns=mf_df.columns, index=range(1, latent_dim+1))
weight_layer_df = pd.DataFrame(weights[1][0], columns=mf_df.columns, index=range(1, latent_dim+1))
weight_layer_df.index.name = 'decodings'
weight_file = os.path.join("DeepMS_"+output_file, 'Weights_decoder_'+output_file+'.tsv')
weight_layer_df.to_csv(weight_file, sep='\t')
if os.path.exists("DeepMS_"+output_file+"_decoder"):
cmd = 'rm -r '+"DeepMS_"+output_file+"_decoder"
print(cmd)
subprocess.call(cmd, shell=True)
| [
"noreply@github.com"
] | bsml320.noreply@github.com |
# Liang ch.4 ex 4.34: read one hexadecimal digit and print its decimal value.
#
# Fixes to the original:
#  * `hex_number == str` compared the value to the *type* str (always False),
#    so the letter branches were unreachable;
#  * `hex_number == 'A' or 'a'` is always truthy ('a' is non-empty);
#  * eval(input(...)) raised NameError for letter input and evaluates
#    arbitrary expressions — plain input() is used instead.
hex_digit = input("Enter a hexadecimal value: ").strip()
if len(hex_digit) == 1 and hex_digit in "0123456789abcdefABCDEF":
    # int(x, 16) maps '0'-'9' and 'a'/'A'-'f'/'F' to 0-15.
    print("The decimal value for hex digit", hex_digit, "is", int(hex_digit, 16))
else:
    print("Invalid input")
| [
"noreply@github.com"
] | brohum10.noreply@github.com |
eb132c34e22fd57aef74cf8874e37d5a3344d11f | c2bf959105a0de0183cad6584e478c8a0110cebb | /Capitalize Each Word.py | 16f93825a4fe1a4d8fe182121343ad805ce9b04e | [] | no_license | Suryashish14/Suryashish-Programs | 913a76bac52116b7f1f39a4aeddfc8e2355a341b | eedc75447069ac53e978bf7c2748e39ee8ff38d7 | refs/heads/master | 2023-08-15T09:59:51.379619 | 2021-10-13T09:31:26 | 2021-10-13T09:31:26 | 291,040,265 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | string=input('Enter A Sentence -->' )
print(string.title()) | [
"noreply@github.com"
] | Suryashish14.noreply@github.com |
bd0c9fcaedf7bcb80f6578448e927573d92dde0a | 94717e5a126c510f48c3056c304d33371b837b9b | /CEGO/include_previous_pareto.py | cfcf1435c4c677675055273bc2a8f772e60adc88 | [
"MIT"
] | permissive | DriesVerstraete/CEGO | b2baf9298578dc2738941b62506924082d15ba2e | b7e1eb748b3a08a779d8e7a813653b321e28b8d2 | refs/heads/master | 2023-03-20T03:03:29.707206 | 2020-02-20T15:52:09 | 2020-02-20T15:52:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 16 11:26:32 2017
@author: r.dewinter
"""
import numpy as np
def include_previous_pareto(initEval=None, outdir=None, runNo=0):
    """Load the Pareto front saved by a previous run.

    Reads ``{par,obj,con}_run<runNo>_finalPF.csv`` from *outdir* (which must
    already end with a path separator, as in the original caller) and returns
    the tuple ``(parameters, constraints, objectives)`` as 2-D arrays,
    truncated to at most *initEval* rows.
    """
    if initEval is None:
        raise ValueError('InitEvalLHS must be set')
    if outdir is None:
        raise ValueError('outdir must be set')
    prefix = str(outdir)
    suffix = '_run' + str(runNo) + '_finalPF.csv'
    par_old = np.genfromtxt(prefix + 'par' + suffix, delimiter=',')
    obj_old = np.genfromtxt(prefix + 'obj' + suffix, delimiter=',')
    con_old = np.genfromtxt(prefix + 'con' + suffix, delimiter=',')
    # A single-row CSV comes back 1-D; promote everything to 2-D.
    if par_old.ndim == 1:
        par_old = np.array([par_old])
        obj_old = np.array([obj_old])
        con_old = np.array([con_old])
    if len(par_old) > initEval:
        return par_old[:initEval, :], con_old[:initEval, :], obj_old[:initEval, :]
    return par_old, con_old, obj_old
"noreply@github.com"
] | DriesVerstraete.noreply@github.com |
e22839d86b89940ce4b91448ce7af2c4fa6f31d0 | 0bf95b7739c03733e7cf2c7c133b2960372fc1a0 | /models.py | 2740e4e0ca5f223f202998f2c97e92b92242bde5 | [] | no_license | kimjy3402/5th-assignment | 5ba666c16a641d3d19c085a3118c3978c7f63933 | ebae60fac20995c9fbe76e4b851eece3889e7d79 | refs/heads/master | 2020-05-20T21:51:30.180768 | 2019-05-09T09:55:16 | 2019-05-09T09:55:16 | 185,771,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | from django.db import models
from django.utils import timezone
# Create your models here.
class Blog(models.Model):
    """A blog post: title, publication timestamp and body text."""
    title = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    body = models.TextField()
    def __str__(self):
        """Represent a post by its title (admin, shell, templates)."""
        return self.title
    # objects = models.Manager()
    def summary(self):
        """First 100 characters of the body — presumably for list previews."""
        return self.body[:100] #pylint: disable=E1136
class Comment(models.Model):
    """A comment attached to a Blog post."""
    # Deleting the post deletes its comments; reverse accessor: post.comments.
    post = models.ForeignKey('Blog', on_delete=models.CASCADE, related_name='comments')
    # author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    # Author is stored as a plain name, not a user FK (see the commented line).
    author = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    # approved_comment = models.BooleanField(default=False)
    # def approve(self):
    #     self.approved_comment = True
    #     self.save()
    def __str__(self):
        """Represent a comment by its text."""
        return self.text
"air@Airui-MacBook-Air.local"
] | air@Airui-MacBook-Air.local |
8c6977a6a88267049f29f0ab21620a01356f8d36 | 39cb67781018e23428312610ded87c5d384bb690 | /swinger.py | 23a441d49c82499b30ed56afe259a80e11ef8692 | [] | no_license | yi75798/Swinger | afd8e528cc1bcce3a4db83ce54def54372619717 | b158c4f358fbebe655627969231cf1f0276cf708 | refs/heads/master | 2022-02-25T14:10:25.104740 | 2019-10-25T07:24:12 | 2019-10-25T07:24:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,314 | py | # -*- coding: utf-8 -*-
import nltk, json, pickle
import itertools
from random import shuffle
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from nltk.probability import FreqDist, ConditionalFreqDist
import sklearn
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
def bag_of_words(words):
    """Bag-of-words feature dict: map every item of *words* to True."""
    features = {}
    for w in words:
        features[w] = True
    return features
def bigram(words, score_fn=BigramAssocMeasures.chi_sq, n=1000):
    """Feature dict built from the top-*n* bigrams of *words* only."""
    bigram_finder = BigramCollocationFinder.from_words(words) # turn the token stream into bigram pairs
    bigrams = bigram_finder.nbest(score_fn, n) # keep the n best bigrams under the chi-square score
    return bag_of_words(bigrams)
def bigram_words(words, score_fn=BigramAssocMeasures.chi_sq, n=1000):
    """Feature dict of all words plus their top-*n* high-information bigrams."""
    bigram_finder = BigramCollocationFinder.from_words(words)
    bigrams = bigram_finder.nbest(score_fn, n)
    return bag_of_words(words + bigrams) # words and the informative bigrams together form the feature set
def create_word_scores():
    """Chi-square information score for every word in p.json / n.json.

    Returns a dict ``word -> score`` where the score is the sum of the
    word's chi-square statistics against the positive and negative corpora.
    """
    posWords = json.load(open('p.json','r'))
    negWords = json.load(open('n.json','r'))
    posWords = list(itertools.chain(*posWords)) # flatten the nested arrays into one flat list
    negWords = list(itertools.chain(*negWords)) # same here
    word_fd = FreqDist() # frequency of every word over both corpora
    cond_word_fd = ConditionalFreqDist() # per-class (pos/neg) word frequencies
    for word in posWords:
        word_fd[word] += 1
        cond_word_fd['pos'][word] += 1
    for word in negWords:
        word_fd[word] += 1
        cond_word_fd['neg'][word] += 1
    pos_word_count = cond_word_fd['pos'].N() # number of positive tokens
    neg_word_count = cond_word_fd['neg'].N() # number of negative tokens
    total_word_count = pos_word_count + neg_word_count
    word_scores = {}
    for word, freq in word_fd.items():
        pos_score = BigramAssocMeasures.chi_sq(cond_word_fd['pos'][word], (freq, pos_word_count), total_word_count) # chi-square of the word vs. the positive class (mutual information etc. would also work)
        neg_score = BigramAssocMeasures.chi_sq(cond_word_fd['neg'][word], (freq, neg_word_count), total_word_count) # same for the negative class
        word_scores[word] = pos_score + neg_score # a word's information = pos chi-square + neg chi-square
    return word_scores # maps each word to its information score
def create_word_bigram_scores():
    """Chi-square information scores for words *and* bigrams of both corpora.

    Same contract as create_word_scores(), but each class's token list is
    extended with its 5000 best bigrams before scoring.
    """
    posdata = json.load(open('p.json','r'))
    negdata = json.load(open('n.json','r'))
    posWords = list(itertools.chain(*posdata))
    negWords = list(itertools.chain(*negdata))
    # BUG FIX: the original built a finder from posWords and immediately
    # overwrote it with the negWords finder, so the "positive" bigrams were
    # actually mined from the negative corpus. Keep two separate finders.
    pos_bigram_finder = BigramCollocationFinder.from_words(posWords)
    neg_bigram_finder = BigramCollocationFinder.from_words(negWords)
    posBigrams = pos_bigram_finder.nbest(BigramAssocMeasures.chi_sq, 5000)
    negBigrams = neg_bigram_finder.nbest(BigramAssocMeasures.chi_sq, 5000)
    pos = posWords + posBigrams # words plus bigram pairs
    neg = negWords + negBigrams
    word_fd = FreqDist()
    cond_word_fd = ConditionalFreqDist()
    for word in pos:
        word_fd[word] += 1
        cond_word_fd['pos'][word] += 1
    for word in neg:
        word_fd[word] += 1
        cond_word_fd['neg'][word] += 1
    pos_word_count = cond_word_fd['pos'].N()
    neg_word_count = cond_word_fd['neg'].N()
    total_word_count = pos_word_count + neg_word_count
    word_scores = {}
    for word, freq in word_fd.items():
        pos_score = BigramAssocMeasures.chi_sq(cond_word_fd['pos'][word], (freq, pos_word_count), total_word_count)
        neg_score = BigramAssocMeasures.chi_sq(cond_word_fd['neg'][word], (freq, neg_word_count), total_word_count)
        word_scores[word] = pos_score + neg_score
    return word_scores
def find_best_words(word_scores, number):
    """Return the *number* highest-information words as a set.

    *word_scores* maps word -> information score; *number* is the feature
    dimension and can be tuned.
    """
    ranked = sorted(word_scores.items(), key=lambda item: item[1], reverse=True)
    return set(word for word, _score in ranked[:number])
def score(classifier, name):
    """Train *classifier* on the module-level train set, pickle it to
    ``<name>.pickle`` and return its accuracy on the module-level test set."""
    classifier = SklearnClassifier(classifier) # use the scikit-learn estimator through nltk's interface
    classifier.train(train) # train the classifier
    pickle.dump(classifier, open(name + '.pickle','wb'))
    pred = classifier.classify_many(test) # predict a label for every test document
    return accuracy_score(tag_test, pred) # compare predictions with the human labels -> accuracy
def best_word_features(words):
    # Keep only the words present in the module-level ``best_words`` set.
    # NOTE(review): ``best_words`` is never assigned at this script's visible
    # top level (find_best_words() returns it, but its result is not stored);
    # confirm it is defined before this function is called.
    return dict([(word, True) for word in words if word in best_words])
def pos_features(feature_extraction_method):
    """Featurize every document in the module-level ``pos`` list as
    ``[features, 'pos']`` pairs."""
    posFeatures = []
    for i in pos:
        posWords = [feature_extraction_method(i),'pos'] # tag the positive text with the "pos" label
        posFeatures.append(posWords)
    return posFeatures
def neg_features(feature_extraction_method):
    """Featurize every document in the module-level ``neg`` list as
    ``[features, 'neg']`` pairs."""
    negFeatures = []
    for j in neg:
        negWords = [feature_extraction_method(j),'neg'] # tag the negative text with the "neg" label
        negFeatures.append(negWords)
    return negFeatures
pos_review = json.load(open('p.json','r'))
neg_review = json.load(open('n.json','r'))
word_scores_1 = create_word_scores()
word_scores_2 = create_word_bigram_scores()
shuffle(pos_review) #把积极文本的排列随机化
pos = pos_review
neg = neg_review
posFeatures = pos_features(bag_of_words) #使用所有词作为特征
negFeatures = neg_features(bag_of_words)
train = posFeatures+negFeatures
# train = posFeatures[174:]+negFeatures[174:]
# devtest = posFeatures[124:174]+negFeatures[124:174]
test = posFeatures+negFeatures
test, tag_test = zip(*test)
# dev, tag_dev = zip(*devtest) #把开发测试集(已经经过特征化和赋予标签了)分为数据和标签
print('BernoulliNB`s accuracy is %f' %score(BernoulliNB(), 'BernoulliNB'))
print('MultinomiaNB`s accuracy is %f' %score(MultinomialNB(), 'MultinomialNB'))
print('LogisticRegression`s accuracy is %f' %score(LogisticRegression(), 'LogisticRegression'))
print('SVC`s accuracy is %f' %score(SVC(), 'SVC'))
print('LinearSVC`s accuracy is %f' %score(LinearSVC(), 'LinearSVC'))
print('NuSVC`s accuracy is %f' %score(NuSVC(), 'NuSVC')) | [
"davidtnfsh@gmail.com"
] | davidtnfsh@gmail.com |
90f01e806124c7ca87d8fa588c9283d06b53bfcb | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2485/60623/234199.py | d1fc22fa3226c63bdda6a1c2a234b5d3b02955ce | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | # 给定一个单词数组,按排序顺序(计数的递增顺序)一起打印所有字符相同组的计数。
# 例如,如果给定的数组是{“ cat”,“ dog”,“ tac”,“ god”,“ act”},则分组的字谜是“(dog,god)(cat,tac,act)”。因此输出为2 3
size=int(input())
a=0
while a<size:
b=input()#也没有用
strList=input().split()
i=0
while i<len(strList):
l=list(strList[i])
#列表的sort是针对自己,而字典的sort则是返回一个排好序的,但本身并没有排好序
l.sort()
s="".join(l)
strList[i]=s
i=i+1
strList.sort()
j=0
k=1
myList=[]
while j<len(strList):
if j==len(strList)-1:
break
if(strList[j]==strList[j+1]):
k=k+1
else:
myList.append(k)
k=1
j=j+1
myList.append(k)
myList.sort()
m=0
while m<len(myList):
if m!=len(myList)-1:
print(""+myList[m]+" ", end='')
else:
print(myList[m])
m=m+1
a=a+1 | [
"1069583789@qq.com"
] | 1069583789@qq.com |
7a1f699c7d733e4b2b86f61ae2f815e1c27efad3 | 8bf207692fa8c87c8fc9887ecba61fc71c4f34d3 | /tags/astar-only/easy_visualiza.py | eef7822f8ef7cd9d2ce0367e1aa059cec4c9dcde | [] | no_license | jjconti/astar-example | b9f1d487576836696d258d22522a68ee7d21b8e3 | 3f7a54bfa6ffa44a4686708f6f7c73059298c9c5 | refs/heads/master | 2021-01-20T04:39:37.926827 | 2008-10-15T17:32:47 | 2008-10-15T17:32:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,141 | py | # -*- coding: utf-8 -*-
from euclid import LineSegment2, Point2
from itertools_recipes import pairwise
def flatten(l):
    """Flatten *l* by exactly one level.

    Elements that are lists are spliced into the output; every other element
    (including tuples and other iterables) is appended unchanged.
    """
    result = []
    for item in l:
        result.extend(item if isinstance(item, list) else [item])
    return result
class Visualiza(object):
    """Line-of-sight test from an origin point against polygonal obstacles.

    Built on the ``euclid`` Point2/LineSegment2 primitives.  (This module is
    Python 2: note the print statement in es_visible.)
    """
    def __init__(self, inicio, elementos):
        '''
        elementos -> [inicio, ... points, [polygon points], ... end]

        *inicio* (a 2-tuple) is excluded from the candidate destinations;
        every nested list inside *elementos* is treated as one polygon.
        '''
        elementos = elementos[:]
        self.puntos = [Point2(float(x),float(y)) for x,y in flatten(elementos) if (x,y) != inicio]
        self.origen = Point2(float(inicio[0]),float(inicio[1]))
        self.destinos = self.puntos[:]
        self.poligonos = [self.armar_poligono(e) for e in elementos if isinstance(e, list)]
        self.segmentos = list(flatten(self.poligonos))
    def armar_poligono(self, puntos):
        # Build a segment between every ordered pair of distinct vertices.
        puntos = [Point2(float(x),float(y)) for x,y in puntos]
        r =[]
        #FIXME: redundant segments (each pair is added in both directions)
        #will only work for convex figures
        for p1 in puntos:
            for p2 in puntos:
                if p1 != p2:
                    r.append(LineSegment2(p1,p2))
        return r
    def es_visible(self, destino):
        # True when the origen->destino segment hits no obstacle segment
        # anywhere other than its own endpoints.
        print self.origen, destino, self.destinos
        segmento1 = LineSegment2(self.origen, destino)
        for segmento2 in self.segmentos:
            r = segmento1.intersect(segmento2)
            if r and r != self.origen and r != destino:
                return False
        return True
if __name__ == '__main__':
'''
Salida esperada:
Point2(0.00, 4.00) es visible
Point2(3.00, 0.00) es visible
Point2(3.00, 2.00) es visible
Point2(3.00, 4.00) es visible
Point2(5.00, 0.00) no es visible
Point2(5.00, 4.00) no es visible
Point2(7.00, 2.00) no es visible
'''
elementos = [(0,0), (0,4), [(3,0), (3,2), (3,4), (5,4), (5,0)], (7,2)]
inicios = [(0,0), (3,2), (5,4)]
for inicio in inicios:
v = Visualiza(inicio, elementos)
print "Desde", inicio
for destino in v.destinos:
print destino, "es visible" if v.es_visible(destino) else "no es visible"
print "*"*80
| [
"jjconti@3849f24c-4853-0410-924f-80487e21321b"
] | jjconti@3849f24c-4853-0410-924f-80487e21321b |
12270b980765a46d6ab4ba23a0eea12652b0833b | f83fabb08141ba9a74772fcf2df7fc5a2b11c481 | /06_datatypes_Lists_dict.py | bda8231e62215b67e2e296d86b544c0943678190 | [] | no_license | mujeebullahn/Python_Work | df45db6a4cf44583a095266851a4c9604ddd7412 | 679ff9583fa1878dfcfc49e4b33c30da0491d1e2 | refs/heads/master | 2020-06-20T08:31:57.347118 | 2019-07-16T21:45:25 | 2019-07-16T21:45:25 | 197,060,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,139 | py | #Lists and Dictionaries
#List
#sometimes we just need to list our crazy x-bosses
#because we dont want to work there
#this is how ypu make lists
#[] seperate items use ,
#['sfds', 'sdfs']
#lisiting a list and saving to a variable
crazy_pokemons = ['Snorelax', 'Jiggipus', 'Metow']
print(crazy_pokemons)
print(type(crazy_pokemons))
#list are organosed using index
#[0 ,1 ,2]
print(len(crazy_pokemons))
print(crazy_pokemons[2])
print(crazy_pokemons[0])
#if you want to print last list
#you have two op[tions
#array[len(array)-1]
print(crazy_pokemons[len(crazy_pokemons)-1])
print(crazy_pokemons[-1])
#Re-assigning the value is a list, using the index
#we need to evolved mewtoo to mewtee
print(crazy_pokemons)
crazy_pokemons[2] = 'mewtee'
print(crazy_pokemons)
#apending a new pokemon
#we caught pigeoto
crazy_pokemons.append('piggeoto') #add this to list end
print(crazy_pokemons)
crazy_pokemons.insert(0, 'Rattata')
print(crazy_pokemons)
crazy_pokemons.insert(2,'rattata') #shifts and adds
# removing a record
print('doing a pop()')
crazy_pokemons.pop()
print(crazy_pokemons)
crazy_pokemons.pop(0)
print(crazy_pokemons)
#removing using a filter for a value
crazy_pokemons.remove('Jiggipus') #if we dont know the index
print(crazy_pokemons)
# List can have any datatype
mixed_list = ['Jones', 10, 30.5, 'john']
print(mixed_list)
print(type(mixed_list[0]), type(mixed_list[1]))
#Inception List
#[0 , 1 ]
leo_d = ['fist', 2, ['leo', 'd']] #
print(leo_d[1])
print(leo_d[2]) # index 2 = ['leo', 'd']
print(leo_d[2][1]) # is index 2 = ['leo', 'd'] but in there [1] index 1 which is 'd' -->subarray
print(leo_d[2][0][1])
#Tuples
#tuples are immutable lists
#meaning they do not change
#Syntax
# tuple_list = ('hello', 10 , 13 , 2)
#the difference between this and list is that this uses round brackets
#but list uses square brackets []
#we can not change the tuple itself but we can chang the state
my_tuple = ('eggs', 'bread', 'oats')
print(my_tuple)
print(type(my_tuple))
breakpoint() #allows you to have control over the terminal
#my_tuple[3].insert(34.6)
| [
"MNoori@spartaglobal.com"
] | MNoori@spartaglobal.com |
3f79add3d9b8e12c2e8e1e85bed0629b4d712663 | af56a816c57c7f78a1eaaf074ec09ee2b8676d3c | /model.py | 497f4504f65c7fe3a3a6d0e92416776f8a7b1baf | [] | no_license | alwc/Conv-MPN | b6b465571b6207d085ee4ce8f9fca6d3ffa52f57 | 313087f243c1a7a6f2e2f4be54dfd01ced502335 | refs/heads/master | 2022-04-11T17:33:53.751469 | 2020-04-03T21:08:08 | 2020-04-03T21:08:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,627 | py | import torch.nn as nn
from config import *
from unet import UNet
from torch.nn.parameter import Parameter
import math
class graphNetwork(nn.Module):
    """Per-edge classifier for planar-graph reconstruction (Conv-MPN style).

    Every candidate edge is rasterised into a 64x64 mask.  ``rgb_net``
    (backbone + one conv) encodes the concatenated (image, mask) input into a
    C-channel feature map per edge (C = ``edge_feature_map_channel``).  Those
    features are then optionally refined for ``times`` rounds of message
    passing between neighbouring edges (adjacency given by ``edge_index``):

    * ``conv_mpn=True`` -- messages are full feature maps; ``loop_net`` is a
      stack of :class:`conv_mpn_model` modules.
    * ``gnn=True``      -- messages are pooled 1x1 vectors; ``loop_net`` is a
      stack of 1x1-conv MLPs (the GNN baseline).

    A pooling + linear head finally maps each edge to 2 logits.
    """

    def __init__(self, times, backbone, edge_feature_map_channel=32,
                 conv_mpn=False, gnn=False):
        # times: number of message-passing rounds.
        # backbone: feature extractor whose output feeds a 2C->C conv below;
        #   presumably the imported UNet -- TODO confirm against caller.
        super(graphNetwork, self).__init__()
        self.edge_feature_channel = edge_feature_map_channel
        # Encodes (RGB image, edge mask) into a C-channel per-edge feature map.
        self.rgb_net = nn.Sequential(
            backbone,
            nn.Conv2d(2 * self.edge_feature_channel, self.edge_feature_channel, kernel_size=3, stride=1, padding=1)
        )
        self.gnn = gnn
        self.times = times
        self.conv_mpn = conv_mpn
        # gnn baseline
        # Length of the pooled per-edge vector: edge_pred_layer emits 4*C
        # channels, AdaptiveAvgPool2d((2,2)) keeps a 2x2 grid -> 4*C*2*2 = 16*C.
        self.vector_size = 16 * self.edge_feature_channel
        if gnn:
            vector_size = self.vector_size
            # 1x1-conv MLP per round, consuming [own vector ; neighbour max]
            # (2*V channels) and emitting V channels.
            self.loop_net = nn.ModuleList([nn.Sequential(
                nn.Conv2d(2 * vector_size, 2 * vector_size, kernel_size=1, stride=1),
                nn.BatchNorm2d(2 * vector_size),
                nn.ReLU(inplace=True),
                nn.Conv2d(2 * vector_size, 2 * vector_size, kernel_size=1, stride=1),
                nn.BatchNorm2d(2 * vector_size),
                nn.ReLU(inplace=True),
                nn.Conv2d(2 * vector_size, 2 * vector_size, kernel_size=1, stride=1),
                nn.BatchNorm2d(2 * vector_size),
                nn.ReLU(inplace=True),
                nn.Conv2d(2 * vector_size, vector_size, kernel_size=1, stride=1),
                nn.BatchNorm2d(vector_size),
                nn.ReLU(inplace=True),
                nn.Conv2d(vector_size, vector_size, kernel_size=1, stride=1),
                nn.BatchNorm2d(vector_size),
                nn.ReLU(inplace=True),
                nn.Conv2d(vector_size, vector_size, kernel_size=1, stride=1),
                nn.BatchNorm2d(vector_size),
                nn.ReLU(inplace=True)
            ) for _ in range(self.times)])
        if conv_mpn:
            # Feature-map message passing: each round consumes
            # [own map ; neighbour max] (2*C channels) and emits C channels.
            self.loop_net = nn.ModuleList([
                conv_mpn_model(2 * self.edge_feature_channel,
                               self.edge_feature_channel)
                for _ in range(self.times)])
        # Per-edge prediction head: C -> 4*C channels at full resolution.
        self.edge_pred_layer = nn.Sequential(
            nn.Conv2d(self.edge_feature_channel, self.edge_feature_channel, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(self.edge_feature_channel),
            nn.ReLU(inplace=True),
            nn.Conv2d(self.edge_feature_channel, 2 * self.edge_feature_channel, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(2 * self.edge_feature_channel),
            nn.ReLU(inplace=True),
            nn.Conv2d(2 * self.edge_feature_channel, 2 * self.edge_feature_channel, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(2 * self.edge_feature_channel),
            nn.ReLU(inplace=True),
            nn.Conv2d(2 * self.edge_feature_channel, 4 * self.edge_feature_channel, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(4 * self.edge_feature_channel),
            nn.ReLU(inplace=True),
            nn.Conv2d(4 * self.edge_feature_channel, 4 * self.edge_feature_channel, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(4 * self.edge_feature_channel),
            nn.ReLU(inplace=True)
        )
        # NOTE: named "maxpool" but this is adaptive *average* pooling to 2x2.
        self.maxpool = nn.AdaptiveAvgPool2d((2,2))
        self.fc = nn.Linear(self.vector_size, 2)
        # Freeze batch-norm running-statistics tracking everywhere; this also
        # overrides the track_running_stats=True set inside conv_mpn_model.
        for m in self.modules():
            if isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                m.track_running_stats=False

    def change_device(self):
        """Split the model across two devices (both imported from config):
        encoder and final classifier on `device`, message passing and the
        prediction head on `device2`."""
        self.rgb_net.to(device)
        self.loop_net.to(device2)
        self.edge_pred_layer.to(device2)
        self.fc.to(device)

    def forward(self, img, edge_masks, edge_index=None):
        """Classify every candidate edge.

        img: RGB image tensor; expanded to one copy per edge below.
        edge_masks: (E, 64, 64) rasterised edge masks -- the eval-mode
            buffers hard-code the 64x64 spatial size.
        edge_index: (2, K) edge-adjacency pairs; for each k,
            edge_index[1, k] is treated as a neighbour of edge_index[0, k].
            Required when conv_mpn or gnn is enabled.
        Returns (E, 2) logits from the final linear layer.
        """
        if self.training is False:
            # Evaluation: process edges in chunks of 105 to bound GPU memory.
            tt = math.ceil(edge_masks.shape[0] / 105)
            edge_feature_init = torch.zeros((edge_masks.shape[0], self.edge_feature_channel, 64, 64)).double().to(device)
            for time in range(tt):
                if time == tt - 1:
                    # Last chunk may be shorter than 105.
                    edge_sub_masks = edge_masks[time * 105:, :, :]
                else:
                    edge_sub_masks = edge_masks[time * 105:(time+1) * 105, :, :]
                img_expand = img.expand(edge_sub_masks.shape[0], -1, -1, -1)
                feature_in = torch.cat((img_expand, edge_sub_masks.unsqueeze(1)), 1)
                if time == tt - 1:
                    edge_feature_init[time * 105:] = self.rgb_net(feature_in)
                else:
                    edge_feature_init[time*105:(time+1)*105] = self.rgb_net(feature_in)
                del feature_in  # release the chunk before the next iteration
        else:
            # Training: encode all edges in a single batch.
            img = img.expand(edge_masks.shape[0], -1, -1, -1)
            feature_in = torch.cat((img, edge_masks.unsqueeze(1)), 1)
            edge_feature_init = self.rgb_net(feature_in)
        edge_feature = edge_feature_init
        if device != device2:
            # Hand over to the second device for message passing.
            edge_feature = edge_feature.to(device2)
        if self.conv_mpn:
            # Feature-map message passing: for each edge, element-wise max of
            # its neighbours' maps, concatenated channel-wise with its own.
            for t in range(self.times):
                feature_neighbor = torch.zeros_like(edge_feature)
                for edge_iter in range(edge_masks.shape[0]):
                    # Columns of edge_index whose first row equals this edge
                    # select its neighbours (second row).
                    feature_temp = edge_feature[edge_index[1, torch.where(edge_index[0,:] == edge_iter)[0]]]
                    feature_neighbor[edge_iter] = torch.max(feature_temp, 0)[0]
                edge_feature = torch.cat((edge_feature, feature_neighbor), 1)
                edge_feature = self.loop_net[t](edge_feature)
        if self.training is False:
            # Chunked prediction head (same 105-edge chunking as above).
            # NOTE(review): this buffer is allocated on `device` while
            # edge_pred_layer is moved to `device2` by change_device();
            # confirm this path is exercised only when device == device2.
            tt = math.ceil(edge_masks.shape[0] / 105)
            edge_pred = torch.zeros((edge_masks.shape[0], 4*self.edge_feature_channel, 64, 64)).double().to(device)
            for time in range(tt):
                if time == tt - 1:
                    edge_sub_feature = edge_feature[time * 105:, :, :]
                else:
                    edge_sub_feature = edge_feature[time * 105:(time+1) * 105, :, :]
                if time == tt - 1:
                    edge_pred[time * 105:] = self.edge_pred_layer(edge_sub_feature)
                else:
                    edge_pred[time*105:(time+1)*105] = self.edge_pred_layer(edge_sub_feature)
                del edge_sub_feature  # release the chunk before the next iteration
        else:
            edge_pred = self.edge_pred_layer(edge_feature)
        # Average-pool the 4*C-channel map to 2x2 and reshape into a
        # (E, 16*C, 1, 1) vector per edge (see vector_size in __init__).
        edge_pred = self.maxpool(edge_pred)
        edge_pred = edge_pred.view((edge_masks.shape[0], self.vector_size, 1, 1))
        if self.gnn:
            # Vector message passing (GNN baseline), same neighbour-max scheme
            # as the conv_mpn branch but on pooled 1x1 vectors.
            for t in range(self.times):
                feature_neighbor = torch.zeros_like(edge_pred)
                for edge_iter in range(edge_masks.shape[0]):
                    feature_temp = edge_pred[edge_index[1, torch.where(edge_index[0,:] == edge_iter)[0]]]
                    feature_neighbor[edge_iter] = torch.max(feature_temp, 0)[0]
                edge_pred = torch.cat((edge_pred, feature_neighbor), 1)
                edge_pred = self.loop_net[t](edge_pred)
        edge_pred = torch.flatten(edge_pred, 1)
        if device != device2:
            # Move back to the first device, where the classifier lives.
            edge_pred = edge_pred.to(device)
        fc = self.fc(edge_pred)
        return fc
class conv_mpn_model(nn.Module):
    """One round of Conv-MPN feature-map message passing.

    A stack of seven 3x3 conv stages (stride 1, padding 1, so spatial size
    is preserved), each followed by an in-place ReLU and a BatchNorm2d with
    track_running_stats=True.  The first four stages keep ``inchannels``
    channels, the fifth narrows to ``out_channels``, and the last two stay
    at ``out_channels``.
    """

    def __init__(self, inchannels, out_channels):
        super(conv_mpn_model, self).__init__()
        # This head only narrows the channel count; widening is unsupported.
        assert inchannels >= out_channels
        self.out_channels = out_channels
        # (in, out) channel pairs for the seven conv stages.
        stage_channels = [(inchannels, inchannels)] * 4
        stage_channels.append((inchannels, out_channels))
        stage_channels += [(out_channels, out_channels)] * 2
        stages = []
        for c_in, c_out in stage_channels:
            stages.append(nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1))
            stages.append(nn.ReLU(inplace=True))
            stages.append(nn.BatchNorm2d(c_out, track_running_stats=True))
        # Same module order/indices as a hand-written nn.Sequential would
        # have, so state_dict keys (seq.0 ... seq.20) are unchanged.
        self.seq = nn.Sequential(*stages)

    def forward(self, x):
        # Run the input through the whole conv stack.
        out = self.seq(x)
        return out
| [
"fuyangz@cs-vml-43.cs.sfu.ca"
] | fuyangz@cs-vml-43.cs.sfu.ca |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.