blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f383d554c135fc392f72f27540010b2c2a96e753 | 743da4642ac376e5c4e1a3b63c079533a5e56587 | /build/lib.win-amd64-3.6/fairseq/modules/adaptive_softmax.py | 1c60d09568cbafd7a449a66bea1936644528f85f | [
"MIT"
] | permissive | tmtmaj/Exploiting-PrLM-for-NLG-tasks | cdae1b6e451b594b11d8ecef3c1cd4e12fe51c9b | e8752593d3ee881cf9c0fb5ed26d26fcb02e6dd5 | refs/heads/main | 2023-06-16T08:26:32.560746 | 2021-07-14T17:50:19 | 2021-07-14T17:50:19 | 371,899,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,028 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import operator
import functools
import torch
import torch.nn.functional as F
from fairseq.modules.quant_noise import quant_noise
from torch import nn
class TiedLinear(nn.Module):
    """Linear projection over an externally owned (tied) weight tensor.

    The weight is shared with an embedding table rather than owned by this
    module; ``transpose=True`` applies the transposed weight instead.
    """

    def __init__(self, weight, transpose):
        super().__init__()
        self.weight = weight
        self.transpose = transpose

    def forward(self, input):
        # Choose the orientation once, then defer to the standard linear op.
        projection_weight = self.weight.t() if self.transpose else self.weight
        return F.linear(input, projection_weight)
class TiedHeadModule(nn.Module):
    """Head (shortlist) projection whose word weights are tied to the input embeddings.

    For each input vector it produces scores over the ``num_words`` shortlist
    words plus one score per tail cluster (``num_classes``).
    """

    def __init__(self, weights, input_dim, num_classes, q_noise, qn_block_size):
        super().__init__()
        # weights is a (embedding weight, projection weight) pair; only the
        # embedding weight is used here.
        tied_emb, _ = weights
        self.num_words, emb_dim = tied_emb.size()
        self.word_proj = quant_noise(TiedLinear(tied_emb, transpose=False), q_noise, qn_block_size)
        if input_dim != emb_dim:
            # Bridge the dimension mismatch with an extra (quant-noised) projection.
            self.word_proj = nn.Sequential(
                quant_noise(nn.Linear(input_dim, emb_dim, bias=False), q_noise, qn_block_size),
                self.word_proj,
            )
        self.class_proj = quant_noise(nn.Linear(input_dim, num_classes, bias=False), q_noise, qn_block_size)
        self.out_dim = self.num_words + num_classes
        # Dummy buffer used only to allocate outputs with the module's
        # device/dtype in forward().
        self.register_buffer('_float_tensor', torch.FloatTensor(1))

    def forward(self, input):
        # Flatten all leading dimensions into a single batch dimension.
        inp_sz = functools.reduce(operator.mul, input.shape[:-1], 1)
        out = self._float_tensor.new(inp_sz, self.out_dim)
        # First num_words columns: shortlist word scores; rest: cluster scores.
        out[:, :self.num_words] = self.word_proj(input.view(inp_sz, -1))
        out[:, self.num_words:] = self.class_proj(input.view(inp_sz, -1))
        return out
class AdaptiveSoftmax(nn.Module):
    """
    This is an implementation of the efficient softmax approximation for
    graphical processing units (GPU), described in the paper "Efficient softmax
    approximation for GPUs" (http://arxiv.org/abs/1609.04309).

    The vocabulary is split into a frequent "head" shortlist plus tail
    clusters (boundaries given by ``cutoff``); each tail cluster uses a
    smaller projection, shrunk by ``factor`` per cluster.
    """
    def __init__(self, vocab_size, input_dim, cutoff, dropout, factor=4., adaptive_inputs=None, tie_proj=False, q_noise=0, qn_block_size=8):
        super().__init__()

        if vocab_size > cutoff[-1]:
            # Add an implicit final cluster covering the rest of the vocabulary.
            cutoff = cutoff + [vocab_size]
        else:
            assert vocab_size == cutoff[
                -1], 'cannot specify cutoff larger than vocab size'

        # Head outputs: shortlist words plus one logit per tail cluster.
        output_dim = cutoff[0] + len(cutoff) - 1

        self.vocab_size = vocab_size
        self.cutoff = cutoff
        self.dropout = dropout
        self.input_dim = input_dim
        self.factor = factor
        self.q_noise = q_noise
        self.qn_block_size = qn_block_size

        self.lsm = nn.LogSoftmax(dim=1)

        if adaptive_inputs is not None:
            # Tie the head projection to the adaptive input embeddings.
            self.head = TiedHeadModule(adaptive_inputs.weights_for_band(0), input_dim, len(cutoff) - 1, self.q_noise, self.qn_block_size)
        else:
            self.head = quant_noise(nn.Linear(input_dim, output_dim, bias=False), self.q_noise, self.qn_block_size)

        self._make_tail(adaptive_inputs, tie_proj)

        def init_weights(m):
            # Skip tied modules: their weights belong to the input embeddings.
            if hasattr(m, 'weight') and not isinstance(m, TiedLinear) and not isinstance(m, TiedHeadModule):
                nn.init.xavier_uniform_(m.weight)

        self.apply(init_weights)

        self.register_buffer('version', torch.LongTensor([1]))

    def _make_tail(self, adaptive_inputs=None, tie_proj=False):
        """Build one (projection, dropout, output) stack per tail cluster."""
        self.tail = nn.ModuleList()
        for i in range(len(self.cutoff) - 1):
            # Projection dimension shrinks by `factor` for each later cluster.
            dim = int(self.input_dim // self.factor ** (i + 1))

            tied_emb, tied_proj = adaptive_inputs.weights_for_band(i + 1) \
                if adaptive_inputs is not None else (None, None)

            if tied_proj is not None:
                if tie_proj:
                    # Reuse the adaptive-input projection weight (transposed).
                    proj = quant_noise(TiedLinear(tied_proj, transpose=True), self.q_noise, self.qn_block_size)
                else:
                    # Same shape as the tied projection, but freshly trained.
                    proj = quant_noise(nn.Linear(tied_proj.size(0), tied_proj.size(1), bias=False), self.q_noise, self.qn_block_size)
            else:
                proj = quant_noise(nn.Linear(self.input_dim, dim, bias=False), self.q_noise, self.qn_block_size)

            if tied_emb is None:
                out_proj = nn.Linear(dim, self.cutoff[i + 1] - self.cutoff[i], bias=False)
            else:
                out_proj = TiedLinear(tied_emb, transpose=False)

            m = nn.Sequential(
                proj,
                nn.Dropout(self.dropout),
                quant_noise(out_proj, self.q_noise, self.qn_block_size),
            )

            self.tail.append(m)

    def upgrade_state_dict_named(self, state_dict, name):
        """Reject checkpoints from before versioning was introduced."""
        version_name = name + '.version'
        if version_name not in state_dict:
            raise Exception('This version of the model is no longer supported')

    def adapt_target(self, target):
        """
        In order to be efficient, the AdaptiveSoftMax does not compute the
        scores for all the word of the vocabulary for all the examples. It is
        thus necessary to call the method adapt_target of the AdaptiveSoftMax
        layer inside each forward pass.
        """

        target = target.view(-1)
        new_target = [target.clone()]
        target_idxs = []

        for i in range(len(self.cutoff) - 1):
            # Mask of targets that fall into tail cluster i.
            mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))
            # In the head, such targets are replaced by the cluster id.
            new_target[0][mask] = self.cutoff[0] + i

            if mask.any():
                target_idxs.append(mask.nonzero().squeeze(1))
                # Cluster-local target indices (re-based to the cluster start).
                new_target.append(target[mask].add(-self.cutoff[i]))
            else:
                target_idxs.append(None)
                new_target.append(None)

        return new_target, target_idxs

    def forward(self, input, target):
        """
        Args:
            input: (b x t x d)
            target: (b x t)
        Returns:
            2 lists: output for each cutoff section and new targets by cut off
        """

        input = input.contiguous().view(-1, input.size(-1))
        input = F.dropout(input, p=self.dropout, training=self.training)

        new_target, target_idxs = self.adapt_target(target)
        output = [self.head(input)]

        for i in range(len(target_idxs)):
            if target_idxs[i] is not None:
                # Only rows whose target lies in cluster i go through tail i.
                output.append(self.tail[i](input.index_select(0, target_idxs[i])))
            else:
                output.append(None)

        return output, new_target

    def get_log_prob(self, input, target):
        """
        Computes the log probabilities for all the words of the vocabulary,
        given a 2D tensor of hidden vectors.
        """

        bsz, length, dim = input.size()
        input = input.contiguous().view(-1, dim)

        if target is not None:
            _, target_idxs = self.adapt_target(target)
        else:
            target_idxs = None

        head_y = self.head(input)
        log_probs = head_y.new_zeros(input.size(0), self.vocab_size)

        # Head covers shortlist words plus one prior logit per tail cluster.
        head_sz = self.cutoff[0] + len(self.tail)
        log_probs[:, :head_sz] = self.lsm(head_y)
        # Cluster priors, added to the in-cluster log-probabilities below.
        tail_priors = log_probs[:, self.cutoff[0]: head_sz].clone()

        for i in range(len(self.tail)):
            start = self.cutoff[i]
            end = self.cutoff[i + 1]

            if target_idxs is None:
                # No targets given: fill every row for this cluster.
                tail_out = log_probs[:, start:end]
                tail_out.copy_(self.tail[i](input))
                log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None])
            elif target_idxs[i] is not None:
                # Only fill the rows whose target is in this cluster.
                idxs = target_idxs[i]
                tail_out = log_probs[idxs, start:end]
                tail_out.copy_(self.tail[i](input[idxs]))
                log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None])

        log_probs = log_probs.view(bsz, length, -1)
        return log_probs
| [
"qkrwjdgur09@naver.com"
] | qkrwjdgur09@naver.com |
310ce2c33c38531a7348178612c9a6507ab65fb8 | de46d637de67d6b2e633d7223c8449c2f2bed06f | /m01.py | 08f83aadd42287c8a32c61d95f56742055222198 | [] | no_license | brryyb/crawler | 56b58677ea8ac7b4af94d85211de85d89151faf8 | 990783cac4c01bb644e3d0aedbe1c7988614f839 | refs/heads/main | 2023-04-09T01:30:40.970271 | 2021-04-28T13:27:59 | 2021-04-28T13:27:59 | 358,171,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | import pymongo
# Connect to a local MongoDB instance and print the title of one question.
mongo_client = pymongo.MongoClient('mongodb://127.0.0.1:27017')
#print(mongo_client.server_info())  # check whether the connection succeeded
db = mongo_client['zhihu']
coll = db['questions']
# Cursor over all documents whose qid equals 32189846.
d = coll.find({'qid':32189846})
# NOTE(review): Cursor.count() is deprecated and removed in PyMongo 4.x;
# coll.count_documents({'qid': 32189846}) is the modern replacement. The
# value `c` is also never used.
c = d.count()
print(d.next()['title'])
"1151596323@qq.com"
] | 1151596323@qq.com |
215350b8ba658e9fd6f6dd005be25a144c8032e1 | 1c91270fc9bf5d2944cab3b84196f8f0793fdbf1 | /src/model/mapsed.py | 46ff3ad39b7cfeaf2752caef019e1adea968785f | [
"MIT"
] | permissive | echoyi/MAPSED | d0a19fb1f0e60ea35265cbe28d6e77a14af3ff3a | 98452e0101bc907c63bcf03666a3de955ebfb2ce | refs/heads/main | 2023-08-18T11:13:59.082309 | 2021-10-11T20:00:26 | 2021-10-11T20:00:26 | 416,068,549 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,679 | py | import torch.nn as nn
import torch
import torch.nn.functional as F
from src.model.encoder import Encoder
from src.model.decoder import Decoder
class MAPSED(nn.Module):
    """Sequence prediction model built around a frozen, pre-trained VAE.

    The VAE encodes each of the ``m`` input frames into a latent map; the
    Encoder/Decoder pair predicts the next ``n`` latent maps, which the VAE
    then decodes back into frames. Training loss combines a contrastive
    (NCE) term from the encoder with a reconstruction term.
    """
    def __init__(self, vae, latent_shape=(4,3,3), m=5, n=3, lambda_contrast=1, contrast ='L2',lambda_MAE=1):
        super(MAPSED, self).__init__()
        self.lambda_contrast = lambda_contrast  # weight of the NCE term
        self.lambda_MAE = lambda_MAE  # weight of the L1 part in the 'L1L2' loss
        self.vae = vae
        self.pred_seq = n  # number of frames to predict
        vae.training = False
        self.debug = False
        self.training = True
        self.seq_len = m  # number of input frames
        # Freeze the pre-trained VAE; only encoder/decoder are trained.
        for p in self.vae.parameters():
            p.requires_grad = False
        self.vae.eval()
        self.encoder = Encoder(latent_shape, m, contrast=contrast)
        self.decoder = Decoder(latent_shape, m=m, n=n)

    def forward(self, x, gt_seq=None, x_aug=None):
        """Predict the next ``pred_seq`` frames from the input sequence.

        Args:
            x: input frame sequence, indexed as x[:, i] per frame
               (assumed (batch, m, ...) -- TODO confirm against callers).
            gt_seq: ground-truth future frames; required when training.
            x_aug: augmented view of ``x`` for the contrastive term;
                required when training.

        Returns:
            (pred_seq, loss, nce, recon_loss); with ``self.debug`` set,
            ``decoded_maps`` is appended as a fifth element.
        """
        loss = 0
        recon_loss = 0
        feature_maps_seq = self._encode_feature_seq(x)
        feature_maps_seq_aug = None
        if self.training:
            # The augmented view is only needed for the contrastive loss.
            feature_maps_seq_aug = self._encode_feature_seq(x_aug)
        semantics, dynamics, z, nce = self.encoder(feature_maps_seq, self.training,
                                                   x_aug=feature_maps_seq_aug)
        decoded_maps = self.decoder(z)
        # Decode each predicted latent map back to a frame with the frozen VAE.
        pred_seq = []
        for i in range(self.pred_seq):
            pred_seq.append(self.vae.decode(decoded_maps[:, i]))
        pred_seq = torch.stack(pred_seq, dim=1)
        if self.training:
            recon_loss = self.loss_fn(pred_seq, gt_seq, metric='L1L2')
            loss = self.lambda_contrast * nce + recon_loss
        if self.debug:
            return pred_seq, loss, nce, recon_loss, decoded_maps
        else:
            return pred_seq, loss, nce, recon_loss

    def loss_fn(self, pred, target, metric='MSE', per_frame=False):
        """Reconstruction loss between predicted and target sequences.

        metric='MSE' uses squared error only; any other value uses
        lambda_MAE * L1 + L2. The per-element error is summed over dim 1
        and averaged over all remaining dimensions; ``per_frame`` divides
        by the sequence length.
        """
        loss = 0
        # sum over shape and mean over bs
        if metric == 'MSE':
            loss = loss + torch.mean(
                torch.sum(F.mse_loss(pred, target, reduction='none'), dim=1))
        else:
            loss = loss + torch.mean(
                torch.sum(self.lambda_MAE*F.l1_loss(pred, target, reduction='none') + F.mse_loss(pred, target, reduction='none'),
                          dim=1))
        if per_frame:
            loss = loss / pred.shape[1]
        return loss

    def _encode_feature_seq(self, x):
        """Encode each of the ``seq_len`` frames of ``x`` with the frozen VAE.

        Only the sampled latent ``z`` is kept (mu and var are discarded).
        """
        feature_maps_seq = []
        for i in range(self.seq_len):
            z, mu, var = self.vae.encode(x[:, i])
            feature_maps_seq.append(z)
        # (m, bs, c, w, h) ==> (bs, m, c, w, h)
        feature_maps_seq = torch.transpose(torch.stack(feature_maps_seq, dim=0), 0, 1)
        return feature_maps_seq
| [
"suiyiamy@gmail.com"
] | suiyiamy@gmail.com |
6e820d1d5f5954963c01bd964aa9c66f883d00d7 | 61dcd9b485bc5e6d07c4adf14f138eabaa9a23b5 | /evennumberedexercise/Exercise6_24.py | 2b58b016281f39f12c87f0eed9c9473c43981ad8 | [] | no_license | bong1915016/Introduction-to-Programming-Using-Python | d442d2252d13b731f6cd9c6356032e8b90aba9a1 | f23e19963183aba83d96d9d8a9af5690771b62c2 | refs/heads/master | 2020-09-25T03:09:34.384693 | 2019-11-28T17:33:28 | 2019-11-28T17:33:28 | 225,904,132 | 1 | 0 | null | 2019-12-04T15:56:55 | 2019-12-04T15:56:54 | null | UTF-8 | Python | false | false | 946 | py | def main():
count = 1
i = 2
while count <= 100:
# Display each number in five positions
if isPrime(i) and isPalindrome(i):
print(i, end = " ")
if count % 10 == 0:
print()
count += 1 # Increase count
i += 1
def isPrime(number):
    """Return True if ``number`` is prime.

    Fixes two defects of the original: numbers below 2 (including 0, 1 and
    negatives) were reported as prime, and trial division ran all the way
    to number/2 instead of stopping at sqrt(number).
    """
    if number < 2:
        return False  # primes are defined only for integers >= 2
    divisor = 2
    # A composite number must have a factor no larger than its square root.
    while divisor * divisor <= number:
        if number % divisor == 0:
            return False  # found a factor, so number is not prime
        divisor += 1
    return True  # no divisor found: number is prime
# Check whether an integer reads the same forwards and backwards.
def isPalindrome(number):
    """Return True if ``number`` equals its own digit-reversal.

    Uses the same arithmetic mirroring scheme as reverse(): peel off the
    last digit and push it onto an accumulator.
    """
    mirrored, remaining = 0, number
    while remaining != 0:
        mirrored = mirrored * 10 + remaining % 10
        remaining = remaining // 10
    return number == mirrored
def reverse(number):
    """Return the digit-reversal of an integer, e.g. reverse(456) -> 654.

    Fixes an infinite loop on negative input: in Python ``-1 // 10 == -1``,
    so the original loop variable never reached zero. Negatives are now
    reversed by magnitude with the sign restored (reverse(-450) -> -54).
    """
    sign = -1 if number < 0 else 1
    number = abs(number)
    result = 0
    while number != 0:
        # Peel off the last digit and append it to the accumulator.
        number, remainder = divmod(number, 10)
        result = result * 10 + remainder
    return sign * result
# Only run when executed as a script, not when this module is imported.
if __name__ == "__main__":
    main()
"38396747+timmy61109@users.noreply.github.com"
] | 38396747+timmy61109@users.noreply.github.com |
26c09af3c8d940821f4738dc53fabb4e746fbe63 | 7b475586d089f7da385bf125c2e3eb135e04edb7 | /petfactory/rigging/cable_rig/util/blend_positions.py | 22989c0ef708774888682adc9f3c0dc2185bd21a | [] | no_license | EriLee/petfactory_maya_scripts | e3085654f36fcb56b95601b4f801da9d4d990a8a | ba002212d61c94e87579fdbc5968282713b895b6 | refs/heads/master | 2021-01-18T08:00:47.123047 | 2015-03-30T08:10:35 | 2015-03-30T08:10:35 | 33,191,651 | 2 | 2 | null | 2015-03-31T14:57:41 | 2015-03-31T14:57:41 | null | UTF-8 | Python | false | false | 948 | py | pm.openFile('/Users/johan/Documents/Projects/python_dev/scenes/empty_scene.mb', f=True)
ctrl = pm.circle(ch=False)[0]
crv = pm.curve(d=3, ep=[(0,0,0), (5,5,0)])
crv_shape = crv.getShape()
# use a vector product node to get a local position to world position
vector_prod = pm.createNode('vectorProduct')
# set to point matrix product
vector_prod.operation.set(4)
# set the position
vector_prod.input1Y.set(5)
ctrl.worldMatrix[0] >> vector_prod.matrix
# create a pointOncrvinfo to get a ws position from a curve param
point_on_crv = pm.createNode('pointOnCurveInfo')
# set which param to sample
point_on_crv.parameter.set(0.5)
point_on_crv.turnOnPercentage.set(1)
crv_shape.worldSpace[0] >> point_on_crv.inputCurve
# blend the positions
blend_col = pm.createNode('blendColors')
vector_prod.output >> blend_col.color1
point_on_crv.position >> blend_col.color2
# hook up the output
loc = pm.spaceLocator()
blend_col.output >> loc.translate
| [
"johan@petfactory.se"
] | johan@petfactory.se |
37fa578da37f5e4d30811702cf02171e05fcab25 | 8e65928ef06e0c3392d9fa59b7b716f941e933c3 | /python/leetcode/playground/built-in-functions/bool/bool.py | d16f648de09fd46fda6b3f360438a9ee714eeeb9 | [] | no_license | KoryHunter37/code-mastery | 0c79aed687347bfd54b4d17fc28dc110212c6dd1 | 6610261f7354d35bde2411da8a2f4b9dfc238dea | refs/heads/master | 2022-02-24T10:34:03.074957 | 2019-09-21T19:24:57 | 2019-09-21T19:24:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | # bool([x])
# Return a Boolean value of True or False.
# Items are False if they are False, Zero, None, or an Empty Collection.
# > True
print(bool(True))
# > True
print(bool(True or False))
# > False
print(bool(1 and 0))
# > False
print(bool([]))
# > True
# Even though the only element is an empty list, because the collection is not empty, this is considered True.
print(bool([[]]))
# > True
print(bool(["Cat", "Rat", "Bat", 24.81932]))
| [
"koryhunter@gatech.edu"
] | koryhunter@gatech.edu |
988de8db92e7a10b3e6dd7bcbef6acf85aa61a4f | bfb0c07b2674f64395fe9234d8fa2c644a126582 | /Flask/g_wifi/g_wifi/cwlan.py | 76611daedd058a32d70e9003099665ce4a10b37b | [] | no_license | bcbobin/Flask-app | f131d3c2c106865e4bf94bd2be909f4aedcec6b0 | 90a25ec43dac66dafbc4ae48a8ebefa86a399d77 | refs/heads/master | 2020-04-16T21:26:05.318066 | 2019-03-25T14:38:58 | 2019-03-25T14:38:58 | 165,925,562 | 0 | 0 | null | 2019-03-25T14:38:59 | 2019-01-15T21:21:45 | HTML | UTF-8 | Python | false | false | 2,316 | py | # Created by Bogdan Bobin
# Last Updated February 19/19
# Version 0.7.0
################################################################
import cgi, cgitb
import sys , traceback
#for system interaction
import sys
#for http interaction
import json
import urllib
#for reading excel data, pandas requires xlrd
#import pandas as pd
#import xlrd
#for date and time
import datetime
import string
import random
import smtplib
from email.mime.text import MIMEText
import netmiko
#if connection timeout - OSError - Socket is closed - must connectHandler again to fix
def add_user(cont1, cont2, mac, start, end):
    """Add a MAC-filter entry on both wireless LAN controllers.

    Args:
        cont1, cont2: netmiko connection parameter dicts, one per controller.
        mac: client MAC address to whitelist (e.g. "6c:4b:90:27:4f:a2").
        start, end: validity dates, sent to the controller as "start:end".

    Returns:
        -1 if a controller rejects the MAC address ("Incorrect input"),
         1 if the entry already exists on both controllers,
         0 on success.
    """
    command = "config macfilter add "+ str(mac) +" 3 cwlan-int " + str(start) + ":" + str(end)
    connect1 = netmiko.ConnectHandler(**cont1)
    output1 = connect1.send_command(command)
    connect1.disconnect()
    connect2 = netmiko.ConnectHandler(**cont2)
    output2 = connect2.send_command(command)
    connect2.disconnect()
    # BUG FIX: the original wrote `"Incorrect input" in output1 or output2`,
    # which parses as `(... in output1) or bool(output2)` and was true
    # whenever the second controller produced any output at all (same
    # precedence mistake in the `and` branch below).
    if "Incorrect input" in output1 or "Incorrect input" in output2:
        return -1  # the MAC address was rejected by a controller
    elif "already exists" in output1 and "already exists" in output2:
        return 1  # the user already exists on both controllers
    else:
        return 0  # entry added on both controllers
def remove_user(cont1, cont2, mac):
    """Delete a MAC-filter entry from both wireless LAN controllers.

    Returns -1 only when the entry exists on neither controller; deleting
    succeeds (returns 0) as long as it existed on at least one.
    """
    connect1 = netmiko.ConnectHandler(**cont1)
    connect2 = netmiko.ConnectHandler(**cont2)
    output1 = connect1.send_command("config macfilter delete "+ mac)
    output2 = connect2.send_command("config macfilter delete "+ mac)
    connect1.disconnect()
    connect2.disconnect()
    # BUG FIX: the original `"does not exist" in output1 and output2` parsed
    # as `(... in output1) and bool(output2)`; both outputs are now checked.
    # Using `and` because if the entry exists on one controller, it is fine
    # to delete and proceed.
    if "does not exist" in output1 and "does not exist" in output2:
        return -1
    else:
        return 0
def summary(connect):
    """Placeholder: intended to report the controller's MAC-filter summary.

    NOTE(review): currently a stub that ignores ``connect`` and returns 0.
    """
    #show macfilter summary - not enough information, will need to pull data from custom user database
    return 0
| [
"bogdan3964@hotmail.com"
] | bogdan3964@hotmail.com |
eb5d10fe443009630273d8441e3e8a05bf1e6e0e | 54b41d77617849a77354934a6b62c3ed2cfd231b | /Chapter2/Case_study/menu.py | 2fd9622cac77dd8e8761c81b561949bc14c5a835 | [] | no_license | Chino57/python_object_oriented_programming | b52a09e3c5c95f469cbc1cf246af2eaabce813e6 | 108add39c52f62e84666b082599d4fdeae4e7f00 | refs/heads/master | 2020-06-06T02:25:18.036584 | 2019-06-30T18:48:46 | 2019-06-30T18:48:46 | 192,612,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,851 | py | import sys
from notebook import Notebook
class Menu:
    """Command-line menu: displays options and dispatches the user's choice."""

    def __init__(self):
        self.notebook = Notebook()
        # Dispatch table mapping the menu option string to its handler.
        self.choices = {
            "1": self.show_notes,
            "2": self.search_notes,
            "3": self.add_note,
            "4": self.modify_note,
            "5": self.quit,
        }

    def display_menu(self):
        """Print the available menu options."""
        print(
            """
Notebook Menu
1. Show all notes
2. Search Notes
3. Add Note
4. Modify Note
5. Quit
"""
        )

    def run(self):
        """Display the menu and respond to choices until the user quits."""
        while True:
            self.display_menu()
            # BUG FIX: prompt read "Enter and option"; corrected to "an".
            choice = input("Enter an option: ")
            action = self.choices.get(choice)
            if action:
                action()
            else:
                print("{0} is not a valid choice".format(choice))

    def show_notes(self, notes=None):
        """Print id, tags and memo of each note (all notes when none given)."""
        if not notes:
            notes = self.notebook.notes
        for note in notes:
            # BUG FIX: the format string was "{0}: {1]\n{2}" -- the mismatched
            # "]" made str.format raise ValueError on every call.
            print("{0}: {1}\n{2}".format(note.id, note.tags, note.memo))

    def search_notes(self):
        """Prompt for a filter string and show the matching notes."""
        filter = input("Search for: ")
        notes = self.notebook.search(filter)
        self.show_notes(notes)

    def add_note(self):
        """Prompt for a memo and store it as a new note."""
        memo = input("Enter a memo: ")
        self.notebook.new_note(memo)
        print("Your note has been added. ")

    def modify_note(self):
        """Prompt for a note id and optionally update its memo and/or tags."""
        id = input("Enter a note id: ")
        memo = input("Enter a memo: ")
        tags = input("Enter tags: ")
        if memo:
            self.notebook.modify_memo(id, memo)
        if tags:
            self.notebook.modify_tags(id, tags)

    def quit(self):
        """Thank the user and terminate the program."""
        print("Thank you for using your notebook today.")
        sys.exit(0)
# Run the menu loop only when executed as a script (not on import).
if __name__ == "__main__":
    Menu().run()
"pernin.julien@gmail.com"
] | pernin.julien@gmail.com |
fcb2b13bbe87a50f815f9f8969bd729afae761ce | 6a8aef47b701898d6805aeb3a4c47af2c9fb7c22 | /PR_sept9.py | f699eddffcf634f0752df14d10e7a3a2e60b1d21 | [] | no_license | stephendoty826/reading | 3f7e1e9048b09b6b50d992dd255e5b47849bb70f | 2be7c44286ef95aa613c867468592dcb2c0c4cec | refs/heads/main | 2023-08-06T14:04:53.647835 | 2021-10-02T13:04:44 | 2021-10-02T13:04:44 | 412,798,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | # To Do List
# todos = ["pet the cat", "go to work", "shop for groceries", "go home", "feed the cat"]
# todos.extend(["binge watch a show", "go to sleep"])
# add_todo = input("Please add a todo to your list. To exit, press Enter. ")
# while add_todo != "":
# todos.append(add_todo)
# count = 1
# print("\n To do:")
# print("===================")
# for todo in todos:
# print("%d: %s" % (count, todo))
# count += 1
# print("\n")
# add_todo = input("Please add another todo to your list. To exit, press Enter. ")
# print("Have a productive day!")
| [
"stephendoty826@yahoo.com"
] | stephendoty826@yahoo.com |
013828da34812312109f6a6d4d56bcc9cabb1128 | 39d4e5a2687c0f2e10d580f96c5d207debad9c52 | /model/CoinAsset.py | f2a1b4a283b2d5ba715d765f92eec5f60ed7abbc | [] | no_license | mratose/cryptoAppComparison | 7d8a9ff1328dfb1684b9084b998d334fa7f5fae2 | 4168a097b83dd3cbd9aa3fe8490fd699f317b545 | refs/heads/master | 2022-08-16T04:50:58.025127 | 2020-05-19T08:14:05 | 2020-05-19T08:14:05 | 263,580,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | class CoinAsset:
def __init__(self, asset_id, name, type_is_crypto, data_start,
data_end, data_quote_start, data_quote_end, data_orderbook_start, data_orderbook_end,
data_trade_start, data_trade_end, data_symbols_count, volume_1hrs_usd,
volume_1day_usd, volume_1mth_usd, price_usd):
self.asset_id = asset_id
self.name = name
self.type_is_crypto = type_is_crypto
self.data_start = data_start
self.data_end = data_end
self.data_quote_start = data_quote_start
self.data_quote_end = data_quote_end
self.data_orderbook_start = data_orderbook_start
self.data_orderbook_end = data_orderbook_end
self.data_trade_start = data_trade_start
self.data_trade_end = data_trade_end
self.data_symbols_count = data_symbols_count
self.volume_1hrs_usd = volume_1hrs_usd
self.volume_1day_usd = volume_1day_usd
self.volume_1mth_usd = volume_1mth_usd
self.price_usd = price_usd
| [
"olajire.atose@gmail.com"
] | olajire.atose@gmail.com |
9214eed34cce1626804f3fb053f01667c2901288 | 47673df0b8760818eccdaf2bb839b3911590a808 | /Reorder.py | 4e636a138f3067e02450a08797debd4c8f01d49a | [] | no_license | mrrocketraccoon/Fusion360-to-SDF-Exporter | 87516238dccc76698bd1f3d3132f6e81fe8498b9 | ed7dde71c12de677733df4f3a890a0a4565beeac | refs/heads/master | 2021-08-08T14:16:10.317828 | 2017-11-10T13:40:03 | 2017-11-10T13:40:03 | 110,250,713 | 2 | 0 | null | 2017-11-10T13:36:19 | 2017-11-10T13:36:19 | null | UTF-8 | Python | false | false | 18,131 | py | import adsk.core
import adsk.fusion
import traceback
#import os.path
import xml.etree.ElementTree as ET
import math
import xml.dom.minidom as DOM
import os
## @package SDFusion
# This is an exporter for Autodesk Fusion 360 models to SDFormat.
#
# This can be loaded as an addon in Fusion 360.
# It exports all rigid groups of the robot model as links
# to STL and creates nodes in the SDF file for them.
# It creates SDF nodes for all joints of the robot model.
# Supported joint types are: "fixed", "revolute", and "ball".
## Global variable to make the Fusion 360 design object accessible
# for every function.
# NOTE(review): presumably assigned by the add-in entry point before the
# export helpers run -- not visible in this chunk.
design = None
## Global variable to make the output file directory accessible for
# every function.
# NOTE(review): hard-coded user-specific path.
fileDir = "C:/Users/Usuario/Desktop/Legs"
if not os.path.exists(fileDir):
    os.makedirs(fileDir)
## Global variable to make the robot model name accessible for
# every function.
modelName = "Legs"
## Global variable to make the root occurrence accessible for
# every function.
# NOTE(review): presumably assigned elsewhere before the STL helpers run.
rootOcc = None
## Transforms a matrix from Fusion 360 to Gazebo.
#
# This transforms a matrix given in the Fusion 360 coordinate system
# to one in the Gazebo cooridnate system.
#
# @param self a matrix given wrt the Fusion 360 coordinate system
# @return the matrix wrt the Gazebo coordinate system
def gazeboMatrix(self):
    """Apply the Fusion 360 -> Gazebo coordinate change to this matrix.

    Multiplies in place by a fixed rotation (the cells below turn the
    identity into a 90-degree rotation about X) and returns the mutated
    matrix so calls can be chained.
    """
    axis_swap = adsk.core.Matrix3D.create()
    # (row, column, value) cells that differ from the identity matrix.
    for row, column, value in ((1, 1, 0), (1, 2, -1), (2, 1, 1), (2, 2, 0)):
        axis_swap.setCell(row, column, value)
    self.transformBy(axis_swap)
    return self
## Converts three double values to string.
#
# This function converts three double values to a string separated by spaces.
#
# @param x the first double value
# @param y the second double value
# @param z the third double value
# @return the string of these values
def vectorToString(x, y, z):
    """Join three values into a single space-separated string."""
    return " ".join(str(component) for component in (x, y, z))
## Builds SDF pose node from vector.
#
# This function builds the SDF pose node for every joint.
#
# @param vector the vector pointing to the origin of the joint.
# @return the SDF pose node
def sdfPoseVector(vector):
    """Build an SDF <pose> element at the given point with zero rotation.

    The input point is in cm (Fusion 360) and is converted to m (SI); the
    pose text is "x y z roll pitch yaw".
    """
    pose = ET.Element("pose")
    # Scale cm -> m and format the position; rotation is fixed at zero.
    position = vectorToString(0.01 * vector.x, 0.01 * vector.y, 0.01 * vector.z)
    rotation = vectorToString(0, 0, 0)
    pose.text = position + " " + rotation
    return pose
## Builds SDF pose node from matrix.
#
# This function builds the SDF pose node for every link.
#
# @param matrix the transformation matrix of the link
# @return the SDF pose node
def sdfPoseMatrix(matrix):
    """Build an SDF <pose> node (position + roll/pitch/yaw) from a transform.

    The translation is converted from cm (Fusion 360) to m (SI); the
    rotation part is decomposed into ZYX (yaw-pitch-roll) Euler angles.
    """
    pose = ET.Element("pose")
    # convert from cm (Fusion 360) to m (SI)
    trans = matrix.translation
    x = 0.01 * trans.x
    y = 0.01 * trans.y
    z = 0.01 * trans.z
    pos = vectorToString(x, y, z)
    # calculate roll pitch yaw from transformation matrix
    r11 = matrix.getCell(0, 0)
    r21 = matrix.getCell(1, 0)
    r31 = matrix.getCell(2, 0)
    r32 = matrix.getCell(2, 1)
    r33 = matrix.getCell(2, 2)
    # Standard ZYX Euler extraction; degenerate when cos(pitch) == 0
    # (gimbal lock, pitch = +-90 degrees) -- not handled here.
    pitch = math.atan2(-r31, math.sqrt(math.pow(r11, 2) + math.pow(r21, 2)))
    cp = math.cos(pitch)
    yaw = math.atan2(r21 / cp, r11 / cp)
    roll = math.atan2(r32 / cp, r33 / cp)
    rot = vectorToString(roll, pitch, yaw)
    pose.text = pos + " " + rot
    return pose
## Builds SDF inertial node from physical properties.
#
# This function builds the SDF inertial node for every link.
#
# @param physics the physical properties of a link
# @return the SDF inertial node
def sdfInertial(physics):
    """Build the SDF <inertial> node (COM pose, mass, inertia) for a link."""
    inertial = ET.Element("inertial")
    # Pose of the centre of mass.
    inertial.append(sdfPoseVector(physics.centerOfMass))
    # Mass node.
    mass = ET.Element("mass")
    mass.text = str(physics.mass)
    inertial.append(mass)
    # Full inertia tensor.
    inertial.append(sdfInertia(physics))
    return inertial
## Builds SDF node for one moment of inertia.
#
# This helper function builds the SDF node for one moment of inertia.
#
# @param tag the tag of the XML node
# @param value the text of the XML node
# @return the SDF moment of inertia node
def sdfMom(tag, value):
    """Build one moment-of-inertia node, converting kg*cm^2 to kg*m^2."""
    moment = ET.Element(tag)
    # convert from kg/cm^2 (Fusion 360) to kg/m^2 (SI)
    moment.text = str(value * 0.0001)
    return moment
## Builds SDF inertia node from physical properties.
#
# This function builds the SDF inertia node for every link.
#
# @param physics the physical properties of a link
# @return the SDF inertia node
def sdfInertia(physics):
    """Build the SDF <inertia> node from the six moments of inertia."""
    inertia = ET.Element("inertia")
    (returnValue, xx, yy, zz, xy, yz, xz) = physics.getXYZMomentsOfInertia()
    # Emit the tensor components in the order SDF expects.
    for tag, moment in (("ixx", xx), ("ixy", xy), ("ixz", xz),
                        ("iyy", yy), ("iyz", yz), ("izz", zz)):
        inertia.append(sdfMom(tag, moment))
    return inertia
## Builds SDF link node.
#
# This function builds the SDF link node for every link.
#
# @param lin the link to be exported
# @return the SDF link node
def linkSDF(lin):
    """Build the SDF <link> node (pose, inertial, collision, visual) for a link.

    The mesh URI points at "model://<modelName>/meshes/<linkName>.stl" and
    the mesh is scaled from mm to m.
    """
    linkName = lin.component.name
    link = ET.Element("link", name=linkName)
    # build pose node
    matrix = gazeboMatrix(lin.transform)
    pose = sdfPoseMatrix(matrix)
    link.append(pose)
    # get physical properties of occurrence
    physics = lin.physicalProperties
    # build inertial node
    inertial = sdfInertial(physics)
    link.append(inertial)
    # build collision node
    collision = ET.Element("collision", name = linkName + "_collision")
    link.append(collision)
    # build geometry node
    geometry = ET.Element("geometry")
    collision.append(geometry)
    # build mesh node
    mesh = ET.Element("mesh")
    geometry.append(mesh)
    # build uri node
    uri = ET.Element("uri")
    global modelName
    uri.text = "model://" + modelName + "/meshes/" + linkName + ".stl"
    mesh.append(uri)
    # scale the mesh from mm to m
    scale = ET.Element("scale")
    scale.text = "0.001 0.001 0.001"
    mesh.append(scale)
    # build visual node (equal to collision node)
    # NOTE(review): the same geometry Element object is appended to both the
    # collision and the visual node, so the two parents share one subtree;
    # ElementTree serializes it under each parent, which appears intentional.
    visual = ET.Element("visual", name = linkName + "_visual")
    visual.append(geometry)
    link.append(visual)
    return link
## Builds SDF joint node.
#
# This function builds the SDF joint node for every joint type.
#
# @param joi the joint
# @param name_parent the name of the parent link
# @param name_child the name of the child link
# @return the SDF joint node
def jointSDF(joi, name_parent, name_child):
    """Build the SDF <joint> node for a Fusion 360 joint.

    Supported Fusion joint type codes are fixed (0), revolute (1) and
    ball (6); every other code maps to an empty type string. Only revolute
    joints carry extra child nodes (the axis description).
    """
    # Fusion jointType code -> SDF joint type name.
    typeByCode = {0: "fixed", 1: "revolute", 6: "ball"}
    jType = joi.jointMotion.jointType
    jointType = typeByCode.get(jType, "")
    jointInfo = revoluteJoint(joi) if jType == 1 else []
    joint = ET.Element("joint", name=joi.name, type=jointType)
    # build parent node
    parent = ET.Element("parent")
    parent.text = name_parent
    joint.append(parent)
    # build child node
    child = ET.Element("child")
    child.text = name_child
    joint.append(child)
    # build pose node at the joint origin
    joint.append(sdfPoseVector(joi.geometryOrOriginOne.origin))
    joint.extend(jointInfo)
    return joint
## Builds SDF axis node for revolute joints.
#
# This function builds the SDF axis node for revolute joint.
#
# @param joi one revolute joint object
# @return a list of information nodes (here one axis node)
# for the revolute joint
def revoluteJoint(joi):
    """Build the SDF <axis> node (axis vector, limits, frame) for a revolute joint.

    Returns a list of info nodes so the caller can .extend() the joint node.
    """
    info = []
    # build axis node
    axis = ET.Element("axis")
    xyz = ET.Element("xyz")
    vector = joi.jointMotion.rotationAxisVector
    xyz.text = vectorToString(vector.x, vector.y, vector.z)
    axis.append(xyz)
    # build limit node
    mini = joi.jointMotion.rotationLimits.minimumValue
    maxi = joi.jointMotion.rotationLimits.maximumValue
    limit = ET.Element("limit")
    axis.append(limit)
    # Lower and upper limit have to be switched and inverted,
    # because Fusion 360 moves the parent link wrt to the
    # child link and Gazebo moves the child link wrt to the
    # parent link.
    lower = ET.Element("lower")
    lower.text = str(-maxi)
    limit.append(lower)
    upper = ET.Element("upper")
    upper.text = str(-mini)
    limit.append(upper)
    # build frame node
    # use_parent_model_frame = 0: the axis is given in the joint frame.
    frame = ET.Element("use_parent_model_frame")
    frame.text = "0"
    axis.append(frame)
    info.append(axis)
    return info
## Plain STL export.
##
# @param occ the occurrence to be exported
# @param linkName the name of the created STL file
def exportToSTL(occ, linkName):
    """Export one occurrence to <fileDir>/meshes/<linkName>.stl.

    Creates the meshes folder on demand, then uses the design's export
    manager to write the STL file.
    """
    global design
    global fileDir
    meshFolder = fileDir + "/meshes/"
    if not os.path.exists(meshFolder):
        os.makedirs(meshFolder)
    fileName = meshFolder + linkName
    desExp = design.exportManager
    stlExportOptions = desExp.createSTLExportOptions(occ, fileName)
    desExp.execute(stlExportOptions)
## Exports a rigid group to STL.
## Transforms a matrix from Fusion 360 to Gazebo.
#
# This exports a rigid group as one STL file.
# For this all components of the rigidGroup are copied to a new component.
#
# @param rig the rigid group to be exported
# @return a new occurrence which is used to export the
# relevant information to SDFormat
def rigidGroupToSTL(rig):
    """Export one rigid group as a single STL file.

    All bodies of every occurrence in the group are copied into a freshly
    created component, which is then exported and returned so the caller
    can emit the matching SDF information.
    """
    global rootOcc
    linkName = rig.name
    # Fresh occurrence that will aggregate every body of the group.
    linkOcc = rootOcc.addNewComponent(adsk.core.Matrix3D.create())
    linkOcc.component.name = linkName
    for occurrence in rig.occurrences:
        for body in occurrence.bRepBodies:
            body.copyToComponent(linkOcc)
    exportToSTL(linkOcc, linkName)
    return linkOcc
## Exports an single occurrence to STL.
#
# This exports a single Fusion occurence as an STL file.
#
# @param occ the occurrence that needs to be exported.
# @return a new occurrence which is used to export the
# relevant information to SDFormat
def occurrenceToSTL(occ):
global rootOcc
linkName = clearName(occ.name)
# create new occurrence
linkOcc = rootOcc.addNewComponent(adsk.core.Matrix3D.create())
linkOcc.component.name = linkName
# copy all bodies of the occurrence to the new occurrence
allBod = occ.bRepBodies
for bod in allBod:
bod.copyToComponent(linkOcc)
# export new occurrence to STL
exportToSTL(linkOcc, linkName)
return linkOcc
## Clear filenames of unwanted characters
#
# This function replaces all ':' with underscores and deletes spaces in filenames.
# to one in the Gazebo cooridnate system.
#
# @param name a filename
# @return the filename without ':' and spaces
def clearName(name):
name = name.replace(":", "_")
name = name.replace(" ", "")
return name
def export_next(parent_name, terminate):
# design = adsk.fusion.Design.cast(product)
# get root component in this design
# rootComp = design.rootComponent
global model
global allRigidGroups
global allComponents
compareJoint = 'empty'
if compareJoint not in jointsList:
for com in allComponents:
if com is not None:
allJoints = com.joints
#export child joint and link
for joi in allJoints:
if joi is not None:
if joi.name not in jointsList:
one = joi.occurrenceOne
two = joi.occurrenceTwo
#joint_parent = clearName(one.name)
#joint_child = clearName(two.name)
#missing_link = True
# print("one")
# print(one.name)
# print("two")
# print(two.name)
link_parent = None
link_child = None
for rig in allRigidGroups:
value_parent = rig.occurrences.itemByName(one.name)
value_child = rig.occurrences.itemByName(two.name)
if value_parent is not None:
link_parent = rig.name
print('possible parent: ', link_parent)
if value_child is not None:
link_child = rig.name
#missing_link = False
print('possible parent:', link_child)
if link_parent is None or link_child is None:
ui.messageBox('Error: Please include all objects in rigid groups!')
terminate = 1
print('Parent component is: ', parent_name)
print("export joint: ", joi.name)
joint = jointSDF(joi, link_parent, link_child)
model.append(joint)
jointsList.append(joi.name)
compareJoint = joi.name
if link_child != parent_name and link_parent != parent_name:
print('there was a bifurcation')
for rig in allRigidGroups:
if rig.name == link_child and rig.name != parent_name :
linkOcc = rigidGroupToSTL(rig)
link = linkSDF(linkOcc)
model.append(link)
#delete the temporary new occurrence
linkOcc.deleteMe()
#Call doEvents to give Fusion a chance to react.
adsk.doEvents()
print('export_next was called with link_child = parent')
export_next(link_child, terminate)
elif rig.name == link_parent and rig.name != parent_name:
# jointsList.append(link_child)
#export_next(link_child, linksList)
linkOcc = rigidGroupToSTL(rig)
link = linkSDF(linkOcc)
model.append(link)
#delete the temporary new occurrence
linkOcc.deleteMe()
#Call doEvents to give Fusion a chance to react.
adsk.doEvents()
print('export_next was called with link_parent = parent')
print("exported link ", rig.name)
export_next(link_parent, terminate)
if terminate == 1:
return 0
## Exports a robot model from Fusion 360 to SDFormat.
def run(context):
global ui
ui = None
try:
app = adsk.core.Application.get()
ui = app.userInterface
# get active design
global product
product = app.activeProduct
global design
design = adsk.fusion.Design.cast(product)
# get root component in this design
rootComp = design.rootComponent
# get all occurrences within the root component
global rootOcc
rootOcc = rootComp.occurrences
# build sdf root node
root = ET.Element("sdf", version="1.6")
global model
model = ET.Element("model", name=modelName)
root.append(model)
### 1)///// get root component
# Select an occurrence.
occSel = ui.selectEntity('Select root link', 'Occurrences')
if occSel:
occ = adsk.fusion.Occurrence.cast(occSel.entity)
result = ''
for rigidGroup in occ.rigidGroups:
result += rigidGroup.name
if result == '':
ui.messageBox('No rigid groups for the selected occurrence.')
else:
ui.messageBox('The rigid groups below are on the selected occurrence:' + result)
linkOcc = rigidGroupToSTL(rigidGroup)
link = linkSDF(linkOcc)
model.append(link)
#delete the temporary new occurrence
linkOcc.deleteMe()
#Call doEvents to give Fusion a chance to react.
adsk.doEvents()
global allRigidGroups
allRigidGroups = rootComp.allRigidGroups
global allComponents
allComponents = design.allComponents
#2) search for parent component
parent_name = result
global jointsList
jointsList = []
global terminate
terminate = 0
export_next(parent_name, terminate)
# get all construction points that serve as viaPoints
filename = fileDir + "/model.sdf"
domxml = DOM.parseString(ET.tostring(root))
pretty = domxml.toprettyxml()
file = open(filename, "w")
file.write(pretty)
file.close()
ui.messageBox("SDF file of model " + modelName + " written to '" + fileDir + "'.")
except:
if ui:
ui.messageBox('Failed:\n{}'.format(traceback.format_exc())) | [
"noreply@github.com"
] | noreply@github.com |
86a043a3a84571bfb640be0a1695deb0b98650e6 | 4d76450e30717d66358d7764e9bdded842dc6174 | /bayes_opt.py | 78ee3cb2c14c27b165366c71088f72a902e16929 | [] | no_license | rlyapin/bayes_opt | 6f4ab6d9082c3c5977eb42971186d2643002a45c | a17495ec7e66218a42b860d581c7dbff38f8fd44 | refs/heads/master | 2021-05-11T19:59:39.097753 | 2018-05-15T21:11:30 | 2018-05-15T21:11:30 | 117,428,020 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,315 | py | # This script implements class for run Bayesian Optimizations
# For now it is implemented with EI as an acqusition function
# More acquisition functions may follow
import math
import numpy as np
import scipy as sp
import scipy.stats
from gp import GP
# Dummy class just to hold details of MCMC
class MCMCSampler:
def __init__(self, log_likelihood, mcmc_opts):
# Class for doing MCMC sampling:
# Below l is supposed to stand for kernel hyperparameters
# log_likelihood: log_likelihood function (l -> log_likelihood)
# mcmc_opts is supposed to be a map with the following entries:
# 'prior': function for prior pdf (l -> pdf value)
# 'icdf': function for inverse cdf ([0, 1] -> l)
# 'jump': function for mcmc exploration (l -> l)
# 'burn_period': number of mcmc iterations to discard before sampling
# 'mcmc_samples': number of kernel hyperparameters to return
self.log_likelihood = log_likelihood
self.mcmc_opts = mcmc_opts
def posterior_sample(self):
# Below l is supposed to stand for kernel hyperparameters
# This function performs Bayesian MCMC sampling for Gaussian kernel hyperparameters
# Specifically, the first point is sampled using inverse cdf for l_prior
# Moves are suggested using l_jump function
# Moves are accepted / rejected with Metropolis-Hastings algorithms
# (i.e. true posterior density is proportional to exp(log_likelihood) * l_prior,
# ratio of posterior values give the probability of acception a move)
# First burn_period samples of l are discarded and n_samples consecutive samples are the output of a function
# MCMC is concerned with the ratio of true probabilities
# However, for efficiency reasons we express everything through log-likelihoods
log_posterior = lambda l: self.log_likelihood(l) + np.log(self.mcmc_opts["prior"](l))
l = self.mcmc_opts["icdf"](np.random.rand())
past_log_posterior = log_posterior(l)
for _ in range(self.mcmc_opts["burn_period"]):
# Adding try except block in case log_posterior sampling fails
# May happen if l jumps to region outside og prior domain
try:
next_l = self.mcmc_opts["jump"](l)
next_log_posterior = log_posterior(next_l)
if np.log(np.random.randn()) < (next_log_posterior - past_log_posterior):
l = next_l
past_log_posterior = next_log_posterior
except:
pass
sampled_l = []
for _ in range(self.mcmc_opts["mcmc_samples"]):
# Adding try except block in case log_posterior sampling fails
# May happen if l jumps to region outside og prior domain
try:
next_l = self.mcmc_opts["jump"](l)
next_log_posterior = log_posterior(next_l)
if np.log(np.random.randn()) < (next_log_posterior - past_log_posterior):
l = next_l
past_log_posterior = next_log_posterior
except:
pass
sampled_l.append(l)
return sampled_l
class BayesOpt:
def __init__(self, data_generator, init_sample_size, max_steps, sigma_obs=None,
is_mcmc=False, mcmc_opts=None):
# Initializing Bayesian optimization objects:
# I need to have an object that generates data and specifies domain of optimization
# max_steps refer to the maximum number of sampled points
self.max_steps = max_steps
self.data_generator = data_generator
# Initializing seen observations and adding a couple of variables for later bookkeeping
self.domain = self.data_generator.domain
pick_x = np.random.choice(range(len(self.domain)), size=init_sample_size, replace=False)
self.x = self.domain[pick_x]
self.y = self.data_generator.sample(self.x)
self.best_y = np.max(self.y)
self.mu_posterior = None
self.std_posterior = None
# Initializing underlying GP
self.gp = GP(self.x, self.y)
self.sigma_obs = sigma_obs
# Initializing MCMC properties (mcmc_properties is supposed to be an instance of MCMCProperties class)
self.is_mcmc = is_mcmc
self.mcmc_opts = mcmc_opts
def add_obs(self, x, y):
# Adding new observations
# It is assumed x and y are passed as scalars
self.x = np.append(self.x, x)
self.y = np.append(self.y, y)
def determine_l(self):
# This function returns kernel hyperparameters for current state of the system
# It is either hyperparameters that optimize log-likelihood or
# In case we have mcmc sampling it is the sample of posterior distribution of hyperparameters
# The output of the function is in either case the array of elements (one element for max-likelihood estimator)
if not self.is_mcmc:
# Getting maximum likelihood estimator (curently for [0, 1] interval)
l = max(np.exp(np.linspace(np.log(0.01), np.log(1), 100)),
key = lambda z: self.gp.log_likelihood(self.sigma_obs, z))
return [l]
if self.is_mcmc:
l_sampler = MCMCSampler(lambda z: self.gp.log_likelihood(self.sigma_obs, z), self.mcmc_opts)
return l_sampler.posterior_sample()
def step(self):
# The main function of BayesOpt class which performs one does a single optimization step
# I estimate the kernel hyperparameters that best fit the data (either with mcmc or likelihood optimization)
# Then I select the best point to sample (currently with EI acquisition function)
# Then I sample the point and update my state
# Sampling kernel hyperparameters
sampled_l = self.determine_l()
# Averaging GP posterior and EI over possible kernel hyperparameters
# Note that as std is not quite an expectation, its averaging is a hack and not necessariy would give true std
mu = np.zeros((len(self.domain),))
std_1d = np.zeros((len(self.domain),))
ei = np.zeros((len(self.domain),))
for l in sampled_l:
sampled_mu, sampled_std_1d = self.gp.gp_posterior(self.domain, self.sigma_obs, l, return_chol=False)
z = (sampled_mu - self.best_y) / sampled_std_1d
sampled_ei = sampled_std_1d * scipy.stats.norm.pdf(z) + z * sampled_std_1d * scipy.stats.norm.cdf(z)
mu += sampled_mu
std_1d += sampled_std_1d
ei += sampled_ei
# Sampling a new point
new_x = self.domain[np.argmax(ei)]
new_y = self.data_generator.sample(new_x)
self.add_obs(new_x, new_y)
self.gp.add_obs(new_x, new_y)
self.best_y = max(new_y, self.best_y)
self.mu_posterior = mu / len(sampled_l)
self.std_posterior = std_1d / len(sampled_l)
def run(self):
# The function that runs whole optimizaion
# For now it only does single steps
# In the future some print and plot statements could be added
for _ in range(self.max_steps):
self.step()
| [
"lyapin.rk@gmail.com"
] | lyapin.rk@gmail.com |
bf7f887b2f4afed4e0b2e789b0184e14ede98e7f | f75b6408c867c9e6a559cedf5d0e9e7b6757794e | /main.py | 419a1bc27c4b62c1be234328ebd11470038ef200 | [
"MIT"
] | permissive | om1ji/my-bot | 7480ea0eb7194ffddae285d2d2dec8abc3823e29 | ed108a305c6a957e371b33b930db0653754f1846 | refs/heads/main | 2023-03-05T14:07:59.691758 | 2021-02-09T19:27:07 | 2021-02-09T19:27:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,044 | py | import config
import telebot
import random
from telebot import types
bot = telebot.TeleBot(config.token)
query = []
greetings = ['Привет', 'привет', 'прив', 'пр', 'Дарова', "дарова", "Даров", "даров", "Дороу", "дороу", "здарова", "Здарова", "Здаров", "здаров", "Прувэт", "прувэт"]
omeja_v = ['Омиджи', "Омежа", "омиджи", "омежа", "омижи", "Омижи", "омеж", "омеjа", "амаль", "Амаль", "омеjи"]
na_chui = ["иди нахуй", "пошёл нахуй", "пошел нахуй", "Иди нахуй", "Пошёл нахуй", "Пошел нахуй", "Нахуй пошёл", "нахуй пошёл", "нахуй пошел", "Нахуй пошел", "нахуй иди", "Нахуй иди"]
# Main commands
@bot.message_handler(commands=['start'])
def handle_start(message):
bot.send_message(message.chat.id, 'Здарова, напиши на /help, чтобы узнать мои команды')
# Other commands
@bot.message_handler(commands=['kto_pidor'])
def kto_pidr(message):
if message.from_user.username == 'NoneType':
bot.reply_to(message, 'Ну кто пидр... кто... ' + message.from_user.first_name + ' конечно!')
else:
bot.reply_to(message, 'Ну кто пидр... кто... ' + '@' + message.from_user.username + ' конечно!')
@bot.message_handler(commands=['rnd_chars'])
def random_hundred_characters(message):
def r_symbol():
char = random.randint(0,52000)
one_or_zero = any([start <= char <= end for start, end in
[(4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215),
(63744, 64255), (65072, 65103), (65381, 65500),
(131072, 196607)]
])
while one_or_zero == True:
return r_symbol()
char = chr(char)
char = char.encode('utf-8')
char = char.decode('utf-8', errors='ignore')
return char
i = 0
s = ''
for x in range(10):
for x in range(10):
s += '```'+ r_symbol() + '```' + ' '
s += '\n'
bot.reply_to(message, text=s, parse_mode='Markdown')
@bot.message_handler(commands=['help'])
def command_list(message):
bot.reply_to(message, '''
А я думал ты уже прочитал все команды...
/kto_pidor
/rnd_chars
P.S. попробуй послать бота ))
''')
# Text handler
@bot.message_handler(content_types=['text'])
def greeting(message):
if message.text in greetings:
bot.reply_to(message, 'Дарооова)')
if message.text in na_chui:
bot.reply_to(message, 'Сам(а) иди :))')
if message.text in omeja_v:
bot.reply_to(message, 'Я тебя слушаю')
if __name__ == '__main__':
bot.infinity_polling()
| [
"noreply@github.com"
] | noreply@github.com |
d17cf53c623fa6d7bd0d5d74da87667c85fca93f | f730a1fc0fe7021d68cec973125d605c10ac7a64 | /code/camera.py | b9bfb55be94621c6619c04db7f15b9de8a045fcd | [] | no_license | wwxFromTju/TJU_AR_alpha0.1 | 47a248b6861dfcdc47a9eefd86250d616a4d71f8 | e435424943846a7812e22afb7ca66a5065d70aec | refs/heads/master | 2021-04-12T11:29:44.434154 | 2016-07-27T10:20:59 | 2016-07-27T10:20:59 | 64,299,053 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/usr/bin/env python
# encoding=utf-8
from scipy import linalg
class Camera(object):
"""
相机的类
"""
def __init__(self, P):
"""
初始化相机类
"""
self.P = P
# 标定矩阵
self.K = None
# 旋转矩阵
self.R = None
# 平移矩阵
self.t = None
# 相机中心
self.c = None
def project(self, X):
"""
:param X: (4, n) 的投影点, 并且对坐标归一化
:return:
"""
x = linalg.dot(self.P, X)
for i in range(3):
x[i] /= x[2]
return x | [
"wxwang@tju.edu.cn"
] | wxwang@tju.edu.cn |
60925646feb8473a3fff7eec5ed67860e4efff65 | aea8fea216234fd48269e4a1830b345c52d85de2 | /fhir/resources/STU3/tests/test_episodeofcare.py | 2a6f7c5f3886e9275ff0a301dc3bf923e2cac14a | [
"BSD-3-Clause"
] | permissive | mmabey/fhir.resources | 67fce95c6b35bfdc3cbbc8036e02c962a6a7340c | cc73718e9762c04726cd7de240c8f2dd5313cbe1 | refs/heads/master | 2023-04-12T15:50:30.104992 | 2020-04-11T17:21:36 | 2020-04-11T17:21:36 | 269,712,884 | 0 | 0 | NOASSERTION | 2020-06-05T17:03:04 | 2020-06-05T17:03:04 | null | UTF-8 | Python | false | false | 4,568 | py | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/EpisodeOfCare
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import io
import json
import os
import unittest
import pytest
from .. import episodeofcare
from ..fhirdate import FHIRDate
from .fixtures import force_bytes
@pytest.mark.usefixtures("base_settings")
class EpisodeOfCareTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get("FHIR_UNITTEST_DATADIR") or ""
with io.open(os.path.join(datadir, filename), "r", encoding="utf-8") as handle:
js = json.load(handle)
self.assertEqual("EpisodeOfCare", js["resourceType"])
return episodeofcare.EpisodeOfCare(js)
def testEpisodeOfCare1(self):
inst = self.instantiate_from("episodeofcare-example.json")
self.assertIsNotNone(inst, "Must have instantiated a EpisodeOfCare instance")
self.implEpisodeOfCare1(inst)
js = inst.as_json()
self.assertEqual("EpisodeOfCare", js["resourceType"])
inst2 = episodeofcare.EpisodeOfCare(js)
self.implEpisodeOfCare1(inst2)
def implEpisodeOfCare1(self, inst):
self.assertEqual(inst.diagnosis[0].rank, 1)
self.assertEqual(
force_bytes(inst.diagnosis[0].role.coding[0].code), force_bytes("CC")
)
self.assertEqual(
force_bytes(inst.diagnosis[0].role.coding[0].display),
force_bytes("Chief complaint"),
)
self.assertEqual(
force_bytes(inst.diagnosis[0].role.coding[0].system),
force_bytes("http://hl7.org/fhir/diagnosis-role"),
)
self.assertEqual(force_bytes(inst.id), force_bytes("example"))
self.assertEqual(
force_bytes(inst.identifier[0].system),
force_bytes("http://example.org/sampleepisodeofcare-identifier"),
)
self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("123"))
self.assertEqual(inst.period.start.date, FHIRDate("2014-09-01").date)
self.assertEqual(inst.period.start.as_json(), "2014-09-01")
self.assertEqual(force_bytes(inst.status), force_bytes("active"))
self.assertEqual(
inst.statusHistory[0].period.end.date, FHIRDate("2014-09-14").date
)
self.assertEqual(inst.statusHistory[0].period.end.as_json(), "2014-09-14")
self.assertEqual(
inst.statusHistory[0].period.start.date, FHIRDate("2014-09-01").date
)
self.assertEqual(inst.statusHistory[0].period.start.as_json(), "2014-09-01")
self.assertEqual(
force_bytes(inst.statusHistory[0].status), force_bytes("planned")
)
self.assertEqual(
inst.statusHistory[1].period.end.date, FHIRDate("2014-09-21").date
)
self.assertEqual(inst.statusHistory[1].period.end.as_json(), "2014-09-21")
self.assertEqual(
inst.statusHistory[1].period.start.date, FHIRDate("2014-09-15").date
)
self.assertEqual(inst.statusHistory[1].period.start.as_json(), "2014-09-15")
self.assertEqual(
force_bytes(inst.statusHistory[1].status), force_bytes("active")
)
self.assertEqual(
inst.statusHistory[2].period.end.date, FHIRDate("2014-09-24").date
)
self.assertEqual(inst.statusHistory[2].period.end.as_json(), "2014-09-24")
self.assertEqual(
inst.statusHistory[2].period.start.date, FHIRDate("2014-09-22").date
)
self.assertEqual(inst.statusHistory[2].period.start.as_json(), "2014-09-22")
self.assertEqual(
force_bytes(inst.statusHistory[2].status), force_bytes("onhold")
)
self.assertEqual(
inst.statusHistory[3].period.start.date, FHIRDate("2014-09-25").date
)
self.assertEqual(inst.statusHistory[3].period.start.as_json(), "2014-09-25")
self.assertEqual(
force_bytes(inst.statusHistory[3].status), force_bytes("active")
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(force_bytes(inst.type[0].coding[0].code), force_bytes("hacc"))
self.assertEqual(
force_bytes(inst.type[0].coding[0].display),
force_bytes("Home and Community Care"),
)
self.assertEqual(
force_bytes(inst.type[0].coding[0].system),
force_bytes("http://hl7.org/fhir/episodeofcare-type"),
)
| [
"connect2nazrul@gmail.com"
] | connect2nazrul@gmail.com |
3745cebaea80ac051eadde49e0750ff0cf7da96c | 09a65b3f22854e580a74669435b975b97f317d4a | /No Coffee No Code/chicobentojr/tri-du.py | bb2d974c8c48e8ccc0f0d56ead61af531660aed9 | [] | no_license | chicobentojr/pre-maratona-ifrn | 89df901461e8015ad328fbfc13ece4ee5ab6e329 | e8bf1686ab0652d0cea26eacaada658c8dd28b54 | refs/heads/master | 2021-01-22T23:10:54.104086 | 2016-09-09T03:49:41 | 2016-09-09T03:49:41 | 67,566,387 | 0 | 0 | null | 2016-09-07T02:53:48 | 2016-09-07T02:53:48 | null | UTF-8 | Python | false | false | 179 | py | if __name__ == '__main__':
first, second = [int(x) for x in input().split(' ')]
if first == second or first > second:
print(first)
else:
print(second)
| [
"francisco.bento.jr@hotmail.com"
] | francisco.bento.jr@hotmail.com |
9eaa19c9d5828a8c9d3014e6f598ade1b040dc26 | 8be39cae865fa2163c131a34051c4867ad0350a0 | /examples/quickhowto2/app/views.py | 965acaaf1bbf79ecf7beb5b956b8ac0d380fcf32 | [
"BSD-3-Clause"
] | permissive | ben-github/Flask-AppBuilder | fd13f694457ef4fbc8c73f8b0b90083dc5b978bc | e52947f3e4494a84017bf101b19823df91a41448 | refs/heads/master | 2021-01-17T17:52:19.125926 | 2015-01-09T18:13:30 | 2015-01-09T18:13:30 | 25,661,891 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,050 | py | import calendar
from flask import redirect
from flask_appbuilder import ModelView, GroupByChartView, aggregate_count, action
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.models.generic.interface import GenericInterface
from flask_appbuilder.widgets import FormVerticalWidget, FormInlineWidget, FormHorizontalWidget, ShowBlockWidget
from flask_appbuilder.widgets import ListThumbnail
from flask.ext.appbuilder.models.generic import PSSession
from flask_appbuilder.models.generic import PSModel
from flask_appbuilder.models.sqla.filters import FilterStartsWith, FilterEqualFunction as FA
from app import db, appbuilder
from .models import ContactGroup, Gender, Contact, FloatModel, Product, ProductManufacturer, ProductModel
def fill_gender():
try:
db.session.add(Gender(name='Male'))
db.session.add(Gender(name='Female'))
db.session.commit()
except:
db.session.rollback()
sess = PSSession()
class PSView(ModelView):
datamodel = GenericInterface(PSModel, sess)
base_permissions = ['can_list', 'can_show']
list_columns = ['UID', 'C', 'CMD', 'TIME']
search_columns = ['UID', 'C', 'CMD']
class ProductManufacturerView(ModelView):
datamodel = SQLAInterface(ProductManufacturer)
class ProductModelView(ModelView):
datamodel = SQLAInterface(ProductModel)
class ProductView(ModelView):
datamodel = SQLAInterface(Product)
list_columns = ['name','product_manufacturer', 'product_model']
add_columns = ['name','product_manufacturer', 'product_model']
edit_columns = ['name','product_manufacturer', 'product_model']
add_widget = FormVerticalWidget
class ContactModelView2(ModelView):
datamodel = SQLAInterface(Contact)
list_columns = ['name', 'personal_celphone', 'birthday', 'contact_group.name']
add_form_query_rel_fields = {'contact_group':[['name',FilterStartsWith,'p']],
'gender':[['name',FilterStartsWith,'F']]}
class ContactModelView(ModelView):
datamodel = SQLAInterface(Contact)
add_widget = FormVerticalWidget
show_widget = ShowBlockWidget
list_columns = ['name', 'personal_celphone', 'birthday', 'contact_group.name']
list_template = 'list_contacts.html'
list_widget = ListThumbnail
show_template = 'show_contacts.html'
extra_args = {'extra_arg_obj1': 'Extra argument 1 injected'}
base_order = ('name', 'asc')
show_fieldsets = [
('Summary', {'fields': ['name', 'gender', 'contact_group']}),
(
'Personal Info',
{'fields': ['address', 'birthday', 'personal_phone', 'personal_celphone'], 'expanded': False}),
]
add_fieldsets = [
('Summary', {'fields': ['name', 'gender', 'contact_group']}),
(
'Personal Info',
{'fields': ['address', 'birthday', 'personal_phone', 'personal_celphone'], 'expanded': False}),
]
edit_fieldsets = [
('Summary', {'fields': ['name', 'gender', 'contact_group']}),
(
'Personal Info',
{'fields': ['address', 'birthday', 'personal_phone', 'personal_celphone'], 'expanded': False}),
]
@action("muldelete", "Delete", "Delete all Really?", "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class GroupModelView(ModelView):
datamodel = SQLAInterface(ContactGroup)
related_views = [ContactModelView]
show_template = 'appbuilder/general/model/show_cascade.html'
list_columns = ['name', 'extra_col']
class FloatModelView(ModelView):
datamodel = SQLAInterface(FloatModel)
class ContactChartView(GroupByChartView):
datamodel = SQLAInterface(Contact)
chart_title = 'Grouped contacts'
label_columns = ContactModelView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group': 'contact_group.name',
'series': [(aggregate_count, 'contact_group')]
},
{
'group': 'gender',
'series': [(aggregate_count, 'gender')]
}
]
def pretty_month_year(value):
return calendar.month_name[value.month] + ' ' + str(value.year)
def pretty_year(value):
return str(value.year)
class ContactTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Contact)
chart_title = 'Grouped Birth contacts'
chart_type = 'AreaChart'
label_columns = ContactModelView.label_columns
definitions = [
{
'group': 'month_year',
'formatter': pretty_month_year,
'series': [(aggregate_count, 'contact_group')]
},
{
'group': 'year',
'formatter': pretty_year,
'series': [(aggregate_count, 'contact_group')]
}
]
db.create_all()
fill_gender()
appbuilder.add_view(PSView, "List PS", icon="fa-folder-open-o", category="Contacts", category_icon='fa-envelope')
appbuilder.add_view(GroupModelView, "List Groups", icon="fa-folder-open-o", category="Contacts",
category_icon='fa-envelope')
appbuilder.add_view(ContactModelView, "List Contacts", icon="fa-envelope", category="Contacts")
appbuilder.add_view(ContactModelView2, "List Contacts 2", icon="fa-envelope", category="Contacts")
appbuilder.add_view(FloatModelView, "List Float Model", icon="fa-envelope", category="Contacts")
appbuilder.add_separator("Contacts")
appbuilder.add_view(ContactChartView, "Contacts Chart", icon="fa-dashboard", category="Contacts")
appbuilder.add_view(ContactTimeChartView, "Contacts Birth Chart", icon="fa-dashboard", category="Contacts")
appbuilder.add_view(ProductManufacturerView, "List Manufacturer", icon="fa-folder-open-o", category="Products",
category_icon='fa-envelope')
appbuilder.add_view(ProductModelView, "List Models", icon="fa-envelope", category="Products")
appbuilder.add_view(ProductView, "List Products", icon="fa-envelope", category="Products")
appbuilder.security_cleanup()
| [
"danielvazgaspar@gmail.com"
] | danielvazgaspar@gmail.com |
2ce9c7d6e64cc29146bdcc4a83f1918f560de7a4 | 0ee19e1fecf00e464d19c6fcee04f20302986a05 | /api/urls.py | f347d248c40fe3f59d966e5e9688805986a0d2c4 | [] | no_license | arnavsingh31/Todo_App | cfa6c71d5f553276e489a8be246a60511d34714d | ef1de7a3ae75890719b52b57b2cb1bd9c0ae5f75 | refs/heads/master | 2023-04-01T13:42:39.928676 | 2021-04-01T12:49:08 | 2021-04-01T12:49:08 | 353,697,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.apiOverview, name="api-overview"),
path('task-list/', views.taskList, name="task-list"),
path('task-detail/<str:task_id>/', views.taskDetail, name="task-list"),
path('task-create/', views.taskCreate, name="task-create"),
path('task-update/<str:task_id>/', views.taskUpdate, name="task-update"),
path('task-delete/<str:task_id>/', views.taskDelete, name="task-delete"),
]
| [
"arnav@wrath.com"
] | arnav@wrath.com |
b580ef8a45894dddb42df5fb7d06b62f3b76b132 | f062afcfc7da5ef1fae49ef1dbdb33a0edf83086 | /p1/s4.py | d8bbea3c9058c2c8f140987d5430214feaefa723 | [] | no_license | harvestcore/pyweb | 473ea66c9b5dea22836dde966709ee5102330b9e | dbf6b762c33205f33f1ee9daf80dade56880a9f2 | refs/heads/master | 2020-04-01T18:51:53.108458 | 2018-12-18T18:47:10 | 2018-12-18T18:47:10 | 153,520,059 | 0 | 1 | null | 2021-10-10T20:50:33 | 2018-10-17T20:32:54 | Python | UTF-8 | Python | false | false | 666 | py | #!/usr/bin/env python3
import os
def fib(n):
if n <= 1:
return n
else:
return fib(n - 1) + fib(n - 2)
if __name__ == '__main__':
nombre, salida = input("Entrada: "), input("Salida: ")
archivoSalida = 0
if os.path.isfile(salida):
os.remove(salida)
archivoSalida = open(salida, 'x')
else:
archivoSalida = open(salida, 'x')
if not os.path.isfile(nombre):
print("El archivo de entrada no existe.")
else:
archivo = open(nombre, 'r')
numero = int(archivo.read())
archivoSalida.write(str(fib(numero)) + "\n")
archivo.close()
archivoSalida.close()
| [
"aagomezies@gmail.com"
] | aagomezies@gmail.com |
25b1e11ccb86911371d26d18321fc749230e8a50 | ee0c647d3b279654fcca178e2453386a22c2b130 | /app1/migrations/0002_auto_20180117_0403.py | 52951e7728781fb5dde37130e4595707800b6093 | [] | no_license | wntbrian/django1 | c85bdb13a5ce9249bfb6fff7012e68a05a53b541 | c0b49967e954b8186498f837016a3da538172d9d | refs/heads/master | 2021-09-24T10:17:02.346568 | 2018-10-08T01:13:43 | 2018-10-08T01:13:43 | 117,347,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | # Generated by Django 2.0.1 on 2018-01-17 04:03
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('app1', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='artist',
options={'verbose_name': 'артист', 'verbose_name_plural': 'артисты'},
),
migrations.AddField(
model_name='genre',
name='data',
field=models.DateField(blank=True, default=django.utils.timezone.now, verbose_name='дата выхода'),
),
]
| [
"savladimirov@outlook.com"
] | savladimirov@outlook.com |
b8fc0ce65141983f44c8f120c97563e136c8f859 | 028ddc5e85d89c26f8320b70d8ffe80f3d5aec52 | /src/UQpy/dimension_reduction/hosvd/__init__.py | d8afbfb86c2a6ecffe7096d3d4283d48de3b91ba | [
"MIT"
] | permissive | SURGroup/UQpy | 3b516706e9072c6fac80da0bdfbd23e2193f5844 | 9e98a6279aa5a2ec2d6d4c61226c34712547bcc6 | refs/heads/master | 2023-09-04T03:38:35.294389 | 2023-08-04T12:55:02 | 2023-08-04T12:55:02 | 112,795,497 | 215 | 70 | MIT | 2023-09-14T14:18:22 | 2017-12-01T23:05:13 | Python | UTF-8 | Python | false | false | 72 | py | from UQpy.dimension_reduction.hosvd.HigherOrderSVD import HigherOrderSVD | [
"dimtsap@hotmail.com"
] | dimtsap@hotmail.com |
986b5eef319190bbecc0933f46b6cd7507f813a3 | d49c8215cecb13b4f744287d02c22051aa29cbc4 | /gyoseki/migrations/0003_auto_20191103_1208.py | ec0648591d4a66b4686d26d55eea5f5642af80cd | [
"MIT"
] | permissive | yamaken1343/gyoseki-archive | 5bd3d2c684e52918888e9a2bbd9132675884d9c2 | 2dbe9629676799d68cedc701e95efc13d2f95ee1 | refs/heads/master | 2021-08-06T16:29:19.432481 | 2020-03-10T04:54:56 | 2020-03-10T04:54:56 | 245,939,343 | 1 | 0 | MIT | 2021-03-19T23:10:48 | 2020-03-09T03:54:23 | Python | UTF-8 | Python | false | false | 1,125 | py | # Generated by Django 2.2.3 on 2019-11-03 03:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('gyoseki', '0002_auto_20190905_1848'),
]
operations = [
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
],
),
migrations.AlterField(
model_name='recode',
name='note',
field=models.CharField(blank=True, max_length=256, null=True),
),
migrations.AlterField(
model_name='recode',
name='tag',
field=models.ManyToManyField(blank=True, to='gyoseki.Tag'),
),
migrations.AddField(
model_name='recode',
name='language',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='gyoseki.Language'),
),
]
| [
"yamaken1343@gmail.com"
] | yamaken1343@gmail.com |
ac338cfc3c89a67ea205a8d928a5c67a6e1c9083 | f52c6dd8723d6d539f2124732a1c0ced75f64c67 | /UCI RegChecker/main.py | b5154113ea129e0ed3695c4a11e7a5eabcd0a80d | [] | no_license | scienz/UCI_RegChecker | 44bc811b856baec99eb6b22c309a907fd583cbf5 | e80adc35c7782f4bbf15a58a52383ef41437b1bf | refs/heads/master | 2020-07-23T08:23:28.572241 | 2019-09-10T07:57:44 | 2019-09-10T07:57:44 | 207,498,437 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | from website import app
if __name__ == "__main__":
app.run(debug = True)
| [
"noreply@github.com"
] | noreply@github.com |
0cdaae4efa973bf48bb9119247628fc2710ea0f4 | 5a01ac2c9ffff853576d8796362932482ae42ff4 | /custom_components/colorfy/__init__.py | ddf9b6941d739d0a2dd2ac40baf60b4b65b5df23 | [
"Apache-2.0"
] | permissive | peaster/colorfy | b7ab6f602e4f5ec45dd9a27b1a75a511281b681b | ecdd71d61e478d208b852cd40a24e910f7e5ece0 | refs/heads/master | 2023-03-03T12:15:38.344281 | 2021-02-14T00:23:28 | 2021-02-14T00:23:28 | 338,696,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | from homeassistant import core
async def async_setup(hass: core.HomeAssistant, config: dict) -> bool:
    """Set up the Colorfy component.

    Returning ``True`` tells Home Assistant that the integration loaded
    successfully; no actual setup work is performed yet.
    """
    # @TODO: Add setup code.
    return True
| [
"peasterbrooks@perfectsensedigital.com"
] | peasterbrooks@perfectsensedigital.com |
b27215479e3d24d61d6359a06b0f73e10f8e5cd5 | 9f3213d44c9b8a296f7009377f11c5e5a911b751 | /ps1/Assignment 1: Coinflip/main.py | 09568582f362d9acf0cbda2359b859e4157c021d | [] | no_license | dcordoba/CordAcademy-Python-Assignments | fd7224cd9c1e2f30726294fffeb42131511de431 | bc08fa90ca88037e406c93b2bd4a4eed0d72961c | refs/heads/master | 2016-09-10T23:24:10.084334 | 2014-10-28T06:05:07 | 2014-10-28T06:05:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | """Simulates the flipping of a coin. When this program is run, a coin will be
flipped until 'HEADS' is achieved the given amount of tiems.
The flip_coin method is provided for you. You will use this method to 'flip' a
coin until you get the desired number of 'HEADS' in a row.
After each flip, you should print the result. Once we see the desired number of
'HEADS' in a row, print the total number of coin flips it took to get there and
exit the program.
"""
import random
import sys
# Starter Code
def flip_coin():
    """Flip a fair coin once.

    Returns 'HEADS' 50% of the time and 'TAILS' the other 50%.
    """
    return random.choice(('HEADS', 'TAILS'))
# Your code
def run_simulation(target_heads):
    """Flips a coin until we see `target_heads` HEADS in a row.

    Prints the result of every flip and, once the streak is reached,
    the total number of flips it took to get there.
    """
    print('Flipping coin until we get %d HEADS' % target_heads)
    consecutive_heads = 0
    total_flips = 0
    while consecutive_heads < target_heads:
        result = flip_coin()
        total_flips += 1
        print(result)
        if result == 'HEADS':
            consecutive_heads += 1
        else:
            # Any TAILS breaks the run of consecutive heads.
            consecutive_heads = 0
    print('Got %d HEADS in a row after %d total flips.' % (target_heads, total_flips))
if __name__ == '__main__':
    """Parses the args and calls our run_simulation function."""
    # The first CLI argument is the desired number of consecutive HEADS.
    assert len(sys.argv) > 1, "Missing the desired number of 'HEADS'"
    target_heads = int(sys.argv[1])
    run_simulation(target_heads)
| [
"david.cordoba222@gmail.com"
] | david.cordoba222@gmail.com |
c6d0ba4381285740fadc5f4ec42aab9a196a5fdb | 1f999da1e948d9a7e8330e4fb2826ff08fb9a7ac | /generate-dataset/dataset-from-prosite.py | 102a4835504cd6152935c058b413cdbfbc3a4076 | [
"MIT"
] | permissive | kalininalab/rinminer | c6c7748a0e2a3dae0a050b68ff5cc12353335f9d | 5fcbbcf6cc3d8d6343a83b2cbea6154f3350eff0 | refs/heads/master | 2023-05-02T20:17:56.264070 | 2021-05-02T14:49:45 | 2021-05-02T14:49:45 | 261,425,189 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,159 | py | #!/usr/bin/python3
import argparse
import os, sys
import re
import gzip
import errno
from Bio import SeqIO
import pisces
import tempfile
import shutil
from urllib.request import urlretrieve
def pattern_to_regex(pattern):
    """Translate a PROSITE pattern into a Python regular expression string.

    The translation handles residue separators ('-'), terminal anchors
    ('<'/'>'), forbidden residue sets ('{..}'), repeat counts ('(n)' or
    '(n,m)') and the 'x' wildcard, and wraps the result so that it matches
    anywhere within a sequence.
    """
    regex = pattern.replace('-', '')
    # A '>' or '<' inside a character class means "or end/start of sequence":
    # [XY>] -> ([XY]|$), [<XY] -> ([XY]|^).
    regex = re.sub('\[([^[]*)>([^]]*)\]', '([\g<1>\g<2>]|$)', regex)
    regex = re.sub('\[([^[]*)<([^]]*)\]', '([\g<1>\g<2>]|^)', regex)
    # Bare terminal markers become regex anchors.
    regex = regex.replace('<', '^').replace('>', '$')
    # {..} is a forbidden set in PROSITE, i.e. a negated character class.
    regex = regex.replace('{', '[^').replace('}', ']')
    # (n) / (n,m) repeat counts map onto regex quantifiers.
    regex = re.sub('\(([0-9,]*)\)', '{\g<1>}', regex)
    # 'x' is the PROSITE wildcard for "any residue".
    regex = regex.replace('x', '.')
    return '.*' + regex + '.*'
def get_chains_for_pattern(pattern, pdb_path):
    """Return the chain IDs in a gzipped PDB file whose SEQRES sequence
    matches the given PROSITE pattern."""
    matcher = re.compile(pattern_to_regex(pattern))
    with gzip.open(pdb_path, 'rt') as handle:
        return [record.annotations["chain"]
                for record in SeqIO.parse(handle, "pdb-seqres")
                if matcher.match(str(record.seq).strip())]
def get_chains_for_refs(refs, pdb_path):
    """Return the chain IDs of DBREF records in a gzipped PDB file whose
    database accession appears in `refs`."""
    chains = []
    with gzip.open(pdb_path, 'rt') as handle:
        for raw_line in handle:
            record = raw_line.strip()
            if not record.startswith('DBREF'):
                continue
            # Fixed-column PDB format: chain id at column 13 (index 12),
            # database accession in columns 34-42 (indices 33:42).
            chain_id = record[12]
            accession = record[33:42].strip()
            if accession in refs:
                chains.append(chain_id)
    return chains
# Command-line interface; the thresholds below are handed to pisces.Pisces
# to cull redundant/low-quality chains.
parser = argparse.ArgumentParser(description='Generate a dataset based on Prosite patterns')
parser.add_argument('blastdb_path', metavar='blastdb_path', help='Path to PISCES BLASTDB directory')
parser.add_argument('prosite_path', metavar='prosite.dat', help='Path to prosite.dat')
parser.add_argument('target_dir', metavar='dataset_dir', help='Directory in which the data set is created')
parser.add_argument('-m', '--max-res', type=float, default=3.0, help='Maximum resolution')
parser.add_argument('-r', '--max-r-value', type=float, default=1.0, help='Maximum R-value')
# Bug fix: this option is the *minimum* chain length (dest min_length);
# the help text previously read 'Maximum length'.
parser.add_argument('-l', '--min-length', type=int, default=0, help='Minimum length')
parser.add_argument('-i', '--max-identity', type=int, default=50, help='Maximum identity')
parser.add_argument('-e', '--min-entries', type=int, default=3, help='Minimum number of entries in a family')
parser.add_argument('-p', '--pdb-mirror', help='Local PDB mirror path instead of downloading files from the RCSB PDB webserver.')
args = parser.parse_args()
def create_pdb_file(pdb_id, path):
    """Place a gzipped PDB file for `pdb_id` at `path`.

    If a local PDB mirror was configured (--pdb-mirror) and contains the
    structure, a symlink into the mirror is created instead of downloading.
    Otherwise the file is fetched from the RCSB PDB webserver.
    """
    if args.pdb_mirror:
        mirror_path = args.pdb_mirror + '/data/structures/divided/pdb/' + pdb_id.lower()[1:3] + '/pdb' + pdb_id.lower() + '.ent.gz'
        if os.path.isfile(mirror_path):
            # Replace any stale file/symlink; a missing target is fine.
            try:
                os.remove(path)
            except OSError as exc:
                if exc.errno != errno.ENOENT:
                    raise
            os.symlink(mirror_path, path)
            return
        # Bug fix: the probed mirror path was printed to stdout as leftover
        # debug output; report it on stderr as part of the warning instead.
        sys.stderr.write('PDB ID \'{}\' not found in local database ({}), attempting download.\n'.format(pdb_id, mirror_path))
    urlretrieve('http://files.rcsb.org/download/' + pdb_id + '.pdb.gz', path)
# used to filter out similar structures
# NOTE: this rebinding shadows the imported ``pisces`` module with a
# Pisces instance; the module is not needed again afterwards.
pisces = pisces.Pisces(args.blastdb_path, args.max_res, args.max_r_value, args.min_length, args.max_identity)
# parse prosite entries
# Each entry becomes (id, accession, pattern, true-positive refs, PDB ids).
entries = []
with open(args.prosite_path, 'rt') as prosite_file:
    entry_id = None
    entry_type = None
    for line in prosite_file:
        line = line.strip()
        # The two-letter record code determines the line's meaning.
        line_type = line[0:2]
        if line_type == 'ID':
            # Start of a new entry: name, type (e.g. PATTERN) and reset state.
            entry = line[2:].split(';')
            entry_id = entry[0].strip()
            entry_type = entry[1].rstrip('.').strip()
            pattern = ''
            structures = []
            refs = []
        elif line_type == 'AC':
            # Accession number, e.g. PS00001.
            acc = line[3:].rstrip(';').strip()
        elif line_type == 'PA':
            # Pattern may span multiple PA lines; concatenate them.
            pattern += line[3:].rstrip('.').strip()
        elif line_type == 'DR':
            # Cross-references: (accession, name, flag) triplets.
            entry = filter(None, line[3:].split(';'))
            for x in entry:
                ref = [r.strip() for r in x.split(',')]
                # only use true positives
                if ref[2] == 'T':
                    refs.append(ref[0])
        elif line_type == '3D':
            # Known 3D structures (PDB ids) for this entry.
            structures = list(filter(None, map(str.strip, line[2:].split(';'))))
        elif line_type == '//':
            # End of entry: keep it only if it is a pattern with structures
            # and at least one true-positive reference.
            if entry_type == 'PATTERN' and len(structures) > 0 and len(refs) > 0:
                entries.append((entry_id, acc, pattern, set(refs), set(structures)))
            entry_id = None
            entry_type = None
    # Handle a trailing entry in case the file does not end with '//'.
    if entry_type == 'PATTERN' and len(structures) > 0 and len(refs) > 0:
        entries.append((entry_id, acc, pattern, set(refs), set(structures)))
# Download each referenced structure once into a temp dir, resolve the
# matching chains, cull redundant chains with PISCES, and materialize one
# directory per family under args.target_dir.
tmp_dir = tempfile.TemporaryDirectory()
pdb_files = {}
for num, entry in enumerate(entries, 1):
    name, accession_number, pattern, refs, structures = entry
    chains = []
    print('{}/{}: {} ({}); {}'.format(num, len(entries), name, accession_number, pattern))
    # determine matching chains, because prosite only tells us the pdb id
    for pdb_id in structures:
        if pdb_id not in pdb_files:
            try:
                pdb_file_path = tmp_dir.name + '/' + pdb_id + '.pdb.gz'
                create_pdb_file (pdb_id, pdb_file_path)
                pdb_files[pdb_id] = pdb_file_path
            except Exception as e:
                sys.stderr.write('Structure \'{}\' could not be found: {}\n'.format(pdb_id, e))
                continue
        else:
            pdb_file_path = pdb_files[pdb_id]
        try:
            #struct_chains_pat = get_chains_for_pattern(pattern, pdb_id)
            #if len(struct_chains_pat) == 0:
            #sys.stderr.write('Pattern \'{}\' not found in structure \'{}\'\n'.format(pattern, pdb_id))
            #sys.exit(1)
            struct_chains = get_chains_for_refs(refs, pdb_file_path)
            #if len(struct_chains) == 0:
            #sys.stderr.write('No matching chains found in structure \'{}\'\n'.format(pdb_id))
            #sys.exit(1)
            #if len(struct_chains) != len(struct_chains_pat):
            #sys.stderr.write('Pattern/references mismatch ({}/{}) for pattern \'{}\' in structure \'{}\'\n'.format(len(struct_chains_pat), len(struct_chains), pattern, pdb_id))
            #sys.exit(1)
            # Chains are identified downstream as '<pdbid><chain>'.
            struct_chains = [pdb_id + struct_chain for struct_chain in struct_chains]
            chains.extend(struct_chains)
        except IOError:
            sys.stderr.write('%s not found, skipping\n' % pdb_id)
    # PISCES culls chains by resolution/R-value/length/identity thresholds.
    keep, stats = pisces.filter(chains)
    print('{}/{} chains with at most {}% identity. {} culled for experimental reasons, {} culled for identity'.format(len(keep), len(chains), args.max_identity, stats[0], stats[1]))
    if len(keep) < args.min_entries:
        sys.stderr.write('Skipping \'{}\': Too many structures culled\n'.format(accession_number))
        print('----------')
        continue
    # create data set
    family_dir = args.target_dir + '/' + accession_number
    try:
        os.makedirs(family_dir)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
        pass
    name_file = open(family_dir + '/family_name.txt', 'w')
    name_file.write(name)
    name_file.close()
    pattern_file = open(family_dir + '/pattern.txt', 'w')
    pattern_file.write(pattern)
    pattern_file.close()
    for struct in keep:
        # 'keep' entries look like '1ABCA': 4-char PDB id plus chain id.
        pdb_id = struct[:4]
        chain = struct[4:]
        full_name = pdb_id + '_' + chain
        dir_path = family_dir + '/' + full_name
        try:
            os.makedirs(dir_path)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise
            pass
        segments_file = open(dir_path + '/' + pdb_id + '.seg', 'w')
        segments_file.write('0,{},_,_'.format(chain))
        segments_file.close()
        pdb_file_path = dir_path + '/' + pdb_id + '.pdb.gz'
        # Copy the real file, not the mirror symlink, into the dataset.
        shutil.copyfile(pdb_files[pdb_id], pdb_file_path, follow_symlinks=False)
    print('----------')
tmp_dir.cleanup()
| [
"skeller@mpi-inf.mpg.de"
] | skeller@mpi-inf.mpg.de |
f52733a5e4b5b8f4b1f1749c808892ba40ca8891 | 72f4e03942f45939fbd95068e538ef29061efa31 | /room_access/controllers/user_controller.py | bf02ab8045afbf8e897bc8a1b04f7d6aaf99e2a7 | [] | no_license | MarkerViktor/tpu_room_access_via_telegram | 6d3cb094c9093928c5013fd24501224f8464f310 | 3cce75f8d1802b98024c31dc85b3805cc85f7c6c | refs/heads/master | 2023-03-18T02:39:02.731979 | 2021-03-19T22:27:05 | 2021-03-19T22:27:05 | 288,744,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,037 | py | import re
from typing import Tuple
from telebot import types
from room_access.app import bot
from room_access.services import user_service, exceptions
from room_access.controllers.utils import admin_required
@bot.message_handler(commands=['users_list'])
@admin_required
def users_list(message: types.Message):
    """Reply with a message listing all users and their IDs."""
    users: Tuple[user_service.UserInfo] = user_service.get_all_users()
    answer_string = f"*Всего пользователей — {len(users)}:*\n" \
                    "`\{user\_id\} : \{last\_name\} \{first\_name\}`\n"
    for user in users:
        answer_string += f'{user.id} : {user.last_name} {user.first_name}\n'
    bot.send_message(chat_id=message.chat.id, text=answer_string, parse_mode='MarkdownV2')
@bot.message_handler(commands=['new_user'])
@admin_required
def new_user(message: types.Message):
    """Create a new user with the given last and first name."""
    # The first or last name must start with a capital letter, contain only
    # Cyrillic characters and be at most 75 characters long.
    # Example command: /new_user Маркер Виктор
    if not re.fullmatch(r'^/new_user [А-Я][а-я]{0,74} [А-Я][а-я]{0,74}$', message.text):
        bot.reply_to(message,
                     text='*Неверная команда\!*\n`\/new\_user \{first\_name\} \{last\_name\}`\n'
                          'Имя или фамилия нового пользователя дожны\n'
                          '– начинаться с __заглавной буквы__,\n'
                          '– содержать только __кирилицу__,\n'
                          '– иметь максимальную длину __75 символов__\.',
                     parse_mode='MarkdownV2')
        return None
    try:
        user = user_service.get_new_user(command_string=message.text)
        answer_text = 'Пользователь успешно создан.'
    except exceptions.AlreadyExist:
        answer_text = 'Пользователь с заданным сочетанием имени и фамилии уже существует!'
    bot.send_message(chat_id=message.chat.id, text=answer_text)
@bot.message_handler(commands=['delete_user'])
@admin_required
def delete_user(message: types.Message):
    """Delete a user by their ID."""
    # The user ID must be a number.
    # Example: /delete_user 12
    if not re.fullmatch(r'^/delete_user [0-9]+$', message.text):
        bot.reply_to(message, '*Неверная команда\!*\n`\/delete\_user \{user\_id\}`\n'
                              'ID пользователя можно узнать с помощью команды \/users\_list',
                     parse_mode='MarkdownV2')
        return None
    try:
        user_info = user_service.delete_user(command_string=message.text)
        answer_text = f'Удален пользователь:\n' \
                      f'{user_info.last_name} {user_info.first_name}'
    except exceptions.NotExist:
        answer_text = 'Пользователь с заданным ID не существует!'
    except exceptions.BadNumberOfArgs:
        answer_text = 'Неверное количество аргументов команды!'
    except exceptions.BadArgsTypes:
        answer_text = 'ID пользователя должно быть числом!'
    bot.send_message(chat_id=message.chat.id, text=answer_text)
@bot.message_handler(regexp=r"^/setup_user_model$")
@admin_required
def setup_user_model(message: types.Message):
    """Stub handler: user-model setup is not implemented; replies that the
    feature is unavailable."""
    bot.send_message(chat_id=message.chat.id,
                     text='Функционал недоступен!')
| [
"MarkerViktor@outlook.com"
] | MarkerViktor@outlook.com |
b4524a2c6c4dec9afdd81e0de0712e0042927eb8 | 3950cb348a4a3ff6627d502dbdf4e576575df2fb | /.venv/Lib/site-packages/numba/np/ufunc/sigparse.py | a54df0e25537c1d62b56201d92da6306fa0fa4ba | [] | no_license | Bdye15/Sample_Programs | a90d288c8f5434f46e1d266f005d01159d8f7927 | 08218b697db91e55e8e0c49664a0b0cb44b4ab93 | refs/heads/main | 2023-03-02T04:40:57.737097 | 2021-01-31T03:03:59 | 2021-01-31T03:03:59 | 328,053,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,909 | py | import tokenize
import string
def parse_signature(sig):
    '''Parse generalized ufunc signature.

    NOTE: ',' (COMMA) is a delimiter; not separator.
    This means trailing comma is legal.
    '''
    def _remove_whitespace(text):
        return ''.join(ch for ch in text if ch not in string.whitespace)

    def _token_stream(text):
        # generate_tokens treats StopIteration from the readline callable
        # as end-of-input, so a one-element iterator suffices.
        lines = iter([text])
        return tokenize.generate_tokens(lambda: next(lines))

    def _iter_groups(text):
        # Yield one tuple of dimension names per parenthesized group.
        stream = _token_stream(text)
        while True:
            tok = next(stream)
            if tok[1] == '(':
                names = []
                while True:
                    tok = next(stream)
                    if tok[1] == ')':
                        break
                    elif tok[0] == tokenize.NAME:
                        names.append(tok[1])
                    elif tok[1] == ',':
                        continue
                    else:
                        raise ValueError('bad token in signature "%s"' % tok[1])
                yield tuple(names)
                tok = next(stream)
                if tok[1] == ',':
                    continue
                elif tokenize.ISEOF(tok[0]):
                    break
            elif tokenize.ISEOF(tok[0]):
                break
            else:
                raise ValueError('bad token in signature "%s"' % tok[1])

    ins, _, outs = _remove_whitespace(sig).partition('->')
    inputs = list(_iter_groups(ins))
    outputs = list(_iter_groups(outs))

    # Every symbol used on the output side must be defined by some input.
    consumed = set()
    produced = set()
    for group in inputs:
        consumed |= set(group)
    for group in outputs:
        produced |= set(group)
    undefined = produced.difference(consumed)
    if undefined:
        raise NameError('undefined output symbols: %s' % ','.join(sorted(undefined)))
    return inputs, outputs
| [
"brady.dye@bison.howard.edu"
] | brady.dye@bison.howard.edu |
21badb5ac99248ea98316bd4a2e48df1b9fd38e5 | 155649b574de128db3379bd9d1961de84944cc11 | /venv/bin/wheel | 71efa28a920f8bfc0fe7caa30a4b3c36dc128d4b | [] | no_license | priyambansal/SIH | a28231e51150c17426e961b85060f890e21ff8d3 | 77412cc972db7758c051ed40407397b0f8f0f5d8 | refs/heads/master | 2021-04-06T13:22:15.165531 | 2018-06-06T08:32:04 | 2018-06-06T08:32:04 | 125,324,960 | 2 | 0 | null | 2018-03-27T18:58:31 | 2018-03-15T06:52:48 | Python | UTF-8 | Python | false | false | 232 | #!/home/ananya/SIH/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
    # Strip the setuptools script suffix ('-script.py'/'.exe') from argv[0]
    # so wheel reports its own name correctly, then delegate to wheel's CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"keshariananya@gmail.com"
] | keshariananya@gmail.com | |
49cca9864699de57f4f43c3760ffbbd5a13e5479 | a140f5ebfe2e589cb699243c947636ac00017a95 | /compute/openstack.py | 990debc528e945cf7f8ad36c11d752dbf83c6020 | [
"MIT"
] | permissive | wspspring/cephci | b0f8562361a109d53558ab2c297c049024b2f2ac | 24145df7c415287215ba11e614646f533faaa4ac | refs/heads/master | 2023-08-31T15:52:07.984900 | 2021-10-08T11:31:24 | 2021-10-08T11:31:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,648 | py | """Support VM lifecycle operation in an OpenStack Cloud."""
import logging
import socket
from datetime import datetime, timedelta
from time import sleep
from typing import List, Optional, Union
from uuid import UUID
from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeSize
from libcloud.compute.drivers.openstack import (
OpenStack_2_NodeDriver,
OpenStackNetwork,
StorageVolume,
)
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider
# Module-level (root) logger used throughout this module.
LOG = logging.getLogger()
# libcloud does not have a timeout enabled for Openstack calls to
# ``create_node``, and it uses the default timeout value from socket which is
# ``None`` (meaning: it will wait forever). This setting will set the default
# to a magical number, which is 280 (4 minutes). This is 1 minute less than the
# timeouts for production settings that should allow enough time to handle the
# exception and return a response
# NOTE: this is process-wide and affects every socket created afterwards.
socket.setdefaulttimeout(280)
def get_openstack_driver(
    username: str,
    password: str,
    auth_url: str,
    auth_version: str,
    tenant_name: str,
    tenant_domain_id: str,
    service_region: str,
    domain_name: str,
    api_version: Optional[str] = "2.2",
) -> Union[NodeDriver, OpenStack_2_NodeDriver]:
    """
    Return the client that can interact with the OpenStack cloud.
    Args:
        username: The name of the user to be set for the session.
        password: The password of the provided user.
        auth_url: The endpoint that can authenticate the user.
        auth_version: The API version to be used for authentication.
        tenant_name: The name of the user's project.
        tenant_domain_id: The ID of the user's project.
        service_region: The realm to be used.
        domain_name: The authentication domain to be used.
        api_version: The API Version to be used for communication.
    Returns:
        An authenticated libcloud OpenStack node driver instance.
    """
    # Provider.OPENSTACK resolves to the libcloud OpenStack driver class.
    openstack = get_driver(Provider.OPENSTACK)
    return openstack(
        username,
        password,
        api_version=api_version,
        ex_force_auth_url=auth_url,
        ex_force_auth_version=auth_version,
        ex_tenant_name=tenant_name,
        ex_force_service_region=service_region,
        ex_domain_name=domain_name,
        ex_tenant_domain_id=tenant_domain_id,
    )
# Custom exception objects
class ResourceNotFound(Exception):
    """Raised when a named cloud resource (image/flavor/network) is missing."""
    pass
class ExactMatchFailed(Exception):
    """Raised when a lookup returns zero or more than one matching resource."""
    pass
class VolumeOpFailure(Exception):
    """Raised when creating/attaching/waiting on a storage volume fails."""
    pass
class NetworkOpFailure(Exception):
    """Raised when a network operation (e.g. IP assignment) fails."""
    pass
class NodeError(Exception):
    """Raised when VM creation fails or a node is in an unexpected state."""
    pass
class NodeDeleteFailure(Exception):
    """Raised when a VM cannot be deleted (e.g. still building)."""
    pass
class CephVMNodeV2:
    """Represent the VMNode required for cephci."""

    def __init__(
        self,
        username: str,
        password: str,
        auth_url: str,
        auth_version: str,
        tenant_name: str,
        tenant_domain_id: str,
        service_region: str,
        domain_name: str,
        node_name: Optional[str] = None,
    ) -> None:
        """
        Initialize the instance using the provided information.

        Args:
            username: The name of the user to be set for the session.
            password: The password of the provided user.
            auth_url: The endpoint that can authenticate the user.
            auth_version: The version to be used for authentication.
            tenant_name: The name of the user's project.
            tenant_domain_id: The ID of the user's project.
            service_region: The realm to be used.
            domain_name: The authentication domain to be used.
            node_name: The name of the node to be retrieved.
        """
        self.driver = get_openstack_driver(
            username=username,
            password=password,
            auth_url=auth_url,
            auth_version=auth_version,
            tenant_name=tenant_name,
            tenant_domain_id=tenant_domain_id,
            service_region=service_region,
            domain_name=domain_name,
        )
        self.node: Optional[Node] = None

        # CephVM attributes
        self._subnet: list = list()
        self._roles: list = list()

        # Fixme: determine if we can pick this information for OpenStack.
        self.root_login: str
        self.osd_scenario: int
        self.keypair: Optional[str] = None

        if node_name:
            self.node = self._get_node(name=node_name)

    def create(
        self,
        node_name: str,
        image_name: str,
        vm_size: str,
        cloud_data: str,
        vm_network: Optional[Union[List, str]] = None,
        size_of_disks: int = 0,
        no_of_volumes: int = 0,
    ) -> None:
        """
        Create the instance with the provided data.

        Args:
            node_name: Name of the VM.
            image_name: Name of the image to use for creating the VM.
            vm_size: Flavor to be used to create the VM
            vm_network: Name of the network/s
            cloud_data: The cloud-init configuration information
            size_of_disks: The storage capacity of the volumes
            no_of_volumes: The number of volumes to be attached.
        """
        LOG.info("Starting to create VM with name %s", node_name)
        try:
            image = self._get_image(name=image_name)
            vm_size = self._get_vm_size(name=vm_size)
            vm_network = self._get_network(vm_network)
            self.node = self.driver.create_node(
                name=node_name,
                image=image,
                size=vm_size,
                ex_userdata=cloud_data,
                ex_config_drive=True,
                networks=vm_network,
            )
            self._wait_until_vm_state_running()

            if no_of_volumes:
                self._create_attach_volumes(no_of_volumes, size_of_disks)
        except (ResourceNotFound, NetworkOpFailure, NodeError, VolumeOpFailure):
            raise
        except BaseException as be:  # noqa
            LOG.error(be, exc_info=True)
            raise NodeError(f"Unknown error. Failed to create VM with name {node_name}")

        # Ideally, we should be able to use HEAD to check if self.node is stale or not
        # instead of pulling the node details always. As a workaround, the self.node
        # is assigned the latest information after create is complete.
        self.node = self.driver.ex_get_node_details(node_id=self.node.id)

    def delete(self) -> None:
        """Remove the VM from the given OpenStack cloud."""
        # Deleting of the node when in building or pending state will fail. We are
        # checking for pending state as BUILD & PENDING map to the same value in
        # libcloud module.
        if self.node is None:
            return

        # Gather the current details of the node.
        self.node = self.driver.ex_get_node_details(node_id=self.node.id)
        if self.node.state == "pending":
            raise NodeDeleteFailure(f"{self.node.name} cannot be deleted.")

        LOG.info("Removing the instance with name %s", self.node.name)
        for ip in self.floating_ips:
            self.driver.ex_detach_floating_ip_from_node(self.node, ip)

        # At this point self.node is stale
        for vol in self.volumes:
            self.driver.detach_volume(volume=vol)
            self.driver.destroy_volume(volume=vol)

        self.driver.destroy_node(self.node)
        self.node = None

    def get_private_ip(self) -> str:
        """Return the private IP address of the VM."""
        return self.node.private_ips[0] if self.node else ""

    # Private methods to the object

    def _get_node(self, name: str) -> Node:
        """
        Retrieve the Node object using the provided name.

        The artifacts that are retrieved are
            - volumes
            - ip address
            - hostname
            - node_name
            - subnet

        Args:
            name: The name of the node whose details need to be retrieved.

        Return:
            Instance of the Node retrieved using the provided name.
        """
        url = f"/servers?name={name}"
        object_ = self.driver.connection.request(url).object
        servers = object_["servers"]

        if len(servers) != 1:
            raise ExactMatchFailed(
                f"Found none or more than one resource with name: {name}"
            )

        return self.driver.ex_get_node_details(servers[0]["id"])

    def _get_image(self, name: str) -> NodeImage:
        """
        Return a NodeImage instance using the provided name.

        Args:
            name: The name of the image to be retrieved.

        Return:
            NodeImage instance that is referenced by the image name.

        Raises:
            ExactMatchFailed - when the named image resource does not exist in the given
                               OpenStack cloud.
        """
        # A valid UUID means the caller passed an image id, not a name.
        try:
            if UUID(hex=name):
                return self.driver.get_image(name)
        except ValueError:
            LOG.debug("Given name is not an image ID")

        url = f"/v2/images?name={name}"
        object_ = self.driver.image_connection.request(url).object
        images = self.driver._to_images(object_, ex_only_active=False)

        if len(images) != 1:
            raise ExactMatchFailed(
                f"Found none or more than one image resource with name: {name}"
            )

        return images[0]

    def _get_vm_size(self, name: str) -> NodeSize:
        """
        Return a NodeSize instance found using the provided name.

        Args:
            name: The name of the VM size to be retrieved.
                  Example:
                        m1.small, m1.medium or m1.large

        Return:
            NodeSize instance that is referenced by the vm size name.

        Raises:
            ResourceNotFound - when the named vm size resource does not exist in the
                               given OpenStack Cloud.
        """
        for flavor in self.driver.list_sizes():
            if flavor.name == name:
                return flavor

        raise ResourceNotFound(f"Failed to retrieve vm size with name: {name}")

    def _get_network_by_name(self, name: str) -> OpenStackNetwork:
        """
        Retrieve the OpenStackNetwork instance using the provided name.

        Args:
            name: the name of the network.

        Returns:
            OpenStackNetwork instance referenced by the name.

        Raises:
            ResourceNotFound: when the named network resource does not exist in the
                              given OpenStack cloud
        """
        url = f"{self.driver._networks_url_prefix}?name={name}"
        object_ = self.driver.network_connection.request(url).object
        networks = self.driver._to_networks(object_)

        if not networks:
            raise ResourceNotFound(f"No network resource with name {name} found.")

        return networks[0]

    def _has_free_ip_addresses(self, net: OpenStackNetwork) -> bool:
        """
        Return True if the given network has more than 3 free ip addresses.

        This buffer of 3 free IPs is in place to avoid failures during node creation.
        As in OpenStack, the private IP request for allocation occurs towards the end
        of the workflow.

        When a subnet with free IPs is identified then it's CIDR information is
        assigned to self.subnet attribute on this object.

        Arguments:
            net: The OpenStackNetwork instance to be checked for IP availability.

        Returns:
            True on success else False
        """
        url = f"/v2.0/network-ip-availabilities/{net.id}"
        resp = self.driver.network_connection.request(url)
        subnets = resp.object["network_ip_availability"]["subnet_ip_availability"]

        for subnet in subnets:
            free_ips = subnet["total_ips"] - subnet["used_ips"]

            if free_ips > 3:
                self._subnet.append(subnet["cidr"])
                return True

        return False

    def _get_network(
        self, name: Optional[Union[List, str]] = None
    ) -> List[OpenStackNetwork]:
        """
        Return the first available OpenStackNetwork with a free IP address to lease.

        This method will search a preconfigured list of network names and return the
        first one that has more than 3 IP addresses to lease. One can override the
        preconfigured list by specifying a single network name.

        Args:
            name: (Optional), the network name to be retrieved in place of the default
                  list of networks.

        Returns:
            OpenStackNetwork instance that has free IP addresses to lease.

        Raises:
            ResourceNotFound when there no suitable networks in the environment.
        """
        default_network_names = [
            "provider_net_cci_12",
            "provider_net_cci_11",
            "provider_net_cci_9",
            "provider_net_cci_8",
            "provider_net_cci_7",
            "provider_net_cci_6",
            "provider_net_cci_5",
            "provider_net_cci_4",
        ]
        default_network_count = 1

        if name:
            network_names = name if isinstance(name, list) else [name]
            default_network_count = len(network_names)
        else:
            network_names = default_network_names

        rtn_nets = list()
        for net in network_names:
            # Treating an exception as a soft error as it is possible to find another
            # suitable network from the list.
            try:
                os_net = self._get_network_by_name(name=net)

                if not self._has_free_ip_addresses(net=os_net):
                    continue

                rtn_nets.append(os_net)
                if len(rtn_nets) == default_network_count:
                    return rtn_nets
            except BaseException as be:  # noqa
                LOG.warning(be)
                continue

        raise ResourceNotFound(f"No networks had free IP addresses: {network_names}.")

    def _wait_until_vm_state_running(self):
        """Wait till the VM moves to running state."""
        start_time = datetime.now()
        end_time = start_time + timedelta(seconds=1200)

        node = None
        while end_time > datetime.now():
            sleep(5)
            node = self.driver.ex_get_node_details(self.node.id)

            if node.state == "running":
                end_time = datetime.now()
                duration = (end_time - start_time).total_seconds()
                LOG.info(
                    "%s moved to running state in %d seconds.",
                    self.node.name,
                    int(duration),
                )
                return

            if node.state == "error":
                msg = (
                    "Unknown Error"
                    if not node.extra
                    else node.extra.get("fault").get("message")
                )
                raise NodeError(msg)

        raise NodeError(f"{node.name} is in {node.state} state.")

    def _create_attach_volumes(self, no_of_volumes: int, size_of_disk: int) -> None:
        """
        Create and attach the volumes.

        This method creates the requested number of volumes and then checks if each
        volume has moved to available state. Once the volume has moved to available,
        then it is attached to the node.

        Args:
            no_of_volumes: The number of volumes to be created.
            size_of_disk: The storage capacity of the volume in GiB.
        """
        LOG.info(
            "Creating %d volumes with %sGiB storage for %s",
            no_of_volumes,
            size_of_disk,
            self.node.name,
        )
        volumes = list()
        for item in range(0, no_of_volumes):
            vol_name = f"{self.node.name}-vol-{item}"
            volume = self.driver.create_volume(size_of_disk, vol_name)

            if not volume:
                raise VolumeOpFailure(f"Failed to create volume with name {vol_name}")

            volumes.append(volume)

        # Attach only after every volume has become available.
        for _vol in volumes:
            if not self._wait_until_volume_available(_vol):
                raise VolumeOpFailure(f"{_vol.name} failed to become available.")

        for _vol in volumes:
            if not self.driver.attach_volume(self.node, _vol):
                raise VolumeOpFailure("Unable to attach volume %s", _vol.name)

    def _wait_until_ip_is_known(self):
        """Wait until the VM has an IP address assigned.

        Raises:
            NetworkOpFailure: when no IP is assigned within 120 seconds.
        """
        end_time = datetime.now() + timedelta(seconds=120)
        while end_time > datetime.now():
            self.node = self.driver.ex_get_node_details(self.node.id)

            # Bug fix: the original version raised unconditionally after the
            # loop, even when an IP had been found. Additionally, the
            # ``ip_address`` property raises IndexError while no IP exists,
            # so check the raw IP lists instead.
            if self.node.public_ips or self.node.private_ips:
                return

            sleep(5)

        raise NetworkOpFailure("Unable to get IP for {}".format(self.node.name))

    def _wait_until_volume_available(self, volume: StorageVolume) -> bool:
        """Wait until the state of the StorageVolume is available."""
        tries = 0
        while True:
            sleep(3)
            tries += 1
            volume = self.driver.ex_get_volume(volume.id)

            if volume.state.lower() == "available":
                return True

            if "error" in volume.state.lower():
                LOG.error("%s state is %s", volume.name, volume.state)
                break

            if tries > 10:
                LOG.error("Max retries for %s reached.", volume.name)
                break

        return False

    def _get_subnet_cidr(self, id_: str) -> str:
        """Return the CIDR information of the given subnet id."""
        url = f"{self.driver._subnets_url_prefix}/{id_}"
        object_ = self.driver.network_connection.request(url).object
        subnet = self.driver._to_subnet(object_)

        if not subnet:
            raise ResourceNotFound("No matching subnet found.")

        return subnet.cidr

    # properties

    @property
    def ip_address(self) -> str:
        """Return the private IP address of the node."""
        if self.node is None:
            return ""

        if self.node.public_ips:
            return self.node.public_ips[0]

        return self.node.private_ips[0]

    @property
    def floating_ips(self) -> List[str]:
        """Return the list of floating IP's"""
        return self.node.public_ips if self.node else []

    @property
    def public_ip_address(self) -> str:
        """Return the public IP address of the node."""
        return self.node.public_ips[0]

    @property
    def hostname(self) -> str:
        """Return the hostname of the VM."""
        end_time = datetime.now() + timedelta(seconds=30)
        while end_time > datetime.now():
            try:
                name, _, _ = socket.gethostbyaddr(self.ip_address)

                if name is not None:
                    return name

            except socket.herror:
                # No reverse DNS entry; fall back to the node name.
                break
            except BaseException as be:  # noqa
                LOG.warning(be)

            sleep(5)

        return self.node.name

    @property
    def volumes(self) -> List[StorageVolume]:
        """Return the list of storage volumes attached to the node."""
        if self.node is None:
            return []

        return [
            self.driver.ex_get_volume(vol["id"])
            for vol in self.node.extra.get("volumes_attached", [])
        ]

    @property
    def subnet(self) -> str:
        """Return the subnet information."""
        if self.node is None:
            return ""

        if self._subnet:
            return self._subnet[0]

        networks = self.node.extra.get("addresses")
        for network in networks:
            net = self._get_network_by_name(name=network)
            subnet_id = net.extra.get("subnets")
            self._subnet.append(self._get_subnet_cidr(subnet_id))

        # Fixme: The CIDR returned needs to be part of the required network.
        return self._subnet[0]

    @property
    def shortname(self) -> str:
        """Return the shortform of the hostname."""
        return self.hostname.split(".")[0]

    @property
    def no_of_volumes(self) -> int:
        """Return the number of volumes attached to the VM."""
        return len(self.volumes)

    @property
    def role(self) -> List:
        """Return the Ceph roles of the instance."""
        return self._roles

    @role.setter
    def role(self, roles: list) -> None:
        """Set the roles for the VM."""
        from copy import deepcopy

        self._roles = deepcopy(roles)
| [
"ckulal@redhat.com"
] | ckulal@redhat.com |
bc0ad0f7ec39d42a50304cbfb1480cfe527a4b4f | d4df738d2066c5222080e043a95a9b230673af81 | /course_512/3.6_API/problem_3.6.4.py | fd758a474fa3c86d4e73a0aa1cafbcef08e81973 | [] | no_license | kazamari/Stepik | c2277f86db74b285e742854f1072897f371e87f5 | bf0224a4c4e9322e481263f42451cd263b10724c | refs/heads/master | 2021-05-04T19:06:02.110827 | 2018-03-26T09:06:09 | 2018-03-26T09:06:09 | 105,513,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,756 | py | '''
В этой задаче вам необходимо воспользоваться API сайта artsy.net
API проекта Artsy предоставляет информацию о некоторых деятелях искусства, их работах, выставках.
В рамках данной задачи вам понадобятся сведения о деятелях искусства (назовем их, условно, художники).
Вам даны идентификаторы художников в базе Artsy.
Для каждого идентификатора получите информацию о имени художника и годе рождения.
Выведите имена художников в порядке неубывания года рождения. В случае если у художников одинаковый год рождения,
выведите их имена в лексикографическом порядке.
Работа с API Artsy
Полностью открытое и свободное API предоставляют совсем немногие проекты. В большинстве случаев, для получения доступа
к API необходимо зарегистрироваться в проекте, создать свое приложение, и получить уникальный ключ (или токен),
и в дальнейшем все запросы к API осуществляются при помощи этого ключа.
Чтобы начать работу с API проекта Artsy, вам необходимо пройти на стартовую страницу документации к API
https://developers.artsy.net/start и выполнить необходимые шаги, а именно зарегистрироваться, создать приложение,
и получить пару идентификаторов Client Id и Client Secret. Не публикуйте эти идентификаторы.
После этого необходимо получить токен доступа к API. На стартовой странице документации есть примеры того, как можно
выполнить запрос и как выглядит ответ сервера. Мы приведем пример запроса на Python.
import requests
import json
client_id = '...'
client_secret = '...'
# инициируем запрос на получение токена
r = requests.post("https://api.artsy.net/api/tokens/xapp_token",
data={
"client_id": client_id,
"client_secret": client_secret
})
# разбираем ответ сервера
j = json.loads(r.text)
# достаем токен
token = j["token"]
Теперь все готово для получения информации о художниках. На стартовой странице документации есть пример того, как
осуществляется запрос и как выглядит ответ сервера. Пример запроса на Python.
# создаем заголовок, содержащий наш токен
headers = {"X-Xapp-Token" : token}
# инициируем запрос с заголовком
r = requests.get("https://api.artsy.net/api/artists/4d8b92b34eb68a1b2c0003f4", headers=headers)
# разбираем ответ сервера
j = json.loads(r.text)
Примечание:
В качестве имени художника используется параметр sortable_name в кодировке UTF-8.
Пример входных данных:
4d8b92b34eb68a1b2c0003f4
537def3c139b21353f0006a6
4e2ed576477cc70001006f99
Пример выходных данных:
Abbott Mary
Warhol Andy
Abbas Hamra
Примечание для пользователей Windows
При открытии файла для записи на Windows по умолчанию используется кодировка CP1251, в то время как для записи имен на
сайте используется кодировка UTF-8, что может привести к ошибке при попытке записать в файл имя с необычными символами.
Вы можете использовать print, или аргумент encoding функции open.
'''
import requests
import json
# NOTE(review): API credentials are hard-coded and TLS certificate
# verification is disabled (verify=False) below -- acceptable only for a
# throwaway course exercise; never do this in production code.
client_id = '8e3ae03a8bf8050b30c9'
client_secret = 'd3a41eb062e10a397dbcab18b31b317f'
# Request an xapp API token (see https://developers.artsy.net/start).
r = requests.post("https://api.artsy.net/api/tokens/xapp_token",
                  data={
                      "client_id": client_id,
                      "client_secret": client_secret
                  }, verify=False)
# Parse the server response.
j = json.loads(r.text)
# Extract the token.
token = j["token"]
# Build the request header carrying the token.
headers = {"X-Xapp-Token": token}
artists = []
with open('dataset_24476_4.txt', 'r') as f:
    for line in f:
        # Query one artist per input id, sending the auth header.
        res = requests.get("https://api.artsy.net/api/artists/{}".format(line.strip()), headers=headers, verify=False)
        res.encoding = 'utf-8'
        j = res.json()
        # Store (birthday, name) so sorted() orders by birth year first and
        # lexicographically by name for equal years, as the task requires.
        artists.append((j['birthday'], j['sortable_name']))
with open('test_24476_4.txt', 'w', encoding="utf-8") as file:
    for bd, name in sorted(artists):
        file.write(name + '\n')
"maha_on@yahoo.com"
] | maha_on@yahoo.com |
f5c721c8f68c4f1e1acca8588ea9de5a6ee51dab | 379fc4e0e98a7575b93ca4f60d1301adb5c155e3 | /morphling/cape_modules/signatures/app_presence_reg.py | d7f28470c2b5fe638565059e17200fd2e57e422d | [] | no_license | vangeance666/morphling_all | 7c386551dd09835268d1caf9c645cf76ede4d078 | 288b69b3e47f4585decfba980889b365d717d0ab | refs/heads/main | 2023-06-29T02:22:29.070904 | 2021-08-02T12:51:50 | 2021-08-02T12:51:50 | 391,045,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,890 | py | import json
import os
import re
from configs.config import *
from .signature import Signature
class AppPresenceReg(Signature):
    """CAPE signature that maps registry keys observed during analysis to
    known Chocolatey package signatures and reports which packages appear
    to be present on the analyzed machine.
    """
    name = "app_presence_reg"
    description = "To extract chocolatey packages to install based on registry keys found"
    severity = -1
    categories = ["context"]
    authors = ["boomer_kang"]

    def __init__(self, results=None):
        # Fix: avoid the mutable-default-argument pitfall -- a shared []
        # default would be aliased across every instance of this signature.
        self.results = results if results is not None else []
        self.data = []

    def run(self) -> bool:
        """Match observed registry keys against the package signature file.

        Returns True when either a diagnostic message or at least one
        package match was recorded in ``self.data``; False otherwise.
        """
        choco_sig_file = '/home/cape/Desktop/signatures/registry.json'
        if not os.path.isfile(choco_sig_file):
            self.data.append("Choco registry signature file not found.")
            return True
        if os.stat(choco_sig_file).st_size == 0:
            self.data.append("Choco registry signature file is empty.")
            return True
        try:
            with open(choco_sig_file, 'r', encoding="UTF-8", errors='ignore') as f:
                pkgs_sig = json.load(f)
        except Exception:  # narrowed from a bare except; keep best-effort behaviour
            self.data.append("Error loading json signature file.")
            return True
        store = set()  # deduplicates identical package/version hits
        for pkg in pkgs_sig:
            # Skip malformed entries that lack any of the required fields.
            if not all(K in pkg for K in ('package_name', 'version', 'signatures')):
                continue
            for pkg_reg_keys in pkg['signatures']:
                # Compile once per pattern instead of re-matching the raw
                # string against every observed registry key.
                try:
                    pattern = re.compile(pkg_reg_keys, flags=re.IGNORECASE)
                except re.error:
                    continue  # bad regex in the signature file
                for keys in self.results['behavior']['summary']['keys']:
                    try:
                        if pattern.match(keys):
                            dict_str = "{{'package_name': '{}', 'version': '{}'}}".format(pkg['package_name'], pkg['version'])
                            store.add(dict_str)
                    except Exception:  # narrowed from a bare except
                        continue
        if store:
            self.data = [{"package_found": dict_str} for dict_str in store]
            return True
        return False
| [
"1902132@sit.singaporetech.edu.sg"
] | 1902132@sit.singaporetech.edu.sg |
f989b3d66de8c67aa344abf7820979188fa72ac8 | ea9f2bb8ba03ac8dff039c448d6288eed7b05b16 | /src/main/python/DataPrep.py | 33277344da1ba958f9b8606e58da6939b2ad95dc | [] | no_license | brightlaboratory/ACT | dfda62ba4d3fef838c30f3f36c29f469e942f64a | 230ac112edf8a858271af8e06aeb31b62919d19a | refs/heads/master | 2018-09-25T05:01:30.979479 | 2018-06-08T19:29:02 | 2018-06-08T19:29:02 | 120,819,696 | 0 | 0 | null | 2018-02-27T18:26:45 | 2018-02-08T21:23:10 | Python | UTF-8 | Python | false | false | 10,652 | py | import random, math, datetime, os
import numpy as np
class DataProperties:
def __init__(self, cohort, gender_odds_ratio, pt_odds_ratio, ptt_odds_ratio, platelet_odds_ratio, doa_rate=0.15,
male_ratio=0.75, pt_abnormal=0.27, ptt_abnormal=0.08, plat_abnormal=0.04):
self.gender_odds_ratio = gender_odds_ratio
self.pt_odds_ratio = pt_odds_ratio
self.ptt_odds_ratio = ptt_odds_ratio
self.platelet_odds_ratio = platelet_odds_ratio
self.doa_rate = doa_rate
self.male = math.floor(cohort*male_ratio)
self.female = cohort - self.male
self.dead = math.ceil(cohort * doa_rate)
self.gender_dead = self.dead
self.pt_dead = self.dead
self.ptt_dead = self.dead
self.platelet_dead = self.dead
# self.pt_dead = math.floor(cohort * 0.099)
# self.ptt_dead = math.floor(cohort*0.103)
# self.platelet_dead = math.floor(cohort*0.09)
self.gender_alive = cohort - self.gender_dead
self.pt_alive = cohort - self.pt_dead
self.ptt_alive = cohort - self.ptt_dead
self.platelet_alive = cohort - self.platelet_dead
self.pt_low = math.ceil(cohort * pt_abnormal)
self.ptt_low = math.ceil(cohort * ptt_abnormal)
self.platelet_low = math.ceil(cohort * plat_abnormal)
self.pt_high = cohort - self.pt_low
self.ptt_high = cohort - self.ptt_low
self.platelet_high = cohort - self.platelet_low
@staticmethod
def f(a, b, c):
if b ** 2 - 4 * a * c < 0:
# print(b ** 2 - 4 * a * c)
exit("Complex root detected: ")
root1 = (-b + math.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
root2 = (-b - math.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
if root1 > 0 and root2 > 0:
s = math.ceil(min(root1, root2))
elif root1 <= 0:
s = root2
else:
s = root1
return s
def get_gender_values(self):
p = self.gender_odds_ratio - 1
q = self.female - self.gender_alive - self.gender_odds_ratio * self.female - self.gender_odds_ratio * self.gender_dead
r = self.gender_odds_ratio * self.female * self.gender_dead
gender_dead_female = DataProperties.f(p, q, r)
gender_dead_male = self.gender_dead - gender_dead_female
gender_alive_male = self.male - gender_dead_male
gender_alive_female = self.female - gender_dead_female
return gender_dead_female, gender_dead_male, gender_alive_female, gender_alive_male
def get_pt_values(self):
p = self.pt_odds_ratio - 1
q = self.pt_low - self.pt_alive - self.pt_odds_ratio * self.pt_low - self.pt_odds_ratio * self.pt_dead
r = self.pt_odds_ratio * self.pt_low * self.pt_dead
pt_dead_low = DataProperties.f(p, q, r)
pt_dead_high = self.pt_dead - pt_dead_low
pt_alive_high = self.pt_high - pt_dead_high
pt_alive_low = self.pt_low - pt_dead_low
return pt_dead_low, pt_dead_high, pt_alive_low, pt_alive_high
def get_ptt_values(self):
p = self.ptt_odds_ratio - 1
q = self.ptt_low - self.ptt_alive - self.ptt_odds_ratio * self.ptt_low - self.ptt_odds_ratio * self.ptt_dead
r = self.ptt_odds_ratio * self.ptt_low * self.ptt_dead
ptt_dead_low = DataProperties.f(p, q, r)
ptt_dead_high = self.ptt_dead - ptt_dead_low
ptt_alive_high = self.ptt_high - ptt_dead_high
ptt_alive_low = self.ptt_low - ptt_dead_low
return ptt_dead_low, ptt_dead_high, ptt_alive_low, ptt_alive_high
def get_platelet_values(self):
p = self.platelet_odds_ratio - 1
q = self.platelet_low - self.platelet_alive - self.platelet_odds_ratio * self.platelet_low - self.platelet_odds_ratio * self.platelet_dead
r = self.platelet_odds_ratio * self.platelet_low * self.platelet_dead
platelet_dead_low = DataProperties.f(p, q, r)
platelet_dead_high = self.platelet_dead - platelet_dead_low
platelet_alive_high = self.platelet_high - platelet_dead_high
platelet_alive_low = self.platelet_low - platelet_dead_low
return platelet_dead_low, platelet_dead_high, platelet_alive_low, platelet_alive_high
class DataPrep:
    """Builds the synthetic patient table.

    Column layout: 0 = patient id, 1 = age, 2 = gender, 3 = PT, 4 = PTT,
    5 = platelets, 6 = dead-on-arrival flag; -1 marks an unassigned cell.
    """

    def __init__(self, rows, columns):
        self.rows = rows
        self.columns = columns
        self.table = np.array([-1] * (rows * columns)).reshape(rows, columns)
        # Column 0 holds 1-based patient ids.
        self.table[:, 0] = [i for i in range(1, rows + 1)]

    def available_ids(self, col, val):
        """Return the row indices whose value in *col* equals *val*."""
        row_ids = np.where(self.table[:, col] == val)
        return [i for i in row_ids[0]]

    def generate_random(self, available_ids, sample, seed=1, flag=0):
        """Draw *sample* distinct row indices from *available_ids*.

        When *flag* is 1, rows already marked dead-on-arrival
        (column 6 == 0) are excluded before sampling.
        """
        if flag == 1:
            doa_rows = self.available_ids(6, 0)
            available_ids = list(set(available_ids) - set(doa_rows))
        random.seed(seed)
        return [available_ids[index] for index in random.sample(range(0, len(available_ids)), sample)]

    def add_data_to_col(self, col, count, val, seed=1):
        """Assign *val* to *count* randomly chosen unassigned rows of *col*."""
        free_ids = self.available_ids(col, -1)
        for row in self.generate_random(free_ids, count, seed):
            self.table[row, col] = val

    def add_age_data(self, col, sigma, mu, sample, seed=1):
        """Fill *col* with *sample* ages drawn from N(mu, sigma**2).

        Fix: also seed numpy's RNG -- the original seeded only the
        ``random`` module, so the normal draw was not reproducible even
        though a *seed* parameter was accepted.
        """
        np.random.seed(seed)
        random.seed(seed)
        s = [int(round(v)) for v in np.random.normal(mu, sigma, sample)]
        random.seed(seed)
        shuffled = [s[i] for i in random.sample(range(0, len(s)), sample)]
        for index, val in enumerate(shuffled):
            self.table[index, col] = abs(val)  # ages must be non-negative

    def add_data_wrt_doa(self, col, low_dead, high_dead, low_alive, high_alive, seed=1):
        """Populate *col* with low (0) / high (1) values consistent with the
        DOA column (column 6), using the four requested cell counts."""
        # low & dead: choose among rows already marked dead (col 6 == 0).
        ref_doa_ids = self.available_ids(6, 0)
        new_ids = self.generate_random(ref_doa_ids, low_dead, seed)
        for row in new_ids:
            self.table[row, col] = 0
        ref_doa_ids = list(set(ref_doa_ids) - set(new_ids))
        # high & dead: remaining dead rows.
        new_ids = self.generate_random(ref_doa_ids, high_dead, seed)
        for row in new_ids:
            self.table[row, col] = 1
        # low & alive: unassigned rows excluding dead ones (flag=1),
        # marking them alive (col 6 = 1) as a side effect.
        ref_col_ids = self.available_ids(col, -1)
        new_ids = self.generate_random(ref_col_ids, low_alive, seed, flag=1)
        for row in new_ids:
            self.table[row, col] = 0
            if self.table[row, 6] != 1:
                self.table[row, 6] = 1
        # high & alive: the rest, also marked alive.
        ref_col_ids = self.available_ids(col, -1)
        new_ids = self.generate_random(ref_col_ids, high_alive, seed, flag=1)
        for row in new_ids:
            self.table[row, col] = 1
            if self.table[row, 6] != 1:
                self.table[row, 6] = 1

    def load_table(self):
        """Placeholder for loading a previously generated table."""
        pass
if __name__ == "__main__":
    # Cohort size and number of generation rounds.
    n = 10000
    no_of_seed_sets = 10  # used to set the distinct number of seeds sets to be used. Default to 1
    doa = 0.5
    # Target odds ratios and marginal proportions for each attribute.
    gender_OR = 10
    gender_ratio = 0.5
    pt_OR = 6
    pt_abn = 0.4
    ptt_OR = 10
    ptt_abn = 0.3
    plat_OR = 10
    plat_abn = 0.3
    # One seed per attribute per round.
    # NOTE(review): consecutive rounds reuse overlapping seed values (round
    # 2's age seed equals round 1's gender seed) -- confirm this is intended.
    seed_list = {'age': [], 'gender': [], 'pt': [], 'ptt': [], 'plat': [], 'doa': []}
    j = 1
    for _ in range(no_of_seed_sets):
        seed_list['age'].append(j)
        seed_list['gender'].append(j+1)
        seed_list['pt'].append(j+2)
        seed_list['ptt'].append(j+3)
        seed_list[ 'plat'].append(j+4)
        seed_list['doa'].append(j+5)
        j = j+1
    # Solve the contingency tables once; they are the same for every round.
    dp_obj = DataProperties(n, gender_OR, pt_OR, ptt_OR, plat_OR, doa, gender_ratio, pt_abn, ptt_abn, plat_abn)
    dt1 = str('{:%Y%m%d_%H%M%S}'.format(datetime.datetime.now()))
    x=0
    for seed in range(no_of_seed_sets):
        print("round: ", seed+1)
        # Fresh table per round: col 0 = id, cols 1-5 = attributes, col 6 = DOA.
        table = DataPrep(n, 7)
        table.add_data_to_col(6, dp_obj.dead, 0, seed_list['doa'][seed])
        # table.add_data_to_col(2, dp_obj.male, 1, seed_list['gender'][seed])
        # table.add_data_to_col(2, dp_obj.female, 0, seed_list['gender'][seed])
        table.add_age_data(col=1, sigma=19, mu=36, sample=n, seed=seed_list['age'][seed])
        # Cell counts for each 2x2 outcome table.
        n_gen_dead_f, n_gen_dead_m, n_gen_alive_f, n_gen_alive_m = dp_obj.get_gender_values()
        n_pt_dead_low, n_pt_dead_high, n_pt_alive_low, n_pt_alive_high = dp_obj.get_pt_values()
        n_ptt_dead_low, n_ptt_dead_high, n_ptt_alive_low, n_ptt_alive_high = dp_obj.get_ptt_values()
        n_plat_dead_low, n_plat_dead_high, n_plat_alive_low, n_plat_alive_high = dp_obj.get_platelet_values()
        dt = str('{:%Y%m%d_%H%M%S}'.format(datetime.datetime.now()))
        '''
        filename = '/home/nms/PycharmProjects/ATC/data/' + "oddsratio_src_" + str(i) + "_" + str(j) + "_" \
               + str(k) + "_" + str(l) + "_" + dt + ".csv"
        d = {'pt': [n_pt_dead_low, n_pt_dead_high, n_pt_alive_low, n_pt_alive_high],
             'ptt': [n_ptt_dead_low, n_ptt_dead_high, n_ptt_alive_low, n_ptt_alive_high],
             'plat': [n_plat_dead_low, n_plat_dead_high, n_plat_alive_low, n_plat_alive_high],
             'pt_or': (n_pt_dead_low*n_pt_alive_high)/(n_pt_dead_high*n_pt_alive_low),
             'ptt_or': (n_ptt_dead_low * n_ptt_alive_high) / (n_ptt_dead_high * n_ptt_alive_low),
             'plat_or': (n_plat_dead_low * n_plat_alive_high) / (n_plat_dead_high * n_plat_alive_low)}
        with open('/home/nms/PycharmProjects/ATC/data/oddsratio_src_' + dt1 + '.txt', 'a') as tf:
            tf.write(filename)
            tf.write(str(d))
            # tf.write(str(d['ptt']))
            # tf.write(str(d['plat']))
        '''
        # Fill gender and the three lab-value columns consistently with DOA.
        table.add_data_wrt_doa(2, n_gen_dead_f, n_gen_dead_m, n_gen_alive_f, n_gen_alive_m, seed_list['gender'][seed])
        table.add_data_wrt_doa(3, n_pt_dead_low, n_pt_dead_high, n_pt_alive_low, n_pt_alive_high, seed_list['pt'][seed])
        table.add_data_wrt_doa(4, n_ptt_dead_low, n_ptt_dead_high, n_ptt_alive_low, n_ptt_alive_high,
                               seed_list['ptt'][seed])
        table.add_data_wrt_doa(5, n_plat_dead_low, n_plat_dead_high, n_plat_alive_low, n_plat_alive_high,
                               seed_list['plat'][seed])
        # Persist the round's table; filename encodes seeds, odds ratios and timestamp.
        filename = '/home/nms/PycharmProjects/ATC/data/dataset_{0}{1}{2}{3}{4}{5}_{6}_{7}_{8}_{9}_{10}.csv'.format(
            str(seed_list['age'][seed]), str(seed_list['gender'][seed]), str(seed_list['pt'][seed]),
            str(seed_list['ptt'][seed]), str(seed_list['plat'][seed]), str(seed_list['doa'][seed]), str(gender_OR),
            str(pt_OR), str(ptt_OR), str(plat_OR), dt)
        np.savetxt(filename, table.table, delimiter=",")
| [
"noreply@github.com"
] | noreply@github.com |
3c47cdbf8f7591b872a295e5e0c15b6b9822385f | b0a3d41497860ebde9ce7cf42b4155f29a8cc93f | /tensorflow/QuestionAPart2_async.py | 614554eff8efe51720838b2981f2242ae71b9c0f | [] | no_license | Sripradha-karkala/Big-Data-CS744 | 9437141532d90bf62468c12ec00bf8a568fdc480 | b4337c391e9f262163c9baba16582a8efe56c45c | refs/heads/master | 2021-08-28T15:41:29.331336 | 2017-12-12T17:24:34 | 2017-12-12T17:24:34 | 105,720,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,357 | py | import tensorflow as tf
import os
import pdb
import time
import logging
# number of features in the criteo dataset after one-hot encoding
logging.basicConfig(level=logging.INFO, filename="output_async", filemode="a+",
                    format="%(asctime)-15s %(levelname)-8s %(message)s")
# Each worker identifies itself with --task_index on the command line.
tf.app.flags.DEFINE_integer("task_index", 0, "Index of the worker task")
FLAGS = tf.app.flags.FLAGS
# Number of features in the one-hot-encoded criteo dataset.
num_features = 33762578
eta = 0.01  # SGD learning rate
iterations = 200000  # total training steps
test_set = 10000  # held-out examples scored per evaluation
freq = 1000  # evaluate accuracy every `freq` iterations
# Input shards assigned to each worker, indexed by task_index.
file_distributions = [[
    "/home/ubuntu/criteo-tfr/tfrecords00",
    "/home/ubuntu/criteo-tfr/tfrecords01",
    "/home/ubuntu/criteo-tfr/tfrecords02",
    "/home/ubuntu/criteo-tfr/tfrecords03",
    "/home/ubuntu/criteo-tfr/tfrecords04",
],
[
    "/home/ubuntu/tfrecords05",
    "/home/ubuntu/tfrecords06",
    "/home/ubuntu/tfrecords07",
    "/home/ubuntu/tfrecords08",
    "/home/ubuntu/tfrecords09",
],
[
    "/home/ubuntu/tfrecords10",
    "/home/ubuntu/tfrecords11",
    "/home/ubuntu/tfrecords12",
    "/home/ubuntu/tfrecords13",
    "/home/ubuntu/tfrecords14",
],
[
    "/home/ubuntu/tfrecords15",
    "/home/ubuntu/tfrecords16",
    "/home/ubuntu/tfrecords17",
    "/home/ubuntu/tfrecords18",
    "/home/ubuntu/tfrecords19",
],
[
    "/home/ubuntu/tfrecords20",
    "/home/ubuntu/tfrecords21",
]]
def increment_acc():
    """Return an op that adds one to the running correct-prediction counter."""
    add_one = tf.assign_add(total_acc_async, 1)
    return add_one
def do_nothing():
    """Return a constant zero op (the no-op branch for tf.cond)."""
    zero = tf.constant(0, dtype=tf.int64)
    return zero
g = tf.Graph()
with g.as_default():
    # creating a model variable on task 0. This is a process running on node vm-48-1
    with tf.device("/job:worker/task:0"):
        # Shared model: one weight per one-hot feature, plus bookkeeping
        # variables for the evaluation pass.
        w_async = tf.Variable(tf.ones([num_features,]), name="model_async", dtype=tf.float32)
        derived_label_async = tf.Variable(0, name="derived_label_async", dtype=tf.float32)
        total_acc_async = tf.Variable(0, name='Total_acc_async', dtype=tf.int64)
        # Held-out test shard, read on the parameter-holding task.
        file_test_queue = tf.train.string_input_producer(["/home/ubuntu/criteo-tfr/tfrecords22"], num_epochs=None)
        test_reader = tf.TFRecordReader()
        _, serialized_example = test_reader.read(file_test_queue)
        test_features = tf.parse_single_example(serialized_example,
                                                features={
                                                    'label': tf.FixedLenFeature([1], dtype=tf.int64),
                                                    'index' : tf.VarLenFeature(dtype=tf.int64),
                                                    'value' : tf.VarLenFeature(dtype=tf.float32),
                                                })
        test_label = test_features['label']
        test_value = test_features['value']
        test_index = test_features['index']
        test_indices = test_index.values
        test_values = test_value.values
        # Only the weights at the example's non-zero feature indices matter.
        w_small_test = tf.gather(w_async, test_indices)
    # creating 5 reader operators to be placed on different operators
    # here, they emit predefined tensors. however, they can be defined as reader
    # operators as done in "exampleReadCriteoData.py"
    with tf.device("/job:worker/task:%d" % FLAGS.task_index):
        # We first define a filename queue comprising 5 files.
        filename_queue = tf.train.string_input_producer(file_distributions[FLAGS.task_index], num_epochs=None)
        # TFRecordReader creates an operator in the graph that reads data from queue
        reader = tf.TFRecordReader()
        # Include a read operator with the filenae queue to use. The output is a string
        # Tensor called serialized_example
        _, serialized_example = reader.read(filename_queue)
        # The string tensors is essentially a Protobuf serialized string. With the
        # following fields: label, index, value. We provide the protobuf fields we are
        # interested in to parse the data. Note, feature here is a dict of tensors
        features = tf.parse_single_example(serialized_example,
                                           features={
                                               'label': tf.FixedLenFeature([1], dtype=tf.int64),
                                               'index' : tf.VarLenFeature(dtype=tf.int64),
                                               'value' : tf.VarLenFeature(dtype=tf.float32),
                                           }
                                           )
        label = features['label']
        index = features['index']
        value = features['value']
        indices = index.values
        values = value.values
        w_small = tf.gather(w_async, indices)
        # since we parsed a VarLenFeatures, they are returned as SparseTensors.
        # To run operations on then, we first convert them to dense Tensors as below.
        # Sparse dot product w.x followed by the logistic-loss gradient for
        # this single example.
        mat_mul = tf.reduce_sum(tf.mul(w_small, values))
        sigmoid = tf.sigmoid(tf.mul(tf.cast(label, tf.float32), mat_mul))
        local_gradient = tf.mul(tf.cast(label, tf.float32), tf.mul((sigmoid - 1), values))
    # we create an operator to aggregate the local gradients
    with tf.device("/job:worker/task:0"):
        # Asynchronous update: each worker applies its own scaled gradient
        # directly to the shared weights, with no synchronization barrier.
        assign_op = tf.scatter_sub(w_async,indices, tf.mul(local_gradient, eta))
        derived_label_async = tf.sign(tf.reduce_sum(tf.mul(w_small_test, test_values)))
        equal_test = tf.equal(tf.reshape(test_label, []), tf.cast(derived_label_async, tf.int64))
        accuracy = tf.cond(equal_test, increment_acc, do_nothing, name='Accuracy')
        reset_acc_var = total_acc_async.assign(0)
with tf.Session("grpc://vm-17-%d:2222" % (FLAGS.task_index+1)) as sess:
    if FLAGS.task_index == 0:
        coord = tf.train.Coordinator()
        sess.run(tf.initialize_all_variables())
        file_threads = tf.train.start_queue_runners(sess=sess, coord = coord)
        while iterations >= 0:
            logging.info('Iteration no: %d' %(iterations))
            start_time = time.time()
            sess.run(assign_op)
            end_time = time.time()
            logging.info('Time taken: %f' %(end_time - start_time))
            if iterations % freq == 0:
                # Periodic evaluation: reset the counter, score the held-out
                # examples, and log the fraction predicted correctly.
                sess.run(reset_acc_var)
                for j in range(0, test_set):
                    sess.run(accuracy)
                logging.info('Accuracy: %f' % (float(total_acc_async.eval())/test_set))
                print w_async.eval()
            iterations -= 1
        coord.request_stop()
        coord.join(file_threads, stop_grace_period_secs=5)
        sess.close()
| [
"skarkala@wisc.edu"
] | skarkala@wisc.edu |
ac603ff1c371454b11cf128921790f5144556df2 | 7af161332a67d905b33e6517a89c723f1725a378 | /unidade8/fila_de_atendimento_de_pacientes/fila_de_atendimento_de_pacientes.py | 803b8f9228ac8477cfd4749cfc0d8d1b6acdf4c5 | [] | no_license | Itamar-Farias/TST_P1_UFCG | f4839a18d18d3c5cecdf329a8fc576349c1bc2f9 | c02a0f3911f5cada6aa1d38d211e364ac0527075 | refs/heads/master | 2023-04-06T10:27:06.281918 | 2021-04-08T12:48:45 | 2021-04-08T12:48:45 | 355,337,389 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | # coding: utf-8
# Itamar da Silva Farias 115210021
# Programação I
fila_unica = raw_input().split()
n = int(raw_input())
quantidade_por_medico = len(fila_unica) / n
nova_fila = []
for i in range(len(fila_unica)):
| [
"81915356+Itamar-Farias@users.noreply.github.com"
] | 81915356+Itamar-Farias@users.noreply.github.com |
c2471403aa320202deac3015c37cb0a0ac6e08a3 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/storage/v20190601/get_private_endpoint_connection.py | 198ad11aa90ab756d8d0907c319a4050996605d9 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,983 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
'get_private_endpoint_connection_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
"""
The Private Endpoint Connection resource.
"""
def __init__(__self__, id=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint and not isinstance(private_endpoint, dict):
raise TypeError("Expected argument 'private_endpoint' to be a dict")
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
"""
The resource of private end point.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the private endpoint connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the result awaitable: the `if False` branch never runs, but
        # its `yield` turns this method into a generator, which is what the
        # await protocol requires; the plain result is returned immediately.
        if False:
            yield self
        return GetPrivateEndpointConnectionResult(
            id=self.id,
            name=self.name,
            private_endpoint=self.private_endpoint,
            private_link_service_connection_state=self.private_link_service_connection_state,
            provisioning_state=self.provisioning_state,
            type=self.type)
def get_private_endpoint_connection(account_name: Optional[str] = None,
                                    private_endpoint_connection_name: Optional[str] = None,
                                    resource_group_name: Optional[str] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
    """
    The Private Endpoint Connection resource.


    :param str account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
    :param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
    :param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
    """
    # Provider-side argument names are camelCase.
    __args__ = {
        'accountName': account_name,
        'privateEndpointConnectionName': private_endpoint_connection_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    invoke_result = pulumi.runtime.invoke('azure-native:storage/v20190601:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult)
    __ret__ = invoke_result.value
    return AwaitableGetPrivateEndpointConnectionResult(
        id=__ret__.id,
        name=__ret__.name,
        private_endpoint=__ret__.private_endpoint,
        private_link_service_connection_state=__ret__.private_link_service_connection_state,
        provisioning_state=__ret__.provisioning_state,
        type=__ret__.type)
@_utilities.lift_output_func(get_private_endpoint_connection)
def get_private_endpoint_connection_output(account_name: Optional[pulumi.Input[str]] = None,
                                           private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                                           resource_group_name: Optional[pulumi.Input[str]] = None,
                                           opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
    """
    The Private Endpoint Connection resource.

    Output-typed variant of ``get_private_endpoint_connection``: the
    decorator lifts the plain function so arguments may be Inputs/Outputs;
    the body is intentionally just ``...``.

    :param str account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
    :param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
    :param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
    """
    ...
| [
"noreply@github.com"
] | noreply@github.com |
ba1fa2b4ccb40d50a75e7fc2f2a9890a12d5bd1d | b5ebdcde6f13f7fcf077c5f95da4df41755f0b16 | /course2/week3/mbox.py | cd3847411b706f04f09f7bba065696562b9e5a38 | [] | no_license | jimtheguy/PY4E | da2207caccb7d5f99878784ac3201fbe0b269bfd | 8f399fe5ace65ba7b7322a938cc0b1ad126e5bfe | refs/heads/main | 2022-12-30T08:39:47.092263 | 2020-10-21T06:36:50 | 2020-10-21T06:36:50 | 305,027,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | # Jim R
# Write a program that prompts for a file name, then opens that file
# and reads through the file, looking for lines of the form:
# X-DSPAM-Confidence: 0.8475
# Count these lines and extract the floating point values
# from each of the lines and compute the average of those
# values and produce an output as shown below. Do not use
# the sum() function or a variable named sum in your solution.
# Get the filename from the user
# Get the filename from the user
fname = input("Enter file name:")
count = 0
total = 0.0
# Context manager ensures the file is closed even if an error occurs.
with open(fname) as fhandle:
    for line in fhandle:
        # Only the spam-confidence header lines are of interest.
        if not line.startswith("X-DSPAM-Confidence:"):
            continue
        count = count + 1  # increase the count for average calculation
        # Fix: parse everything after the colon.  The original sliced from
        # the first "." which drops the integer part, so a value such as
        # "1.0000" would have been read as 0.0.
        total = total + float(line[line.find(":") + 1:])
# Guard against a file with no matching lines (avoids ZeroDivisionError).
average = total / count if count else 0.0
# print the average
print("Average spam confidence:", average)
| [
"jamesrouse85@gmail.com"
] | jamesrouse85@gmail.com |
3d36334691ca67c3f7782449abe5a247bde7633c | 4d0afce4377752b2574f82911d21a1e7bab6973b | /tests/test_nothing.py | de347ec344c3be96ee31dfe47731670ab34462de | [] | no_license | Sunshard/csbot | a8546d1dc91a3614ad586a8709be6dfcc09a9ab7 | 6975ebf517ff82f68894a7f52498cfc6cc778621 | refs/heads/master | 2021-01-15T16:47:09.356787 | 2012-04-21T14:32:18 | 2012-04-21T14:32:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | import unittest
class TestNothing(unittest.TestCase):
def test_nothing(self):
self.assertEquals(True, True)
| [
"alan.briolat@gmail.com"
] | alan.briolat@gmail.com |
25bc3be33edf11b325941a166313b77fcd34b28a | cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc | /Python Books/Machine-Learning-Python/9781118961742_all code files/06/simpleBagging.py | 72c83810c832b413e58d7f6b9fbb92e5a85022e9 | [] | no_license | theGreenJedi/Path | df24fca355590efef0c6cb5c52e7216c6b5d2464 | b5ed2805dbb046480929e49e550bfd8af5bb4d6f | refs/heads/master | 2023-07-27T14:23:37.694546 | 2021-07-16T01:38:55 | 2021-07-16T01:38:55 | 87,686,563 | 8 | 2 | null | 2023-07-11T22:49:03 | 2017-04-09T05:57:30 | Jupyter Notebook | UTF-8 | Python | false | false | 2,973 | py | __author__ = 'mike-bowles'
import numpy
import matplotlib.pyplot as plot
from sklearn import tree
from sklearn.tree import DecisionTreeRegressor
from math import floor
import random
#Build a simple data set with y = x + random
nPoints = 1000
#x values for plotting (note: nPoints + 1 values, i.e. 1001 points)
xPlot = [(float(i)/float(nPoints) - 0.5) for i in range(nPoints + 1)]
#x needs to be list of lists (sklearn expects 2-D attribute arrays).
x = [[s] for s in xPlot]
#y (labels) has random noise added to x-value
#set seed
random.seed(1)
y = [s + numpy.random.normal(scale=0.1) for s in xPlot]
#take fixed test set 30% of sample
# NOTE(review): indices are sampled from range(nPoints) while x/y contain
# nPoints + 1 entries, so the last point never enters either split -- confirm.
nSample = int(nPoints * 0.30)
idxTest = random.sample(range(nPoints), nSample)
idxTest.sort()
idxTrain = [idx for idx in range(nPoints) if not(idx in idxTest)]
#Define test and training attribute and label sets
xTrain = [x[r] for r in idxTrain]
xTest = [x[r] for r in idxTest]
yTrain = [y[r] for r in idxTrain]
yTest = [y[r] for r in idxTest]
#train a series of models on random subsets of the training data
#collect the models in a list and check error of composite as list grows
#maximum number of models to generate
numTreesMax = 20
#tree depth - typically at the high end
treeDepth = 1
#initialize a list to hold models
modelList = []
predList = []
#number of samples to draw for stochastic bagging
nBagSamples = int(len(xTrain) * 0.5)
for iTrees in range(numTreesMax):
    #bootstrap sample: drawn WITH replacement via random.choice
    idxBag = []
    for i in range(nBagSamples):
        idxBag.append(random.choice(range(len(xTrain))))
    xTrainBag = [xTrain[i] for i in idxBag]
    yTrainBag = [yTrain[i] for i in idxBag]
    modelList.append(DecisionTreeRegressor(max_depth=treeDepth))
    modelList[-1].fit(xTrainBag, yTrainBag)
    #make prediction with latest model and add to list of predictions
    latestPrediction = modelList[-1].predict(xTest)
    predList.append(list(latestPrediction))
#build cumulative prediction from first "n" models
mse = []
allPredictions = []
for iModels in range(len(modelList)):
    #average first "iModels" of the predictions
    prediction = []
    for iPred in range(len(xTest)):
        prediction.append(sum([predList[i][iPred] for i in range(iModels + 1)])/(iModels + 1))
    allPredictions.append(prediction)
    #mean squared error of the ensemble of size iModels + 1
    errors = [(yTest[i] - prediction[i]) for i in range(len(yTest))]
    mse.append(sum([e * e for e in errors]) / len(yTest))
nModels = [i + 1 for i in range(len(modelList))]
plot.plot(nModels,mse)
plot.axis('tight')
plot.xlabel('Number of Models in Ensemble')
plot.ylabel('Mean Squared Error')
plot.ylim((0.0, max(mse)))
plot.show()
plotList = [0, 9, 19]
for iPlot in plotList:
plot.plot(xTest, allPredictions[iPlot])
plot.plot(xTest, yTest, linestyle="--")
plot.axis('tight')
plot.xlabel('x value')
plot.ylabel('Predictions')
plot.show()
print('Minimum MSE')
print(min(mse))
#With treeDepth = 1
#Minimum MSE
#0.0242960117899
#With treeDepth = 5
#Minimum MSE
#0.0118893503384 | [
"GreenJedi@protonmail.com"
] | GreenJedi@protonmail.com |
e3da9b2baf69f616f5058aad74c9ea3a57688241 | 61d6fea82d4f130b7bac8a530dd002b978971987 | /venv/bin/django-admin | cdc281f372db89f952df5c45d00c4164631836bd | [] | no_license | wellingtonasilva/inventory | 0fefbf023821c60f1741136af31a6d2581235220 | 53cce948163a0c21025ea2f418886533fc30cd23 | refs/heads/master | 2020-04-18T05:54:27.937105 | 2019-01-24T03:33:39 | 2019-01-24T03:33:39 | 167,296,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | #!/home/wellingtonasilva/wsilva/desenvolvimento/inventory/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"wellingtonasilva@gmail.com"
] | wellingtonasilva@gmail.com | |
e1972021e769dad89f8661aa299f9df61c0a7e04 | 6ecd5864b59e50bb2a2163a8e2e40696cbc96f51 | /first_app/migrations/0010_bio_profile_pic.py | a32f9a51bf9523782acbf0e728a6cba6ce8758f3 | [] | no_license | nohmichel/WeHike | ad27bc48496e0071fbdaf14e60f0172af1809870 | c62a38a188f5009d055158591605ce47cb75de6e | refs/heads/main | 2023-04-20T04:34:43.679818 | 2021-05-03T03:52:16 | 2021-05-03T03:52:16 | 355,797,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | # Generated by Django 3.1.7 on 2021-04-28 19:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('first_app', '0009_trailfam'),
]
operations = [
migrations.AddField(
model_name='bio',
name='profile_pic',
field=models.ImageField(blank=True, null=True, upload_to=''),
),
]
| [
"nohmichel@gmail.com"
] | nohmichel@gmail.com |
d6d7f92508e62da4625da30c26b6f39c90ee11ac | 8022f4f327fbd02cee9288147689b165658b52fe | /learn_django-master/web_config/urls.py | 4e6d1a1e45aae982289613e6262cba551ce568e9 | [] | no_license | kiiney/Develop-CIKIM | d69a37a37c11a782edb8855a43fe84a44fb8187a | 419fa42d4e61c9f411d5c81048ac7ce77d3ce7cd | refs/heads/master | 2022-12-19T19:01:29.755754 | 2020-10-14T06:17:15 | 2020-10-14T06:17:15 | 303,906,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,919 | py | """web_config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from hello import views as helloview
from board import views as boardview
from maps import views as mapsview
urlpatterns = [
path('admin/', admin.site.urls),
path("hello", helloview.hello, name="hello_home"),
path('', helloview.home, name='home'),
path('home', helloview.home),
path("hello/responsewithhtml/", helloview.responsewithhtml),
path("hello/form/", helloview.form, name="helloform"), # add
path("hello/requestwithservice/", helloview.requestwithservice),
path("hello/template/", helloview.template, name="template"), # add
path("hello/responsedeeplearning/", helloview.response_deeplearning, name="responsedeeplearning"), # add
path("board/listwithrawquery/", boardview.listwithrawquery, name="listwithrawquery"), # add
path("board/listwithrawquerywithpaginator/", boardview.listwithrawquerywithpaginator, name="listwithrawquerywithpaginator"), # add
path("board/listwithmongo/", boardview.listwithmongo, name="listwithmongo"), # add
path("board/listwithmongowithpaginator/", boardview.listwithmongowithpaginator, name="listwithmongowithpaginator"), # add
path('maps/showmapwithfolium', mapsview.showmapwithfolium, name='show_map'),
]
| [
"737jinifltcrew@gmail.com"
] | 737jinifltcrew@gmail.com |
1f60945be81d540128e6d2e147649ee8591c498f | 9d1e70e9e53dfcd19b0a1ede0e451bc5784132d6 | /day25/notes.py | 454f398e37d6d03e9eb12537f4564491b2a66dbe | [] | no_license | dmartinm95/Python-Bootcamp | bba32d2a22e7afef0592168ada9a84a2f91485aa | e13e1d0eb493ebce4b7f7e3fb4c10212b1c1b19a | refs/heads/master | 2023-08-15T07:15:23.440526 | 2021-10-14T17:02:07 | 2021-10-14T17:02:07 | 364,425,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,805 | py | # Day 25: Working with CSV Data and the Pandas Library
# import csv
# with open("day25\weather_data.csv") as file:
# data = csv.reader(file)
# temperatures = []
# for row in data:
# if row[1] != "temp":
# temperatures.append(int(row[1]))
# print(temperatures)
import pandas
from pandas.core.frame import DataFrame
data = pandas.read_csv("day25\weather_data.csv")
data_dict = data.to_dict()
print(data_dict)
temp_list = data["temp"].to_list()
print(temp_list)
print(data["temp"].max())
print(data["temp"].mean())
# Get data in Columns
print(data["condition"])
print(data.condition)
# Get data in Row
print(data[data.day == "Monday"])
print(data[data.temp == data.temp.max()])
monday = data[data.day == "Monday"]
print(f"Monday's temp in Fahrenheit = {int(monday.temp) * 9 / 5 + 32}")
# Create a DataFrame from scratch
data_dict = {
"students": ["Amy", "James", "Angela"],
"scores": [76, 56, 65],
}
data_frame = pandas.DataFrame(data_dict)
print(data_frame)
# Challenge: Figure out how many Gray, Black and Red squirrels there are, according to the csv file
squirrel_data = pandas.read_csv(
"day25\\2018_Central_Park_Squirrel_Census_-_Squirrel_Data.csv")
fur_color_column = squirrel_data["Primary Fur Color"]
grey_count = len(squirrel_data[squirrel_data["Primary Fur Color"] == "Gray"])
black_count = len(
squirrel_data[squirrel_data["Primary Fur Color"] == "Cinnamon"])
red_count = len(squirrel_data[squirrel_data["Primary Fur Color"] == "Black"])
output_dict = {
"Fur Color": ["grey", "red", "black"],
"Count": [grey_count, red_count, black_count],
}
output_data_frame = pandas.DataFrame(output_dict)
output_data_frame.to_csv("day25\squirrel_count.csv")
| [
"dmartinm1995@gmail.com"
] | dmartinm1995@gmail.com |
ffe934d1e29b84ffd554a5850fbc7df173f2951d | 597bc9c9c84861cdcb6e72505c9765b27dcca97a | /dcp/benchmark/ellipses.py | f95fec22316020af9cc166f5ebc9a2c148f5720d | [] | no_license | mschmidt25/deep-capsule-prior | 46b5b19bd5fd184321f6d6c0e70316b724b76f98 | 09b953911b201bffa7189b8e0276814dd876ecbc | refs/heads/master | 2023-06-10T09:26:35.553549 | 2021-06-28T07:56:41 | 2021-06-28T07:56:41 | 379,515,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,696 | py | import argparse
from dival import DataPairs
from dcp.reconstructors import get_reconstructor
from dcp.utils.helper import load_standard_dataset
from dcp.utils.plot import plot_reconstructors_tests
from dcp.utils.helper import set_use_latex
set_use_latex()
def get_parser():
"""Adds arguments to the command"""
parser = argparse.ArgumentParser()
parser.add_argument('--method', type=str, default='dcptv')
parser.add_argument('--dataset', type=str, default='ellipses')
parser.add_argument('--start', type=int, default=0)
parser.add_argument('--count', type=int, default=100)
return parser
def main():
options = get_parser().parse_args()
# load data
dataset = load_standard_dataset(options.dataset, ordered=True)
test_data = dataset.get_data_pairs('test', 100)
sizes = [1.00]
reconstructors = []
for size_part in sizes:
reconstructors.append(get_reconstructor(options.method,
dataset=options.dataset,
size_part=size_part,
pretrained=True))
for i in range(options.start, options.count):
obs, gt = test_data[i]
test_pair = DataPairs([obs], [gt], name='test')
# compute and plot reconstructions
plot_reconstructors_tests(reconstructors, dataset.ray_trafo, test_pair,
save_name='{}-{}-test-{}'.format(
options.dataset, options.method, i),
fig_size=(9, 3),
cmap='pink')
if __name__ == "__main__":
main()
| [
"schmiddie3@live.de"
] | schmiddie3@live.de |
b3cbcb1d5bbbf22e60bf51058c034822d2297c4c | f8da830331428a8e1bbeadf23345f79f1750bd98 | /msgraph-cli-extensions/beta/search_beta/azext_search_beta/vendored_sdks/search/_configuration.py | 76b296b982dacd86747b02dc4fa3d3ca51ea1334 | [
"MIT"
] | permissive | ezkemboi/msgraph-cli | e023e1b7589461a738e42cbad691d9a0216b0779 | 2ceeb27acabf7cfa219c8a20238d8c7411b9e782 | refs/heads/main | 2023-02-12T13:45:03.402672 | 2021-01-07T11:33:54 | 2021-01-07T11:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,605 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
VERSION = "unknown"
class SearchConfiguration(Configuration):
"""Configuration for Search.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param top: Show only the first n items.
:type top: int
:param skip: Skip the first n items.
:type skip: int
:param search: Search items by search phrases.
:type search: str
:param filter: Filter items by property values.
:type filter: str
:param count: Include count of items.
:type count: bool
"""
def __init__(
self,
credential, # type: "TokenCredential"
top=None, # type: Optional[int]
skip=None, # type: Optional[int]
search=None, # type: Optional[str]
filter=None, # type: Optional[str]
count=None, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
super(SearchConfiguration, self).__init__(**kwargs)
self.credential = credential
self.top = top
self.skip = skip
self.search = search
self.filter = filter
self.count = count
self.credential_scopes = ['https://management.azure.com/.default']
self.credential_scopes.extend(kwargs.pop('credential_scopes', []))
kwargs.setdefault('sdk_moniker', 'search/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
ac395c91283444e2dc581c9fa8210b27030a6524 | 85c618a75b722b7f3111c44262871bca4a9d0f8f | /tests/modifiedMexicanHatTest15.py | 283a1ab95dd30cf96ef9d964a2c93f524aee9aad | [
"CC0-1.0"
] | permissive | yaukwankiu/armor | 1969ba0fb850f8cec80f7f25f0c2d6cf1bc8dc22 | 6c57df82fe3e7761f43f9fbfe4f3b21882c91436 | refs/heads/master | 2020-05-18T09:17:13.654568 | 2014-12-12T00:08:49 | 2014-12-12T00:08:49 | 20,916,678 | 1 | 0 | null | 2014-07-15T06:20:22 | 2014-06-17T08:54:34 | Python | UTF-8 | Python | false | false | 7,765 | py | # modified mexican hat wavelet test.py
# spectral analysis for RADAR and WRF patterns
# NO plotting - just saving the results: LOG-response spectra for each sigma and max-LOG response numerical spectra
# pre-convolved with a gaussian filter of sigma=10
import os, shutil
import time, datetime
import pickle
import numpy as np
from scipy import signal, ndimage
import matplotlib.pyplot as plt
from armor import defaultParameters as dp
from armor import pattern
from armor import objects4 as ob
#from armor import misc as ms
dbz = pattern.DBZ
kongreywrf = ob.kongreywrf
kongreywrf.fix()
kongrey = ob.kongrey
monsoon = ob.monsoon
monsoon.list= [v for v in monsoon.list if '20120612' in v.dataTime] #fix
march2014 = ob.march2014
march2014wrf11 = ob.march2014wrf11
march2014wrf12 = ob.march2014wrf12
march2014wrf = ob.march2014wrf
march2014wrf.fix()
################################################################################
# hack
#kongrey.list = [v for v in kongrey.list if v.dataTime>="20130828.2320"]
################################################################################
# parameters
testName = "modifiedMexicanHatTest15"
sigmas = [1, 2, 4, 5, 8 ,10 ,16, 20, 32, 40, 64, 80, 128, 160, 256,]
dbzstreams = [march2014wrf]
sigmaPower=0
scaleSpacePower=0 #2014-05-14
testScriptsFolder = dp.root + 'python/armor/tests/'
sigmaPreprocessing = 10 # sigma for preprocessing, 2014-05-15
timeString = str(int(time.time()))
outputFolder = dp.root + 'labLogs/%d-%d-%d-%s/' % \
(time.localtime().tm_year, time.localtime().tm_mon, time.localtime().tm_mday, testName)
if not os.path.exists(outputFolder):
os.makedirs(outputFolder)
shutil.copyfile(testScriptsFolder+testName+".py", outputFolder+ timeString + testName+".py")
# end parameters
################################################################################
summaryFile = open(outputFolder + timeString + "summary.txt", 'a')
for ds in dbzstreams:
summaryFile.write("\n===============================================================\n\n\n")
streamMean = 0.
dbzCount = 0
#hack
#streamMean = np.array([135992.57472004235, 47133.59049120619, 16685.039217734946, 11814.043851969862, 5621.567482638702, 3943.2774923729303, 1920.246102887001, 1399.7855335686243, 760.055614122099, 575.3654495432361, 322.26668666562375, 243.49842951291757, 120.54647935045809, 79.05741086463254, 26.38971066782135])
#dbzCount = 140
for a in ds:
print "-------------------------------------------------"
print testName
print
print a.name
a.load()
a.setThreshold(0)
a.saveImage(imagePath=outputFolder+a.name+".png")
L = []
a.responseImages = [] #2014-05-02
#for sigma in [1, 2, 4, 8 ,16, 32, 64, 128, 256, 512]:
for sigma in sigmas:
print "sigma:", sigma
a.load()
a.setThreshold(0)
arr0 = a.matrix
#####################################################################
arr0 = ndimage.filters(arr0, sigma=sigmaPreprocessing) # <-- 2014-05-15
#####################################################################
#arr1 = signal.convolve2d(arr0, mask_i, mode='same', boundary='fill')
#arr1 = ndimage.filters.gaussian_laplace(arr0, sigma=sigma, mode="constant", cval=0.0) #2014-05-07
#arr1 = ndimage.filters.gaussian_laplace(arr0, sigma=sigma, mode="constant", cval=0.0) * sigma**2 #2014-04-29
arr1 = ndimage.filters.gaussian_laplace(arr0, sigma=sigma, mode="constant", cval=0.0) * sigma**scaleSpacePower #2014-05-14
a1 = dbz(matrix=arr1.real, name=a.name + "_" + testName + "_sigma" + str(sigma))
L.append({ 'sigma' : sigma,
'a1' : a1,
'abssum1': abs(a1.matrix).sum(),
'sum1' : a1.matrix.sum(),
})
print "abs sum", abs(a1.matrix.sum())
#a1.show()
#a2.show()
plt.close()
#a1.histogram(display=False, outputPath=outputFolder+a1.name+"_histogram.png")
###############################################################################
# computing the spectrum, i.e. sigma for which the LOG has max response
# 2014-05-02
a.responseImages.append({'sigma' : sigma,
'matrix' : arr1 * sigma**2,
})
pickle.dump(a.responseImages, open(outputFolder+a.name+"responseImagesList.pydump",'w'))
a_LOGspec = dbz(name= a.name + "Laplacian-of-Gaussian_numerical_spectrum",
imagePath=outputFolder+a1.name+"_LOGspec.png",
outputPath = outputFolder+a1.name+"_LOGspec.dat",
cmap = 'jet',
)
a.responseImages = np.dstack([v['matrix'] for v in a.responseImages])
#print 'shape:', a.responseImages.shape #debug
a.responseMax = a.responseImages.max(axis=2) # the deepest dimension
a_LOGspec.matrix = np.zeros(a.matrix.shape)
for count, sigma in enumerate(sigmas):
a_LOGspec.matrix += sigma * (a.responseMax == a.responseImages[:,:,count])
a_LOGspec.vmin = a_LOGspec.matrix.min()
a_LOGspec.vmax = a_LOGspec.matrix.max()
print "saving to:", a_LOGspec.imagePath
#a_LOGspec.saveImage()
print a_LOGspec.outputPath
#a_LOGspec.saveMatrix()
#a_LOGspec.histogram(display=False, outputPath=outputFolder+a1.name+"_LOGspec_histogram.png")
pickle.dump(a_LOGspec, open(outputFolder+ a_LOGspec.name + ".pydump","w"))
# end computing the sigma for which the LOG has max response
# 2014-05-02
##############################################################################
#pickle.dump(L, open(outputFolder+ a.name +'_test_results.pydump','w')) # no need to dump if test is easy
sigmas = np.array([v['sigma'] for v in L])
y1 = [v['abssum1'] for v in L]
plt.close()
plt.plot(sigmas,y1)
plt.title(a1.name+ '\n absolute values against sigma')
plt.savefig(outputFolder+a1.name+"-spectrum-histogram.png")
plt.close()
# now update the mean
streamMeanUpdate = np.array([v['abssum1'] for v in L])
dbzCount += 1
streamMean = 1.* ((streamMean*(dbzCount -1)) + streamMeanUpdate ) / dbzCount
print "Stream Count and Mean so far:", dbzCount, streamMean
# now save the mean and the plot
summaryText = '\n---------------------------------------\n'
summaryText += str(int(time.time())) + '\n'
summaryText += "dbzStream Name: " + ds.name + '\n'
summaryText += "dbzCount:\t" + str(dbzCount) + '\n'
summaryText +="sigma=\t\t" + str(sigmas.tolist()) + '\n'
summaryText += "streamMean=\t" + str(streamMean.tolist()) +'\n'
print summaryText
print "saving..."
# release the memory
a.matrix = np.array([0])
summaryFile.write(summaryText)
plt.close()
plt.plot(sigmas, streamMean* (sigmas**sigmaPower))
plt.title(ds.name + '- average laplacian-of-gaussian numerical spectrum\n' +\
'for ' +str(dbzCount) + ' DBZ patterns\n' +\
'suppressed by a factor of sigma^' + str(sigmaPower) )
plt.savefig(outputFolder + ds.name + "_average_LoG_numerical_spectrum.png")
plt.close()
summaryFile.close()
| [
"yaukwankiu@gmail.com"
] | yaukwankiu@gmail.com |
87688702886fce9b13c846c38449557febb2ac55 | 62980971f2742c0a1d541326b8eba555fe26f15c | /core/interviewquest/array_rotation.py | 4b1e6dab086730e040f46e1de0452c2f41790800 | [] | no_license | gsaukov/python-machine | 343a874b27b08f33bf8184ff7b66b3f1e43f79c3 | 2b814608d1e45a1cdff546726f71472943397360 | refs/heads/master | 2021-07-24T10:27:26.699455 | 2020-05-22T15:55:25 | 2020-05-22T15:55:25 | 176,815,098 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py |
def arr_rotation(arr1, arr2):
if len(arr1) != len(arr2):
return False
head = []
tail = []
move = 0
for n in arr1:
if n == arr2[move]:
tail.append(n)
move += 1
else:
head.append(n)
tail.extend(head)
return tail == arr2
print(arr_rotation([1, 2, 3, 4, 5, 6, 7], [3, 4, 5, 6, 7, 1, 2]))
print(arr_rotation([1, 2, 3, 4, 5, 6, 7], [3, 4, 5, 6, 7, 1, 8]))
print(arr_rotation([5, 6, 7, 1, 2, 3, 4], [3, 4, 5, 6, 7, 1, 2]))
print(arr_rotation([7, 1, 2, 3, 4, 5, 6], [3, 4, 5, 6, 7, 1, 2]))
print(arr_rotation([7, 1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6, 7]))
print(arr_rotation([2, 3, 4, 5, 6, 7, 1], [1, 2, 3, 4, 5, 6, 7]))
def rotation(list1, list2):
if len(list1) != len(list2):
return 'Arrays are not even'
key = list1[0]
key_index = 0
for i in range(len(list2)):
if list2[i] == key:
key_index = i
break
if key_index == 0:
return False
for x in range(len(list1)):
l2index = (key_index + x) % len(list1)
if list1[x] != list2[l2index]:
return False
return True
print('----------------------------------------')
print(rotation([1,2,3,4,5,6,7], [3,4,5,6,7,1,2]))
print(rotation([1,2,3,4,5,6,7], [3,4,5,6,7,1,8]))
print(rotation([5,6,7,1,2,3,4], [3,4,5,6,7,1,2]))
print(rotation([7,1,2,3,4,5,6], [3,4,5,6,7,1,2]))
print(rotation([7,1,2,3,4,5,6], [1,2,3,4,5,6,7]))
print(rotation([2,3,4,5,6,7,1], [1,2,3,4,5,6,7]))
| [
"georgy.saukov@wirecard.com"
] | georgy.saukov@wirecard.com |
6b30fe3caac3fffcc402a8552c72efc350f09b96 | ccdeae68e468ad399a89181c37bba4490bcdc259 | /scripts/bestExpressions_L_TOP26_WM_LASSO_1.py | a7b9e93f86d34a509b96ab60a1ac2df818dabe1d | [] | no_license | jameshughes89/NonlinearModelsFMRI-2 | 19262d4494aa6adc0e9bd9592069ad6b757dda6b | a507a41d0a0a728d02616023aea0e66fafc1c387 | refs/heads/master | 2021-09-06T17:05:38.086733 | 2018-02-07T15:19:23 | 2018-02-07T15:19:23 | 109,417,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,587 | py | from math import *
def funcL_WM_100307(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -3.09574729849e-13 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0897641196145 * v4 + 0.0 * v5 + 0.0 * v7 + -0.0 * v8 + 0.0 * v9 + 0.0 * v11 + 0.0 * v12 + 0.0961547221197 * v13 + 0.0 * v14 + 0.196939244764 * v15 + 0.0769394752556 * v16 + 0.344392610866 * v17 + 0.0 * v18 + 0.0814563743731 * v19 + 0.0 * v20 + 0.0 * v21 + 0.0735098800637 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v27 + 0.0 * v28
def funcL_WM_100408(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -6.27662838751e-14 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v4 + 0.170481233495 * v5 + 0.121231367064 * v7 + 0.0 * v8 + 0.0 * v9 + 0.0 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0 * v15 + 0.000870619700537 * v16 + 0.226194422979 * v17 + 0.0 * v18 + 0.0 * v19 + 0.080978384483 * v20 + 0.146662515218 * v21 + 0.113010043781 * v22 + 0.0 * v23 + 0.0997859210423 * v24 + 0.0316586494501 * v25 + 0.0 * v27 + 0.0706717429605 * v28
def funcL_WM_101006(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 1.17470316183e-13 * 1 + -0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.186185804365 * v5 + -0.0 * v8 + 0.0625300451781 * v9 + 0.0 * v10 + -0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.1529647217 * v15 + 0.224851281639 * v16 + 0.0 * v17 + 0.0 * v18 + 0.222459750568 * v19 + 0.0 * v20 + 0.0 * v21 + 0.0 * v22 + 0.0 * v24 + 0.000214344441237 * v25 + 0.0 * v26 + 0.0 * v28
def funcL_WM_101107(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 9.43327671106e-14 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0206707862075 * v4 + -0.0 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + -0.0 * v9 + 0.0 * v10 + -0.0 * v11 + 0.0 * v13 + 0.0 * v14 + 0.0 * v15 + 0.0 * v16 + 0.249551371124 * v17 + 0.0934527718085 * v18 + 0.165709120823 * v20 + 0.0 * v21 + 0.363189982138 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0 * v26 + 0.0 * v27 + -0.0 * v28
def funcL_WM_101309(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -2.26781198095e-13 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0523427442996 * v4 + 0.0960075086689 * v5 + 0.00889677468049 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0 * v9 + 0.0 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0 * v17 + 0.0 * v18 + 0.145064432903 * v20 + 0.118383233007 * v21 + 0.0 * v22 + 0.0 * v24 + 0.253351212958 * v25 + 0.0 * v26 + 0.239639776793 * v27 + 0.0191803001548 * v28
def funcL_WM_101410(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 3.00238111698e-14 * 1 + 0.0 * v0 + 0.00745145383058 * v1 + -0.0 * v2 + 0.0 * v4 + 0.146560337568 * v5 + 0.0 * v7 + -0.0 * v8 + 0.0 * v9 + 0.125629017072 * v10 + 0.0 * v11 + -0.0 * v12 + 0.0 * v13 + 0.0658179570303 * v15 + 0.0 * v16 + 0.243234636022 * v17 + 0.0305085552523 * v18 + 0.0 * v19 + 0.0 * v20 + 0.0 * v21 + 0.0785959483455 * v22 + 0.246164864309 * v23 + -0.0 * v24 + 0.00777364636323 * v25 + 0.0 * v27 + 0.0 * v28
def funcL_WM_101915(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -1.6535109487e-13 * 1 + 0.0 * v0 + 0.181249062103 * v1 + 0.0 * v2 + 0.0 * v4 + 0.067232487182 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0547543886838 * v15 + 0.0 * v17 + 0.0 * v18 + 0.15007548187 * v20 + 0.30736940405 * v21 + 0.157690721709 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0 * v26 + 0.0 * v27 + 0.00642298489153 * v28
def funcL_WM_102008(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -6.90771430695e-14 * 1 + 0.0 * v0 + 0.0420846960343 * v1 + 0.429353415755 * v2 + 0.0 * v3 + 0.0 * v5 + 0.0 * v6 + 0.0423139619633 * v7 + -0.0 * v8 + 0.0 * v10 + 0.0 * v11 + -0.0 * v12 + 0.0 * v13 + 0.0 * v15 + 0.0 * v16 + 0.0141188113612 * v17 + 0.0 * v18 + 0.0 * v19 + 0.287172076954 * v20 + 0.112493872227 * v21 + 0.0 * v22 + 0.0 * v23 + 0.0 * v25 + -0.0 * v26 + 0.0 * v27 + 0.0 * v28
def funcL_WM_102311(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 1.23705311249e-13 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.21646178955 * v5 + 0.0 * v7 + 0.0 * v8 + 0.0783034733505 * v9 + 0.0 * v10 + 0.0859870374143 * v11 + 0.0 * v12 + 0.0 * v13 + 0.155469912559 * v15 + 0.0 * v16 + 0.0769217791098 * v17 + 0.0 * v18 + 0.0487138153117 * v20 + 0.20481346756 * v21 + 0.0762311375244 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0 * v27 + 0.0 * v28
def funcL_WM_102816(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -1.27640540216e-13 * 1 + 0.0 * v0 + 0.00217164824841 * v1 + 0.0 * v2 + 0.0 * v3 + 0.221921091481 * v4 + 0.0 * v5 + 0.0 * v6 + 0.0736713034579 * v7 + 0.0413899649829 * v8 + 0.0 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v15 + 0.0141698068682 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0468814411257 * v20 + 0.325253219436 * v21 + 0.168722747997 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0402709493746 * v27 + 0.0 * v28
def funcL_WM_103111(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 6.39600296536e-14 * 1 + 0.0282317035808 * v0 + 0.0914005296067 * v1 + 0.0527335660881 * v2 + 0.0 * v3 + 0.0 * v4 + 0.146392178976 * v5 + 0.0 * v7 + 0.0 * v8 + 0.0 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v14 + 0.0 * v15 + 0.0699834737897 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0440351738491 * v19 + 0.0 * v20 + 0.230447449872 * v21 + 0.226321914682 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0 * v27 + 0.0379824849654 * v28
def funcL_WM_103414(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 2.23439031746e-13 * 1 + 0.13338270754 * v1 + 0.0135930226624 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0179463714468 * v5 + 0.0 * v6 + 0.080344455294 * v7 + 0.0 * v8 + 0.0 * v9 + 0.0 * v10 + 0.0907503219549 * v11 + 0.0 * v12 + 0.0 * v14 + 0.0233692891605 * v15 + 0.0 * v16 + 0.0365782808089 * v17 + 0.0 * v18 + 0.0855375365364 * v19 + 0.184270293584 * v20 + 0.132730321028 * v21 + 0.0739064512502 * v22 + 0.0581208178043 * v23 + 0.0651312823592 * v25 + 0.0 * v27 + 0.0 * v28
def funcL_WM_103515(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 1.16479046243e-14 * 1 + -0.0 * v0 + -0.0 * v1 + 0.0 * v2 + 0.249670977437 * v3 + 0.0 * v4 + 0.0 * v5 + -0.0 * v7 + 0.0 * v9 + 0.0243305758584 * v10 + 0.0 * v11 + -0.244962276674 * v12 + -0.0 * v13 + -0.0 * v14 + -0.0 * v15 + -0.0 * v16 + 0.547896859324 * v17 + 0.0 * v19 + 0.172197659282 * v20 + -0.0 * v21 + 0.0 * v22 + -0.0 * v23 + 0.0 * v24 + 0.0 * v25 + -0.0 * v26 + -0.0 * v28
def funcL_WM_103818(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 2.1976762151e-14 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + -0.0 * v3 + 0.00764386428837 * v4 + 0.332648997162 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0 * v9 + -0.0 * v11 + 0.0 * v12 + -0.0 * v14 + 0.28853360203 * v15 + -0.0 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0 * v19 + 0.135841202246 * v20 + 0.0393043158909 * v21 + 0.0530095356938 * v22 + 0.0 * v24 + 0.106735713624 * v25 + 0.0 * v26 + 0.0 * v27
def funcL_WM_104012(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 2.76313110393e-14 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0536716379656 * v5 + 0.0 * v7 + 0.0 * v8 + 0.180056775785 * v9 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.458004837835 * v15 + 0.0 * v16 + 0.0615969946761 * v17 + 0.0 * v18 + 0.0 * v19 + 0.0 * v20 + 0.00551170290585 * v21 + 0.0 * v22 + 0.0 * v23 + 0.0 * v24 + 0.115441787104 * v25 + 0.0 * v27 + 0.0 * v28
def funcL_WM_104820(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -4.59518146726e-13 * 1 + 0.0974344271507 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0 * v5 + 0.0 * v6 + 0.103758415396 * v7 + 0.0 * v8 + 0.0693871347721 * v9 + 0.0947608986232 * v10 + 0.0385364104584 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0 * v16 + 0.0 * v19 + 0.0493851991676 * v20 + 0.105536728482 * v21 + 0.165747690084 * v22 + 0.0409265492022 * v23 + 0.0454752403263 * v24 + 0.183402491219 * v25 + 0.0 * v26 + 0.0 * v27 + 0.049632895862 * v28
def funcL_WM_105014(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -1.97312315687e-14 * 1 + 0.0 * v0 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0 * v5 + -0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0 * v9 + 0.0 * v10 + 0.0 * v11 + -0.0 * v12 + -0.0 * v13 + 0.0932171550171 * v15 + 0.305861386466 * v16 + 0.0 * v17 + 0.0348896144543 * v19 + 0.275714784198 * v21 + 0.179513357404 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + -0.0 * v26 + -0.0 * v27 + 0.12303530295 * v28
def funcL_WM_105115(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 1.35543073911e-13 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0402271361033 * v5 + 0.0 * v6 + 0.108326620231 * v7 + 0.0 * v8 + 0.275859786861 * v9 + 0.0 * v10 + 0.0 * v11 + 0.0282262417893 * v12 + 0.0 * v13 + 0.119795238089 * v15 + 0.0 * v16 + 0.00629639184716 * v17 + 0.0 * v18 + 0.213426057168 * v21 + 0.0 * v22 + 0.0637131560992 * v23 + 0.0347157608695 * v24 + 0.0639936158033 * v25 + 0.0 * v27 + 0.0 * v28
def funcL_WM_105216(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -7.75903703346e-14 * 1 + 0.0 * v0 + 0.075535310574 * v1 + -0.0 * v2 + 0.0 * v4 + 0.145946072197 * v5 + 0.164246679434 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0 * v9 + 0.183599394721 * v11 + -0.0 * v12 + -0.0 * v13 + 0.0 * v14 + 0.0 * v16 + -0.0 * v17 + 0.0 * v18 + 0.0 * v19 + 0.0 * v20 + 0.147876721668 * v21 + 0.0 * v22 + 0.195368587692 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0 * v27 + 0.00821036955314 * v28
def funcL_WM_105923(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -7.33605602247e-14 * 1 + 0.0 * v0 + 0.0349669645688 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0752104590769 * v5 + 0.0 * v7 + 0.0 * v8 + 0.110557487059 * v9 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0 * v15 + 0.0 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0 * v19 + 0.0795082348141 * v20 + 0.365235181142 * v21 + 0.120697280052 * v22 + 0.0 * v23 + 0.131754346553 * v25 + 0.0 * v27 + 0.0169544656609 * v28
def funcL_WM_106016(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -3.18366392262e-14 * 1 + 0.0 * v0 + 0.0663111226123 * v2 + 0.0 * v3 + 0.0 * v4 + 0.10278247806 * v5 + 0.0 * v7 + 0.0 * v8 + 0.0256708621639 * v9 + 0.0 * v10 + 0.0877778898898 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0 * v15 + 0.0 * v16 + 0.169356972353 * v17 + 0.0 * v18 + 0.0 * v19 + 0.130182732374 * v20 + 0.0121056730249 * v21 + 0.0511597292502 * v22 + 0.0 * v23 + 0.0130261780452 * v24 + 0.0417676040925 * v25 + 0.300229383962 * v28
def funcL_WM_106319(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -6.49133469297e-14 * 1 + 0.122953375484 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0838423798382 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0917216107252 * v9 + 0.0 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.148504078333 * v15 + 0.0 * v17 + 0.0 * v18 + 0.137835578391 * v19 + 0.288345925862 * v20 + 0.0549643056839 * v21 + 0.0 * v22 + 0.0 * v23 + 0.0 * v25 + 0.0 * v26 + 0.0 * v27 + 0.0 * v28
def funcL_WM_106521(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 1.2585820309e-13 * 1 + 0.0590449116979 * v1 + 0.0 * v2 + 0.10406216207 * v4 + 0.0961311936793 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0437762360771 * v9 + 0.0 * v10 + 0.189289804632 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v15 + 0.0 * v16 + 0.0 * v17 + 0.0 * v18 + 0.16614709374 * v20 + 0.170037598777 * v21 + 0.150424556547 * v22 + 0.0106102829209 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0 * v26 + 0.0 * v27 + 0.0 * v28
def funcL_WM_107321(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -3.21755927994e-15 * 1 + 0.0 * v0 + 0.0 * v1 + -0.0 * v2 + -0.0 * v3 + 0.0 * v4 + 0.0 * v5 + -0.0 * v6 + -0.0 * v7 + 0.0632582949122 * v8 + -0.0 * v9 + 0.0 * v10 + 0.0189756233606 * v11 + -0.0 * v12 + -0.0 * v13 + 0.0 * v15 + 0.0 * v16 + 0.253214365267 * v17 + -0.0 * v18 + 0.0 * v19 + 0.0228953021471 * v20 + 0.0 * v21 + 0.562931125094 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v28
def funcL_WM_107422(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -5.59947611145e-14 * 1 + 0.0 * v0 + 0.21993107236 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0189483723719 * v4 + 0.0 * v5 + 0.0 * v7 + 0.0 * v8 + 0.0 * v9 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0325708423151 * v15 + 0.0 * v16 + 0.226888461711 * v17 + 0.0 * v18 + 0.0 * v19 + 0.00946862836848 * v20 + 0.0184402799475 * v21 + 0.105470112372 * v22 + 0.21369921147 * v23 + 0.0 * v24 + 0.0 * v27 + 0.0435220234836 * v28
def funcL_WM_108121(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 7.23923939812e-14 * 1 + 0.0 * v0 + 0.0316091560521 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0379299395791 * v4 + 0.284128068061 * v5 + 0.199192575007 * v6 + 0.0 * v7 + 0.0 * v8 + -0.0 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v15 + 0.0 * v16 + 0.0 * v17 + 0.0 * v18 + 0.126017053707 * v19 + 0.0964234849031 * v20 + 0.15624966013 * v21 + 0.0 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0236640411651 * v25 + 0.0 * v27 + 0.0467761797744 * v28
def funcL_WM_108323(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 2.49512451667e-13 * 1 + 0.0 * v0 + 0.0330147521331 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0101640395469 * v4 + 0.0 * v5 + 0.0 * v7 + 0.0 * v8 + 0.253213549329 * v9 + 0.0 * v10 + 0.0489321947874 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0253797309493 * v15 + 0.0384743177634 * v16 + 0.0508230363631 * v17 + 0.0 * v18 + 0.0 * v19 + 0.0 * v20 + 0.221295607782 * v21 + 0.0408801259459 * v22 + 0.0386342284653 * v23 + 0.0 * v25 + 0.0 * v27 + 0.269571091096 * v28
def funcL_WM_108525(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -3.85691746603e-14 * 1 + 0.0 * v0 + 0.0329591645677 * v1 + 0.0 * v2 + 0.0 * v3 + 0.00197283453879 * v4 + 0.247594000944 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0 * v9 + 0.0 * v12 + 0.0 * v14 + 0.0 * v15 + 0.0 * v17 + 0.0 * v18 + 0.0 * v19 + 0.130095933808 * v20 + 0.237188777869 * v21 + 0.0 * v22 + 0.0 * v23 + 0.0 * v24 + 0.185542857473 * v25 + 0.0 * v26 + 0.0 * v27 + 0.0961776603019 * v28
def funcL_WM_108828(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -2.38826340961e-13 * 1 + 0.122514517531 * v1 + 0.0 * v2 + 0.0 * v3 + 0.122985891352 * v4 + 0.147732440831 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.113647211708 * v9 + 0.0 * v11 + 0.0 * v12 + 0.0321437842397 * v13 + 0.0 * v15 + 0.028222161484 * v16 + 0.00578554086157 * v17 + 0.0 * v18 + 0.0 * v19 + 0.0 * v20 + 0.263110243492 * v21 + 0.0752460504744 * v22 + 0.0 * v23 + 0.0 * v25 + 0.0 * v26 + 0.0524828073302 * v27 + 0.0 * v28
def funcL_WM_109123(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 7.11389851012e-14 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0 * v5 + 0.0 * v7 + 0.0 * v8 + 0.0259507242811 * v9 + 0.0 * v10 + 0.243535691374 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0882816680672 * v15 + 0.0 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0204199331955 * v19 + 0.0 * v20 + 0.235175718291 * v21 + 0.172827941001 * v22 + 0.0 * v23 + 0.0 * v25 + 0.141557993669 * v28
def funcL_WM_109325(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 4.38242998377e-13 * 1 + 0.0342800192939 * v0 + 0.0 * v1 + 0.0 * v3 + 0.0982808833235 * v4 + 0.0 * v5 + 0.0 * v7 + 0.0 * v8 + 0.0541006444817 * v9 + 0.0 * v10 + 0.00589742221588 * v11 + 0.0 * v12 + 0.0226716549101 * v13 + 0.0 * v15 + 0.00914969288889 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0 * v19 + 0.03600852689 * v20 + 0.443192235401 * v21 + 0.15416747145 * v22 + 0.110331624343 * v24 + 0.0 * v25 + 0.0 * v26 + 0.0 * v27 + 0.0 * v28
def funcL_WM_110411(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -1.39819077349e-13 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0495735582553 * v2 + 0.0 * v3 + 0.0428023892802 * v4 + 0.0 * v5 + 0.256885780849 * v7 + 0.0 * v8 + 0.0 * v9 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0162470146724 * v14 + 0.0 * v15 + 0.0 * v17 + 0.0 * v18 + 0.0 * v19 + 0.105637286003 * v20 + 0.311100247341 * v21 + 0.150403368082 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0 * v27 + 0.0 * v28
def funcL_WM_111312(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 2.68040950573e-14 * 1 + 0.0 * v0 + 0.0600805449007 * v1 + 0.0194090243591 * v2 + 0.0 * v3 + 0.0 * v4 + 0.0 * v5 + 0.0 * v8 + 0.214081894394 * v9 + 0.0 * v10 + 0.0351554554672 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0 * v15 + 0.026362785539 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0 * v19 + 0.0 * v20 + 0.237131722238 * v21 + 0.226118181816 * v22 + 0.0 * v24 + 0.136073746448 * v25 + 0.0 * v27 + 0.0 * v28
def funcL_WM_111413(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 7.16639937389e-14 * 1 + -0.0 * v0 + 0.0 * v1 + -0.0 * v2 + -0.0 * v3 + 0.0 * v4 + 0.0 * v5 + -0.262889530611 * v6 + 0.0 * v7 + -0.0 * v8 + 0.0 * v9 + -0.0 * v10 + -0.0 * v13 + 0.0 * v14 + 0.0200643214971 * v15 + -0.0895040126474 * v16 + 0.0 * v17 + -0.0 * v18 + 0.247299878599 * v20 + 0.0595791181758 * v21 + 0.300951491234 * v22 + -0.0 * v23 + -0.0 * v24 + 0.0 * v26 + 0.0 * v27 + 0.0 * v28
def funcL_WM_111514(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 6.05989125703e-14 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v4 + 0.237648008034 * v5 + 0.0 * v6 + 0.0919937336656 * v7 + 0.120190657794 * v9 + 0.0 * v10 + 0.0 * v11 + 0.0 * v13 + 0.0 * v14 + 0.0 * v15 + 0.0 * v17 + 0.0 * v18 + 0.0112772072631 * v19 + 0.158742275228 * v20 + 0.0407088181441 * v21 + 0.291770031132 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + 0.0 * v26 + 0.0 * v27 + 0.0 * v28
def funcL_WM_111716(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 1.01524013079e-13 * 1 + 0.305023135846 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + -0.0 * v5 + 0.299307045886 * v6 + 0.0 * v8 + 0.0 * v9 + 0.0 * v10 + -0.0 * v11 + -0.0 * v14 + -0.173495746744 * v15 + 0.0 * v16 + 0.24742679182 * v17 + -0.0 * v18 + 0.0 * v19 + -0.0 * v20 + 0.0 * v21 + 0.185805008936 * v22 + 0.0 * v23 + 0.0 * v24 + 0.0 * v25 + -0.0 * v27 + 0.146258574159 * v28
def funcL_WM_113215(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -8.73717812898e-14 * 1 + 0.0303828681139 * v0 + 0.0136229365316 * v1 + 0.0 * v2 + 0.112813822255 * v3 + 0.0489868522717 * v4 + 0.0 * v5 + 0.0 * v7 + 0.0 * v8 + 0.0 * v9 + 0.0240474669251 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v15 + 0.0542592598896 * v16 + 0.0 * v17 + 0.0 * v18 + 0.16409794668 * v20 + 0.377026593003 * v21 + 0.0 * v22 + 0.0 * v23 + 0.0 * v24 + 0.025711725253 * v25 + 0.0 * v27 + 0.170556218897 * v28
def funcL_WM_113619(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 4.72188905638e-14 * 1 + 0.132091733118 * v1 + 0.0 * v2 + 0.0 * v3 + 0.0 * v4 + 0.29991000266 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.0354096067876 * v9 + 0.0 * v10 + 0.0 * v11 + 0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.0 * v15 + 0.0433511569709 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0932961724683 * v19 + 0.0 * v20 + 0.0549734630224 * v21 + 0.208817044814 * v22 + 0.0189850330395 * v25 + 0.0306566332134 * v27 + 0.0505106243963 * v28
def funcL_WM_113922(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return -2.12748381379e-13 * 1 + 0.00575011322871 * v0 + 0.129489825793 * v1 + 0.0 * v2 + 0.0 * v4 + 0.0 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.181731657864 * v9 + 0.00621074590425 * v11 + -0.0 * v12 + 0.0 * v13 + 0.0 * v14 + 0.246445837984 * v15 + 0.0 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0 * v19 + 0.0593708402951 * v20 + 0.219860367134 * v21 + 0.0 * v24 + 0.0 * v25 + 0.0 * v26 + 0.0 * v27 + 0.06680548719 * v28
def funcL_WM_114419(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29): return 1.25244153069e-13 * 1 + 0.0 * v0 + 0.0 * v1 + 0.0 * v2 + 0.0 * v3 + 0.234645054449 * v4 + 0.0 * v5 + 0.0 * v6 + 0.0 * v7 + 0.0 * v8 + 0.15936042648 * v9 + 0.0 * v11 + 0.0 * v13 + 0.0 * v14 + 0.0 * v15 + 0.00369367704254 * v16 + 0.0 * v17 + 0.0 * v18 + 0.0537063490266 * v19 + 0.0 * v20 + 0.287635247731 * v21 + 0.121291245414 * v22 + 0.0 * v25 + 0.0886786936407 * v26 + 0.0 * v27 + 0.0451721400509 * v28
funcs = [funcL_WM_100307,funcL_WM_100408,funcL_WM_101006,funcL_WM_101107,funcL_WM_101309,funcL_WM_101410,funcL_WM_101915,funcL_WM_102008,funcL_WM_102311,funcL_WM_102816,funcL_WM_103111,funcL_WM_103414,funcL_WM_103515,funcL_WM_103818,funcL_WM_104012,funcL_WM_104820,funcL_WM_105014,funcL_WM_105115,funcL_WM_105216,funcL_WM_105923,funcL_WM_106016,funcL_WM_106319,funcL_WM_106521,funcL_WM_107321,funcL_WM_107422,funcL_WM_108121,funcL_WM_108323,funcL_WM_108525,funcL_WM_108828,funcL_WM_109123,funcL_WM_109325,funcL_WM_110411,funcL_WM_111312,funcL_WM_111413,funcL_WM_111514,funcL_WM_111716,funcL_WM_113215,funcL_WM_113619,funcL_WM_113922,funcL_WM_114419,]
def getFuncs():
    """Return the module-level list of fitted per-subject linear model functions."""
    return funcs
| [
"JamesHughes89@Gmail.com"
] | JamesHughes89@Gmail.com |
e9e5e6e687c30ced5b9a3832b25c4729706fe8eb | ba7c2e1d5e90ac8f5e5bc3d9791230f3c53ce418 | /WaterSaver/settings.py | 790c4c418443ed3e7c0bc1f74b702a1ea105faea | [] | no_license | gurus848/WaterSaver-DjangoServer | 3c05c66f7c29b347fdfad3385dbd997c2d48ba0d | 8c66160eb12569190ecd7b27c2cb2c2d3664e211 | refs/heads/master | 2021-06-26T00:17:27.769243 | 2017-09-10T08:35:47 | 2017-09-10T08:35:47 | 103,015,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,337 | py | """
Django settings for WaterSaver project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gotcha'  # NOTE(review): hard-coded secret committed to source — load from an environment variable before any real deployment
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True  # NOTE(review): DEBUG=True leaks stack traces and settings; must be False outside local development
ALLOWED_HOSTS = ['*']  # NOTE(review): wildcard disables Django's Host-header validation
# Application definition
INSTALLED_APPS = [
'WaterSaver.apps.WaterSaverConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'WaterSaver.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'WaterSaver.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    },
    # Secondary MySQL database (on the Raspberry Pi host).
    'raspi': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'WS_Duplicate',
        'USER': 'guru',
        'PASSWORD': 'continental787',  # NOTE(review): plaintext credential committed to source — move to env/secret store and rotate
        'HOST': 'localhost',
        'PORT': ''
    }
}
# Routes ORM reads/writes between 'default' and 'raspi' via the custom router class.
DATABASE_ROUTERS = ['WaterSaver.routers.WatersaverDatabaseRouter']
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"gurus848@gmail.com"
] | gurus848@gmail.com |
ff1897f0e2b6d877bc8c04dcf518687c6a2f8c8b | 293c68c6774239f3dec355199187992ecd9f8ca8 | /Cloud Computing 2017/Spark/Question-2.py | 25c2c9e8cf2eb92cd92d189bd3cdea4b0ad62569 | [] | no_license | Sohone-Guo/The-Unversity-of-Sydney | 8ff1b3041a72e6020f44b0df59175bc7aefa6d75 | fec72310cb09db3a3fae1d575a037bd7d32cc92f | refs/heads/master | 2020-04-08T18:12:25.693092 | 2018-11-29T02:49:08 | 2018-11-29T02:49:08 | 159,597,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,185 | py | '''
spark-submit --num-executors 3 q2.py
'''
from pyspark import SparkContext
import numpy as np
import math
def measure_filter(record):
    """Keep only valid measurement rows.

    Drops the CSV header row (recognised by the literal 'FSC-A' in the second
    column) and any row whose FSC or SSC value falls outside [0, 150000].
    """
    fields = record.strip().split(',')
    fsc, ssc = fields[1], fields[2]
    if fsc == 'FSC-A':  # header row, not a measurement
        return False
    return 0 <= int(fsc) <= 150000 and 0 <= int(ssc) <= 150000
def extract_measurement_function(record):
    """Parse a CSV measurement row into (sample, (Ly6C, CD11b, SCA1)).

    Columns of interest: 0 = sample id, 11 = Ly6C, 7 = CD11b, 6 = SCA1.
    Values are returned as the raw strings from the row.
    """
    fields = record.strip().split(',')
    return (fields[0], (fields[11], fields[7], fields[6]))
def cluster_function(record):
    """Assign a measurement to its nearest broadcast cluster center.

    record: (sample, (Ly6C, CD11b, SCA1)) with string-valued measurements.
    Returns (cluster_index, (Ly6C, CD11b, SCA1)) with float values, where
    cluster_index is the 0-based argmin of the squared Euclidean distance
    to the centers in `broad_cluster_center`.
    """
    center = broad_cluster_center.value
    data = [float(record[1][0]), float(record[1][1]), float(record[1][2])]
    value = []
    for c in center:
        # Squared Euclidean distance. Bug fix: the third term previously used
        # a product (c[2]*data[2])**2 instead of the squared difference.
        value.append((c[0] - data[0]) ** 2 + (c[1] - data[1]) ** 2 + (c[2] - data[2]) ** 2)
    cluster_number = value.index(min(value))
    return (cluster_number, (data[0], data[1], data[2]))
def map_result(record):
    """Map a measurement to (1-based cluster index, 1) for per-cluster counting.

    Same nearest-center computation as cluster_function, but returns a
    1-based index paired with a unit count so the caller can reduceByKey
    to obtain cluster sizes.
    """
    center = broad_cluster_center.value
    data = [float(record[1][0]), float(record[1][1]), float(record[1][2])]
    value = []
    for c in center:
        # Squared Euclidean distance. Bug fix: the third term previously used
        # a product (c[2]*data[2])**2 instead of the squared difference.
        value.append((c[0] - data[0]) ** 2 + (c[1] - data[1]) ** 2 + (c[2] - data[2]) ** 2)
    cluster_number = value.index(min(value))
    return (cluster_number + 1, 1)
if __name__ == "__main__":
    # K-means clustering of cytometry measurements on Spark:
    # read -> filter/parse -> iterate Lloyd's algorithm -> count cluster sizes -> save.
    sc = SparkContext(appName="Question 2 for assignment 2")
    ''' Read Data '''
    measurements = sc.textFile("/share/cytometry/large")
    after_filter_measurement = measurements.filter(measure_filter).map(extract_measurement_function) # with valid data ---> (sample,(Ly6C,CD11b,SCA1))
    ''' Initial the cluster center '''
    number_of_cluster = 5
    initial_cluster = np.random.rand(number_of_cluster,3) # Random generate the center
    broad_cluster_center = sc.broadcast(initial_cluster) # As broadcast
    ''' Cluster Learning '''
    learning_time = 10
    # Fixed number of Lloyd iterations: assign points to nearest center,
    # recompute each center as the mean of its members, re-broadcast.
    for num in range(learning_time): #Learning numbers
        cluster_ini = after_filter_measurement.map(cluster_function)
        new_cluster_center = cluster_ini.groupByKey().map(lambda x : (x[0], np.sum((np.asarray(list(x[1]))),axis=0)/len(np.asarray(list(x[1])))))
        data = new_cluster_center.collect()
        # Reorder collected centers by cluster id so broadcast index == cluster id.
        # NOTE(review): a cluster that received no points this round is silently
        # dropped from data_list_tp, shrinking the center list — verify intended.
        data_list_tp = []
        for i in range(number_of_cluster):
            for j in data:
                if j[0] == i:
                    data_list_tp.append(j[1])
        broad_cluster_center = sc.broadcast(data_list_tp)
    ''' Finished Learning '''
    # Count points per cluster; map_result emits 1-based cluster ids.
    new_cluster_center_result = after_filter_measurement.map(map_result).repartition(1).reduceByKey(lambda before,after: int(before)+int(after)) # Give the data a cluster number
    # NOTE(review): number_of_cluster is rebound here from an int to an RDD —
    # works, but shadows the earlier meaning of the name.
    number_of_cluster = new_cluster_center_result.map(lambda x : (x[0],x[1],np.asarray(broad_cluster_center.value)[x[0]-1])).sortBy(lambda record: int(record[0]))
    # Format: "<cluster_id>\t<count>\t<center_x>\t<center_y>\t<center_z>".
    result = number_of_cluster.map(lambda record: str(record[0])+'\t'+str(record[1])+'\t'+str(record[2][0])+'\t'+str(record[2][1])+'\t'+str(record[2][2]))
    result.repartition(1).saveAsTextFile("pyspark/q2")
| [
""
] | |
0eb8e8fed9f7fdf7bfc7d385c153922343e728db | b882fc60004324ad892edf2b93cac2b59c36dfc9 | /redturtle/sqlcontents/config.py | c1b91bdf3c73afa55f2f698b14f683b5b12ce29c | [] | no_license | RedTurtle/redturtle.sqlcontents | 7c31e5e6d9d9dc23f9dddd447a2282c7e4d0a1da | 91456a493c7b6c10568962e11d1c15e2256b1c83 | refs/heads/master | 2021-01-02T22:58:20.935910 | 2013-07-29T10:10:39 | 2013-07-29T10:10:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | """Common configuration constants
"""
# Add-on package name; used as the permission prefix below.
PROJECTNAME = 'redturtle.sqlcontents'
# Plone "add content" permission ids, keyed by portal type.
ADD_PERMISSIONS = {
    # -*- extra stuff goes here -*-
    'SQLQuery': 'redturtle.sqlcontents: Add SQLQuery',
    'SQLFolder': 'redturtle.sqlcontents: Add SQLFolder',
    }
| [
"alessandro.pisa@redturtle.it"
] | alessandro.pisa@redturtle.it |
4496d5f07d39a193ef3fbfd8710da46756d19ecc | c62dbc5715fe80e106a666a8f7a6aeb051d0b40e | /analytical_solution.py | 016425a97e584c740f05ad933c74f8b757d5a4e2 | [] | no_license | mishaukr7/MM_LAB_5 | 14ebb2c8553cfb1f1b13293e6160294fb2684a9c | 610a623d1a63ddf0c231575c2b78c4fc1bb4a454 | refs/heads/master | 2021-08-23T15:16:34.096484 | 2017-12-05T09:03:46 | 2017-12-05T09:03:46 | 113,076,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | import math
def analytical_method_find_solution_free(t0, N0, r, T):
    """Closed-form solution of the free (exponential) growth model.

    Evaluates N(t) = N0 * exp(r*(t-20)) for integer t in [t0, T] inclusive
    and returns the pair (time, N) as parallel lists.
    """
    time = list(range(t0, T + 1))
    N = [N0 * math.exp(r * (t - 20)) for t in time]
    return time, N
def analytical_method_find_solution_limited(t0, N0, r, k, T):
    """Closed-form solution of the logistic (carrying-capacity k) growth model.

    Evaluates the logistic formula for integer t in [t0, T) and returns the
    pair (time, N) as parallel lists.
    NOTE(review): unlike the free-growth variant this iterates range(t0, T),
    i.e. excludes T — confirm whether that asymmetry is intentional.
    """
    time = list(range(t0, T))
    N = []
    for t in time:
        growth = math.exp(r * (t - 20))
        N.append((k * N0 * growth) / (k + N0 * (growth - 1)))
    return time, N
| [
"mishaukr22@gmail.com"
] | mishaukr22@gmail.com |
f0e8094819090389a3da4a67e32863077e67c2a4 | 8f93ae4a6fa1a6a4330be9387009424606e39012 | /analyticResults_func.py | 698aae9ce68fe797523f5205163f41846d9abf2a | [] | no_license | RonTeichner/filtering_vs_smoothing | 541dd7bfa4da438ac53220808c803e5afa135792 | 1a4240cee890fe6b7b91f7b16ba3a21311d879bb | refs/heads/master | 2022-10-19T22:52:57.379666 | 2022-03-13T10:16:17 | 2022-03-13T10:16:17 | 248,265,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,265 | py | import numpy as np
import matplotlib.pyplot as plt
from filterpy.kalman import KalmanFilter, update, predict, batch_filter
from filterpy.common import Q_discrete_white_noise, kinematic_kf, Saver
from scipy.linalg import solve_discrete_are, solve_discrete_lyapunov
from scipy.spatial.distance import mahalanobis as scipy_mahalanobis
def calc_sigma_bar(H, R, F, Q):
    """Solve the discrete-time Riccati equation for the steady-state
    one-step-prediction error covariance of a Kalman filter by fixed-point
    iteration.

    H: measurement matrix (used as H and H.T in the quadratic forms)
    R: measurement-noise covariance (scalar or matrix)
    F: state-transition matrix (d x d)
    Q: process-noise covariance (d x d)
    Returns the converged covariance sigma_bar (d x d ndarray).
    """
    F_t = F.transpose()
    H_t = H.transpose()
    d = F.shape[0]
    sigma_bar = np.eye(d)
    nIter = 0
    factor = 1
    while True:
        nIter += 1
        if nIter == 100000:
            # Loosen the convergence tolerance if iteration has not settled.
            factor = 20000
            print('calc_sigma_bar: factor changes')
        # One Riccati step: innovation covariance, gain correction, propagation.
        a = np.matmul(H_t, np.matmul(sigma_bar, H)) + R
        a_inv = np.linalg.inv(a)
        b = np.matmul(sigma_bar, np.matmul(H, np.matmul(a_inv, np.matmul(H_t, sigma_bar))))
        sigma_bar_new = np.matmul(F, np.matmul(sigma_bar - b, F_t)) + Q
        # Convergence test on the mean elementwise ratio of successive iterates.
        sigma_bar_ratio = np.sum(np.abs(sigma_bar_new / sigma_bar)) / F.size
        sigma_bar = sigma_bar_new
        # Bug fix: np.float was removed in NumPy 1.24; use the builtin float.
        if np.abs(sigma_bar_ratio - 1) <= factor * np.finfo(float).resolution:
            break
    return sigma_bar
def calc_sigma_smoothing(sigma_bar, H, F, R):
    """Compute the steady-state fixed-interval smoothing error covariance.

    sigma_bar: steady-state prediction covariance (from calc_sigma_bar)
    H: measurement matrix
    F: state-transition matrix
    R: measurement-noise covariance
    Accumulates the geometric series over the closed-loop transition matrix
    F_tilde = F - K*H' until the resulting covariance stops changing.
    Returns sigma_j_k (d x d ndarray).
    """
    H_t = H.transpose()
    # Steady-state Kalman gain and closed-loop transition matrix.
    inv_mat = np.linalg.inv(np.matmul(H_t, np.matmul(sigma_bar, H)) + R)
    K = np.matmul(F, np.matmul(sigma_bar, np.matmul(H, inv_mat)))
    F_tilde = F - np.matmul(K, H_t)
    F_tilde_t = F_tilde.transpose()
    a = np.matmul(H_t, np.matmul(sigma_bar, H)) + R
    a_inv = np.linalg.inv(a)
    core = np.matmul(H, np.matmul(a_inv, H_t))
    # i==j:
    summand = 0
    # start with s=k-j=(100-1)
    sInitRange = 100
    for s in range(sInitRange):
        summand += np.matmul(np.linalg.matrix_power(F_tilde_t, s), np.matmul(core, np.linalg.matrix_power(F_tilde, s)))
    sigma_j_k = sigma_bar - np.matmul(sigma_bar, np.matmul(summand, sigma_bar))
    # continue while sigma_j_k changes:
    s = sInitRange
    nIter = 0
    factor = 1
    while True:
        nIter += 1
        if nIter == 100000:
            # Loosen the convergence tolerance if iteration has not settled.
            factor = 20000
            print('calc_sigma_smoothing: factor changes')
        summand += np.matmul(np.linalg.matrix_power(F_tilde_t, s), np.matmul(core, np.linalg.matrix_power(F_tilde, s)))
        sigma_j_k_new = sigma_bar - np.matmul(sigma_bar, np.matmul(summand, sigma_bar))
        sigma_j_k_ratio = np.sum(np.abs(sigma_j_k_new / sigma_j_k)) / F.size
        sigma_j_k = sigma_j_k_new
        s += 1
        # Bug fix: np.float was removed in NumPy 1.24; use the builtin float.
        if np.abs(sigma_j_k_ratio - 1) <= factor * np.finfo(float).resolution:
            break
    return sigma_j_k
def calc_analytic_values(F, H, std_process_noises, std_meas_noises, firstDimOnly):
    """Sweep a grid of process/measurement noise stds and compute steady-state
    filtering and smoothing error covariances for each pair.

    F, H: state-transition and measurement matrices.
    std_process_noises, std_meas_noises: 1-D arrays of noise std deviations.
    firstDimOnly: if True use a 1x1 Q; otherwise a 2x2 Q with noise only in
        the first component.
    Returns (deltaFS, E_filtering, E_smoothing, sigma_bar_all, sigma_j_k_all)
    indexed as [meas_noise_idx, process_noise_idx].
    """
    # Note: the unused F.transpose()/H.transpose() locals were removed.
    d = F.shape[0]
    deltaFS, E_filtering, E_smoothing = np.zeros((std_meas_noises.size, std_process_noises.size)), np.zeros((std_meas_noises.size, std_process_noises.size)), np.zeros((std_meas_noises.size, std_process_noises.size))
    sigma_bar_all, sigma_j_k_all = np.zeros((std_meas_noises.size, std_process_noises.size, d, d)), np.zeros((std_meas_noises.size, std_process_noises.size, d, d))
    i = 0  # progress counter
    for pIdx, std_process_noise in enumerate(std_process_noises):
        for mIdx, std_meas_noise in enumerate(std_meas_noises):
            i += 1
            if firstDimOnly:
                Q = np.array([[np.power(std_process_noise, 2)]])
            else:
                Q = np.array([[np.power(std_process_noise, 2), 0], [0, 0]])
            R = np.power(std_meas_noise, 2)
            sigma_bar = calc_sigma_bar(H, R, F, Q)
            sigma_j_k = calc_sigma_smoothing(sigma_bar, H, F, R)
            sigma_bar_all[mIdx, pIdx], sigma_j_k_all[mIdx, pIdx] = sigma_bar, sigma_j_k
            E_f = np.trace(sigma_bar)
            E_s = np.trace(sigma_j_k)
            # Normalized filtering-vs-smoothing error gap.
            deltaFS[mIdx, pIdx] = (E_f - E_s) / (0.5 * (E_f + E_s))
            E_filtering[mIdx, pIdx], E_smoothing[mIdx, pIdx] = E_f, E_s
            print(f'finished: {100*i/(std_process_noises.size * std_meas_noises.size)} %')
    return deltaFS, E_filtering, E_smoothing, sigma_bar_all, sigma_j_k_all
def plot_analytic_figures(std_process_noises, std_meas_noises, deltaFS, E_filtering, E_smoothing, sigma_bar_all, sigma_j_k_all, enable_db_Axis, with_respect_to_processNoise=False):
    """Contour-plot the analytic filtering/smoothing results over the noise grid.

    Produces: the normalized filtering-vs-smoothing gap, trace plots of the
    filtering and smoothing covariances (in dbm, or in db relative to the
    process-noise power when with_respect_to_processNoise is True), and
    per-component variance plots for each state dimension.
    enable_db_Axis selects dbm axes vs. linear variance axes.
    """
    d = sigma_bar_all.shape[-1]
    # Build the plotting grid: dbm axes or raw variances.
    if enable_db_Axis:
        std_process_noises_dbm = 20*np.log10(std_process_noises) + 30 #/std_process_noises[0])
        std_meas_noises_dbm = 20*np.log10(std_meas_noises) + 30 #/std_meas_noises[0])
        X, Y = np.meshgrid(std_process_noises_dbm, std_meas_noises_dbm)
    else:
        X, Y = np.meshgrid(np.power(std_process_noises, 2), np.power(std_meas_noises, 2))
    # Figure 1: normalized filtering-vs-smoothing gap.
    Z = deltaFS
    fig, ax = plt.subplots()
    CS = ax.contour(X, Y, Z)
    ax.clabel(CS, inline=1, fontsize=10)
    if enable_db_Axis:
        ax.set_ylabel('meas noise [dbm]')
        ax.set_xlabel('process noise [dbm]')
    else:
        ax.set_ylabel(r'$\sigma_v^2$')
        ax.set_xlabel(r'$\sigma_\omega^2$')
    ax.set_title(r'$\frac{tr(\Sigma^F)-tr(\Sigma^S)}{0.5(tr(\Sigma^F)+tr(\Sigma^S))}$')
    #plt.show()
    # Figure 2: filtering error trace (dbm, or db relative to process-noise power).
    if not(with_respect_to_processNoise):
        Z = 10*np.log10(E_filtering) + 30
        #Z = Z - Z.max()
    else:
        Z = 10*np.log10(E_filtering/np.power(std_process_noises, 2))
    fig, ax = plt.subplots()
    CS = ax.contour(X, Y, Z)
    ax.clabel(CS, inline=1, fontsize=10)
    if enable_db_Axis:
        ax.set_ylabel('meas noise [dbm]')
        ax.set_xlabel('process noise [dbm]')
    else:
        ax.set_ylabel(r'$\sigma_v^2$')
        ax.set_xlabel(r'$\sigma_\omega^2$')
    if not (with_respect_to_processNoise):
        ax.set_title(r'$tr(\Sigma^F)$ [dbm]')
        # NOTE(review): std_process_noises_dbm is only defined when
        # enable_db_Axis is True — this call raises NameError otherwise.
        plt.plot(std_process_noises_dbm, std_meas_noises_dbm, 'r--')
    else:
        ax.set_title(r'$tr(\Sigma^F)/\sigma_\omega^2$ [db]')
    plt.grid(True)
    #plt.show()
    # Figure 3: smoothing error trace, same conventions as figure 2.
    if not (with_respect_to_processNoise):
        Z = 10*np.log10(E_smoothing) + 30
        #Z = Z - Z.max()
    else:
        Z = 10 * np.log10(E_smoothing / np.power(std_process_noises, 2))
    fig, ax = plt.subplots()
    CS = ax.contour(X, Y, Z)
    ax.clabel(CS, inline=1, fontsize=10)
    if enable_db_Axis:
        ax.set_ylabel('meas noise [dbm]')
        ax.set_xlabel('process noise [dbm]')
    else:
        ax.set_ylabel(r'$\sigma_v^2$')
        ax.set_xlabel(r'$\sigma_\omega^2$')
    if not (with_respect_to_processNoise):
        ax.set_title(r'$tr(\Sigma^S)$ [dbm]')
        # NOTE(review): same NameError risk as above when enable_db_Axis is False.
        plt.plot(std_process_noises_dbm, std_meas_noises_dbm, 'r--')
    else:
        ax.set_title(r'$tr(\Sigma^S)/\sigma_\omega^2$ [db]')
    plt.grid(True)
    #plt.show()
    '''
    n_bins = 50
    n, bins, patches = plt.hist(E_filtering.flatten(), n_bins, density=True, histtype='step', cumulative=False, label='Ef')
    n, bins, patches = plt.hist(E_smoothing.flatten(), n_bins, density=True, histtype='step', cumulative=False, label='Es')
    plt.title('Filtering & Smoothing hist')
    plt.xlabel('trace values')
    plt.legend()
    plt.grid(True)
    plt.show()
    '''
    # Per-component figures: gap and per-axis variances for each state dimension.
    for dIdx in range(d):
        componentVarFiltering = sigma_bar_all[:, :, dIdx, dIdx]
        componentVarSmoothing = sigma_j_k_all[:, :, dIdx, dIdx]
        component_deltaFS = (componentVarFiltering - componentVarSmoothing) / (0.5*(componentVarFiltering + componentVarSmoothing))
        Z = component_deltaFS
        Z = Z - Z.max()
        fig, ax = plt.subplots()
        CS = ax.contour(X, Y, Z)
        ax.clabel(CS, inline=1, fontsize=10)
        if enable_db_Axis:
            ax.set_ylabel('meas noise [dbm]')
            ax.set_xlabel('process noise [dbm]')
        else:
            ax.set_ylabel(r'$\sigma_v^2$')
            ax.set_xlabel(r'$\sigma_\omega^2$')
        ax.set_title(r'component %d: $\frac{\Sigma^F(d,d)-\Sigma^S(d,d)}{0.5(\Sigma^F(d,d)+\Sigma^S(d,d))}$ (scaled)' % (dIdx + 1))
        Z = 10 * np.log10(componentVarFiltering)
        Z = Z - Z.max()
        fig, ax = plt.subplots()
        CS = ax.contour(X, Y, Z)
        ax.clabel(CS, inline=1, fontsize=10)
        if enable_db_Axis:
            ax.set_ylabel('meas noise [dbm]')
            ax.set_xlabel('process noise [dbm]')
        else:
            ax.set_ylabel(r'$\sigma_v^2$')
            ax.set_xlabel(r'$\sigma_\omega^2$')
        ax.set_title(r'component %d: $\Sigma^F(d,d)$ [db]' % (dIdx+1))
        # plt.show()
        Z = 10 * np.log10(componentVarSmoothing)
        Z = Z - Z.max()
        fig, ax = plt.subplots()
        CS = ax.contour(X, Y, Z)
        ax.clabel(CS, inline=1, fontsize=10)
        if enable_db_Axis:
            ax.set_ylabel('meas noise [dbm]')
            ax.set_xlabel('process noise [dbm]')
        else:
            ax.set_ylabel(r'$\sigma_v^2$')
            ax.set_xlabel(r'$\sigma_\omega^2$')
        ax.set_title(r'component %d: $\Sigma^S(d,d)$ [db]' % (dIdx+1))
    plt.show()
def steady_state_1d_filtering_err(processNoiseVar, eta, f):
    """Steady-state filtering error variance [W] for the scalar Kalman filter.

    processNoiseVar: process-noise variance; eta: meas/process noise ratio
    parameter; f: AR(1) transition coefficient.
    """
    base = eta * (np.power(f, 2) - 1) + 1
    return 0.5 * processNoiseVar * (base + np.sqrt(np.power(base, 2) + 4 * eta))
def steady_state_1d_smoothing_err(processNoiseVar, eta, f):
    """Steady-state smoothing error variance [W] for the scalar Kalman smoother."""
    # Normalized filtering variance gamma = 2 * sigma_F^2 / processNoiseVar.
    gamma = steady_state_1d_filtering_err(processNoiseVar, eta, f) / (0.5 * processNoiseVar)
    half_gamma_eta = 0.5 * gamma + eta
    correction = (0.5 * half_gamma_eta * np.power(gamma, 2)) / (np.power(half_gamma_eta, 2) - np.power(f * eta, 2))
    return 0.5 * processNoiseVar * (gamma - correction)
def steady_state_1d_Delta_FS(processNoiseVar, eta, f):
    """Normalized steady-state filtering-vs-smoothing error gap (scalar model)."""
    gamma = steady_state_1d_filtering_err(processNoiseVar, eta, f) / (0.5 * processNoiseVar)
    half_gamma_eta = 0.5 * gamma + eta
    arg = gamma * half_gamma_eta / (np.power(half_gamma_eta, 2) - np.power(f, 2) * np.power(eta, 2))
    return arg / (2 - 0.5 * arg)
def gen_1d_measurements(f, processNoiseVar, measurementNoiseVar, initState, N, unmodeledParamsDict=None, enableUnmodeled=False):
    """Generate a scalar AR(1) state trajectory and its noisy measurements.

    State model:   x[k] = f*x[k-1] + w[k],            w ~ N(0, processNoiseVar)
    Measurement:   z[k] = x[k] + alpha*u[k] + v[k],   v ~ N(0, measurementNoiseVar)
    where u is an optional sinusoidal unmodeled disturbance.

    Args:
        f: scalar state-transition coefficient.
        processNoiseVar: process-noise variance [W].
        measurementNoiseVar: measurement-noise variance [W].
        initState: initial state x[0].
        N: number of time steps.
        unmodeledParamsDict: dict with keys 'alpha' (gain) and 'fs' (sample rate);
            only read when enableUnmodeled is True.
        enableUnmodeled: when True, add the sinusoidal unmodeled component.

    Returns:
        (x, z, meanModeledPower, meanUnmodeledPower); x and z have shape (N, 1, 1),
        the powers are time-averaged [W].
    """
    # Fix: avoid a shared mutable default argument ({}); use a fresh dict per call.
    if unmodeledParamsDict is None:
        unmodeledParamsDict = {}
    # unmodeled behaviour:
    unmodeledBehaiour = np.zeros((N, 1, 1))
    if enableUnmodeled:
        alpha, fs = unmodeledParamsDict['alpha'], unmodeledParamsDict['fs']
        phi_0 = np.random.rand()*(2*np.pi)
        # NOTE(review): the sinusoid frequency reuses the state coefficient `f`;
        # presumably a dedicated frequency parameter was intended -- confirm.
        unmodeledBehaiour[:, 0, 0] = np.sin(2*np.pi*f/fs*np.arange(0, N) + phi_0)
    else:
        alpha = 0
    # generate state
    x, z = np.zeros((N, 1, 1)), np.zeros((N, 1, 1))
    modeldPower, unmodeledPower = np.zeros(N), np.zeros(N)  # Watt
    x[0] = initState
    processNoises = np.sqrt(processNoiseVar) * np.random.randn(N)
    measurementNoises = np.sqrt(measurementNoiseVar) * np.random.randn(N)
    # Fix: scale the unmodeled component by alpha at k=0 as well, consistent with
    # the loop below (a no-op when enableUnmodeled is False, since alpha == 0).
    z[0] = x[0] + alpha*unmodeledBehaiour[0] + measurementNoises[0]
    modeldPower[0], unmodeledPower[0] = np.power(x[0], 2), np.power(alpha*unmodeledBehaiour[0, 0, 0], 2)
    for i in range(1, N):
        # Note the index convention: x[i] uses processNoises[i] (not [i-1]).
        x[i] = f*x[i-1] + processNoises[i]
        z[i] = x[i] + alpha*unmodeledBehaiour[i] + measurementNoises[i]
        modeldPower[i], unmodeledPower[i] = np.power(x[i, 0, 0], 2), np.power(alpha*unmodeledBehaiour[i, 0, 0], 2)
    return x, z, modeldPower.mean(), unmodeledPower.mean()
def simVarEst(f, processNoiseVar, eta, unmodeledParamsDict = {}, enableUnmodeled = False):
nIter = 10
N = 10000
measurementNoiseVar = eta / processNoiseVar
x_err_array, x_err_s_array = np.array([]), np.array([])
for i in range(nIter):
k_filter = KalmanFilter(dim_x=1, dim_z=1)
x, z, meanModeledPower, meanUnmodeledPower = gen_1d_measurements(f, processNoiseVar, measurementNoiseVar, np.sqrt(k_filter.P) * np.random.randn(1, 1), N, unmodeledParamsDict, enableUnmodeled)
filterStateInit = np.sqrt(k_filter.P) * np.random.randn(1, 1) # 1D only!
k_filter.x = filterStateInit
k_filter.Q = processNoiseVar * np.ones((1, 1))
k_filter.R = measurementNoiseVar * np.ones((1, 1))
k_filter.H = np.ones((1, 1))
k_filter.F = f * np.ones((1, 1))
# run filter:
Fs = [k_filter.F for t in range(N)]
Hs = [k_filter.H for t in range(N)]
x_est, cov, _, _ = k_filter.batch_filter(z, update_first=False, Fs=Fs, Hs=Hs) # , saver=s)
x_est_s, _, _, _ = k_filter.rts_smoother(x_est, cov, Fs=Fs, Qs=None)
# x_est[k] has the estimation of x[k] given z[k]. so for compatability with Anderson we should propagate x_est:
x_est[1:] = k_filter.F * x_est[:-1]
'''
x_est, k_gain, x_err = np.zeros((N, 1, 1)), np.zeros((N, 1, 1)), np.zeros((N, 1, 1))
x_est[0] = filterStateInit
for k in range(1, N):
k_filter.predict()
k_filter.update(z[k-1])
x_est[k], k_gain[k] = k_filter.x, k_filter.K
'''
x_err = x - x_est
x_err_array = np.append(x_err_array, x_err[int(np.round(3 / 4 * N)):].squeeze())
x_err_s = x - x_est_s
x_err_s_array = np.append(x_err_s_array, x_err_s[int(np.round(3 / 8 * N)):int(np.round(5 / 8 * N))].squeeze())
'''
plt.plot(k_gain.squeeze()[1:])
plt.title('kalman gain')
plt.show()
'''
'''
plt.figure()
n_bins = 100
n, bins, patches = plt.hist(volt2dbW(np.abs(x_err_array)), n_bins, density=True, histtype='step', cumulative=True, label='hist')
plt.xlabel(r'$\sigma_e^2$ [dbW]')
plt.title(r'CDF of $\sigma_e^2$; f=%0.1f' % f)
plt.grid()
plt.show()
'''
return np.var(x_err_array), np.var(x_err_s_array), x_err_array, x_err_s_array, meanModeledPower, meanUnmodeledPower
def calcDeltaR(a, q):
    """Sum the matrix series sum_k a^k q (a^k)^T, truncated once a term is negligible.

    The series is cut when the newest term's max-abs entry drops below a
    threshold relative to |a|.max(); for a stable `a` this converges to the
    solution of the discrete Lyapunov equation.

    Args:
        a: square transition matrix (should be Schur-stable for convergence).
        q: square symmetric weighting matrix, same shape as `a`.

    Returns:
        The accumulated (dim_x, dim_x) matrix.
    """
    dim_x = a.shape[0]
    tildeR = np.zeros((dim_x, dim_x))
    thr = 1e-20 * np.abs(a).max()
    k = 0
    # Fix: replaced the never-updated `maxValAboveThr` loop flag with `while True`,
    # and use `<=` so a zero term also terminates (the original looped forever
    # when `a` was the zero matrix, since then thr == 0 and `0 < 0` never held).
    while True:
        a_k = np.linalg.matrix_power(a, k)
        summed = np.dot(a_k, np.dot(q, np.transpose(a_k)))
        tildeR = tildeR + summed
        k += 1
        if np.abs(summed).max() <= thr:
            break
    return tildeR
def gen_measurements(F, H, Q, R, P, N):
    """Simulate N steps of the linear-Gaussian model x[k]=F x[k-1]+w, z[k]=H x[k]+v.

    The initial state is drawn from N(0, P); w ~ N(0, Q) and v ~ N(0, R) via
    Cholesky factors. Returns (x, z) with shapes (N, dim_x, 1) and (N, dim_z, 1).
    """
    dim_x, dim_z = F.shape[0], H.shape[0]
    x = np.zeros((N, dim_x, 1))
    # Draw x[0], then all process noises, then all measurement noises (this RNG
    # call order is part of the function's reproducible behavior under a seed).
    x[0] = np.dot(np.linalg.cholesky(P), np.random.randn(dim_x, 1))
    processNoises = np.expand_dims(np.dot(np.linalg.cholesky(Q), np.random.randn(dim_x, N)).transpose(), -1)
    measurementNoises = np.expand_dims(np.dot(np.linalg.cholesky(R), np.random.randn(dim_z, N)).transpose(), -1)
    for step in range(1, N):
        x[step] = np.dot(F, x[step - 1]) + processNoises[step - 1]
    z = np.matmul(H, x) + measurementNoises
    return x, z
def unmodeledBehaviorSim(DeltaFirstSample, unmodeledNoiseVar, unmodeledNormalizedDecrasePerformanceMat, k_filter, N, tilde_z, filterStateInit, filter_P_init, tilde_x, nIter):
    """Monte-Carlo evaluation of filtering/smoothing errors under an unmodeled disturbance.

    Adds white noise of variance `unmodeledNoiseVar` (mapped through H) to the
    nominal measurements `tilde_z`, reruns the Kalman filter and RTS smoother
    `nIter` times, and averages squared state-estimation errors over the
    steady-state portions of the runs.  Also computes the theoretical
    first-measurement improvement for this disturbance level.

    Note: `k_filter` is mutated (x and P are reset before every run).

    Returns:
        (traceCovFiltering_u, traceCovSmoothing_u, traceCovFirstMeas_u,
         firstMeasTraceImprovement_u, theoreticalFirstMeasImprove_u,
         totalSmoothingImprovement_u)
    """
    dim_x = k_filter.F.shape[0]
    x_err_f_u_array, x_err_s_firstMeas_u_array, x_err_s_u_array = np.array([]), np.array([]), np.array([])
    # add unmodeled behavior:
    theoreticalFirstMeasImprove_u = np.trace(DeltaFirstSample) - unmodeledNoiseVar * np.trace(unmodeledNormalizedDecrasePerformanceMat)
    for i in range(nIter):
        # White disturbance in state space, scaled by its Cholesky factor and
        # projected onto the measurement space through H.
        s = np.matmul(k_filter.H, np.expand_dims(np.dot(np.linalg.cholesky(unmodeledNoiseVar * np.eye(dim_x)), np.random.randn(dim_x, N)).transpose(), -1))
        z = tilde_z + s
        # run filter on unmodeled measurement:
        k_filter.x = filterStateInit.copy()
        k_filter.P = filter_P_init.copy()
        x_est_u, cov_u, x_est_f_u, _ = k_filter.batch_filter(zs=z, update_first=False)
        x_est_s_u, _, _, _ = k_filter.rts_smoother(x_est_u, cov_u)
        # Filtering errors: averaged over the last quarter (steady state).
        x_err_f_u = np.power(np.linalg.norm(tilde_x - x_est_f_u, axis=1), 2)
        x_err_f_u_array = np.append(x_err_f_u_array, x_err_f_u[int(np.round(3 / 4 * N)):].squeeze())
        # Smoothing errors: averaged over the middle quarter (away from both edges).
        x_err_s_u = np.power(np.linalg.norm(tilde_x - x_est_s_u, axis=1), 2)
        x_err_s_u_array = np.append(x_err_s_u_array, x_err_s_u[int(np.round(3 / 8 * N)):int(np.round(5 / 8 * N))].squeeze())
        # Errors of the filtered (posterior) estimate, i.e. after the first measurement.
        x_err_firstMeas_u = np.power(np.linalg.norm(tilde_x - x_est_u, axis=1), 2)
        x_err_s_firstMeas_u_array = np.append(x_err_s_firstMeas_u_array, x_err_firstMeas_u[int(np.round(3 / 4 * N)):].squeeze())
    traceCovFiltering_u, traceCovSmoothing_u = np.mean(x_err_f_u_array), np.mean(x_err_s_u_array)
    traceCovFirstMeas_u = np.mean(x_err_s_firstMeas_u_array)
    firstMeasTraceImprovement_u = traceCovFiltering_u - traceCovFirstMeas_u
    totalSmoothingImprovement_u = traceCovFiltering_u - traceCovSmoothing_u
    return traceCovFiltering_u, traceCovSmoothing_u, traceCovFirstMeas_u, firstMeasTraceImprovement_u, theoreticalFirstMeasImprove_u, totalSmoothingImprovement_u
def calc_tildeE(tildeF, D_int, k, i, n):
    """Return (tildeF^(n-k))^T @ D_int @ tildeF^(n-i-1) (Anderson's notation)."""
    left = np.linalg.matrix_power(tildeF, n - k).transpose()
    right = np.linalg.matrix_power(tildeF, n - i - 1)
    return np.matmul(left, np.matmul(D_int, right))
def calc_tildeD(tildeF, D_int, k, i, m, N):
    """Sum calc_tildeE over n = m .. N-1, truncating once a term becomes negligible.

    The accumulation stops early when the newest term's max-abs entry drops
    below a threshold relative to |tildeF|.max().
    """
    dim_x = tildeF.shape[0]
    thr = 1e-20 * np.abs(tildeF).max()
    acc = np.zeros((dim_x, dim_x))
    for n in range(m, N):
        term = calc_tildeE(tildeF, D_int, k, i, n)
        acc = acc + term
        if np.abs(term).max() < thr:
            break
    return acc
def calc_tildeB(tildeF, theoreticalBarSigma, D_int, k, i, N):
    """Weight matrix tildeB_{k,i} = tildeF^(k-i-1) - barSigma @ tildeD_{k,i,k} (past measurements)."""
    powTerm = np.linalg.matrix_power(tildeF, k - i - 1)
    return powTerm - np.matmul(theoreticalBarSigma, calc_tildeD(tildeF, D_int, k, i, k, N))
def calc_tildeC(tildeF, theoreticalBarSigma, D_int, inv_F_Sigma, k, i, N):
    """Weight matrix tildeC_{k,i} for present/future measurements (Anderson's notation)."""
    inner = np.matmul(np.linalg.matrix_power(tildeF, i - k).transpose(), inv_F_Sigma) - calc_tildeD(tildeF, D_int, k, i, i+1, N)
    return np.matmul(theoreticalBarSigma, inner)
def recursive_calc_smoothing_anderson(z, K, H, tildeF, F, theoreticalBarSigma): # Anderson's notations
    """Steady-state one-step prediction and fixed-interval smoothing, Anderson's form.

    Filtering: hat_x[k+1|k] = tildeF hat_x[k|k-1] + K z[k], with innovations
    bar_z[k] = z[k] - H^T hat_x[k|k-1].  Smoothing: each smoothed estimate adds
    innovation corrections weighted by the geometrically-decaying smoothing gains
    Ka_j = barSigma (tildeF^j)^T (F barSigma)^(-1) K, truncated once negligible.

    Args:
        z: measurements, shape (N, z_dim, 1).
        K: steady-state Kalman gain.
        H: measurement matrix (callers pass it already transposed).
        tildeF: closed-loop transition matrix F - K*H.
        F: state-transition matrix.
        theoreticalBarSigma: steady-state a-priori filtering covariance.

    Returns:
        (hat_x_k_plus_1_given_k, hat_x_k_given_N), both shape (N, x_dim, 1).
    """
    # Fix: removed dead locals (inv_F_Sigma, K_HT, D_int, inv_tildeF) that were
    # computed but never used in this function.
    # time index k is from 0 to z.shape[0]
    N = z.shape[0]
    x_dim = tildeF.shape[0]
    z_dim = z.shape[1]
    # filtering, inovations:
    hat_x_k_plus_1_given_k = np.zeros((N, x_dim, 1))  # hat_x_k_plus_1_given_k is in index [k+1]
    bar_z_k = np.zeros((N, z_dim, 1))
    hat_x_k_plus_1_given_k[0] = np.dot(K, z[0])
    bar_z_k[0] = z[0]  # placeholder; overwritten by the innovation loop below
    for k in range(N-1):
        hat_x_k_plus_1_given_k[k+1] = np.dot(tildeF, hat_x_k_plus_1_given_k[k]) + np.dot(K, z[k])
    for k in range(N):
        bar_z_k[k] = z[k] - np.dot(H.transpose(), hat_x_k_plus_1_given_k[k])
    # smoothing:
    hat_x_k_given_N = np.zeros((N, x_dim, 1))
    Sint = np.matmul(np.linalg.inv(np.matmul(F, theoreticalBarSigma)), K)
    thr = 1e-20 * np.abs(tildeF).max()
    for k in range(N):
        for i in range(k, N):
            Ka_i_minus_k = np.matmul(theoreticalBarSigma, np.matmul(np.linalg.matrix_power(tildeF, i-k).transpose(), Sint))
            if i > k:
                hat_x_k_given_i = hat_x_k_given_i + np.dot(Ka_i_minus_k, bar_z_k[i])
            else:
                # i == k: start from the one-step prediction.
                hat_x_k_given_i = hat_x_k_plus_1_given_k[k] + np.dot(Ka_i_minus_k, bar_z_k[i])
            if np.abs(Ka_i_minus_k).max() < thr:
                break
        hat_x_k_given_N[k] = hat_x_k_given_i
    return hat_x_k_plus_1_given_k, hat_x_k_given_N
def direct_calc_filtering(z, K, tildeF): # Anderson's notations
    """Direct (non-recursive) steady-state Kalman filtering.

    Computes hat_x[k+1|k] = sum_{i=0..k} tildeF^i K z[k-i] explicitly for every k,
    truncating each inner sum once a term's max-abs entry falls below a threshold
    relative to |tildeF|.max().  x_est[k] estimates x[k] given z[..k-1].
    """
    # time index k is from 0 to z.shape[0]
    N = z.shape[0]
    x_dim = tildeF.shape[0]
    thr = 1e-20 * np.abs(tildeF).max()
    x_est_f_direct_calc = np.zeros((N, x_dim, 1))  # x_est_f_direct_calc[k] has the estimation of x[k] given z[k-1]
    for k in range(N - 1):
        acc = np.zeros((x_dim, 1))
        for lag in range(k + 1):
            term = np.matmul(np.linalg.matrix_power(tildeF, lag), np.matmul(K, z[k - lag]))
            acc = acc + term
            if np.abs(term).max() < thr:
                break
        x_est_f_direct_calc[k + 1] = acc
    return x_est_f_direct_calc
def direct_calc_smoothing(z, K, H, tildeF, F, theoreticalBarSigma): # Anderson's notations
    """Direct (non-recursive) fixed-interval smoothing.

    For every time k the smoothed estimate is an explicit weighted sum of all
    gain-weighted measurements y[i] = K z[i]: past measurements (i < k) are
    weighted by tildeB_{k,i}, present/future ones (i >= k) by tildeC_{k,i}.
    When enable_B_C_expression_verification is True, several algebraic
    identities of the tildeB/tildeC/tildeD weights (shift-invariance and
    recursions) are numerically checked and their residuals plotted.

    Args:
        z: measurements, shape (N, z_dim, 1).
        K: steady-state Kalman gain.
        H: measurement matrix (callers pass it already transposed).
        tildeF: closed-loop transition matrix F - K*H.
        F: state-transition matrix.
        theoreticalBarSigma: steady-state a-priori filtering covariance.

    Returns:
        Smoothed state estimates, shape (N, x_dim, 1).
    """
    enable_B_C_expression_verification = True
    # time index k is from 0 to z.shape[0]
    N = z.shape[0]
    inv_F_Sigma = np.linalg.inv(np.matmul(F, theoreticalBarSigma))
    K_HT = np.matmul(K, H.transpose())
    D_int = np.matmul(inv_F_Sigma, K_HT)
    inv_tildeF = np.linalg.inv(tildeF)  # NOTE(review): computed but never used below
    x_dim = tildeF.shape[0]
    y = np.matmul(K, z)
    x_est_s_direct_calc = np.zeros((N, x_dim, 1))
    if enable_B_C_expression_verification:
        # Residual matrices for each verified identity, indexed [k, i].
        B_C_FirstExpression_max, tildeD_expression_max, tildeD_futureExpression_max, tildeBC_recursive_max, initB_max = np.zeros((N, N)), np.zeros((N, N)), np.zeros((N, N)), np.zeros((N, N)), np.zeros((N, N))
    for k in range(N):
        print(f'direct smoothing calc of time {k} out of {N}')
        # past measurements:
        past, future = np.zeros((x_dim, 1)), np.zeros((x_dim, 1))
        for i in range(k):
            #if not(np.mod(i,100)): print(f'direct smoothing calc of time {k} out of {N}: processing past measurement {i} out of {k}')
            tildeB = calc_tildeB(tildeF, theoreticalBarSigma, D_int, k, i, N)
            assert not(np.isnan(tildeB).any()), "tildeB is nan"
            past = past + np.matmul(tildeB, y[i])
            if enable_B_C_expression_verification:
                # check expression that exists in shifted time-series:
                B_C_FirstExpression_max[k, i] = np.abs(calc_tildeB(tildeF, theoreticalBarSigma, D_int, k, i, 10*N) - calc_tildeB(tildeF, theoreticalBarSigma, D_int, k+1, i+1, 10*N)).max()
                tildeD_k_i_k = calc_tildeD(tildeF, D_int, k, i, k, 10*N)
                tildeD_k_plus_1_i_plus_1_k_plus_1 = calc_tildeD(tildeF, D_int, k+1, i+1, k+1, 10*N)
                tildeD_expression = tildeD_k_i_k - tildeD_k_plus_1_i_plus_1_k_plus_1
                tildeD_expression_max[k, i] = np.abs(tildeD_expression).max()
                # Recursion in i: tildeB_{k,i} tildeF should equal tildeB_{k,i-1}.
                tildeB_recursive = np.matmul(calc_tildeB(tildeF, theoreticalBarSigma, D_int, k, i, 10*N), tildeF) - calc_tildeB(tildeF, theoreticalBarSigma, D_int, k, i-1, 10*N)
                tildeBC_recursive_max[k,i] = np.abs(tildeB_recursive).max()
                if i == k-1:
                    # Initial condition of the tildeB recursion at i = k-1.
                    initB = (np.eye(x_dim) - np.matmul(theoreticalBarSigma, calc_tildeD(tildeF, D_int, 0, -1, 0, 10*N))) - calc_tildeB(tildeF, theoreticalBarSigma, D_int, k, i, 10*N)
                    initB_max[k, i] = np.abs(initB).max()
        for i in range(k, N):
            #if not(np.mod(i,100)): print(f'direct smoothing calc of time {k} out of {N}: processing future measurement {i} out of {N}')
            tildeC = calc_tildeC(tildeF, theoreticalBarSigma, D_int, inv_F_Sigma, k, i, N)
            assert not (np.isnan(tildeC).any()), "tildeC is nan"
            future = future + np.matmul(tildeC, y[i])
            if enable_B_C_expression_verification:
                # check expression that exists in shifted time-series:
                B_C_FirstExpression_max[k, i] = np.abs(calc_tildeC(tildeF, theoreticalBarSigma, D_int, inv_F_Sigma, k, i, 10*N) - calc_tildeC(tildeF, theoreticalBarSigma, D_int, inv_F_Sigma, k+1, i+1, 10*N)).max()
                tildeD_k_i_i_plus_1 = calc_tildeD(tildeF, D_int, k, i, i+1, 10 * N)
                tildeD_k_plus_1_i_plus_1_i_plus_2 = calc_tildeD(tildeF, D_int, k+1, i+1, i+2, 10*N)
                tildeD_futureExpression = tildeD_k_i_i_plus_1 - tildeD_k_plus_1_i_plus_1_i_plus_2
                tildeD_futureExpression_max[k, i] = np.abs(tildeD_futureExpression).max()
                if i == k:
                    # Initial condition of the tildeC recursion at i = k.
                    tildeC_recursive = calc_tildeC(tildeF, theoreticalBarSigma, D_int, inv_F_Sigma, k, k, 10*N) - np.matmul(theoreticalBarSigma, inv_F_Sigma) + np.matmul(np.matmul(theoreticalBarSigma, np.matmul(tildeF.transpose(), np.linalg.inv(theoreticalBarSigma))), (np.eye(x_dim) - calc_tildeB(tildeF, theoreticalBarSigma, D_int, k, k-1, 10*N)))
                else:
                    # Recursion in i for tildeC.
                    tildeC_recursive = calc_tildeC(tildeF, theoreticalBarSigma, D_int, inv_F_Sigma, k, i, 10*N) - np.matmul(theoreticalBarSigma, np.matmul(tildeF.transpose(), np.matmul(np.linalg.inv(theoreticalBarSigma), calc_tildeC(tildeF, theoreticalBarSigma, D_int, inv_F_Sigma, k, i-1, 10*N))))
                tildeBC_recursive_max[k,i] = np.abs(tildeC_recursive).max()
        x_est_s_direct_calc[k] = past + future
    if enable_B_C_expression_verification:
        # Visualize the max-abs residual of every verified identity as an image over (k, i).
        plt.figure(figsize=(16,10))
        plt.subplot(3,2,1)
        plt.imshow(B_C_FirstExpression_max, cmap='viridis')
        plt.colorbar()
        plt.xlabel('i')
        plt.ylabel('k')
        plt.title(f'B C expressions for shifted time-series (1), max = {B_C_FirstExpression_max.max()}')
        plt.subplot(3, 2, 2)
        plt.imshow(tildeD_expression_max, cmap='viridis')
        plt.colorbar()
        plt.xlabel('i')
        plt.ylabel('k')
        plt.title(r'$max(|\tilde{D}_{k,i,k} - \tilde{D}_{k+1,i+1,k+1}|)\forall{k;i<k}$ maxVal=%f' % (tildeD_expression_max.max()))
        plt.subplot(3, 2, 4)
        plt.imshow(tildeD_futureExpression_max, cmap='viridis')
        plt.colorbar()
        plt.xlabel('i')
        plt.ylabel('k')
        plt.title(r'$max(|\tilde{D}_{k,i,i+1} - \tilde{D}_{k+1,i+1,i+2}|)\forall{k;i \geq k}$ maxVal=%f' % (tildeD_futureExpression_max.max()))
        plt.subplot(3, 2, 5)
        plt.imshow(tildeBC_recursive_max, cmap='viridis')
        plt.colorbar()
        plt.xlabel('i')
        plt.ylabel('k')
        plt.title(r'$max(|\tilde{B}_{k,i-1} - \tilde{B}_{k,i}\tilde{F}|)\forall{k;i \geq k}$; also for $\tilde{C}$ maxVal=%f' % (tildeBC_recursive_max.max()))
        plt.show()
        print(f'maxVal of initB: {initB_max.max()}')
    return x_est_s_direct_calc
def direct_calc_smoothing_eq_startSmoothingFromAllMeas(z, K, H, tildeF, F, theoreticalBarSigma): # Anderson's notations
    """Direct smoothing via the three-term decomposition (filtering + smoothing gains).

    For every k, the smoothed estimate is assembled as term1 + term2 - term3:
    term1 is the direct one-step-prediction sum over past measurements, term2
    adds the raw smoothing-gain corrections over future measurements, and term3
    subtracts the prediction-dependent part of those corrections.  Each sum is
    truncated once its geometric weight drops below a threshold.

    NOTE(review): for k == 0, term1's loop decrements i to -1 and reads z[-1]
    (the last measurement) before the break condition is checked -- confirm
    whether that wrap-around is intended.

    Returns:
        Smoothed state estimates, shape (N, x_dim, 1).
    """
    # time index k is from 0 to z.shape[0]
    N = z.shape[0]
    inv_F_Sigma = np.linalg.inv(np.matmul(F, theoreticalBarSigma))
    inv_F_Sigma_mult_K = np.matmul(inv_F_Sigma, K)
    K_HT = np.matmul(K, H.transpose())
    D_int = np.matmul(inv_F_Sigma, K_HT)  # NOTE(review): unused below
    inv_tildeF = np.linalg.inv(tildeF)  # NOTE(review): unused below
    thr = 1e-20 * np.abs(tildeF).max()
    x_dim = tildeF.shape[0]
    x_est_s_direct_calc = np.zeros((N, x_dim, 1))
    for k in range(N):
        print(f'direct_calc_smoothing_eq_startSmoothingFromAllMeas: time {k} out of {N}')
        # term 1:
        # Direct prediction sum over past measurements: sum_i tildeF^(k-i-1) K z[i].
        term1 = np.zeros((x_dim, 1))
        i = k
        while True:
            i -= 1
            tildeF_pow_k_minus_i_minus_1 = np.linalg.matrix_power(tildeF, k - i - 1)
            tmp = np.matmul(tildeF_pow_k_minus_i_minus_1, np.matmul(K, z[i]))
            term1 = term1 + tmp
            if i <= 0 or np.abs(tildeF_pow_k_minus_i_minus_1).max() < thr:
                break
        # term 2:
        # Smoothing-gain corrections from present/future measurements:
        # Ka_j = barSigma (tildeF^j)^T (F barSigma)^(-1) K applied to z[k+j].
        term2 = np.zeros((x_dim, 1))
        i = k-1
        while True:
            i += 1
            tildeF_pow_i_minus_k = np.linalg.matrix_power(tildeF, i-k)
            K_a_i_minus_k = np.matmul(theoreticalBarSigma, np.matmul(tildeF_pow_i_minus_k.transpose(), inv_F_Sigma_mult_K))
            tmp = np.matmul(K_a_i_minus_k, z[i])
            term2 = term2 + tmp
            if i == N-1 or np.abs(K_a_i_minus_k).max() < thr:
                break
        # term 3:
        # Subtract the prediction-dependent part: for every future n, the smoothing
        # gain acts on H^T chi_n where chi_n is the direct prediction at time n.
        term3 = np.zeros((x_dim, 1))
        n = k-1
        while True:
            n += 1
            if n > N-1:
                break
            tildeF_pow_n_minus_k = np.linalg.matrix_power(tildeF, n-k)
            K_a_n_minus_k = np.matmul(theoreticalBarSigma, np.matmul(tildeF_pow_n_minus_k.transpose(), inv_F_Sigma_mult_K))
            chi_n = np.zeros((x_dim, 1))
            #if n >= 1 and n < N:
            i=n
            while True:
                i -= 1
                if i < 0 or i > N-1:
                    break
                tildeF_pow_n_minus_i_minus_1 = np.linalg.matrix_power(tildeF, n - i - 1)
                tmp = np.matmul(tildeF_pow_n_minus_i_minus_1, np.matmul(K, z[i]))
                chi_n = chi_n + tmp
                if i <= 0 or np.abs(tildeF_pow_n_minus_i_minus_1).max() < thr:
                    break
            tmp = np.matmul(K_a_n_minus_k, np.matmul(H.transpose(), chi_n))
            term3 = term3 + tmp
            if np.abs(K_a_n_minus_k).max() < thr:
                break
        x_est_s_direct_calc[k] = term1 + term2 - term3
    return x_est_s_direct_calc
def simCovEst(F, H, processNoiseVar, measurementNoiseVar, enableTheoreticalResultsOnly, enableDirectVsRecursiveSmoothingDiffCheck):
    """Compare theoretical and simulated filtering/smoothing error covariances.

    Computes the steady-state covariances from the discrete algebraic Riccati and
    Lyapunov equations, optionally runs a Monte-Carlo simulation (filter + RTS
    smoother, plus direct/recursive smoothing cross-checks and their diff plots),
    and sweeps an unmodeled-noise variance around the theoretical break-even
    threshold via unmodeledBehaviorSim.

    Args:
        F: state-transition matrix (dim_x, dim_x).
        H: measurement matrix, passed column-form; k_filter.H = H.transpose().
        processNoiseVar, measurementNoiseVar: scalar noise variances (isotropic).
        enableTheoreticalResultsOnly: skip the Monte-Carlo runs when True.
        enableDirectVsRecursiveSmoothingDiffCheck: run/plot direct-vs-recursive
            smoothing comparisons when True.

    Returns:
        (traceCovFiltering, traceCovSmoothing, theoreticalTraceCovFiltering,
         theoreticalTraceCovSmoothing, theoreticalThresholdUnmodeledNoiseVar,
         unmodeledNoiseVarVec, firstMeasTraceImprovement,
         theoreticalFirstMeasImprove, firstMeasTraceImprovement_u,
         theoreticalFirstMeasImprove_u, totalSmoothingImprovement_u)
    """
    enableSanityCheckOnShiftedTimeSeries = False
    N = 300#10000
    nIterUnmodeled = 20
    uN = 30
    if enableTheoreticalResultsOnly:
        nIterUnmodeled = 1
    dim_x, dim_z = F.shape[0], H.shape[1]
    k_filter = KalmanFilter(dim_x=dim_x, dim_z=dim_z)
    k_filter.Q = processNoiseVar * np.eye(dim_x)
    k_filter.R = measurementNoiseVar * np.eye(dim_z)
    k_filter.H = H.transpose()
    k_filter.F = F
    # --- Theoretical steady-state quantities ---
    # A-priori filtering covariance from the discrete algebraic Riccati equation.
    theoreticalBarSigma = solve_discrete_are(a=np.transpose(k_filter.F), b=np.transpose(k_filter.H), q=k_filter.Q, r=k_filter.R)
    Ka_0 = np.dot(theoreticalBarSigma, np.dot(np.transpose(k_filter.H), np.linalg.inv(np.dot(k_filter.H, np.dot(theoreticalBarSigma, np.transpose(k_filter.H))) + k_filter.R)))# first smoothing gain
    DeltaFirstSample = np.dot(Ka_0, np.dot(k_filter.H, theoreticalBarSigma))
    steadyKalmanGain = np.dot(k_filter.F, Ka_0)
    tildeF = k_filter.F - np.dot(steadyKalmanGain, k_filter.H)
    # Smoothing improvement from the discrete Lyapunov equation.
    theoreticalSmoothingFilteringDiff = solve_discrete_lyapunov(a=np.dot(theoreticalBarSigma, np.dot(np.transpose(tildeF), np.linalg.inv(theoreticalBarSigma))) , q=DeltaFirstSample)
    theoreticalSmoothingSigma = theoreticalBarSigma - theoreticalSmoothingFilteringDiff
    theoreticalFirstMeasImprove = np.trace(DeltaFirstSample)
    KH_t = np.dot(steadyKalmanGain, k_filter.H)
    # Cross-check the Lyapunov solution against the direct series summation.
    tildeR = solve_discrete_lyapunov(a=tildeF, q=np.dot(KH_t, np.transpose(KH_t)))
    tildeR_directSum = calcDeltaR(a=tildeF, q=np.dot(KH_t, np.transpose(KH_t)))
    assert np.abs(tildeR_directSum - tildeR).max() < 1e-5
    # check smoothing on a series that is shifted by a single time-instance equations:
    inv_F_Sigma = np.linalg.inv(np.matmul(k_filter.F, theoreticalBarSigma))
    K_HT = np.matmul(steadyKalmanGain, k_filter.H.transpose().transpose())
    inv_F_Sigma_mult_K_HT = np.matmul(inv_F_Sigma, K_HT)
    tildeR_directSum = calcDeltaR(a=tildeF.transpose(), q=inv_F_Sigma_mult_K_HT)
    diff = tildeR_directSum - np.matmul(np.linalg.inv(tildeF).transpose(), np.matmul(tildeR_directSum, tildeF)) - inv_F_Sigma_mult_K_HT
    #assert np.abs(diff).max() < 1e-5
    # Unmodeled-noise break-even threshold: below it the first measurement helps.
    Ka_0H_t = np.dot(Ka_0, k_filter.H)
    unmodeledNormalizedDecrasePerformanceMat = np.dot(Ka_0H_t, np.dot(tildeR + np.eye(dim_x), np.transpose(Ka_0H_t))) - (np.dot(Ka_0H_t, tildeR) + np.dot(tildeR, np.transpose(Ka_0H_t)))
    theoreticalThresholdUnmodeledNoiseVar = np.trace(DeltaFirstSample) / np.trace(unmodeledNormalizedDecrasePerformanceMat)
    if theoreticalThresholdUnmodeledNoiseVar > 0:
        unmodeledNoiseVarVec = np.logspace(np.log10(1e-2 * theoreticalThresholdUnmodeledNoiseVar), np.log10(10 * theoreticalThresholdUnmodeledNoiseVar), uN, base=10)
    else:
        unmodeledNoiseVarVec = np.logspace(np.log10(1e-2 * np.abs(theoreticalThresholdUnmodeledNoiseVar)), np.log10(10 * np.abs(theoreticalThresholdUnmodeledNoiseVar)), uN, base=10)
    x_err_f_array, x_err_s_array, x_err_s_firstMeas_array = np.array([]), np.array([]), np.array([])
    filter_P_init = k_filter.P.copy()
    filterStateInit = np.dot(np.linalg.cholesky(filter_P_init), np.random.randn(dim_x, 1))
    if enableTheoreticalResultsOnly:
        # --- Optional eigen-structure investigation of the direct-form weights ---
        enableDirectFormInvestigation = False
        if enableDirectFormInvestigation:
            # investigate the direct form:
            thr = 1e-10 * np.abs(tildeF).max()
            inv_F_Sigma = np.linalg.inv(np.matmul(k_filter.F, theoreticalBarSigma))
            K_HT = np.matmul(steadyKalmanGain, k_filter.H.transpose().transpose())
            D_int = np.matmul(inv_F_Sigma, K_HT)
            # Eigen-decompositions of tildeB_{k,k-1} and its tildeF-recursions.
            tildeB_k_k_minus_1 = np.eye(dim_x) - np.matmul(theoreticalBarSigma, calc_tildeD(tildeF, D_int, 0, -1, 0, 100000))
            eigenValues, eigenVectors = np.linalg.eig(tildeB_k_k_minus_1)
            idx = eigenValues.argsort()[::-1]
            Bw = eigenValues[idx]
            Bv = eigenVectors[:, idx]
            tildeB_k_k_minus_2 = np.matmul(tildeB_k_k_minus_1, tildeF)
            eigenValues, eigenVectors = np.linalg.eig(tildeB_k_k_minus_2)
            idx = eigenValues.argsort()[::-1]
            Bw_2 = eigenValues[idx]
            Bv_2 = eigenVectors[:, idx]
            tildeB_k_k_minus_3 = np.matmul(tildeB_k_k_minus_2, tildeF)
            eigenValues, eigenVectors = np.linalg.eig(tildeB_k_k_minus_3)
            idx = eigenValues.argsort()[::-1]
            Bw_3 = eigenValues[idx]
            Bv_3 = eigenVectors[:, idx]
            tildeB_k_k_minus_4 = np.matmul(tildeB_k_k_minus_3, tildeF)
            eigenValues, eigenVectors = np.linalg.eig(tildeB_k_k_minus_4)
            idx = eigenValues.argsort()[::-1]
            Bw_4 = eigenValues[idx]
            Bv_4 = eigenVectors[:, idx]
            tildeB_k_k_minus_5 = np.matmul(tildeB_k_k_minus_4, tildeF)
            eigenValues, eigenVectors = np.linalg.eig(tildeB_k_k_minus_5)
            idx = eigenValues.argsort()[::-1]
            Bw_5 = eigenValues[idx]
            Bv_5 = eigenVectors[:, idx]
            # C_k_k in two algebraically-equivalent forms (sanity-checked below).
            C_k_k = np.matmul(theoreticalBarSigma, inv_F_Sigma - np.matmul(tildeF.transpose(), np.matmul(np.linalg.inv(theoreticalBarSigma), np.eye(dim_x) - tildeB_k_k_minus_1)))
            C_k_k_second_for_sanity = np.matmul(theoreticalBarSigma, inv_F_Sigma - np.matmul(tildeF.transpose(), calc_tildeD(tildeF, D_int, 0, -1, 0, 100000)))
            assert np.abs(C_k_k_second_for_sanity - C_k_k).max() < thr, 'C_k_k problem'
            eigenValues, eigenVectors = np.linalg.eig(C_k_k)
            idx = eigenValues.argsort()[::-1]
            Cw = eigenValues[idx]
            Cv = eigenVectors[:, idx]
            # Plot the leading eigenvectors of the B and C weight matrices.
            plt.figure()
            origin = [0, 0]
            plt.grid()
            '''
            maxVal = max(np.maximum(*np.abs([Bw, Cw])))
            plt.xlim([-maxVal, maxVal])
            plt.ylim([-maxVal, maxVal])
            plt.quiver(*origin, *Bv[:, 0], angles='xy', scale_units='xy', scale=1 / np.abs(Bw[0]), color='g', label=r'$\tildeB_{k,k-1}$')
            plt.quiver(*origin, *Bv[:, 1], angles='xy', scale_units='xy', scale=1 / np.abs(Bw[1]), color='g')
            plt.quiver(*origin, *Cv[:, 0], angles='xy', scale_units='xy', scale=1 / np.abs(Cw[0]), color='b', label=r'$\tildeC_{k,k}$')
            plt.quiver(*origin, *Cv[:, 1], angles='xy', scale_units='xy', scale=1 / np.abs(Cw[1]), color='b')
            plt.title(r'Eigenvectors with $||v_i||_2=\lambda_i$')
            '''
            maxVal = 1
            plt.xlim([-maxVal, maxVal])
            plt.ylim([-maxVal, maxVal])
            plt.quiver(*origin, *Bv[:, 0], angles='xy', scale_units='xy', scale=1, color='g', label=r'$\tildeB_{k,k-1}$')
            plt.quiver(*origin, *Bv[:, 1], angles='xy', scale_units='xy', scale=1, color='g')
            plt.quiver(*origin, *Cv[:, 0], angles='xy', scale_units='xy', scale=1, color='b', label=r'$\tildeC_{k,k}$')
            plt.quiver(*origin, *Cv[:, 1], angles='xy', scale_units='xy', scale=1, color='b')
            plt.title(r'Eigenvectors')
            plt.legend()
            plt.figure()
            origin = [0, 0]
            plt.grid()
            maxVal = 1
            plt.xlim([-maxVal, maxVal])
            plt.ylim([-maxVal, maxVal])
            plt.quiver(*origin, *Bv[:, 0], angles='xy', scale_units='xy', scale=1, color='g', label=r'$\tildeB_{k,k-1}$')
            #plt.quiver(*origin, *Bv[:, 1], angles='xy', scale_units='xy', scale=1, color='g')
            plt.quiver(*origin, *Bv_2[:, 0], angles='xy', scale_units='xy', scale=1, color='b', label=r'$\tildeB_{k,k-2}$')
            #plt.quiver(*origin, *Bv_2[:, 1], angles='xy', scale_units='xy', scale=1, color='b')
            plt.quiver(*origin, *Bv_3[:, 0], angles='xy', scale_units='xy', scale=1, color='r', label=r'$\tildeB_{k,k-3}$')
            #plt.quiver(*origin, *Bv_3[:, 1], angles='xy', scale_units='xy', scale=1, color='r')
            plt.quiver(*origin, *Bv_4[:, 0], angles='xy', scale_units='xy', scale=1, color='k', label=r'$\tildeB_{k,k-4}$')
            plt.quiver(*origin, *Bv_5[:, 0], angles='xy', scale_units='xy', scale=1, color='m', label=r'$\tildeB_{k,k-5}$')
            plt.legend()
            plt.show()
    # --- Monte-Carlo simulation on a single generated series ---
    if not enableTheoreticalResultsOnly:
        enableFilterAdversarialInvestigation = True
        tilde_x, tilde_z = gen_measurements(k_filter.F, k_filter.H, k_filter.Q, k_filter.R, k_filter.P, N)
        enableFilterAdversarialInvestigation = True
        if enableFilterAdversarialInvestigation:
            # run the filter on adversarial optimal time-series
            x=3  # NOTE(review): leftover placeholder; this branch does nothing yet
        # run filter on modeled measurement:
        k_filter.x = filterStateInit.copy()
        k_filter.P = filter_P_init.copy()
        x_est, cov, x_est_f, _ = k_filter.batch_filter(zs=tilde_z, update_first=False)
        x_est_s, _, _, _ = k_filter.rts_smoother(x_est, cov)
        # x_est[k] has the estimation of x[k] given z[k]. so for compatability with Anderson we should propagate x_est:
        # x_est[1:] = k_filter.F * x_est[:-1]
        # x_est_f is compatible with Anderson ==> x_est_f[k] has the estimation of x[k] given z[k-1]
        if enableDirectVsRecursiveSmoothingDiffCheck:
            # compare smoothing estimation to a direct (not recursive) calculation
            x_est_f_direct_calc = direct_calc_filtering(tilde_z, steadyKalmanGain, tildeF)
            # x_est_s_direct_calc_eq_startSmoothingFromAllMeas = direct_calc_smoothing_eq_startSmoothingFromAllMeas(tilde_z, steadyKalmanGain, k_filter.H.transpose(), tildeF, k_filter.F, theoreticalBarSigma)
            x_est_s_direct_calc = direct_calc_smoothing(tilde_z, steadyKalmanGain, k_filter.H.transpose(), tildeF, k_filter.F, theoreticalBarSigma)
            x_est_f_recursive_calc, x_est_s_recursive_calc = recursive_calc_smoothing_anderson(tilde_z, steadyKalmanGain, k_filter.H.transpose(), tildeF, k_filter.F, theoreticalBarSigma)
            if enableSanityCheckOnShiftedTimeSeries:
                # sanity check: direct calc on shifted time-series:
                shifted_tilde_z = np.concatenate((np.random.rand(1, dim_z, 1), tilde_z[:-1]), axis=0) # shifted_tilde_z[k] = tilde_z[k-1]
                x_est_s_direct_calc_on_shifted = direct_calc_smoothing(shifted_tilde_z, steadyKalmanGain, k_filter.H.transpose(), tildeF, k_filter.F, theoreticalBarSigma)
                smoothing_shiftedDirect_direct_diff_energy = np.power(np.linalg.norm(x_est_s_direct_calc_on_shifted[1:] - x_est_s_direct_calc[:-1], axis=1), 2)
                plt.figure()
                plt.plot(smoothing_shiftedDirect_direct_diff_energy, label='DirectVsShiftedDirect')
                plt.title(r'Smoothing: direct vs shiftedDirect diff')
                plt.ylabel('W')
                plt.legend()
                plt.grid()
                plt.show()
            # Normalized (db) energy of the pairwise differences between the three
            # filtering implementations and the three smoothing implementations.
            filtering_recursiveSimon_direct_diff_energy = watt2db(np.divide(np.power(np.linalg.norm(x_est_f_direct_calc - x_est_f, axis=1), 2), np.power(np.linalg.norm(x_est_f, axis=1), 2)))
            filtering_recursiveAnderson_direct_diff_energy = watt2db(np.divide(np.power(np.linalg.norm(x_est_f_direct_calc - x_est_f_recursive_calc, axis=1), 2), np.power(np.linalg.norm(x_est_f, axis=1), 2)))
            filtering_recursiveAnderson_recursiveSimon_diff_energy = watt2db(np.divide(np.power(np.linalg.norm(x_est_f - x_est_f_recursive_calc, axis=1), 2), np.power(np.linalg.norm(x_est_f, axis=1), 2)))
            #smoothing_eq_startSmoothingFromAllMeas_recursiveSimon_direct_diff_energy = watt2db(np.divide(np.power(np.linalg.norm(x_est_s_direct_calc_eq_startSmoothingFromAllMeas - x_est_s, axis=1), 2), np.power(np.linalg.norm(x_est_s, axis=1), 2)))
            #smoothing_eq_startSmoothingFromAllMeas_recursiveAnderson_direct_diff_energy = watt2db(np.divide(np.power(np.linalg.norm(x_est_s_direct_calc_eq_startSmoothingFromAllMeas - x_est_s_recursive_calc, axis=1), 2), np.power(np.linalg.norm(x_est_s, axis=1), 2)))
            smoothing_eq_startSmoothingFromAllMeas_recursiveAnderson_recursiveSimon_diff_energy = watt2db(np.divide(np.power(np.linalg.norm(x_est_s - x_est_s_recursive_calc, axis=1), 2), np.power(np.linalg.norm(x_est_s, axis=1), 2)))
            smoothing_recursiveSimon_direct_diff_energy = watt2db(np.divide(np.power(np.linalg.norm(x_est_s_direct_calc - x_est_s, axis=1), 2), np.power(np.linalg.norm(x_est_s, axis=1), 2)))
            smoothing_recursiveAnderson_direct_diff_energy = watt2db(np.divide(np.power(np.linalg.norm(x_est_s_direct_calc - x_est_s_recursive_calc, axis=1), 2), np.power(np.linalg.norm(x_est_s, axis=1), 2)))
            smoothing_recursiveAnderson_recursiveSimon_diff_energy = watt2db(np.divide(np.power(np.linalg.norm(x_est_s - x_est_s_recursive_calc, axis=1), 2), np.power(np.linalg.norm(x_est_s, axis=1), 2)))
            # 3x3 grid: columns = comparison type, rows = implementation pair.
            plt.figure(figsize=(16, 8))
            plt.subplot(3, 3, 1)
            plt.plot(filtering_recursiveSimon_direct_diff_energy, label='DirectVsSimon')
            plt.title(r'Filtering: direct vs recursive diff')
            plt.ylabel('db')
            plt.legend()
            plt.grid()
            plt.subplot(3, 3, 4)
            plt.plot(filtering_recursiveAnderson_direct_diff_energy, label='DirectVsAnderson')
            #plt.title(r'Filtering: direct vs recursive diff')
            plt.ylabel('db')
            plt.legend()
            plt.grid()
            plt.subplot(3, 3, 7)
            plt.plot(filtering_recursiveAnderson_recursiveSimon_diff_energy, label='SimonVsAnderson')
            #plt.title(r'Filtering: direct vs recursive diff')
            plt.ylabel('db')
            plt.legend()
            plt.grid()
            plt.subplot(3, 3, 2)
            #plt.plot(smoothing_eq_startSmoothingFromAllMeas_recursiveSimon_direct_diff_energy, label='DirectVsSimon')
            plt.title(r'Smoothing: eq_startSmoothingFromAllMeas vs recursive diff')
            plt.ylabel('db')
            plt.legend()
            plt.grid()
            plt.subplot(3, 3, 5)
            #plt.plot(smoothing_eq_startSmoothingFromAllMeas_recursiveAnderson_direct_diff_energy, label='DirectVsAnderson')
            plt.title(r'Smoothing: eq_startSmoothingFromAllMeas vs recursive diff')
            plt.ylabel('db')
            plt.legend()
            plt.grid()
            plt.subplot(3, 3, 8)
            plt.plot(smoothing_eq_startSmoothingFromAllMeas_recursiveAnderson_recursiveSimon_diff_energy, label='SimonVsAnderson')
            #plt.title(r'Smoothing: eq_startSmoothingFromAllMeas vs recursive diff')
            plt.ylabel('db')
            plt.legend()
            plt.grid()
            plt.subplot(3, 3, 3)
            plt.plot(smoothing_recursiveSimon_direct_diff_energy, label='DirectVsSimon')
            plt.title(r'Smoothing: direct vs recursive diff')
            plt.ylabel('db')
            plt.legend()
            plt.grid()
            plt.subplot(3, 3, 6)
            plt.plot(smoothing_recursiveAnderson_direct_diff_energy, label='DirectVsAnderson')
            #plt.title(r'Smoothing: direct vs recursive diff')
            plt.ylabel('db')
            plt.legend()
            plt.grid()
            plt.subplot(3, 3, 9)
            plt.plot(smoothing_recursiveAnderson_recursiveSimon_diff_energy, label='SimonVsAnderson')
            #plt.title(r'Smoothing: direct vs recursive diff')
            plt.ylabel('db')
            plt.legend()
            plt.grid()
            plt.show()
        # Steady-state squared errors: filtering and first-measurement errors from
        # the last quarter, smoothing errors from the middle quarter of the run.
        x_err_f = np.power(np.linalg.norm(tilde_x - x_est_f, axis=1), 2)
        x_err_f_array = np.append(x_err_f_array, x_err_f[int(np.round(3 / 4 * N)):].squeeze())
        x_err_s = np.power(np.linalg.norm(tilde_x - x_est_s, axis=1), 2)
        x_err_s_array = np.append(x_err_s_array, x_err_s[int(np.round(3 / 8 * N)):int(np.round(5 / 8 * N))].squeeze())
        x_err_firstMeas = np.power(np.linalg.norm(tilde_x - x_est, axis=1), 2)
        x_err_s_firstMeas_array = np.append(x_err_s_firstMeas_array, x_err_firstMeas[int(np.round(3 / 4 * N)):].squeeze())
    else:
        tilde_x, tilde_z = 0, 0
    traceCovFiltering, traceCovSmoothing = np.mean(x_err_f_array), np.mean(x_err_s_array)
    theoreticalTraceCovFiltering, theoreticalTraceCovSmoothing = np.trace(theoreticalBarSigma), np.trace(theoreticalSmoothingSigma)
    traceCovFirstMeas = np.mean(x_err_s_firstMeas_array)
    firstMeasTraceImprovement = traceCovFiltering - traceCovFirstMeas
    # --- Unmodeled-noise sweep around the theoretical threshold ---
    uN = unmodeledNoiseVarVec.shape[0]
    traceCovFiltering_u, traceCovSmoothing_u, traceCovFirstMeas_u, firstMeasTraceImprovement_u, theoreticalFirstMeasImprove_u, totalSmoothingImprovement_u = np.zeros(uN), np.zeros(uN), np.zeros(uN), np.zeros(uN), np.zeros(uN), np.zeros(uN)
    for uIdx, unmodeledNoiseVar in enumerate(unmodeledNoiseVarVec):
        traceCovFiltering_u[uIdx], traceCovSmoothing_u[uIdx], traceCovFirstMeas_u[uIdx], firstMeasTraceImprovement_u[uIdx], theoreticalFirstMeasImprove_u[uIdx], totalSmoothingImprovement_u[uIdx] = unmodeledBehaviorSim(DeltaFirstSample, unmodeledNoiseVar, unmodeledNormalizedDecrasePerformanceMat, k_filter, N, tilde_z, filterStateInit, filter_P_init, tilde_x, nIterUnmodeled)
        print(f'finished unmodeled var no. {uIdx} out of {unmodeledNoiseVarVec.shape[0]}')
    return traceCovFiltering, traceCovSmoothing, theoreticalTraceCovFiltering, theoreticalTraceCovSmoothing, theoreticalThresholdUnmodeledNoiseVar, unmodeledNoiseVarVec, firstMeasTraceImprovement, theoreticalFirstMeasImprove, firstMeasTraceImprovement_u, theoreticalFirstMeasImprove_u, totalSmoothingImprovement_u
def dbm2var(x_dbm):
    """Convert a power level in dBm to a linear variance in watts."""
    exponent = np.divide(x_dbm - 30, 10)
    return np.power(10, exponent)
def volt2dbm(x_volt):
    """Convert an amplitude in volts to power in dBm (1-ohm reference)."""
    power_dbW = 10 * np.log10(np.power(x_volt, 2))
    return power_dbW + 30
def volt2dbW(x_volt):
    """Convert an amplitude in volts to power in dBW (1-ohm reference)."""
    return np.log10(np.power(x_volt, 2)) * 10
def volt2db(x_volt):
    """Convert an amplitude in volts to power in dB (duplicate of volt2dbW)."""
    squared = np.power(x_volt, 2)
    return 10 * np.log10(squared)
def watt2dbm(x_volt):
    """Convert power in watts to dBm.

    NOTE: despite its name, the parameter is a power in watts (kept for
    backward compatibility with existing keyword callers).
    """
    return 30 + 10 * np.log10(x_volt)
def watt2db(x_volt):
    """Convert power in watts to dB.

    NOTE: despite its name, the parameter is a power in watts (kept for
    backward compatibility with existing keyword callers).
    """
    level_db = 10 * np.log10(x_volt)
    return level_db
"ron.teichner@campus.technion.ac.il"
] | ron.teichner@campus.technion.ac.il |
4b2fa6673d63d5e719510a8281c35d5055a55f66 | b3d552675b36cb88a1388fcfc531e497ad7cbee9 | /qfpython/apps/news/templatetags/news_filters.py | 3a666825994e57a123163079c2f8ecd8013170d7 | [
"LicenseRef-scancode-mulanpsl-1.0-en",
"MulanPSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | gaohj/1902_django | 3cea1f0935fd983f25c6fd832b103ac5165a2e30 | 822af7b42120c6edc699bf97c800887ff84f5621 | refs/heads/master | 2022-12-11T10:02:50.233398 | 2019-11-26T08:33:38 | 2019-11-26T08:33:38 | 209,241,390 | 2 | 0 | null | 2022-12-08T07:28:24 | 2019-09-18T07:05:48 | Python | UTF-8 | Python | false | false | 957 | py | from datetime import datetime
from django import template
from django.utils.timezone import now as now_func,localtime
register = template.Library()
@register.filter
def time_since(value):
if not isinstance(value,datetime):
return value
now = now_func()
timestamp = (now-value).total_seconds()
if timestamp < 60:
return '刚刚'
elif timestamp >=60 and timestamp < 60*60:
minitues = int(timestamp/60)
return '%s分钟前'% minitues
elif timestamp >=60*60 and timestamp < 60*60*24:
hours = int(timestamp/3600)
return '%s小时前'% hours
elif timestamp >=60*60*24 and timestamp < 60*60*24*30:
days = int(timestamp/3600*24)
return '%s天前'% days
else:
return value.strftime('%Y/%m/%d %H:%M')
@register.filter
def time_format(value):
if not isinstance(value,datetime):
return value
return localtime(value).strftime('%Y/%m/%d %H:%M:%S') | [
"gaohj@163.com"
] | gaohj@163.com |
275ef639bc3efd7cae47bd89b1b51f5ce0d524aa | 07798124d82a1f6fc86bfe9c71c8ed7a0d8f8988 | /CodeSignal/firstDuplicate.py | 30a561833c76f2c4dad9efa659b4a55722d5fb10 | [] | no_license | ksjksjwin/practice-coding-problem | 84ced9e5cf65978a8193fa3b21956595491fb465 | 1e692ae6fcc154f173018f5bf274154a949c9ed7 | refs/heads/master | 2022-12-03T02:31:02.715835 | 2020-08-25T21:49:30 | 2020-08-25T21:49:30 | 262,863,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | '''
Given an array a that contains only numbers in the range from 1 to a.length, find the first duplicate number for which the second occurrence has the minimal index. In other words, if there are more than 1 duplicated numbers, return the number for which the second occurrence has a smaller index than the second occurrence of the other number does. If there are no such elements, return -1.
Example
For a = [2, 1, 3, 5, 3, 2], the output should be firstDuplicate(a) = 3.
There are 2 duplicates: numbers 2 and 3. The second occurrence of 3 has a smaller index than the second occurrence of 2 does, so the answer is 3.
For a = [2, 2], the output should be firstDuplicate(a) = 2;
For a = [2, 4, 3, 5, 1], the output should be firstDuplicate(a) = -1.
Copyright to © 2020 BrainFights Inc. All rights reserved
'''
def firstDuplicate(a):
'''
Use linear search algorithm
Linear search algorithm with a 'set' is faster than using 'list' .in method
'''
temp_set = set()
for number in a:
if number not in temp_set:
temp_set.add(number)
else:
return number
return -1
'''
index_distance = 10000
res_num = 0
for i in range(len(a)):
for j in range(i+1,len(a)):
if (a[i] == a[j]) and (j - i < index_distance):
index_distance = j - i
res_num = a[i]
if res_num == 0:
res_num = -1
return res_num
'''
| [
"ksjksjwin@gmail.com"
] | ksjksjwin@gmail.com |
54340785999a0d6852c8fbb31d023e36b67ae999 | aedd30e4f4c230722ada171ed11c22e693324f3c | /LDay_100_SpiralMatrix.py | 972cf2c7034d08860f1f18e50195b135a81911e3 | [] | no_license | loetcodes/100-Days-of-Code---PythonChallenge-01 | 173afb7241bfd2c8f43fde0bbe072c597b0fb756 | b186bfa0ba74bd72edcd7ef8065f026c579b0cc4 | refs/heads/master | 2021-07-21T03:59:51.806004 | 2020-06-01T23:36:47 | 2020-06-01T23:36:47 | 176,126,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,734 | py | """
Day 100 - Spiral Matrix
Leetcode - Medium
Given a matrix of m x n elements (m rows, n columns), return all elements of the matrix in spiral order.
Example 1:
Input:
[
[ 1, 2, 3 ],
[ 4, 5, 6 ],
[ 7, 8, 9 ]
]
Output: [1,2,3,6,9,8,7,4,5]
Example 2:
Input:
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9,10,11,12]
]
Output: [1,2,3,4,8,12,11,10,9,5,6,7]
"""
# My solution
class Solution:
def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
# Check if the matrix is empty or contains only 1 row
if len(matrix) == 0:
return []
elif len(matrix) == 1:
return matrix[0]
#Check if matrix contains only 1 column
if len(matrix[0]) <= 1:
return [item[0] for item in matrix]
matrix_i = [item for item in matrix]
first_row, last_row = matrix_i[0], matrix_i[-1][::-1]
first_col, last_col, mid_matrix = [], [], []
if len(matrix_i) > 2:
first_col, last_col = [], []
for row in range(1, len(matrix_i) - 1):
first_col.append(matrix_i[row][0])
last_col.append(matrix_i[row][-1])
if len(matrix_i[row]) > 2:
mid_matrix.append(list(matrix_i[row][1:-1]))
result = first_row + last_col + last_row + first_col[::-1] + self.spiralOrder(mid_matrix)
return result
class Solution2:
def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
if not matrix: return None
res=[]
while matrix:
res.extend([i for i in matrix.pop(0)])
matrix=list(zip(*matrix))[::-1]
print("Matrix is nw", matrix)
return res
if __name__ == '__main__': | [
"46016065+lulets@users.noreply.github.com"
] | 46016065+lulets@users.noreply.github.com |
156130cd7d52ce78d3ffe0cfb0f1316f7548cdbf | d125c002a6447c3f14022b786b07712a7f5b4974 | /tests/functional/intfunc/math/test_ceil_01.py | 97611af9f2cd0d2d47f57bfbf85d1844845159dc | [
"MIT"
] | permissive | FirebirdSQL/firebird-qa | 89d5b0035071f9f69d1c869997afff60c005fca9 | cae18186f8c31511a7f68248b20f03be2f0b97c6 | refs/heads/master | 2023-08-03T02:14:36.302876 | 2023-07-31T23:02:56 | 2023-07-31T23:02:56 | 295,681,819 | 3 | 2 | MIT | 2023-06-16T10:05:55 | 2020-09-15T09:41:22 | Python | UTF-8 | Python | false | false | 707 | py | #coding:utf-8
"""
ID: intfunc.math.ceil
TITLE: CEIL( <number>)
DESCRIPTION:
Returns a value representing the smallest integer that is greater than or equal to the input argument.
FBTEST: functional.intfunc.math.ceil_01
"""
import pytest
from firebird.qa import *
db = db_factory()
test_script = """select CEIL( 2.1) from rdb$database;
select CEIL( -2.1) from rdb$database;
"""
act = isql_act('db', test_script)
expected_stdout = """
CEIL
=====================
3
CEIL
=====================
-2
"""
@pytest.mark.version('>=3')
def test_1(act: Action):
act.expected_stdout = expected_stdout
act.execute()
assert act.clean_stdout == act.clean_expected_stdout
| [
"pcisar@ibphoenix.cz"
] | pcisar@ibphoenix.cz |
e4e3718ba742d3e3651c307b8f760f019146916f | d6be5892131194e6ce46f09bc1bdab5f7f0b3d07 | /Week07/word_search_II.py | 693869d2a4a541336c9a5acdb5950b0838cda268 | [] | no_license | twoknowapes/algorithm010 | d2ba47084835d5bc800c38bdd08d27f886c21662 | 4686956781982fe8c81d37db6fbefec5ed77157a | refs/heads/master | 2022-12-04T03:04:17.107552 | 2020-08-21T09:38:38 | 2020-08-21T09:38:38 | 270,298,274 | 0 | 0 | null | 2020-06-07T12:23:18 | 2020-06-07T12:23:17 | null | UTF-8 | Python | false | false | 2,431 | py | import collections
from typing import List
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
END_OF_WORD = '#'
class Solution:
def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:
if not board or not board[0]: return []
if not words: return []
self.res = set()
# 构建 Trie
root = collections.defaultdict()
for word in words:
node = root
for char in word:
node = node.setdefault(char, collections.defaultdict())
node[END_OF_WORD] = END_OF_WORD
self.m, self.n = len(board), len(board[0])
for i in range(self.m):
for j in range(self.n):
if board[i][j] in root:
self._dfs(board, i, j, '', root)
return list(self.res)
def _dfs(self, board, i, j, cur_word, cur_dict):
cur_word += board[i][j]
cur_dict = cur_dict[board[i][j]]
if END_OF_WORD in cur_dict:
self.res.add(cur_word)
tmp, board[i][j] = board[i][j], '@'
for k in range(4):
x, y = i + dx[k], j + dy[k]
if 0 <= x < self.m \
and 0 <= y < self.n \
and board[x][y] != '@' \
and board[x][y] in cur_dict:
self._dfs(board, x, y, cur_word, cur_dict)
board[i][j] = tmp
def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:
# 构建 Trie
trie = {}
for word in words:
node = trie
for char in word:
node = node.setdefault(char, {})
node['#'] = True
def search(i, j, node, pre, visited):
if '#' in node:
res.add(pre)
for di, dj in ((-1, 0), (1, 0), (0, -1), (0, 1)):
_i, _j = i + di, j + dj
if -1 < _i < h \
and -1 < _j < w \
and board[_i][_j] in node \
and (_i, _j) not in visited:
search(_i, _j,
node[board[_i][_j]],
pre + board[_i][_j],
visited | {(_i, _j)})
res, h, w = set(), len(board), len(board[0])
for i in range(h):
for j in range(w):
if board[i][j] in trie:
search(i, j,
trie[board[i][j]],
board[i][j],
{(i, j)})
return list(res)
| [
"1007845131@qq.com"
] | 1007845131@qq.com |
ac5d720e1bfc20f5f698a3d339518a31eda1c75f | 14357a24d1db7e14e1bc6eb5c98ff85b50292257 | /文件/python-100-days练习/@property@setter.py | ba5ff41c4004affe8ff078e5b2f92abc436627ed | [] | no_license | wenxin8/Lenrning | 5b08cb814517fc0706c43fefae2c127ee2ec604b | a0a10c8d94773417e8363caed5dae7306de72e5c | refs/heads/master | 2021-04-20T07:43:24.378949 | 2020-05-04T13:44:36 | 2020-05-04T13:44:36 | 249,666,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 30 17:28:34 2020
@author: zhong
"""
class Student:
def __init__(self, name, age):
self.__name = name
self.__age = age
@property # 访问器
def name(self):
return self.__name
@name.setter # 修改器
def name(self, name):
self.__name = name
@property
def age(self):
return self.__age
@age.setter
def age(self, age):
self.__age = age
def main():
a = Student("张三", 25)
print(a.name, a.age)
a.name = "李四"
a.age = 30
print(a.name, a.age)
main() | [
"62378308+wenxin8@users.noreply.github.com"
] | 62378308+wenxin8@users.noreply.github.com |
031c7116657578383ab8dc215dca02a632155237 | 6b63e7c602588ad7f607cf5f1d1ca3eef6b3d54a | /Week_01/[26]删除排序数组中的重复项.py | d494f66852a41f478e0cec0f3ee39ae7f1d6c794 | [] | no_license | Jnewgeek/-algorithm015 | 5e497d89e0b51d39721e8f16955a3a25821d5ce5 | 8f7f1cfbe9f750a70c94c765ca22477f133f3a38 | refs/heads/master | 2022-12-31T13:50:46.785824 | 2020-10-26T13:48:11 | 2020-10-26T13:48:11 | 289,710,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,954 | py | # 给定一个排序数组,你需要在 原地 删除重复出现的元素,使得每个元素只出现一次,返回移除后数组的新长度。
#
# 不要使用额外的数组空间,你必须在 原地 修改输入数组 并在使用 O(1) 额外空间的条件下完成。
#
#
#
# 示例 1:
#
# 给定数组 nums = [1,1,2],
#
# 函数应该返回新的长度 2, 并且原数组 nums 的前两个元素被修改为 1, 2。
#
# 你不需要考虑数组中超出新长度后面的元素。
#
# 示例 2:
#
# 给定 nums = [0,0,1,1,1,2,2,3,3,4],
#
# 函数应该返回新的长度 5, 并且原数组 nums 的前五个元素被修改为 0, 1, 2, 3, 4。
#
# 你不需要考虑数组中超出新长度后面的元素。
#
#
#
#
# 说明:
#
# 为什么返回数值是整数,但输出的答案是数组呢?
#
# 请注意,输入数组是以「引用」方式传递的,这意味着在函数里修改输入数组对于调用者是可见的。
#
# 你可以想象内部操作如下:
#
# // nums 是以“引用”方式传递的。也就是说,不对实参做任何拷贝
# int len = removeDuplicates(nums);
#
# // 在函数里修改输入数组对于调用者是可见的。
# // 根据你的函数返回的长度, 它会打印出数组中该长度范围内的所有元素。
# for (int i = 0; i < len; i++) {
# print(nums[i]);
# }
#
# Related Topics 数组 双指针
# 👍 1597 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
def removeDuplicates(self, nums: List[int]) -> int:
if not nums or len(nums)==1:
return
i=0
for j in range(len(nums)):
if nums[j]!=nums[i]:
i+=1
nums[i]=nums[j]
# nums[:]=nums[:i+1]
for _ in range(i+1,len(nums)):
nums.pop()
return i+1
# leetcode submit region end(Prohibit modification and deletion)
| [
"jiangjun1102@hotmail.com"
] | jiangjun1102@hotmail.com |
67f12dfd5286bc7e93edff337e196619dbd993d3 | 4a293d52ad10b23e50fd6fa6cc27eef1cb8b8abc | /config.py | ac3459c4c05c3107d3326aca8789247f6c89ffae | [
"MIT"
] | permissive | tws0002/Saya | ef9aef951ab4417f99860577bb5448b73bcd4a06 | a8cb1ab8bd0a66545746095b50efd359469488b9 | refs/heads/develop | 2021-06-18T19:52:40.056919 | 2017-06-23T17:08:30 | 2017-06-23T17:08:30 | 104,034,300 | 1 | 0 | null | 2017-09-19T06:16:06 | 2017-09-19T06:16:06 | null | UTF-8 | Python | false | false | 1,397 | py | import yaml
import os
import platform
_CURRENTPATH = os.path.dirname(os.path.realpath(__file__))
def getLauncherConfig():
if os.environ.get('SAYA_CONFIG_PATH'):
config_file = os.path.join(os.environ.get('SAYA_CONFIG_PATH'), 'saya.yaml')
f = open(config_file, 'r')
CONFIG = yaml.load(f)
else:
f = open(os.path.join(_CURRENTPATH, 'config', 'saya.yaml'), 'r')
CONFIG = yaml.load(f)
print "\n[[ LOADING ]] :: Loading launcher config data."
print CONFIG
return CONFIG
def getUserConfig():
if os.environ.get('SAYA_USER_CONFIG_PATH'):
config_file = os.path.join(os.environ.get('SAYA_USER_CONFIG_PATH'), 'saya_user.yaml')
f = open(config_file, 'r')
CONFIG = yaml.laod(f)
else:
if platform.system() == 'Windows':
path = os.environ.get('APPDATA')
elif platform.system() == 'Linux' or 'Mac':
path = os.environ.get('HOME')
f = open(os.path.join(path, 'saya_user.yaml'), 'r')
CONFIG = yaml.load(f)
print "\n[[ LOADING ]] :: Loading Preset config data."
print CONFIG
return CONFIG
def parseUserData(data):
for i in range(len(data)):
project = data[i].get('project')
application = data[i].get('application')
version = data[i].get('version')
option = data[i].get('option')
def writeUserConfig():
pass | [
"auratus.lemma@gmail.com"
] | auratus.lemma@gmail.com |
ede7da990163194be761b4be64835d394d704b58 | 8a6f0aa713ee5992fa9462c3dac0666d329c61b2 | /src/virlHost/models.py | d2791066179480756f51328759c87775dbe1af4e | [] | no_license | titopluto/vlabs-server | 6632e2f49b097d688989465264972976734f1d2e | 830cc86d8aaf32ee6339e57a0ed6007688e7ccf3 | refs/heads/master | 2022-05-25T02:03:35.881734 | 2019-07-21T18:48:40 | 2019-07-21T18:48:40 | 192,233,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,372 | py | from random import randint
import datetime
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from virtualLab.models import VirtualLab
from simulation.models import Simulation
from simulation.constants import LAB_PER_VIRL, MAX_BAD_FLAG
LAB_PER_VIRL = LAB_PER_VIRL
# Create your models here.
class VirlHostQuerySet(models.query.QuerySet):
def not_assigned(self):
return self.filter(busy=False, online=True, usage__lt=LAB_PER_VIRL,
bad_flag__lte=MAX_BAD_FLAG)
class VirlHostManager(models.Manager):
def get_queryset(self):
return VirlHostQuerySet(self.model, using=self._db)
def random(self):
# count = self.aggregate.not_assigned()(count=Count('id'))['count']
# count = self.all().not_assigned().count()
# random_index = randint(0, count - 1)
# print (self.all().not_assigned())
# return self.all().not_assigned()[random_index]
# count = self.all().not_assigned().order_by('usage','last_action_time').count()
# random_index = randint(0, 1) if count > 0 else 0
return self.all().not_assigned().order_by('usage', 'last_action_time', 'bad_flag').first()
def less_busy(self):
return self.all().not_assigned().order_by('usage')[0]
class VirlHost(models.Model):
ip_address = models.GenericIPAddressField(_('IP-Address of VIRL Host'), unique=True)
# current_lab = models.ForeignKey(VirtualLab, related_name="assigned_lab", on_delete=models.CASCADE, blank=True, null=True)
users = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name="virl_user", blank=True)
simulation = models.ManyToManyField(Simulation, blank=True)
busy = models.BooleanField(default=False)
usage = models.IntegerField(default=0)
online = models.BooleanField(default=False)
bad_flag = models.IntegerField(default=0)
# last_op_time = models.DateTimeField()
last_action_time = models.DateTimeField(auto_now=True)
objects = VirlHostManager()
def __str__(self):
return self.ip_address
@property
def simulations(self):
return "#".join([str(sim.admin_display) for sim in self.simulation.all()])
@property
def users_list(self):
return "#".join([user.username for user in self.users.all()])
| [
"b.tito@dal.ca"
] | b.tito@dal.ca |
28960a2c252af82f0524c93a693e9b16dcce2b01 | 8ae6bc0b9da3be2b5c29b258bc981d0863499773 | /venv/bin/f2py3.9 | 8702a65ceebf63f9e1fa24f542a6fa1f4c9032bf | [] | no_license | p123hx/rel | 60fd34331af02464922316efa9e283fa7a148f27 | a9c9a385b66a243ec53ad550cbcc29654193e152 | refs/heads/master | 2023-08-23T04:49:01.242609 | 2021-10-22T23:59:42 | 2021-10-22T23:59:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | 9 | #!/Users/bj/PycharmProjects/pythonProject1/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"p123hx@gmail.com"
] | p123hx@gmail.com |
c44888b51398e66bd24229ba196d35bab9e6acdb | 7abc56728aace2fa848d1bc7f30bc6d9f9aeaab8 | /megamenu/apps.py | 688f7a1346ccb062ffc8dc6b4bccf7efff163b57 | [] | no_license | elyasa7/TMWCMS | f31e78ff18c7cba43e0f37d67dd78046e8509b9b | f12d8ab1e5066b6bf89273e923863dbb9a9ae1fe | refs/heads/master | 2020-03-22T13:33:53.098637 | 2018-07-08T06:57:40 | 2018-07-08T06:57:40 | 140,116,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class MegamenuConfig(AppConfig):
name = 'megamenu'
| [
"ilyas.j@open.tm"
] | ilyas.j@open.tm |
c679d012c33bf1ca2342b1a1f8505f495ac23103 | c6b0be6821851484097d508e4db26813a5ffe009 | /urlsmod/urlsmod/settings.py | 36e374a28736f2869f727e0fe32cbf12bd90cc76 | [] | no_license | salmanshaik-470/all_django | c6b2badd850f0c7284ee302e1ad06559f2b21eb3 | 4220ffb7fcc2a5bc6abdbc2c0f0ecd20505b6a85 | refs/heads/master | 2023-06-25T03:45:05.212427 | 2021-07-28T13:30:08 | 2021-07-28T13:30:08 | 387,466,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,346 | py | """
Django settings for urlsmod project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-ix)^j)a35b0l$m0wy)p+3!vn694x_nx204gy3+fjn@ir3zhyd*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app1'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'urlsmod.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'urlsmod.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[os.path.join(BASE_DIR,'static')]
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"ss21100@corp.ojas-it.com"
] | ss21100@corp.ojas-it.com |
69a8b88a7232530093bf5fbd65ab346492437fbc | 95367a7a88df25ed3aaaeb7c26427749fa814868 | /prog1/guia1/ej4.py | a095bab65dbb4ed2260eb5668edb0ef3c50fa1a8 | [] | no_license | levleonhardt/guias_Programacion1 | e83c27ba3594572842cf116e9d8e915d7cf27fd2 | c43a77ee13a4c1a49a22eafdfe77c4c16d120abd | refs/heads/main | 2023-09-04T07:37:39.441676 | 2021-10-15T05:20:09 | 2021-10-15T05:20:09 | 365,112,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | #Nombre de alumno: Leonardo Roman Leonhardt
#Leer dos números y decir cuál es el mayor.
num1 = float(input("\n Inserte un número: "))
num2 = float(input("\n Inserte otro número: "))
if (num1 > num2):
print("\n" + str(num1) + " es mayor que " + str(num2))
elif (num1 < num2):
print("\n" + str(num1) + " es menor que " + str(num2))
else:
print("\n" + str(num1) + " es igual a " + str(num2)) | [
"levleonhardt@gmail.com"
] | levleonhardt@gmail.com |
98acee6af1eb61d11c2f8b30039dd8a68e1f2ff4 | e8a9bdcf91350cf0371ebe5a1e481a29017d906b | /apps/message/apps.py | 199cdc9265cfd3bdb2e5a8d6f63f899130ff15f7 | [] | no_license | 0xiaobao0/wx_sm_app | 1082ab8da6aac849eb559479574326c9b27f740d | dd5cbdce04da65fa958ada3b8bf6184cfd73804d | refs/heads/master | 2022-12-19T17:50:50.614377 | 2019-08-12T10:42:46 | 2019-08-12T10:42:46 | 178,772,978 | 0 | 0 | null | 2022-12-08T01:48:27 | 2019-04-01T02:41:45 | Python | UTF-8 | Python | false | false | 122 | py | from django.apps import AppConfig
class MessageConfig(AppConfig):
name = 'message'
verbose_name = '用户消息' | [
"1920566573@qq.com"
] | 1920566573@qq.com |
e92090672df6dbc77947cca8dd3f20b98894a501 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_2/rffada002/question2.py | 5ad1e0412877bdf376192722edcf2c9130f0adb5 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,664 | py | print ("Welcome to the 30 Second Rule Expert")
print ("------------------------------------")
print ("Answer the following questions by selecting from among the options.")
seen=input("Did anyone see you? (yes/no)\n")
if (seen == 'no'):
sticky=input("Was it sticky? (yes/no)\n")
if (sticky == 'no'):
emausaurus=input("Is it an Emausaurus? (yes/no)\n")
if (emausaurus == 'no'):
cat=input("Did the cat lick it? (yes/no)\n")
if (cat == 'no'):
print ("Decision: Eat it.")
elif (cat == 'yes'):
healthy=input("Is your cat healthy? (yes/no)\n")
if (healthy == 'yes'):
print ("Decision: Eat it.")
elif (healthy == 'no'):
print ("Decision: Your call.")
elif (emausaurus == 'yes'):
megalosaurus=input("Are you a Megalosaurus? (yes/no)\n")
if (megalosaurus == 'yes'):
print ("Decision: Eat it.")
elif (megalosaurus == 'no'):
print ("Decision: Don't eat it.")
elif (sticky == 'yes'):
steak=input("Is it a raw steak? (yes/no)\n")
if (steak == 'no'):
cat=input("Did the cat lick it? (yes/no)\n")
if (cat == 'no'):
print ("Decision: Eat it.")
elif (cat == 'yes'):
healthy=input("Is your cat healthy? (yes/no)\n")
if (healthy == 'yes'):
print ("Decision: Eat it.")
elif (healthy == 'no'):
print ("Decision: Your call.")
elif (steak == 'yes'):
puma=input("Are you a puma? (yes/no)\n")
if (puma == 'yes'):
print ("Decision: Eat it.")
elif (puma == 'no'):
print ("Decision: Don't eat it.")
elif (seen == 'yes'):
friend=input("Was it a boss/lover/parent? (yes/no)\n")
if (friend == 'no'):
print ("Decision: Eat it.")
elif (friend == 'yes'):
price=input("Was it expensive? (yes/no)\n")
if (price == 'no'):
chocolate=input("Is it chocolate? (yes/no)\n")
if (chocolate == 'no'):
print ("Decision: Don't eat it.")
elif (chocolate == 'yes'):
print ("Decision: Eat it.")
elif (price == 'yes'):
cut=input("Can you cut off the part that touched the floor? (yes/no)\n")
if (cut == 'yes'):
print ("Decision: Eat it.")
elif (cut == 'no'):
print ("Decision: Your call.") | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
6442fef7126244ecc2d657e3a8f8f29b07ab0672 | 2db555aa649389e377d85dd33b09bf30bf3a58c8 | /sh/m.py | 598c11242b3d67b8412c221a55a69cc7492c9150 | [] | no_license | sinoory/django-jobfind | 21c91e78087f374af87491b1995f09ce1e78215d | da7dc57dbe861f2b2551e1a4a616d0c5f4d51d3c | refs/heads/master | 2022-11-21T02:43:25.206491 | 2020-07-19T05:34:39 | 2020-07-19T05:34:39 | 264,134,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,126 | py | # -*- coding:utf-8 -*-
#!/usr/bin/python
import sys,os,traceback,time
sys.path.append(os.path.join(os.path.dirname(__file__),"../pypub/utility"))
sys.path.append(os.path.join(os.path.dirname(__file__),"../pypub/web"))
from webLogin import LoginBroser
from getPage import HtmlReader
from QtPage import Render,WebkitRender
from uty import *
import urllib
from bs4 import BeautifulSoup
#from jobdb import ormsettingconfig
from jangopub import ormsettingconfig
if __name__=='__main__':
print "config ormsettingconfig"
ormsettingconfig()
from jobdb import Job,JobDbOpr,JobCompScoreOpr
import re
class BadUrl():
def __init__(self,url,reason,title=""):
self.url=url
self.reason=reason
self.urltitle=title
def toStr(self):
return "BadUrl<%s , %s , %s>" %(self.url,self.reason,self.urltitle)
def __unicode__(self):
return "BadUrl<%s , %s>" %(self,url,self.reason)
USER_STOPED=-1
UNDEFINDED=-2
class JobStrategy():
def isJobSuilt(self,jobstr,keysDict):
for k in keysDict:
p=jobstr.find(k.upper())
if p != -1:
#print "isJobSuilt hitKey="+k
return True,k,p
return False,0,0
class HtmlGetStrategy():
mExtralInfo={'jobDescribe':'','companyDesc':''}
lastDescConame=[]
def load(self,url):
r=HtmlReader(url,timeout=120)
r.run()
self.outdata=r.outdata
def data(self):
return self.outdata
def getDescribeIntrestingUrl(self):
return self.mExtralInfo['jobDetailPageUrl']
def needScore(self):
return False
def needJobCompDesc(self):
return True
def isDescValid(self):
return len(self.mExtralInfo['jobDescribe'])>5
def needIgnoreCompany(self,coname):
return False
class RenderHtmlGetStrategy(HtmlGetStrategy):
def load(self,url):
wr=WebkitRender(url,60,5)
wr.load()
self.date="%s" %wr.data()
def data(self):
return self.date
def getDescribeIntrestingUrl(self):
return self.mExtralInfo['companyUrl']
def needScore(self):
return True
def needJobCompDesc(self):
return False
def isDescValid(self):
return self.mExtralInfo['score']>=0
def needIgnoreCompany(self,nowconame):
if not nowconame in self.lastDescConame :
self.lastDescConame.append(nowconame)
print "Current Total companys : %d" %(len(self.lastDescConame))
return False
return True
class StrategyFactory():
def __init__(self,factype):
if factype==1:
self.htmlGetor=RenderHtmlGetStrategy()
self.jobOpr=JobCompScoreOpr()
print "StrategyFactory[RenderHtmlGetStrategy,JobCompScoreOpr]"
else:
self.htmlGetor=HtmlGetStrategy()
self.jobOpr=JobDbOpr()
print "StrategyFactory[HtmlGetStrategy,JobDbOpr]"
class Job51Adder():
unprocessedUrls=[]
isRuning=False
userStopped=False
mJobStrategy=JobStrategy()
def init(self):
self.unprocessedUrls=[]
self.userStopped=False
self.mHtmlGetStrategy.lastDescConame=[]
def setQuerryDict(self,querryDict):
self.mQuerryDic=querryDict
print "setQuerryDict querryDict=%s" %querryDict
self.mFilterKeys=querryDict.get("filterkeys").split(",")
print "self.mFilterKeys type=%s l=%s" %(type(self.mFilterKeys),self.mFilterKeys)
strategyFactory=StrategyFactory(int(self.mQuerryDic['serverActionType']))
self.mJobOprStrategy=strategyFactory.jobOpr
self.mHtmlGetStrategy=strategyFactory.htmlGetor
def addJob(self,keyword,jobarea,issuedate,startpage=1,endpage=50):
keyword=urllib.quote(keyword.encode('utf-8'))
self.init()
loop=startpage
isRuning=True
self.mFinishReason="FINISH_OK"
st=getCurTime() #from uty.py
while(loop<=endpage or endpage==-1):
jobs=UNDEFINDED
try:
jobs,url,totalpage=self.addOnePageJob(keyword,jobarea,issuedate,loop)
except Exception,ex:
err= "Exception ex=%s in addOnePageJob ,saved data in Error.txt" %(ex)
print err
saveFile("%s\n" %(err),"Error.txt",'a')
print traceback.print_exc()
print "addJob<<<<<<",loop,totalpage
if loop>=totalpage :
print loop,totalpage, "reach page end "+url
self.mFinishReason="REACH_END"
break;
elif jobs==USER_STOPED or self.userStopped:
print "user stopped,exit addJob"
self.mFinishReason="STOP"
break;
loop+=1;
print "====StartPage=%s===Loop=%s=EndPage=%s=================" %(startpage,loop,endpage)
print "============%s===>%s=======================================" %(st,getCurTime())
for bu in self.unprocessedUrls:
print bu.toStr()
def addOnePageJob(self,keyword,jobarea,issuedate,pageindex):
jbo = self.mJobOprStrategy #JobCompScoreOpr() #JobDbOpr()
pagesearchurl="http://search.51job.com/jobsearch/search_result.php?fromJs=1&jobarea="+jobarea+"&district=000000&funtype=0000&industrytype=00&issuedate="+issuedate+"&providesalary=99&keyword="+keyword+"&keywordtype="+self.mQuerryDic.get('keywordtype')+"&curr_page="+str(pageindex)+"&providesalary="+self.mQuerryDic.get("salaryarea")+"&lang=c&stype=2&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=01&companysize=99&lonlat=0%2C0&radius=-1&ord_field=0&list_type=0&fromType=14&dibiaoid=0&confirmdate=9"
ck="guid=14559615973991260064; ps=us%3DATgGbFAwBS1SNQ5mAHtSZ1FiUX5VYVIzBjBWeFphUWUMMVc5A2gBMVc3WzEAZFdnU2hQYlFgV35QGlBxCHQOSAFT%26%7C%26nv_3%3D; adv=adsnew%3D0%26%7C%26adsresume%3D1%26%7C%26adsfrom%3Dhttp%253A%252F%252Fbzclk.baidu.com%252Fadrc.php%253Ft%253D0fKL00c00f7A79n0jn-w00uiAsjtPT9y00000r6zeHY00000TD0ttK.THYdnyGEm6K85yF9pywd0Znqmvn3uWFhrHcsnj04nyRkP0Kd5HNKwHbknH0srRPafb7Krjw7P1TYwHDLrjN7rRcYPHwD0ADqI1YhUyPGujYzPH6zrjfYPHc1FMKzUvwGujYkPBuEThbqniu1IyFEThbqFMKzpHYz0ARqpZwYTjCEQLwzmyP-QWRkphqBQhPEUiqYTh7Wui4spZ0Omyw1UMNV5HT3rHc1nzu9pM0qmR9inAPDULunnvf1uZbYnRdgTZuupHNJmWcsI-0zyM-BnW04yydAT7GcNMI-u1YqFh_qnARkPHcYPjFbrAFWrHRsuHR4PhFWPjmkryPhrHKhuhc0mLFW5HD1PHfz%2526tpl%253Dtpl_10085_12986_1%2526l%253D1038955240%2526ie%253DUTF-8%2526f%253D8%2526tn%253Dbaidu%2526wd%253D51job%26%7C%26adsnum%3D789233; guide=1; nolife=fromdomain%3D; search=jobarea%7E%60020000%7C%21ord_field%7E%600%7C%21list_type%7E%600%7C%21recentSearch0%7E%602%A1%FB%A1%FA020000%2C00%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA00%A1%FB%A1%FA3%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA01%A1%FB%A1%FA99%A1%FB%A1%FAlinux%A1%FB%A1%FA0%A1%FB%A1%FA%A1%FB%A1%FA-1%A1%FB%A1%FA1456818574%A1%FB%A1%FA0%A1%FB%A1%FA%7C%21"
lb=LoginBroser()
#lb.nomalOpen("http://www.51job.com/");
reader=HtmlReader(pagesearchurl,cookie=ck,retrycnt=5)
#reader=HtmlReader(pagesearchurl,retrycnt=5,jsondata={})#use jsondata for post request
reader.run()
#BeautifulSoup will try to get encode from page <meta content="text/html; charset=gb2312">
#here the data from HtmlReader is already utf8,not meta gb2312,so pass utf-8 to its construct to force encoding,
#otherwise the BeautifulSoup can't work
soup=BeautifulSoup(reader.outdata,fromEncoding="gbk")
ttcnt=soup.findAll("input",{"id":"hidTotalPage"})[0].get("value")
print ttcnt, "process page %s" %pagesearchurl
#print soup.findAll("ul",{"class":"dict-basic-ul"})[0].li.strong.string
#find the table firest ,then find the job items
#a itme looks like : checkbox jobname companyname locate udatedata
#olTag=soup.findAll("table",{"class":"resultList resultListWide"})[0].findAll("tr",{"class":"tr0"})
#olTag=soup.findAll("div",{"class":"resultListDiv"})[0].findAll("tr",{"class":"tr0"})
olTag=soup.findAll("div",{"id":"resultList"})[0].findAll("div",{"class":"el"})
cnt,jloop=0,1
while jloop<len(olTag) :
if self.userStopped :
return USER_STOPED,pagesearchurl
j=olTag[jloop]
jloop+=1
jobDetailPageUrl=j.findAll("p",{"class":"t1"})[0].findAll("a")[0].get("href")
#needn't encode chinese to utf-8 with django db models
jobname=j.findAll("p",{"class":"t1"})[0].findAll("a")[0].get("title")
#cols[1].get_text() #.encode('utf-8') #remove tags
company=j.findAll("span",{"class":"t2"})[0].findAll("a")[0].get("title")
companyUrl=j.findAll("span",{"class":"t2"})[0].findAll("a")[0].get("href")
local=j.findAll("span",{"class":"t3"})[0].get_text() #.encode('utf-8')
salary=j.findAll("span",{"class":"t4"})[0].get_text() #.encode('utf-8')
ud=j.findAll("span",{"class":"t5"})[0].get_text()
self.mHtmlGetStrategy.mExtralInfo['jobDetailPageUrl']=jobDetailPageUrl
self.mHtmlGetStrategy.mExtralInfo['companyUrl']=companyUrl
if self.mHtmlGetStrategy.needIgnoreCompany(company):
print "Ignore company %s,the same as last one" %company
continue
self.getDescript(self.mHtmlGetStrategy.getDescribeIntrestingUrl())
jd=self.mHtmlGetStrategy.mExtralInfo['jobDescribe']
cd=self.mHtmlGetStrategy.mExtralInfo['companyDesc']
jbo.mExtraInfoDict=self.mHtmlGetStrategy.mExtralInfo
#print "%s %s\n %s \n %s " %(jobname,company,jd,cd)
if not self.mHtmlGetStrategy.isDescValid():
print "xxxxinvalid job descxxxxxx"
continue
if self.mHtmlGetStrategy.needJobCompDesc():
#jobstring="%s%s" %(jobname,jd.decode("utf-8")) #
jobstring="%s" %(jd.decode("utf-8")) #TODO why type(jd)=str but type(jobname)=u?
jd=jobstring
#isjobok,k,p=self.mJobStrategy.isJobSuilt(jobstring.upper(),self.mFilterKeys)
isjobok,k,p=self.mJobStrategy.isJobSuilt(jd.upper(),self.mFilterKeys)
if not isjobok:
print "Ignore Job<%s,%s> NOT contain keyword %s" %(jobname,company,self.mFilterKeys)
continue
else:
#set bold for keyword in job desc
jd=jd[:p]+"<font color='red'>"+k+"</font>"+jd[p+len(k):]
#print "get a job %s,%s" %(jobDetailPageUrl,jd)
#time.sleep(10)
job=Job(job=jobname,jobu=jobDetailPageUrl,local=local,coname=company,courl=companyUrl,jd=jd,cd=cd,udate=ud,salary=salary)
if not jbo.isJobExist(job):
jbo.add(job)
elif jbo.isOutData(job) :
jbo.update(job)
else:
print ("Exist %s, ignore" %(job))
cnt=cnt+1
return cnt,pagesearchurl,int(ttcnt)
#jbo.showAll()
def getDescript(self,joburl):
    """Fetch a 51job job-detail page and stash the parsed fields.

    On success populates self.mHtmlGetStrategy.mExtralInfo with
    'update' (posting date), 'jobDescribe', 'companyDesc' and,
    when needScore() is set, 'score'.  On failure the URL is recorded
    in self.unprocessedUrls together with a best-guess reason.
    """
    self.mHtmlGetStrategy.load(joburl)
    outdata=self.mHtmlGetStrategy.data()
    #print outdata
    try:
        #getDescript should print the right chinese content with the right fromEncoding
        s=BeautifulSoup(outdata,fromEncoding='gbk')
        #s=BeautifulSoup(outdata,features="html5lib")
        if self.mHtmlGetStrategy.needJobCompDesc():
            # Job description div, with <br/> turned back into newlines
            # before the remaining tags are stripped.
            jd=s.findAll("div",{"class":"bmsg job_msg inbox"})[0]
            sjd="%s" %jd
            sjd=sjd.replace("<br/>","\n")
            sjd=self.rmHtmlTag(sjd)
            # Company description div, tags stripped the same way.
            cd=s.findAll("div",{"class":"tmsg inbox"})[0]
            scd="%s" %cd
            scd=self.rmHtmlTag(scd)
            cdtype=s.findAll("p",{"class":"msg ltype"})[0].get_text() #return unicode
            cdtype=cdtype.encode("utf8") #unicode to str type , as sjd is str type
            cdtype=cdtype.replace("\t","").replace(" ","")
            sjd=cdtype+"\n"+sjd
            #print sjd
            # Presumably the five characters before the "发布" ("published")
            # marker are the posting date — TODO confirm against live pages.
            update_i = cdtype.find("发布")
            update=cdtype[update_i-5:update_i]
            self.mHtmlGetStrategy.mExtralInfo['update']=update
            self.mHtmlGetStrategy.mExtralInfo['jobDescribe']=sjd
            self.mHtmlGetStrategy.mExtralInfo['companyDesc']=scd
        if self.mHtmlGetStrategy.needScore():
            # Default to -1 so a parse failure below is distinguishable.
            self.mHtmlGetStrategy.mExtralInfo['score']=-1
            score=s.findAll('a',{"id":"company_url"})[0].get_text().strip()[4:][:-1]
            self.mHtmlGetStrategy.mExtralInfo['score']=score
            print "%s , %s" %(score,joburl)
    except Exception,ex:
        #print "%s" %outdata
        err= "Exception ex=%s in getDescript(%s),saved data in Error.txt" %(ex,joburl)
        print err
        if outdata==None :
            # Page could not be fetched at all; flag the posting as expired.
            self.mHtmlGetStrategy.mExtralInfo['update']="expired"
            return
        saveFile("%s\n" %(err),"Error.txt",'a')
        #saveFile("%s" %(outdata),"Error.txt",'a')
        #exit()
        #print traceback.print_exc()
        # Classify the failure so the bad URL can be reported later.
        jobstoped=s.findAll("div",{"class":"qxjyxszw"})
        sjd=""
        scd=""
        if len(jobstoped)>0:
            print jobstoped[0] #the job has expired
            self.unprocessedUrls.append(BadUrl(url=joburl,reason="Job expired"))
        elif joburl.find("search.51job.com")==-1:
            print ("Can't get job description from %s" %(joburl))
            self.unprocessedUrls.append(BadUrl(url=joburl,reason="invalid job url,Can't get job description"))
        elif s and s.title:
            self.unprocessedUrls.append(BadUrl(url=joburl,reason="Unknown reason",title=s.title))
        else:
            self.unprocessedUrls.append(BadUrl(url=joburl,reason="Unknown reason"))
def getUpdate(self,jobDetailUrl):
    """Return the posting date scraped from the given job-detail page.

    Delegates to getDescript(), which fills mExtralInfo['update']
    (set to "expired" when the page cannot be fetched).
    """
    self.getDescript(jobDetailUrl)
    return self.mHtmlGetStrategy.mExtralInfo['update']
def rmHtmlTag(self, html):
    """Strip HTML markup from *html*, turning block-level tags into newlines.

    Opening <br>, <div> and <p> tags become "\n", their closing tags are
    dropped, and any remaining tags are removed outright.
    """
    # Translate the explicit line-break style tags first so layout survives.
    for opening, closing in (("<br>", "</br>"), ("<div>", "</div>"), ("<p>", "</p>")):
        html = html.replace(opening, "\n").replace(closing, "")
    # Drop whatever markup is left, e.g. <span class="..."> or </b>.
    return re.sub(r'</?\w+[^>]*>', '', html)
def tst(self):
    # Smoke-test hook used during manual development runs.
    print "hello"
if __name__=="__main__":
jobadder=Job51Adder()
qd={'filterkeys':'android','keywordtype':'100','serverActionType':55}
jobadder.setQuerryDict(qd)
#jobadder.addJob("android","020000",'1',3,3)
#jobadder.tst()
jobadder.getDescript("https://jobs.51job.com/shanghai/116403419.html?s=01&t=0")
#jobadder.getDescript('http://jobs.51job.com/shanghai-ptq/74316976.html?s=0') #job url
#jobadder.getDescript('http://search.51job.com/list/co,c,2245593,000000,10,1.html') #company url
#jobadder.getDescript('http://search.51job.com/list/co,c,3289243,000000,10,1.html') #company url
#getDescript('http://ac.51job.com/phpAD/adtrace.php?ID=15736875&JobID=56483257')
| [
"sinoory@126.com"
] | sinoory@126.com |
5abe4a61676d8ca77cb32b913e0f0f2306942f13 | 679183a38194a3f51924d12e39e623373acf0c4c | /api/urls.py | 9303f84b9b7f78d922ae29bab863db0a3cbbda2b | [] | no_license | kuldeepyaduvanshi/djangorestapi_using_thirdparty_app | d7a9381f906290387db7bd82177fca5651df3b3e | d756957214710ffa17128b321629e0383559206d | refs/heads/master | 2023-06-29T11:17:35.929257 | 2021-08-11T08:33:40 | 2021-08-11T08:33:40 | 394,861,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | from django.contrib import admin
from django.urls import path
from api import views

# Route the site root to the API index view.
urlpatterns = [
    path('',views.index,name="home"),
]
| [
"kuldeepyaduvanshi03@gmail.com"
] | kuldeepyaduvanshi03@gmail.com |
c9fc713cf2794caa1273263e919685f3a03babad | 1da9c9cb2142ed110249e1cdba2833510ebf44c0 | /setup.py | d4a34ca47e8434a952b358672b4b9b7cbc2a736d | [
"BSD-3-Clause"
] | permissive | greggyNapalm/firebat-overlord | a1f9226e646b7d0db7f2c94a4b5e2e0e24010b7a | 01d6850c3ba09aa6b82b41ec0df1686d2fd76ec4 | refs/heads/master | 2020-05-18T10:57:15.491842 | 2013-02-12T07:33:58 | 2013-02-12T07:33:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,832 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for firebat-overlord (REST manager for load tests)."""
import sys

# Refuse to install on interpreters older than 2.7.
if not hasattr(sys, 'version_info') or sys.version_info < (2, 7, 0, 'final'):
    raise SystemExit("Firebat-manager requires Python 2.7 or later.")

# Prefer setuptools; fall back to distutils when it is unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

#from firemanager import __version__

# Runtime dependencies; versions are pinned where the project needs them.
install_requirements = [
    'Flask==0.9',
    'Flask-SQLAlchemy',
    'SQLAlchemy',
    'psycopg2',
    'celery==3.0.5',
    'requests',
    'validictory',
    'PyYAML',
    'jinja2',
    'simplejson',
]

# Long description is built from the README (changelog inclusion disabled).
with open("README.rst") as f:
    README = f.read()
#with open("docs/changelog.rst") as f:
#    CHANGES = f.read()
CHANGES = ''

setup(
    name='firebat-overlord',
    version='0.0.1',
    author='Gregory Komissarov',
    author_email='gregory.komissarov@gmail.com',
    description='REST application to manage load tests,' +
                ' store and display results.',
    long_description=README + '\n' + CHANGES,
    license='BSD',
    url='https://github.com/greggyNapalm/firebat-overlord',
    keywords=['phantom', 'firebat'],
    #scripts=[
    #    "bin/fire",
    #    "bin/daemon_fire",
    #    "bin/fire-chart",
    #],
    packages=[
        'fireoverlord',
        'fireoverlord.test',
    ],
    package_data={
        'docs': [
            'changelog.rst',
        ],
    },
    zip_safe=False,
    install_requires=install_requirements,
    tests_require=['nose'],
    test_suite='nose.collector',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        "Topic :: Software Development :: Testing :: Traffic Generation",
    ],
)
| [
"gregory.komissarov@gmail.com"
] | gregory.komissarov@gmail.com |
6d164cfc391db5ee4400cf4280c951a39b8e146a | 443585e4fc146308b18bc2f9234d0947da38d3e5 | /practice/yj/csv/Quiz2.py | cc4f15f0435d1e5ad3b650c79dc1a5fe19b07be9 | [] | no_license | ggyudongggyu/20201208commit | b524c4a7fb241cacaacffa5882c55d1d0ccba11f | fbb58a8ed06f454a2a79a9b8c75deabaec62b317 | refs/heads/master | 2023-02-02T21:59:51.518218 | 2020-12-24T14:32:21 | 2020-12-24T14:32:21 | 319,578,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | from matplotlib.pyplot import *
# Plot two line series with per-series markers/colors, then show the figure.
title('plot graph')
plot([1, 2, 3, 4], [10, 20, 30, 40], marker='.', color= 'green', label = '1st')
plot([1, 2, 3, 4], [30, 15, 25, 10], marker= '^' ,color = 'pink', label = '2nd')
# plot([1, 2, 3, 4], [15, 25, 15, 25], linestyle= '-.' ,color = 'red', label = '3rd')
# plot([1, 2, 3, 4], [20, 10, 30, 5], linestyle= '-' ,color = 'blue', label = '4th')
legend()
show()
| [
"donggyu0219@gmail.com"
] | donggyu0219@gmail.com |
dd1baa59268b60d7d8e6c9a30dd4be4fd8fe01c2 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/infra/rtclusterpol.py | 1b736c601de6b4c7027d78510986ca0e568afc10 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 4,589 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtClusterPol(Mo):
    """Relation MO tracking the infra:ClusterPol -> vns:CtrlrMgmtPol link.

    Auto-generated Cisco ACI model code (see the file header: do not
    modify by hand).  The relation is read-only and named by its target
    DN (rn format ``rtvnsClusterPol-[tDn]``).
    """

    # Relation metadata: source class, target class and access flags.
    meta = TargetRelationMeta("cobra.model.infra.RtClusterPol", "cobra.model.vns.CtrlrMgmtPol")

    meta.moClassName = "infraRtClusterPol"
    meta.rnFormat = "rtvnsClusterPol-[%(tDn)s]"
    meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
    meta.label = "Management Policy"
    meta.writeAccessMask = 0x40000000000001
    meta.readAccessMask = 0x4040000000000001
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False

    meta.parentClasses.add("cobra.model.infra.ClusterPol")

    meta.superClasses.add("cobra.model.reln.From")
    meta.superClasses.add("cobra.model.reln.Inst")
    meta.superClasses.add("cobra.model.pol.NFromRef")

    meta.rnPrefixes = [
        ('rtvnsClusterPol-', True),
    ]

    # Property definitions below are boilerplate shared by relation MOs.
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)

    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    # Target class/DN of the relation; tDn is the (create-only) naming prop.
    prop = PropMeta("str", "tCl", "tCl", 20603, PropCategory.REGULAR)
    prop.label = "Target-class"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 4934
    prop.defaultValueStr = "vnsCtrlrMgmtPol"
    prop._addConstant("unspecified", "unspecified", 0)
    prop._addConstant("vnsCtrlrMgmtPol", None, 4934)
    meta.props.add("tCl", prop)

    prop = PropMeta("str", "tDn", "tDn", 20602, PropCategory.REGULAR)
    prop.label = "Target-dn"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    meta.props.add("tDn", prop)

    meta.namingProps.append(getattr(meta.props, "tDn"))
    getattr(meta.props, "tDn").needDelimiter = True

    def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
        # tDn is the single naming property; forwarded to the Mo base.
        namingVals = [tDn]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"collinsctk@qytang.com"
] | collinsctk@qytang.com |
432518836c6dafda96f0ab65cbeec918fc3356c6 | 36dd2c72c876422fd6bd2a6853cf7d4165235497 | /rlscope/parser/exceptions.py | 13ce2ce4cbae77dfda884ee9d4ae478f7d32d560 | [
"Apache-2.0"
] | permissive | UofT-EcoSystem/rlscope | 2f83030c0b04099c5e98e344c4c36bd3f2b52c5b | cdd9bbdc2a3a832be24f20105b8c9fe28149cb63 | refs/heads/master | 2023-04-14T07:35:46.763061 | 2021-04-07T23:16:17 | 2021-04-07T23:16:17 | 161,432,140 | 42 | 1 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | """
RL-Scope related errors and exceptions.
"""
class RLScopeConfigurationError(Exception):
    """Raised when the host or container is not configured properly.

    Typical example: a required installation dependency is missing.
    """
class RLScopeAPIError(Exception):
    """Raised when the rlscope user API is used improperly."""
class RLScopeRunError(Exception):
    """Raised when running the training script and collecting trace files
    hits an error."""
class RLScopeAnalysisError(Exception):
    """Raised when an error occurs while processing trace files."""
| [
"jagleeso@gmail.com"
] | jagleeso@gmail.com |
9e78017150168223104eb9f48f00ba6c175ac6d7 | 5abb0468becfa788e039d6643d974bee6200a9c5 | /venv/bin/pip3 | 617555e50e924f0566f92f9e6eed25af19ec1d63 | [] | no_license | riyadeb4321/flaskproject | 40358ff33ddb369979f1b951979b59deb5afbb0e | 7d0783144d54795cda9ab59f295e8da6f2587606 | refs/heads/master | 2020-07-10T04:13:03.507846 | 2019-08-24T14:40:09 | 2019-08-24T14:40:09 | 204,164,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | #!/home/riya/flaskproject/venv/bin/python3
# -*- coding: utf-8 -*-
"""Console-script shim that dispatches to pip's entry point."""
import re
import sys

# pip < 10 exposed its entry point at the package top level; pip >= 10
# moved it into the private _internal package (and later into
# _internal.cli.main), so fall back progressively.
try:
    from pip import main
except ImportError:
    try:
        from pip._internal.cli.main import main
    except ImportError:
        from pip._internal import main

if __name__ == '__main__':
    # argv[0] may end in "-script.py" or ".exe" (Windows launchers);
    # strip that so pip reports a clean program name in usage output.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"riya.deb.fiem.cse16@teamfuture.in"
] | riya.deb.fiem.cse16@teamfuture.in | |
4864a29f82a72dfdd99d5f5cff7e8974e1509df4 | 5c192be6b95e996d64de3230cfeac081bb2d470f | /app/lib/email_address.py | 1904f23f104f3974198b9d569a62af1bc4274e6f | [] | no_license | AlexanderPease/personal-crm | fb78f77b8923b32f13b38cc6da41a8f8cf4e723f | b57122b34e6765ed28ad5e281666355b0e3ec5d5 | refs/heads/master | 2023-01-24T15:08:35.777894 | 2019-06-19T19:12:46 | 2019-06-19T19:12:46 | 178,596,749 | 1 | 0 | null | 2023-01-13T23:15:25 | 2019-03-30T18:32:40 | Python | UTF-8 | Python | false | false | 825 | py | from email.utils import parseaddr
def valid_email(email_str):
    """Return the address parsed out of *email_str*, or False if invalid.

    An address is accepted when it is non-empty, contains both '@' and
    '.', and has a non-empty local part (rejects e.g. "@example.com").
    """
    _, address = parseaddr(email_str)
    looks_like_email = address and '@' in address and '.' in address
    if looks_like_email and address.split('@')[0]:
        return address
    return False
def clean_email(email_str):
    """Return a canonical form of *email_str* for duplicate detection.

    The local part is lower-cased (ex. ``Myra.Gupta@gmail.com`` is
    equivalent to ``myra.gupta@gmail.com``), and the domain is
    lower-cased too since DNS names are case-insensitive (RFC 4343).

    Raises:
        ValueError: if *email_str* does not contain an '@'.
    """
    try:
        # rsplit keeps rare quoted local parts containing '@' intact,
        # where a plain split('@') would raise on the extra separator.
        address, domain = email_str.rsplit('@', 1)
    except ValueError:
        raise ValueError(f'invalid email address: {email_str!r}')
    return f'{address.lower()}@{domain.lower()}'
def generate_name(email_str):
    """Guess a display name from an email address.

    Useful when no explicit name is given: the local part is split on
    '.' and '_', ASCII digits are dropped, and words are title-cased.
    """
    local_part = email_str.split('@')[0]
    spaced = local_part.replace('.', ' ').replace('_', ' ')
    without_digits = ''.join(ch for ch in spaced if ch not in '0123456789')
    return without_digits.title()
| [
"zander@nomadhealth.com"
] | zander@nomadhealth.com |
140ef2d0fecf4cdd25d71b775c70a000324b70fc | 59bdad3fb33332b8d05f07107dcae45b6f9ba662 | /actividad_economica/models.py | c9ccd0e856045eb6458f3ea003ff83aeb7a30e62 | [] | no_license | dyronrh/hcmfront-for-test | 8bbad6ab75fd82fa84d75fa942e4d39033080934 | 911d97dffc3f788c0419233535a5e782cca776a5 | refs/heads/master | 2020-04-28T07:14:37.110512 | 2019-03-11T22:38:58 | 2019-03-11T22:38:58 | 175,084,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | from django.db import models
# Creating the "economic activity" model.
class ActividadEconomicaClass(models.Model):
    """Catalogue entry for an economic activity."""
    # Human-readable activity name; optional (nullable and blankable).
    actividad = models.CharField(max_length=50,null=True, blank=True)
    def __str__(self):
        # Shown in the Django admin and shell representations.
        return self.actividad
| [
"dyronrh@yahoo.es"
] | dyronrh@yahoo.es |
b08938d6aba6b5a1c75b63fb7c5acdf3789f9d01 | 2851eb68d4d8fc29270220cddedf4c15c6957ea9 | /badge_earning/api/v1/urls.py | 65c59ee0a4faa6e2124ee9038b50d91e80ae87c8 | [
"MIT"
] | permissive | andela-Taiwo/Badge-Earning | 8431ad9c75fedc79de37f36378c38eaafbb72a28 | 8d9073b552410391b2b54b6c7b4a2e71ae0632eb | refs/heads/master | 2023-03-02T17:12:19.514588 | 2021-01-03T15:08:32 | 2021-01-03T15:08:32 | 289,453,005 | 0 | 0 | MIT | 2021-02-11T06:44:14 | 2020-08-22T08:48:54 | Python | UTF-8 | Python | false | false | 1,303 | py | from dj_rest_auth.registration.views import (
RegisterView,
SocialAccountDisconnectView,
SocialAccountListView,
)
from django.conf.urls import include, url
from django.urls import path
from rest_framework.routers import DefaultRouter
from rest_framework_simplejwt.views import TokenRefreshView
from badge_earning.users.views import (
GoogleConnect,
GoogleLogin,
MyTokenObtainPairView,
UserViewSet,
)
# REST router exposing the user endpoints under /user/.
router = DefaultRouter()
router.register(r"user", UserViewSet, basename="apiv1_users")

urlpatterns = [
    path("accounts/", include("allauth.urls")),
    path("dj-rest-auth/", include("dj_rest_auth.urls")),
    # JWT auth endpoints.
    path("login/", MyTokenObtainPairView.as_view(), name="account_login"),
    path("token/refresh/", TokenRefreshView.as_view(), name="token_refresh"),
    path("signup/", RegisterView.as_view(), name="account_signup"),
    # Google OAuth.  The connect route previously reused the name
    # "google_login", which made reverse("google_login") ambiguous
    # (Django resolves duplicate names to the last match); it now has
    # its own distinct name.
    path("google/login/", GoogleLogin.as_view(), name="google_login"),
    path("google/connect/", GoogleConnect.as_view(), name="google_connect"),
    path(
        "socialaccounts/", SocialAccountListView.as_view(), name="social_account_list"
    ),
    path(
        "socialaccounts/<int:pk>/disconnect/",
        SocialAccountDisconnectView.as_view(),
        name="social_account_disconnect",
    ),
    url(r"^", include(router.urls)),
]
| [
"sokunbitaiwo82@gmail.com"
] | sokunbitaiwo82@gmail.com |
daaf7110d0464d08291fb7f7a191cb8182631fa6 | 27040f0d537c1898c9f1fce4db68b24588840987 | /7. Reverse Integer.py | 834d39db9bf66caba7c2392e1009abf6fb37a850 | [] | no_license | xtanmaygarg/LeetCodeSolutions | 0197474e92d4ef14676342d00933e764f8b29581 | 5fd06d2f0da222977c1ae6e4d219a682b3596341 | refs/heads/master | 2021-06-14T09:39:37.795785 | 2020-12-04T10:44:07 | 2020-12-04T10:44:07 | 254,488,075 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | class Solution:
def reverse(self, x: int) -> int:
    """Reverse the decimal digits of x; return 0 on signed 32-bit overflow."""
    sign = -1 if x < 0 else 1
    flipped = sign * int(str(abs(x))[::-1])
    # LeetCode constraint: the answer must fit in a signed 32-bit integer.
    return flipped if -2**31 <= flipped <= 2**31 - 1 else 0
| [
"xtanmaygarg@gmail.com"
] | xtanmaygarg@gmail.com |
c06c0274c985e9910fbd0062bf51e90b28498d7d | f337b4a80e11cd7071fe39b201f763d626702ba6 | /my_venv/lib/python3.6/hashlib.py | 44e407e2641079c8b185a49dee260ac3088cb349 | [] | no_license | zonakostic/lab_002 | 56d01953d0361cc9edc44d82c2343783ca0fc27d | 58c6f2cb3d512aac719fbced2c7c4e475b876015 | refs/heads/master | 2020-03-28T15:03:40.610861 | 2018-09-12T23:03:01 | 2018-09-12T23:03:01 | 148,551,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | /Users/zonakostic/anaconda3/lib/python3.6/hashlib.py | [
"zonakostic@g.harvard.edu"
] | zonakostic@g.harvard.edu |
d1b5a7fbeabcc5a841ca89087e84b297227bde56 | 0701e6b42e5c90c7592a2a2a7a82bee6081e73d7 | /training_script/cifar10_keras_sm_tf2_solution.py | b95fbbe72c87bcbf0db66f7e1dc45b7d96094e09 | [
"MIT-0"
] | permissive | gonsoomoon-ml/tensorflow-in-sagemaker-workshop | 2cbcfd5fe073a8b6c1179d87f64c5819b713357d | 88b534afdf3058a211c5f28cb4ec96a344e57635 | refs/heads/master | 2021-02-07T20:55:35.754708 | 2020-03-02T13:05:02 | 2020-03-02T13:05:02 | 244,076,111 | 0 | 0 | NOASSERTION | 2020-03-01T02:51:36 | 2020-03-01T02:51:35 | null | UTF-8 | Python | false | false | 10,135 | py | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.layers import Activation, Conv2D, Dense, Dropout, Flatten, MaxPooling2D, BatchNormalization
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
tf.get_logger().setLevel('INFO')
#tf.autograph.set_verbosity(1)

# CIFAR-10 image geometry and dataset layout.
HEIGHT = 32
WIDTH = 32
DEPTH = 3
NUM_CLASSES = 10
NUM_DATA_BATCHES = 5
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 10000 * NUM_DATA_BATCHES
INPUT_TENSOR_NAME = 'inputs_input'  # needs to match the name of the first layer + "_input"
def keras_model_fn(learning_rate, weight_decay, optimizer, momentum):
    """Build and compile the CIFAR-10 CNN.

    Args:
        learning_rate: base learning rate for the optimizer.
        weight_decay: decay value passed to the optimizer.
        optimizer: 'sgd', 'rmsprop' (case-insensitive), anything else -> Adam.
        momentum: momentum term, used only by SGD.

    Returns:
        A compiled Keras Sequential model.
    """
    # Three conv stages (32 -> 64 -> 128 filters) followed by a dense head.
    model = Sequential([
        Conv2D(32, (3, 3), padding='same', name='inputs', input_shape=(HEIGHT, WIDTH, DEPTH)),
        BatchNormalization(),
        Activation('relu'),
        Conv2D(32, (3, 3)),
        BatchNormalization(),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.2),

        Conv2D(64, (3, 3), padding='same'),
        BatchNormalization(),
        Activation('relu'),
        Conv2D(64, (3, 3)),
        BatchNormalization(),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.3),

        Conv2D(128, (3, 3), padding='same'),
        BatchNormalization(),
        Activation('relu'),
        Conv2D(128, (3, 3)),
        BatchNormalization(),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.4),

        Flatten(),
        Dense(512),
        Activation('relu'),
        Dropout(0.5),
        Dense(NUM_CLASSES),
        Activation('softmax'),
    ])

    # Learning-rate scaling factor; always 1 in this single-worker script.
    size = 1
    opt_name = optimizer.lower()
    if opt_name == 'sgd':
        opt = SGD(lr=learning_rate * size, decay=weight_decay, momentum=momentum)
    elif opt_name == 'rmsprop':
        opt = RMSprop(lr=learning_rate * size, decay=weight_decay)
    else:
        opt = Adam(lr=learning_rate * size, decay=weight_decay)

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
def get_filenames(channel_name, channel):
    """Return the TFRecord file path(s) for a named data channel.

    Args:
        channel_name: one of 'train', 'validation' or 'eval'.
        channel: directory holding the channel's TFRecord file.

    Raises:
        ValueError: for any other channel name.
    """
    if channel_name not in ('train', 'validation', 'eval'):
        raise ValueError('Invalid data subset "%s"' % channel_name)
    return [os.path.join(channel, channel_name + '.tfrecords')]
def train_input_fn():
    # The three channel pipelines below read paths and hyper-parameters from
    # the module-level `args` parsed in the __main__ block.
    return _input(args.epochs, args.batch_size, args.train, 'train')


def eval_input_fn():
    return _input(args.epochs, args.batch_size, args.eval, 'eval')


def validation_input_fn():
    return _input(args.epochs, args.batch_size, args.validation, 'validation')
def _input(epochs, batch_size, channel, channel_name):
    """Build the tf.data pipeline for one channel's TFRecord file.

    Records are parsed in parallel, repeated indefinitely, shuffled for
    the training channel only, batched (dropping the remainder) and
    prefetched.

    NOTE(review): the `epochs` argument is unused — repeat() runs
    indefinitely and model.fit bounds the epochs instead; confirm intended.
    """
    filenames = get_filenames(channel_name, channel)
    dataset = tf.data.TFRecordDataset(filenames)
    #dataset = dataset.interleave(tf.data.TFRecordDataset, cycle_length=3)

    # Parse records.
    dataset = dataset.map(_dataset_parser, num_parallel_calls=10)
    dataset = dataset.repeat()

    # Potentially shuffle records.
    if channel_name == 'train':
        # Ensure that the capacity is sufficiently large to provide good random
        # shuffling.
        buffer_size = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * 0.4) + 3 * batch_size
        dataset = dataset.shuffle(buffer_size=buffer_size)

    # Batch it up.
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return dataset
def _train_preprocess_fn(image):
    """Preprocess a single training image of layout [height, width, depth]."""
    # Resize the image to add four extra pixels on each side.
    image = tf.image.resize_with_crop_or_pad(image, HEIGHT + 8, WIDTH + 8)

    # Randomly crop a [HEIGHT, WIDTH] section of the image.
    image = tf.image.random_crop(image, [HEIGHT, WIDTH, DEPTH])

    # Randomly flip the image horizontally.
    image = tf.image.random_flip_left_right(image)

    return image
def _dataset_parser(value):
    """Parse a CIFAR-10 record from value.

    Decodes the raw uint8 image bytes, reshapes from channel-first to
    [HEIGHT, WIDTH, DEPTH], applies training augmentation, and returns
    (image, one-hot label).
    """
    featdef = {
        'image': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([], tf.int64),
    }

    example = tf.io.parse_single_example(value, featdef)
    image = tf.io.decode_raw(example['image'], tf.uint8)
    image.set_shape([DEPTH * HEIGHT * WIDTH])

    # Reshape from [depth * height * width] to [depth, height, width].
    image = tf.cast(
        tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),
        tf.float32)
    label = tf.cast(example['label'], tf.int32)
    # NOTE(review): random augmentation runs for every channel (train,
    # validation AND eval) since this parser is shared — confirm intended.
    image = _train_preprocess_fn(image)
    return image, tf.one_hot(label, NUM_CLASSES)
def save_model(model, output):
    # Export under a "1" version subdirectory (TF Serving-style layout).
    tf.saved_model.save(model, output+'/1/')
    logging.info("Model successfully saved at: {}".format(output))
    return
def main(args):
    """Train, evaluate and export the CIFAR-10 Keras model.

    Builds the three channel datasets, trains with a per-epoch checkpoint
    callback, logs the final eval loss/accuracy, and writes a SavedModel
    to args.model_output_dir.
    """
    logging.info("getting data")
    train_dataset = train_input_fn()
    eval_dataset = eval_input_fn()
    validation_dataset = validation_input_fn()

    logging.info("configuring model")
    model = keras_model_fn(args.learning_rate, args.weight_decay, args.optimizer, args.momentum)
    callbacks = []
    # ----- modified section (path fix) -----
    callbacks.append(ModelCheckpoint(args.model_output_dir + '/checkpoint-{epoch}.h5'))
    logging.info("Starting training")
    model.fit(train_dataset,
              steps_per_epoch=(num_examples_per_epoch('train') // args.batch_size),
              epochs=args.epochs,
              validation_data=validation_dataset,
              validation_steps=(num_examples_per_epoch('validation') // args.batch_size),
              callbacks=callbacks)

    score = model.evaluate(eval_dataset, steps=num_examples_per_epoch('eval') // args.batch_size,
                           verbose=0)

    logging.info('Test loss:{}'.format(score[0]))
    logging.info('Test accuracy:{}'.format(score[1]))

    # ----- modified section (path fix) -----
    return save_model(model, args.model_output_dir)
def num_examples_per_epoch(subset='train'):
    """Return the number of CIFAR-10 examples in the given split.

    Raises:
        ValueError: if subset is not 'train', 'validation' or 'eval'.
    """
    split_sizes = {'train': 40000, 'validation': 10000, 'eval': 10000}
    if subset not in split_sizes:
        raise ValueError('Invalid data subset "%s"' % subset)
    return split_sizes[subset]
if __name__ == '__main__':
    # SageMaker script-mode entry point: channel paths default to the
    # SM_CHANNEL_* environment variables injected by the training job.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--train',
        type=str,
        required=False,
        default=os.environ['SM_CHANNEL_TRAIN'],  # ----- modified section (default path fix) -----
        help='The directory where the CIFAR-10 input data is stored.')
    parser.add_argument(
        '--validation',
        type=str,
        required=False,
        default=os.environ['SM_CHANNEL_VALIDATION'],  # ----- modified section (default path fix) -----
        help='The directory where the CIFAR-10 input data is stored.')
    parser.add_argument(
        '--eval',
        type=str,
        required=False,
        default=os.environ['SM_CHANNEL_EVAL'],  # ----- modified section (default path fix) -----
        help='The directory where the CIFAR-10 input data is stored.')
    # ----- modified section (argument added) -----
    parser.add_argument(
        '--model_output_dir',
        type=str,
        default=os.environ.get('SM_MODEL_DIR'))
    parser.add_argument(
        '--model_dir',
        type=str,
        required=True,
        help='The directory where the model will be stored.')
    parser.add_argument(
        '--weight-decay',
        type=float,
        default=2e-4,
        help='Weight decay for convolutions.')
    parser.add_argument(
        '--learning-rate',
        type=float,
        default=0.001,
        help="""\
        This is the inital learning rate value. The learning rate will decrease
        during training. For more details check the model_fn implementation in
        this file.\
        """)
    parser.add_argument(
        '--epochs',
        type=int,
        default=10,
        help='The number of steps to use for training.')
    parser.add_argument(
        '--batch-size',
        type=int,
        default=128,
        help='Batch size for training.')
    parser.add_argument(
        '--optimizer',
        type=str,
        default='adam')
    parser.add_argument(
        '--momentum',
        type=float,
        # NOTE: argparse converts a string default through type=, so this
        # yields the float 0.9.
        default='0.9')

    args = parser.parse_args()
    main(args)
| [
"daekeun@amazon.com"
] | daekeun@amazon.com |
7cd5bf667dfd5853848da023118f67243641925b | e1adcd0173cf849867144a511c029b8f5529b711 | /ros_ws/Archive/ProductFiles20180213/positionControlPackage.py | c6e35e7f5eaadfa197321d29d10af5ea39366fea | [] | no_license | adubredu/cartbot_arm_subsystem | 20a6e0c7bacc28dc0486160c6e25fede49f013f2 | 3e451272ddaf720bc7bd24da2ad5201b27248f1c | refs/heads/master | 2022-01-04T23:01:25.061143 | 2019-05-14T16:45:02 | 2019-05-14T16:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,466 | py | import argparse
import sys
import struct
import time
import json
import rospy
from math import *
from std_msgs.msg import (
UInt16,
)
from StringIO import StringIO
import baxter_interface as baxter
import speech_recognition as SR
from geometry_msgs.msg import (
PoseStamped,
Pose,
Point,
Quaternion,
)
from std_msgs.msg import Header
from baxter_core_msgs.srv import (
SolvePositionIK,
SolvePositionIKRequest,
)
def xyzToAngles(limbs, x, y, z, xr, yr, zr, wr):
    """Call Baxter's IK service to turn an endpoint pose into joint angles.

    Args:
        limbs: limb name, 'left' or 'right'.
        x, y, z: target endpoint position in the base frame.
        xr, yr, zr, wr: target endpoint orientation quaternion.

    Returns:
        dict mapping joint names to angles on success, or the string
        "invalid" when the solver finds no solution.  Exits the process
        if the IK service cannot be reached within 5 seconds.
    """
    ns = "ExternalTools/" + limbs + "/PositionKinematicsNode/IKService"
    iksvc = rospy.ServiceProxy(ns, SolvePositionIK)
    ikreq = SolvePositionIKRequest()
    hdr = Header(stamp=rospy.Time.now(), frame_id='base')
    pose = PoseStamped(
        header=hdr,
        pose=Pose(
            position=Point(
                x=x,
                y=y,
                z=z,
            ),
            orientation=Quaternion(
                x=xr,
                y=yr,
                z=zr,
                w=wr,
            ),
        ),
    )

    ikreq.pose_stamp.append(pose)
    try:
        rospy.wait_for_service(ns, 5.0)
        resp = iksvc(ikreq)
    except (rospy.ServiceException, rospy.ROSException), e:
        rospy.logerr("Service call failed: %s" % (e,))
        exit()
    # The response packs one result/seed byte per requested pose.
    resp_seeds = struct.unpack('<%dB' % len(resp.result_type),
                               resp.result_type)
    if (resp_seeds[0] != resp.RESULT_INVALID):
        seed_str = {
            ikreq.SEED_USER: 'User Provided Seed',
            ikreq.SEED_CURRENT: 'Current Joint Angles',
            ikreq.SEED_NS_MAP: 'Nullspace Setpoints',
        }.get(resp_seeds[0], 'None')
        # Format solution into Limb API-compatible dictionary
        limb_joints = dict(zip(resp.joints[0].name, resp.joints[0].position))
        return limb_joints
    else:
        print("INVALID POSE - No Valid Joint Solution Found.")
        return "invalid"
def euler2Quat(xr, yr, zr):
    """Convert Euler angles given in degrees to a quaternion.

    Follows the heading/attitude/bank convention: yr is the heading
    rotation, zr the attitude and xr the bank.  Returns a dict with the
    four components under the keys 'qw', 'qx', 'qy' and 'qz'.
    """
    # The quaternion components are built entirely from the sines and
    # cosines of the half-angles, so convert and halve up front.
    half_bank = radians(xr) / 2
    half_heading = radians(yr) / 2
    half_attitude = radians(zr) / 2
    ch, sh = cos(half_heading), sin(half_heading)
    ca, sa = cos(half_attitude), sin(half_attitude)
    cb, sb = cos(half_bank), sin(half_bank)
    return {
        'qw': ch * ca * cb - sh * sa * sb,
        'qx': sh * sa * cb + ch * ca * sb,
        'qy': sh * ca * cb + ch * sa * sb,
        'qz': ch * sa * cb - sh * ca * sb,
    }
def moveOnAxis(limb, axis, dist, speed):
    ## Moves arm on x, y, or z axis keeping orientation constant
    # speed is in m/s
    # dist in m
    # limb is a handle to a limb object
    # axis is 'x', 'y' or 'z' (a key of the position dict below)
    # Returns the limb's endpoint pose after the move (or after aborting).
    # Work out which arm this is from the first joint's name prefix.
    if 'left' in limb.joint_names()[0]: limbName = 'left'
    else: limbName = 'right'
    print(limbName)
    # Placeholder values; all three entries are overwritten from the
    # current endpoint pose immediately below.
    position = {'x':0, 'y':1, 'z':2}
    pose = limb.endpoint_pose()
    position['x'] = pose['position'][0]
    position['y'] = pose['position'][1]
    position['z'] = pose['position'][2]
    orient = pose['orientation']
    # Fixed control period: 0.05 s per frame (20 frames per second).
    secPframe = .05
    # Total frames = travel time (|dist| / speed) divided by the frame period.
    frames = int(abs(dist)*(1/float(speed))*(1/secPframe))
    if frames == 0: return limb.endpoint_pose()
    distPframe = float(dist)/float(frames)
    limb.set_joint_position_speed(1)
    rate = rospy.Rate(1/secPframe)
    for i in range(0, frames):
        # Advance the target one step along the chosen axis, keeping the
        # original orientation, and solve IK for the new target.
        position[axis] += distPframe
        jointPos = xyzToAngles(limbName, position['x'], position['y'], position['z'], orient[0], orient[1], orient[2], orient[3])
        if jointPos != "invalid":
            # Check if it is minor move. if it is not, use smoother movement function
            minorMove = True
            actualJointPos = limb.joint_angles()
            # Any single joint needing more than 0.8 rad counts as a big move.
            for joint, angle in jointPos.iteritems():  # Python 2 dict API
                if abs(angle-actualJointPos[joint]) > .8: minorMove = False
            if minorMove:
                limb.set_joint_positions(jointPos)
            else:
                print('bigmove')
                limb.move_to_joint_positions(jointPos, timeout=3, threshold=.02)
        else:
            # No IK solution for the next step: stop and report where we are.
            print("Can't Move Here")
            return limb.endpoint_pose()
        rate.sleep()
    return limb.endpoint_pose()
def playPositionFile(fPath, lLimb, rLimb):
    # Moves both limbs through the joint-position waypoints stored in a file
    # fPath: string identifying path to file; its content is a JSON-like list
    #        of waypoints, each with 'left' and 'right' entries that are either
    #        joint-position dicts or '' (meaning "skip this arm")
    # lLimb: handle to the left limb 'Limb' object
    # rLimb: handle to the right limb 'Limb' object
    with open(fPath, 'r') as f:
        fText = f.read()
    # Stored text may use single quotes; json.loads requires double quotes.
    fText = fText.replace("'", '"')
    wpArray = json.loads(fText)
    lLimb.set_joint_position_speed(.5)
    rLimb.set_joint_position_speed(.5)
    # NOTE(review): rate is created but never slept on in the loop below.
    rate = rospy.Rate(1000)
    for wp in wpArray:
        lPos = wp['left']
        rPos = wp['right']
        # move left arm first, then the right arm (sequential, not simultaneous)
        if lPos != '':
            lLimb.move_to_joint_positions(lPos)
        if rPos != '':
            rLimb.move_to_joint_positions(rPos)
    # NOTE(review): the return statement that follows this loop calls
    # rLimb.endpoint_pose without parentheses, so the tuple's second element
    # is the bound method itself rather than a pose -- confirm intent.
return (lLimb.endpoint_pose(), rLimb.endpoint_pose) | [
"alphonsusbq436@gmail.com"
] | alphonsusbq436@gmail.com |
eca84022f847a571fc5c438f677b1dcb3f53d265 | 2f8acb743e653d63a88e37a37a95cc09ca856ecc | /LuizaLabsManagerEmployee/wsgi.py | 1d55bb5c6ed69dceb204a33be16607e2d969f25a | [
"MIT"
] | permissive | sillaslima/labs-manager | 9cc67b0af63e479ece1136597bc7dc67d38d2966 | c5cf3bee7d5299cdfdd689e4f8ed737551f9ff07 | refs/heads/master | 2022-05-07T19:21:02.326255 | 2019-08-14T21:35:18 | 2019-08-14T21:35:18 | 202,422,388 | 0 | 0 | null | 2022-04-22T22:10:20 | 2019-08-14T20:37:16 | Python | UTF-8 | Python | false | false | 425 | py | """
WSGI config for LuizaLabsManagerEmployee project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'LuizaLabsManagerEmployee.settings')
application = get_wsgi_application()
| [
"sillas.lima@semparar.net"
] | sillas.lima@semparar.net |
3061d53096759d45f9136d8e0763ad7bd3bfe32e | a7f9f363af0971555b6f23dbf5ba07d009e5332c | /hair_removal/src/datasets/melanoma_dataset.py | 6187c897aedb4a502f00378e9ad1d38da3ac53df | [] | no_license | gkrry2723/2020_summer_proj-master | 0320e385ed5b3b757cb008af9220a7d062f4844c | ea24616b13c811295e891b41c58ff587b9322f97 | refs/heads/main | 2023-08-16T08:53:01.466163 | 2021-10-08T02:32:44 | 2021-10-08T02:32:44 | 331,607,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,768 | py | import torch
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
class MelanomaDataset(Dataset):
    """Melanoma dataset: serves {'image', 'label'} samples from a CSV manifest.

    Images are loaded from root_dir either as JPEG (via PIL) or as DICOM,
    scaled by 1/255 and returned as float32 arrays.
    """
    def __init__(self, csv_file, root_dir, label_type='target', img_format='dcm', transform=None):
        """
        Args:
            csv_file (string): Path to the csv file with annotations.
            root_dir (string): Directory with all the images.
            label_type (string): Label type for each task.
                * For the hair removal task -> 'hair'
                * For the classification task -> 'target'
            img_format (string): Image data type to load.
                * dcm -> 'dcm'
                * jpg -> 'jpg'
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.df = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.label_type = label_type
        self.img_format = img_format
        self.transform = transform
    def __len__(self):
        # Dataset size == number of rows in the manifest.
        return len(self.df)
    def __getitem__(self, idx):
        # Accept tensor indices (e.g. from a sampler) as plain ints/lists.
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_path = '{}/{}.{}'.format(self.root_dir, self.df.iloc[idx]['image_name'], self.img_format)
        if self.img_format == 'jpg':
            img = Image.open(img_path)
            # Scale raw pixel values by 1/255 and cast to float32.
            img = np.array(img) / 255
            img = np.float32(img)
        else:
            # NOTE(review): pydicom is used here but is not imported at the
            # top of this file; this branch raises NameError unless pydicom
            # is made available elsewhere -- confirm.
            ds = pydicom.read_file(img_path)
            arr = ds.pixel_array
            arr_scaled = arr / 255
            img = arr_scaled
            img = np.float32(img)
        label = self.df.iloc[idx][self.label_type]
        if self.transform:
            img = self.transform(img)
        data_dict = {'image': img, 'label': label}
        return data_dict
    # the following code snippet uses tensorflow.
    # import os
    # os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    # import tensorflow as tf
    # import tensorflow_io as tfio
    # def __getitem__(self, idx):
    #     if torch.is_tensor(idx):
    #         idx = idx.tolist()
    #     img_path = '{}/{}.{}'.format(self.root_dir, self.df.iloc[idx]['image_name'], self.img_format)
    #     image_bytes = tf.io.read_file(img_path)
    #     image = tfio.image.decode_dicom_image(image_bytes, dtype=tf.uint16)
    #     print(image[0].shape)
    #     print(image[0][0,0,:])
    #     # print(image[0][0][0][0], image.shape)
    #     img = image[0] / 255
    #     img = np.float32(img)
    #     label = self.df.iloc[idx][self.label_type]
    #     if self.transform:
    #         img = self.transform(img)
    #     data_dict = {'image': img, 'label': label}
    #     return data_dict
| [
"noreply@github.com"
] | noreply@github.com |
1f8d7a2527100b7160bb303a90d246c683700667 | d79ab29338f0eb6ecd8dfa19f0ca2f756fd74029 | /envkey/__init__.py | acbb1dbdbe4d1f1513a48992379e221c5029ddc3 | [
"MIT"
] | permissive | rajeshv23/envkey-python | 5e774a10790ee8f7fc890e95329934b3007a7a14 | 9e05d704fb921c710606f27fda2ecf9dac2b15f0 | refs/heads/master | 2021-01-05T10:54:59.698509 | 2019-06-12T14:23:35 | 2019-06-12T14:23:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | from .loader import load
from .fetch import fetch_env
load(is_init=True)
__all__ = ['load', 'fetch_env'] | [
"dane.schneider@gmail.com"
] | dane.schneider@gmail.com |
1c0c56639b1172b5288228661379263b7e8f9db6 | 6e7845c694517a1eb0d7444692e2bd11e5495e43 | /api/testing/test_prediction.py | 110b13203c36874799d8f71ae472001a0de3d8b8 | [] | no_license | DataDima90/flask-ml-api | f8b354134e0348b4a3d770faa35215720f87361d | bd94a16fe6cd9a34d7e9efeb19c05ac3749bfe30 | refs/heads/main | 2023-03-29T07:27:12.754910 | 2021-03-27T13:48:21 | 2021-03-27T13:48:21 | 352,057,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | # tests/test_prediction.py
from api.endpoints.prediction import prediction_api
from flask import Flask
import pytest
import json
app = Flask(__name__)
app.register_blueprint(prediction_api)
@pytest.fixture
def client():
with app.test_client() as client:
yield client
def test_predict_single(client):
response = client.get(
"/prediction",
data=json.dumps({
"pl": 2,
"sl": 2,
"pw": 0.5,
"sw": 3}),
content_type="application/json")
assert response.status_code == 200
assert json.loads(response.get_data(as_text=True)) is not None | [
"dima.wilhelm@naska.io"
] | dima.wilhelm@naska.io |
ac929574147f1bb88f631ebadaddaf5330d57ace | 58e1c6613e4c6c70e30bb8936d66dc6122f83bdb | /weechat_otr_test/test_is_a_channel.py | adb862377fe5092088271dfc5435564e5b3aec5d | [] | no_license | fauno/weechat-otr | 56ad77aac263dca47094fad33c9791d4f44602ed | 649025f5a0876a096be475bba7a6e4271b833156 | refs/heads/master | 2020-12-25T09:18:23.974057 | 2014-06-12T06:36:28 | 2014-06-12T06:36:28 | 21,006,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | # -*- coding: utf-8 -*-
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
# pylint: disable=too-many-public-methods
from __future__ import unicode_literals
from weechat_otr_test.weechat_otr_test_case import WeechatOtrTestCase
import weechat_otr
class IsAChannelTestCase(WeechatOtrTestCase):
    """Tests for weechat_otr.is_a_channel: each of the four channel
    prefixes (#, &, + and !) is recognised, while a bare nick is not."""
    def test_hash(self):
        self.assertTrue(weechat_otr.is_a_channel('#channel'))
    def test_ampersand(self):
        self.assertTrue(weechat_otr.is_a_channel('&channel'))
    def test_plus(self):
        self.assertTrue(weechat_otr.is_a_channel('+channel'))
    def test_bang(self):
        self.assertTrue(weechat_otr.is_a_channel('!channel'))
    def test_not_a_channel(self):
        self.assertFalse(weechat_otr.is_a_channel('nick'))
| [
"matthewm@boedicker.org"
] | matthewm@boedicker.org |
9af4f8b44b935d8e148465590a7901bc74ffdc8d | 562d731d32a44b6822e821fa41fc53208cea5577 | /users/views.py | 5e7ad7699f96082419f9fef474d631442545e42f | [] | no_license | bmugenya/CSC-314 | b768b0eed7c79a79e343e5ee20b45c967f8e6c98 | 62ce610bc1c02f21daffeb4676721e0751af1c3d | refs/heads/master | 2021-05-07T20:17:18.466875 | 2021-04-01T16:05:14 | 2021-04-01T16:05:14 | 108,950,643 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py | from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.forms import UserCreationForm
# Create your views here.
def logout_view(request):
    """Log the user out, then redirect to the learning_logs index page."""
    logout(request)
    return HttpResponseRedirect(reverse('learning_logs:index'))
def register(request):
    """Register a new user.

    GET: show an empty registration form.
    POST: validate the submitted form; on success create the user, log
    them in and redirect to the learning_logs index page.  On validation
    failure the bound form (with errors) is re-rendered.
    """
    if request.method != 'POST':
        # Display blank registration form.
        form = UserCreationForm()
    else:
        # Process completed form.
        form = UserCreationForm(data=request.POST)
        if form.is_valid():
            new_user = form.save()
            # Log the user in and then redirect to home page.
            authenticated_user = authenticate(username=new_user.username,
                password=request.POST['password1'])
            login(request, authenticated_user)
            return HttpResponseRedirect(reverse('learning_logs:index'))
    context = {'form': form}
    return render(request, 'users/register.html', context)
| [
"bmugenya@gmail.com"
] | bmugenya@gmail.com |
0e953c23f56192a0680bb9e254029e7402f14cae | 2c9d5c092dbb0ec9e980bc429c3fd63d27c679ef | /homeassistant/components/zwave_js/addon.py | ff74b5d5a44efff55355670cf638e92c0ac4cc8d | [
"Apache-2.0"
] | permissive | zachowj/home-assistant | bbc3ecdf6981dc21522ea0d0db66b7accdba0a7d | f3b2624be30b650a8a385e37f04ad7151deced3d | refs/heads/dev | 2023-02-24T11:27:46.415182 | 2021-06-03T21:12:39 | 2021-06-03T21:12:39 | 238,241,447 | 2 | 0 | Apache-2.0 | 2023-02-22T06:15:52 | 2020-02-04T15:41:18 | Python | UTF-8 | Python | false | false | 11,009 | py | """Provide add-on management."""
from __future__ import annotations
import asyncio
from dataclasses import dataclass
from enum import Enum
from functools import partial
from typing import Any, Callable, TypeVar, cast
from homeassistant.components.hassio import (
async_create_snapshot,
async_get_addon_discovery_info,
async_get_addon_info,
async_install_addon,
async_set_addon_options,
async_start_addon,
async_stop_addon,
async_uninstall_addon,
async_update_addon,
)
from homeassistant.components.hassio.handler import HassioAPIError
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.singleton import singleton
from .const import ADDON_SLUG, CONF_ADDON_DEVICE, CONF_ADDON_NETWORK_KEY, DOMAIN, LOGGER
F = TypeVar("F", bound=Callable[..., Any]) # pylint: disable=invalid-name
DATA_ADDON_MANAGER = f"{DOMAIN}_addon_manager"
@singleton(DATA_ADDON_MANAGER)
@callback
def get_addon_manager(hass: HomeAssistant) -> AddonManager:
"""Get the add-on manager."""
return AddonManager(hass)
def api_error(error_message: str) -> Callable[[F], F]:
"""Handle HassioAPIError and raise a specific AddonError."""
def handle_hassio_api_error(func: F) -> F:
"""Handle a HassioAPIError."""
async def wrapper(*args, **kwargs): # type: ignore
"""Wrap an add-on manager method."""
try:
return_value = await func(*args, **kwargs)
except HassioAPIError as err:
raise AddonError(f"{error_message}: {err}") from err
return return_value
return cast(F, wrapper)
return handle_hassio_api_error
@dataclass
class AddonInfo:
"""Represent the current add-on info state."""
options: dict[str, Any]
state: AddonState
update_available: bool
version: str | None
class AddonState(Enum):
"""Represent the current state of the add-on."""
NOT_INSTALLED = "not_installed"
INSTALLING = "installing"
UPDATING = "updating"
NOT_RUNNING = "not_running"
RUNNING = "running"
class AddonManager:
"""Manage the add-on.
Methods may raise AddonError.
Only one instance of this class may exist
to keep track of running add-on tasks.
"""
def __init__(self, hass: HomeAssistant) -> None:
"""Set up the add-on manager."""
self._hass = hass
self._install_task: asyncio.Task | None = None
self._start_task: asyncio.Task | None = None
self._update_task: asyncio.Task | None = None
def task_in_progress(self) -> bool:
"""Return True if any of the add-on tasks are in progress."""
return any(
task and not task.done()
for task in (
self._install_task,
self._start_task,
self._update_task,
)
)
@api_error("Failed to get Z-Wave JS add-on discovery info")
async def async_get_addon_discovery_info(self) -> dict:
"""Return add-on discovery info."""
discovery_info = await async_get_addon_discovery_info(self._hass, ADDON_SLUG)
if not discovery_info:
raise AddonError("Failed to get Z-Wave JS add-on discovery info")
discovery_info_config: dict = discovery_info["config"]
return discovery_info_config
@api_error("Failed to get the Z-Wave JS add-on info")
async def async_get_addon_info(self) -> AddonInfo:
"""Return and cache Z-Wave JS add-on info."""
addon_info: dict = await async_get_addon_info(self._hass, ADDON_SLUG)
addon_state = self.async_get_addon_state(addon_info)
return AddonInfo(
options=addon_info["options"],
state=addon_state,
update_available=addon_info["update_available"],
version=addon_info["version"],
)
@callback
def async_get_addon_state(self, addon_info: dict[str, Any]) -> AddonState:
"""Return the current state of the Z-Wave JS add-on."""
addon_state = AddonState.NOT_INSTALLED
if addon_info["version"] is not None:
addon_state = AddonState.NOT_RUNNING
if addon_info["state"] == "started":
addon_state = AddonState.RUNNING
if self._install_task and not self._install_task.done():
addon_state = AddonState.INSTALLING
if self._update_task and not self._update_task.done():
addon_state = AddonState.UPDATING
return addon_state
@api_error("Failed to set the Z-Wave JS add-on options")
async def async_set_addon_options(self, config: dict) -> None:
"""Set Z-Wave JS add-on options."""
options = {"options": config}
await async_set_addon_options(self._hass, ADDON_SLUG, options)
@api_error("Failed to install the Z-Wave JS add-on")
async def async_install_addon(self) -> None:
"""Install the Z-Wave JS add-on."""
await async_install_addon(self._hass, ADDON_SLUG)
@callback
def async_schedule_install_addon(self, catch_error: bool = False) -> asyncio.Task:
"""Schedule a task that installs the Z-Wave JS add-on.
Only schedule a new install task if the there's no running task.
"""
if not self._install_task or self._install_task.done():
LOGGER.info("Z-Wave JS add-on is not installed. Installing add-on")
self._install_task = self._async_schedule_addon_operation(
self.async_install_addon, catch_error=catch_error
)
return self._install_task
@callback
def async_schedule_install_setup_addon(
self, usb_path: str, network_key: str, catch_error: bool = False
) -> asyncio.Task:
"""Schedule a task that installs and sets up the Z-Wave JS add-on.
Only schedule a new install task if the there's no running task.
"""
if not self._install_task or self._install_task.done():
LOGGER.info("Z-Wave JS add-on is not installed. Installing add-on")
self._install_task = self._async_schedule_addon_operation(
self.async_install_addon,
partial(self.async_configure_addon, usb_path, network_key),
self.async_start_addon,
catch_error=catch_error,
)
return self._install_task
@api_error("Failed to uninstall the Z-Wave JS add-on")
async def async_uninstall_addon(self) -> None:
"""Uninstall the Z-Wave JS add-on."""
await async_uninstall_addon(self._hass, ADDON_SLUG)
@api_error("Failed to update the Z-Wave JS add-on")
async def async_update_addon(self) -> None:
"""Update the Z-Wave JS add-on if needed."""
addon_info = await self.async_get_addon_info()
if addon_info.version is None:
raise AddonError("Z-Wave JS add-on is not installed")
if not addon_info.update_available:
return
await self.async_create_snapshot()
await async_update_addon(self._hass, ADDON_SLUG)
@callback
def async_schedule_update_addon(self, catch_error: bool = False) -> asyncio.Task:
"""Schedule a task that updates and sets up the Z-Wave JS add-on.
Only schedule a new update task if the there's no running task.
"""
if not self._update_task or self._update_task.done():
LOGGER.info("Trying to update the Z-Wave JS add-on")
self._update_task = self._async_schedule_addon_operation(
self.async_update_addon,
catch_error=catch_error,
)
return self._update_task
@api_error("Failed to start the Z-Wave JS add-on")
async def async_start_addon(self) -> None:
"""Start the Z-Wave JS add-on."""
await async_start_addon(self._hass, ADDON_SLUG)
@callback
def async_schedule_start_addon(self, catch_error: bool = False) -> asyncio.Task:
"""Schedule a task that starts the Z-Wave JS add-on.
Only schedule a new start task if the there's no running task.
"""
if not self._start_task or self._start_task.done():
LOGGER.info("Z-Wave JS add-on is not running. Starting add-on")
self._start_task = self._async_schedule_addon_operation(
self.async_start_addon, catch_error=catch_error
)
return self._start_task
@api_error("Failed to stop the Z-Wave JS add-on")
async def async_stop_addon(self) -> None:
"""Stop the Z-Wave JS add-on."""
await async_stop_addon(self._hass, ADDON_SLUG)
async def async_configure_addon(self, usb_path: str, network_key: str) -> None:
"""Configure and start Z-Wave JS add-on."""
addon_info = await self.async_get_addon_info()
new_addon_options = {
CONF_ADDON_DEVICE: usb_path,
CONF_ADDON_NETWORK_KEY: network_key,
}
if new_addon_options != addon_info.options:
await self.async_set_addon_options(new_addon_options)
@callback
def async_schedule_setup_addon(
self, usb_path: str, network_key: str, catch_error: bool = False
) -> asyncio.Task:
"""Schedule a task that configures and starts the Z-Wave JS add-on.
Only schedule a new setup task if the there's no running task.
"""
if not self._start_task or self._start_task.done():
LOGGER.info("Z-Wave JS add-on is not running. Starting add-on")
self._start_task = self._async_schedule_addon_operation(
partial(self.async_configure_addon, usb_path, network_key),
self.async_start_addon,
catch_error=catch_error,
)
return self._start_task
@api_error("Failed to create a snapshot of the Z-Wave JS add-on.")
async def async_create_snapshot(self) -> None:
"""Create a partial snapshot of the Z-Wave JS add-on."""
addon_info = await self.async_get_addon_info()
name = f"addon_{ADDON_SLUG}_{addon_info.version}"
LOGGER.debug("Creating snapshot: %s", name)
await async_create_snapshot(
self._hass,
{"name": name, "addons": [ADDON_SLUG]},
partial=True,
)
@callback
def _async_schedule_addon_operation(
self, *funcs: Callable, catch_error: bool = False
) -> asyncio.Task:
"""Schedule an add-on task."""
async def addon_operation() -> None:
"""Do the add-on operation and catch AddonError."""
for func in funcs:
try:
await func()
except AddonError as err:
if not catch_error:
raise
LOGGER.error(err)
break
return self._hass.async_create_task(addon_operation())
class AddonError(HomeAssistantError):
"""Represent an error with Z-Wave JS add-on."""
| [
"noreply@github.com"
] | noreply@github.com |
0e18abbe0b8a0be143611eb5d59788e2524487c5 | 8a615e83114c5a9ee9a284220b87b626fddc4671 | /mySimpleprograms in python/days.py | beaefd9a7efa36f88409e5cce3a0e84484b037ba | [] | no_license | MandeepKaurJS/The-Tech-Academy-Basic-Python-Projects | 9854efdb89a294573d72719d83578f471f2ab0ed | d6f8ffce6498b6b791428a109cb15a2be786816e | refs/heads/master | 2020-06-01T15:15:20.032783 | 2019-06-28T05:16:19 | 2019-06-28T05:16:19 | 190,828,207 | 0 | 0 | null | 2019-06-28T05:16:20 | 2019-06-08T00:32:43 | Python | UTF-8 | Python | false | false | 514 | py | user_number=int(input("Enter a Number from 1 to 7"))
#print("%s"%user_number)
# Day names indexed 0-6; user input 1-7 maps to index user_number - 1.
days_list=["MONDAY","TUESDAY","WEDNESDAY","THURSDAY","FRIDAY","SATURDAY","SUNDAY"]
# Look the day up in the list instead of a hard-coded if/elif chain.
# This also fixes the old chain's "TUSEDAY" typo for input 2, making the
# output consistent with days_list.
if 1 <= user_number <= 7:
    print(days_list[user_number - 1])
else:
    print("out of order")
| [
"mandeep.kaur.fr@gmail.com"
] | mandeep.kaur.fr@gmail.com |
159b57720ecd59dcfb35dbe8f6725768b0565fba | 2d99db071269a5b1b7da7dab0459a355522ded4b | /_lecture_document/Day 5/Lab Guide/lab4-problem2.py | 9229e5f1f3f4d6d0ded3c6a79587397befeff7df | [] | no_license | golfz/practical-ai | 48b425fe5bcdf3d52582b990f95be01ecf92eb1e | 81f71eab57e1f5e1325fd3f2c9ddb1d4ae6b9e6d | refs/heads/master | 2020-04-24T22:19:03.703838 | 2019-07-31T07:16:07 | 2019-07-31T07:16:07 | 172,307,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,548 | py | import pandas as pd
from sklearn.preprocessing import normalize
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
'''
Step 1: Read the dataset using pandas.
'''
pokemon_dataset = pd.read_csv('data/pokemon.csv')
'''
Step 2: Access a certain groups of columns using pandas for preparing (X, y).
Suppose that we want to have data according to the following columns:
- sp_attack
- sp_defense
- attack
- defense
- speed
- hp
- type1
'''
# If we browse pokemon_dataset['type2'] in python console, we will see that many of them are null.
# What does this information tell us? This says a pokemon may be belonged to two types.
# Suppose that, in this example, we want to consider only pokemons which have a single type.
# How to handle this in pandas? (See https://pandas.pydata.org/pandas-docs/version/0.23.4/generated/pandas.isnull.html)
# Pandas also provide a method called 'loc' to access a certain groups of rows and columns.
dataframe = pokemon_dataset[pokemon_dataset['type2'].isnull()].loc[
:, ['sp_attack', 'sp_defense', 'attack', 'defense', 'speed', 'hp', 'type1']
]
# Grap only 'sp_attack', ..., 'hp' as an input X
# To index by position in pandas, see https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.iloc.html
X = # Put your code here to construct feature matrix X
# Normalizing is not necessary for the classification; but, it will make visualizing task (easier for our eyes).
# So, let's do this since we will also visualize it at the end of this exercise !
# Noted that we will learn why normalizing can help visualizing later in this course ! (e.g. when it comes to PCA)
X_normalized = normalize(X)
# Grap the last column as a target y
y = # Put your code here to construct target vector y
'''
Step 3: Fit linear discriminant analysis model according to the given training data.
See https://scikit-learn.org/stable/modules/generated/sklearn.discriminant_analysis.LinearDiscriminantAnalysis.html
'''
linearDiscriminantAnalysis = LinearDiscriminantAnalysis()
# Try to read the document given above by yourself to train the mode
'''
Step 4: Show the predicted type for each pokemon and measure the accuracy.
To predict class labels for samples in X, use method 'predict'
'''
# Try to read the document given in step 3 to make prediction
# After that, write codes to:
# 1) show the predicted type of each pokemon
# 2) show the actual pokemon of each pokeon
# 3) show numerical value representing its accuracy
# Noted that there may be more than one line of code for this step
| [
"surattikorn@gmail.com"
] | surattikorn@gmail.com |
12b3726cba31229d764be3c11437acb79618d2a3 | c88fd16dcc783ffab364177e5afdac99e574dd65 | /tests/test_summarize_dataframe.py | 202491970498322840354eb012a75f250e3b5473 | [] | no_license | fbraza/summarize_dataframe | dee6bc158fda41eb999367d1e31f67cefdceee79 | 7ed8bdde5c98df63c824b37d75ad0c1d64f6970a | refs/heads/master | 2023-06-01T16:20:53.842020 | 2021-06-24T06:37:08 | 2021-06-24T06:37:08 | 354,495,653 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | import unittest
import pytest
import pandas as pd
from summarize_dataframe.summarize_df import data_summary, display_summary
class TestDataSummary(unittest.TestCase):
    """Tests for summarize_df.data_summary and display_summary."""
    def setUp(self):
        # initialize dataframe to test
        df_data = [[1, 'a'], [2, 'b'], [3, 'c']]
        df_cols = ['numbers', 'letters']
        self.df = pd.DataFrame(data=df_data, columns=df_cols)
        # initialize expected dataframe: row/column counts plus one column
        # of each dtype (int64, object)
        exp_col = ['Values']
        exp_idx = ['Number of rows', 'Number of columns', 'int64', 'object']
        exp_data = [[3], [2], [1], [1]]
        self.exp_df = pd.DataFrame(data=exp_data, columns=exp_col, index=exp_idx)
    # autouse pytest fixture: injects pytest's capsys capture object into
    # this unittest-style class so test_display can read captured stdout.
    @pytest.fixture(autouse=True)
    def _pass_fixture(self, capsys):
        self.capsys = capsys
    def test_data_summary(self):
        # data_summary must return exactly the expected summary frame.
        expected_df = self.exp_df
        result_df = data_summary(self.df)
        self.assertTrue(expected_df.equals(result_df))
    def test_display(self):
        # Print the expected report ourselves, capture it, then compare it
        # with the output produced by display_summary for the same frame.
        print('---- Data summary ----', self.exp_df, sep='\n')
        expected_stdout = self.capsys.readouterr()
        display_summary(self.df)
        result_stdout = self.capsys.readouterr()
        self.assertEqual(expected_stdout, result_stdout)
if __name__ == '__main__':
unittest.main()
| [
"fbraza@tutanota.com"
] | fbraza@tutanota.com |
46d944beef6079c4d3518b3105d0f79157014dfa | 0c212aa63d07e84fbad849d15f2ee6a72aea82d2 | /15-spider/p13.py | fcfbb928f0fd41d01739b51f9036b23262c58709 | [] | no_license | flyingtothe/Python | e55b54e1b646d391550c8ced12ee92055c902c63 | 064964cb30308a38eefa5dc3059c065fcb89dd9f | refs/heads/master | 2021-08-06T19:44:42.137076 | 2018-12-03T12:15:15 | 2018-12-03T12:15:15 | 145,518,863 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | from urllib import request, parse
from http import cookiejar
# Create a CookieJar instance to hold session cookies in memory.
cookie = cookiejar.CookieJar()
# Handler that attaches/stores cookies for requests made through the opener.
cookie_handler = request.HTTPCookieProcessor(cookie)
# Plain HTTP handler.
http_handler = request.HTTPHandler()
# HTTPS handler.
https_handler = request.HTTPSHandler()
# Shared opener combining all handlers, used instead of urllib's default
# opener so the login cookie is reused by later requests.
opener = request.build_opener(http_handler, https_handler, cookie_handler)
def login():
    '''
    Perform the initial login.
    Submits the username and password so that the login cookie
    (session credential) is captured by the shared cookie jar.
    '''
    # URL taken from the action attribute of the login form.
    url = 'http://www.renren.com/PLogin.do'
    # Keys come from the name attributes of the login form's input fields.
    data = {
        'email': '13119144223',
        'password': '123456'
    }
    # URL-encode the form data.
    data = parse.urlencode(data)
    # Build the request; a bytes body makes urllib issue a POST.
    req = request.Request(url, data=data.encode())
    # Send through the shared opener; the response itself is ignored --
    # the session cookies stored in the cookie jar are the useful result.
    rsp = opener.open(req)
def getHomePage():
    """Fetch the profile page (requires the login cookie set by login())
    and save the HTML to rsp.html in the working directory."""
    url = 'http://www.renren.com/965187997/profile'
    rsp = opener.open(url)
    html = rsp.read().decode()
    with open('rsp.html', 'w') as f:
        f.write(html)
if __name__ == '__main__':
login()
getHomePage() | [
"heidemeirenai@163.com"
] | heidemeirenai@163.com |
d1403ebe159acabb0daa5f9392428450c8e9d73e | 9656af0e8280324a4de3cf64bd397d5549628777 | /Scripts/createfontdatachunk.py | 549756c0cf49cd2d52d1b1d8d97566cdf6b556cc | [] | no_license | Nikolay-Pomytkin/big_boys_video_game | c769590ff5d123580dea815e20dd14823da0cca9 | 073d7fa63e95590e499a90223f5a542339404278 | refs/heads/master | 2022-10-27T13:02:13.618713 | 2016-11-19T16:14:03 | 2016-11-19T16:14:03 | 74,219,174 | 0 | 1 | null | 2022-10-10T23:04:48 | 2016-11-19T16:08:59 | Python | UTF-8 | Python | false | false | 607 | py | #!c:\users\nik_000\documents\python\projects\big_boys_video_game\scripts\python.exe
from __future__ import print_function
import base64
import os
import sys
if __name__ == "__main__":
# create font data chunk for embedding
font = "Tests/images/courB08"
print(" f._load_pilfont_data(")
print(" # %s" % os.path.basename(font))
print(" BytesIO(base64.decodestring(b'''")
base64.encode(open(font + ".pil", "rb"), sys.stdout)
print("''')), Image.open(BytesIO(base64.decodestring(b'''")
base64.encode(open(font + ".pbm", "rb"), sys.stdout)
print("'''))))")
| [
"nikolayp2800@gmail.com"
] | nikolayp2800@gmail.com |
fb839be61c29dbf63e2633a26521e8fd2e0d1133 | 46cafc95660fbc649216bc2271a922f4c489eccd | /tema_1.2.py | bb12b3adf3ef9a5ddc4ba58be8270cc916973d38 | [] | no_license | Grabizza/tema_1 | 239e7b54d0beb607b225d8e9d732fb191c3c9474 | 52ad7d718e01ca0edad564c7d5f62b311276652c | refs/heads/master | 2022-04-11T20:14:39.746085 | 2020-03-16T13:25:27 | 2020-03-16T13:25:27 | 247,708,355 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | # Creati un program in care utilizatorul sa introduca un numar. Validati daca acest
# numar este par sau impar si afisati un raspuns in acest sens.
# Programul nu functioneaza cu numere zecimale
a = input("Introduceti un numar: ")
if (int(a) % 2 > 0):
print("Numarul introdus este un numar impar!")
input("Apasati tasta <enter> pentru a iesi din program!")
else:
print("Numarul introdus este numar par!")
input("Apasati tasta <enter> pentru a iesi din program!") | [
"61665390+Grabizza@users.noreply.github.com"
] | 61665390+Grabizza@users.noreply.github.com |
932637e46ef4564e1850299505bc6c286fc15825 | 54dbc8867cf72aa6eb0449c5f44c4dbd14a4d557 | /configurations/example_minimal.py | bad835c0b41a898071757d5bf729c120ab352b9d | [
"MIT"
] | permissive | tazlarv/lteval | c904e9f543d499039b3f447f04f560898528dc8b | 6d79a625bffa164ffe762bbec3987a972a1c91c6 | refs/heads/master | 2023-01-03T07:17:59.218686 | 2020-10-21T13:55:49 | 2020-10-21T13:55:49 | 283,720,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | # Example - minimal usable configuration file in practice.
# Everything possible is omitted with the exception
# of website generation and displaying.
configuration = {
"webpage_generate": True, # OPTIONAL, default: False
"webpage_display": True, # OPTIONAL, default : False
}
scenes = ["cbox_classic"]
renderers = {
"mitsubaRenderer_0_5": {
"type": "mitsuba_0_5",
"path": "data/renderers/mitsuba_0_5/mitsuba.exe",
},
}
# parameter_sets omitted
test_cases = [
# No params, fallback on the default scene settings
{"name": "mitsubaCase", "renderer": "mitsubaRenderer_0_5"},
]
| [
"tazlarvojtech@gmail.com"
] | tazlarvojtech@gmail.com |
c0089e67997d88be464e6b04eeaf4bbbe364cdb3 | 5ed59fac1c7c3815108b9c38b3fb24b03f514151 | /simulants/legacy/mass_composite_video.py | a9ae7ee029b76e87ac91162777ca2a496872f1ac | [] | no_license | atomicguy/simulants | de3f95c5bd4493752b73397ecaac78ce6d7f92cb | 30f362b320470594d3869f04ec00b8f37ce06729 | refs/heads/master | 2023-01-12T18:22:07.271286 | 2021-04-22T05:29:27 | 2021-04-22T05:29:27 | 152,873,703 | 2 | 0 | null | 2022-12-26T20:54:00 | 2018-10-13T13:17:43 | Python | UTF-8 | Python | false | false | 1,630 | py | import os
import json
import subprocess
import sys
import time
from joblib import Parallel, delayed
def write_token(token_path, content):
with open(token_path, 'w') as token_file:
token_file.write(content)
def write_error(work_item, exception):
print('write_error')
with open('comp.err.log', 'a') as err:
err.write(str(work_item))
err.write(' --> ')
err.write(str(exception))
err.write('\n')
def work(work_item):
token_path = work_item['token'] + '.comp'
if os.path.exists(token_path):
return
command = work_item['command']
try:
subprocess.check_call(command)
write_token(token_path, 'ok')
except Exception as e:
write_error(work_item, e)
def render_token_written(work_item):
return os.path.exists(work_item['token'])
def composite_token_written(work_item):
    """Return True iff compositing already dropped its '<token>.comp' file."""
    comp_path = '%s.comp' % work_item['token']
    return os.path.exists(comp_path)
if __name__ == '__main__':
    # Load the list of composite jobs produced by the render pass.
    with open('./lists/work_list_comp.json', 'r') as f:
        work_items = json.load(f)

    # Start each run with a clean error log. The except is narrowed to
    # OSError (was a bare `except:`), so KeyboardInterrupt/SystemExit
    # are no longer swallowed here.
    try:
        os.remove('comp.err.log')
    except OSError:
        pass

    def is_done():
        """Print progress and return True once every item has a .comp token."""
        num_done = sum(1 for i in work_items if composite_token_written(i))
        num_total = len(work_items)
        print('composited %d / %d' % (num_done, num_total))
        return num_done == num_total

    # Poll loop: composite whatever has been rendered but not yet
    # composited, then wait a minute for the renderers to produce more.
    while not is_done():
        rendered_items = (i for i in work_items
                          if render_token_written(i) and not composite_token_written(i))
        Parallel(n_jobs=10)(delayed(work)(i) for i in rendered_items)
        time.sleep(60)
| [
"adam@atompowered.net"
] | adam@atompowered.net |
4eaa7f324046e854009289219642971b5edd0a83 | 900cd8db2a05dce62760151885be603b3ee36195 | /src/profiles/migrations/0001_initial.py | 50cc7ff4cee2e0d0fef9c4f38295f1192a3d8c28 | [] | no_license | hariharan-manoharan/fulcrumbook | b12428d937f24eb3ef70afb4277fa50147ebef25 | 56a5ec18186fbe8f198552ad487653e8da539b8c | refs/heads/master | 2021-01-23T01:01:28.028190 | 2017-03-24T09:29:13 | 2017-03-24T09:29:13 | 85,862,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-03-18 17:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the profiles app: creates the `profile` model."""

    # First migration in this app's history.
    initial = True

    # No prior migrations (in this app or elsewhere) are required.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='profile',
            fields=[
                # Auto-incrementing surrogate primary key added by Django.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Display name, limited to 120 characters.
                ('name', models.CharField(max_length=120)),
            ],
        ),
    ]
| [
"hariharan.manoharn@fulcrum.net"
] | hariharan.manoharn@fulcrum.net |
e46fc5f4b765a7d6786e1085828e918b4375b3e0 | ff9b8215ce836fcf2cf30ec35ccf26f8b2b10b69 | /py/ex39.py | 78f1b30fb66d44c2ccba3f591eb15376ca5593c6 | [] | no_license | wpfalive/learnpython | e8080b17a83975311b675b88d17e5b9ff9b68016 | bedea4cdeea79be297590ac3b95791ffce32fd06 | refs/heads/master | 2021-07-05T18:56:04.039709 | 2017-09-26T16:03:02 | 2017-09-26T16:03:02 | 103,671,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | class Song(object):
    """A song defined by its lyric lines, which it can sing to stdout."""
    def __init__(self, lyrics):
        """Store *lyrics*, the sequence of lines to sing later."""
        self.lyrics = lyrics
def sing_me_a_song(self):
for line in self.lyrics:
print line
# Two example songs; module-level names kept for any external users.
happy_day = Song([
    "Happy birthday to you",
    "I don't want to get sued",
    "So I'll stop right there",
])

bulls_on_parade = Song([
    "They rally around the family",
    "With pockets full of shells",
])

# Sing both songs in the same order as before.
for _song in (happy_day, bulls_on_parade):
    _song.sing_me_a_song()
| [
"1643700595@qq.com"
] | 1643700595@qq.com |
45d6c57c09a0dbd68c7ef8bb19df5062456ee3eb | ffc2101e693041c09fe84f3409ce0c756056b73e | /NomticketDjangoAPP/core/migrations/0004_auto_20210424_1639.py | c104c8b5547afd5c7f724dab655b895f87f41f7f | [] | no_license | NukeroSempai/NomTicket_Django | 5dce041e002d74fcd62a71185db4708906dd03fb | e8dcb88b75fae46c1a98a0d6fd7a2a96f457faf0 | refs/heads/main | 2023-04-17T05:52:25.180873 | 2021-04-27T23:47:56 | 2021-04-27T23:47:56 | 354,626,786 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | # Generated by Django 3.1.2 on 2021-04-24 20:39
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add start/end date fields to the `informe_ticket` model."""

    # Must run after the previous CORE migration.
    dependencies = [
        ('CORE', '0003_auto_20210424_1303'),
    ]

    operations = [
        migrations.AddField(
            model_name='informe_ticket',
            name='fecha_inicio',
            # auto_now_add stamps the row creation date; the one-off default
            # (timezone.now) is only needed to backfill existing rows, hence
            # preserve_default=False.
            field=models.DateField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='fecha inicio'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='informe_ticket',
            name='fecha_termino',
            # NOTE(review): auto_now_add on an end-date field looks suspicious
            # (it fixes the value at creation time) — confirm intent upstream.
            field=models.DateField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='fecha termino'),
            preserve_default=False,
        ),
    ]
| [
"williams.parra.parra@gmail.com"
] | williams.parra.parra@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.