blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bba36d64c3b88de58da092105ce23f4307722e50 | 25bf2073b882dec6172ea65ba87cfcf70ea5dc76 | /cli/src/ipsecmanager.py | d994d263cf90710cf10eaf9e4ce379ffdce481d0 | [] | no_license | zhaog918/coprhd-controller | 7469f360002d29c14c077758535a7624b71e8b1b | 13dd95fa15ef84b6af67a54ba8990cb4eedd99e3 | refs/heads/master | 2021-01-14T13:06:17.381476 | 2015-12-22T13:32:36 | 2015-12-22T13:32:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,220 | py | #!/usr/bin/env python
# Copyright (c) 2015 EMC Corporation
# All Rights Reserved
#
import json
import common
import sys
from common import SOSError
from common import TableGenerator
class IPsecManager(object):
    """Client for the ViPR IPsec REST endpoints (key rotation and status)."""

    URI_SERVICES_BASE = ''
    URI_IPSEC = URI_SERVICES_BASE + '/ipsec'

    def __init__(self, ipAddr, port):
        '''
        Constructor: takes IP address and port of the ViPR instance.
        These are needed to make http requests for REST API
        '''
        self.__ipAddr = ipAddr
        self.__port = port

    def rotate_ipsec_key(self):
        """POST /ipsec to rotate the IPsec key.

        Returns the raw response body (the new IPsec configuration version).
        """
        (s, h) = common.service_json_request(self.__ipAddr, self.__port,
                                             "POST",
                                             IPsecManager.URI_IPSEC,
                                             None)
        return s

    def get_ipsec_status(self, xml=False):
        """GET /ipsec.

        Returns the decoded JSON object by default, or the raw XML
        response string when ``xml`` is truthy.
        """
        # 'not xml' (rather than the old 'xml == False') also treats
        # None/0 as "JSON requested", which is the intended default.
        if not xml:
            (s, h) = common.service_json_request(self.__ipAddr, self.__port,
                                                 "GET",
                                                 IPsecManager.URI_IPSEC,
                                                 None)
            return common.json_decode(s)
        (s, h) = common.service_json_request(self.__ipAddr, self.__port,
                                             "GET",
                                             IPsecManager.URI_IPSEC,
                                             None, None, xml)
        return s
def rotate_ipsec_key_parser(subcommand_parsers, common_parser):
    """Register the 'rotate-key' sub-command on the given sub-parser set."""
    parser = subcommand_parsers.add_parser(
        'rotate-key',
        description='ViPR IPsec key rotation CLI usage',
        parents=[common_parser],
        conflict_handler='resolve',
        help='Rotates or generates the IPsec keys.')
    # Dispatch to the module-level handler when this sub-command is used.
    parser.set_defaults(func=rotate_ipsec_key)
def rotate_ipsec_key(args):
    """CLI handler: rotate the IPsec key and report the new config version.

    Delegates REST errors to common.format_err_msg_and_raise.
    """
    try:
        res = IPsecManager(args.ip, args.port).rotate_ipsec_key()
        if not res:
            # BUGFIX: res may be None on failure; str() avoids a TypeError
            # from concatenating None into the message.  print(...) is the
            # py2/py3-compatible single-argument form.
            print('Failed to rotate the IPsec key. Reason : ' + str(res))
        else:
            print('Successfully rotated the IPsec key. New IPsec configuration version is ' + res)
    except SOSError as e:
        common.format_err_msg_and_raise("rotate", "IPsec key",
                                        e.err_text, e.err_code)
def get_ipsec_status_parser(subcommand_parsers, common_parser):
    """Register the 'status' sub-command on the given sub-parser set."""
    status_parser = subcommand_parsers.add_parser(
        'status',
        description='ViPR IPsec status CLI usage',
        parents=[common_parser],
        conflict_handler='resolve',
        help='Gets the IPsec status and current configuration version.')
    # Optional flag: request the raw XML response instead of JSON.
    status_parser.add_argument('-xml',
                               dest='xml',
                               action='store_true',
                               help='XML response')
    status_parser.set_defaults(func=get_ipsec_status)
def get_ipsec_status(args):
    """CLI handler: fetch the IPsec status, formatted as JSON or XML."""
    try:
        res = IPsecManager(args.ip, args.port).get_ipsec_status(args.xml)
        if not res:
            # BUGFIX: str() guards against res being None (TypeError on
            # concatenation); print(...) is py2/py3 compatible.
            print('Failed to get the IPsec status. Reason : ' + str(res))
        else:
            # args.xml is a store_true boolean; no need for '== True'.
            if args.xml:
                return common.format_xml(res)
            return common.format_json_object(res)
    except SOSError as e:
        common.format_err_msg_and_raise("status", "IPsec",
                                        e.err_text, e.err_code)
def ipsec_parser(parent_subparser, common_parser):
    """Attach the top-level 'ipsec' command and all of its sub-commands."""
    ipsec = parent_subparser.add_parser(
        'ipsec',
        description='ViPR IPsec CLI usage',
        parents=[common_parser],
        conflict_handler='resolve',
        help='Operations on IPsec')
    subparsers = ipsec.add_subparsers(help='Use one of sub-commands')
    # Register each sub-command's parser and handler.
    rotate_ipsec_key_parser(subparsers, common_parser)
    get_ipsec_status_parser(subparsers, common_parser)
| [
"Eswaramoorthy.Saminathan@emc.com"
] | Eswaramoorthy.Saminathan@emc.com |
f9145e7d3145cbc542aaa28d46e6a3d813b2344f | a4a6cb3792b1092ecca42b54f00257db0688fde2 | /pages/base_page.py | 8e58c8dcf06d17268ebe0668826fa99eace9159b | [] | no_license | bijiaha0/python-learning | 0ca70784c6ff0a386f051f8f78f1202fb172b9ef | d93c4831a20a0001a80b66f6ba2336b8d537834b | refs/heads/master | 2020-12-21T05:35:09.506356 | 2020-03-26T12:50:27 | 2020-03-26T12:50:27 | 236,324,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py | from config import basic_settings
__author__ = "zhou"
__date__ = "2019-06-01 22:09"
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
class BasePage(object):
    """Common Selenium page-object helpers: open a URL, locate elements
    with explicit waits, and type into inputs."""

    def __init__(self, driver, url):
        self._driver = driver
        self._url = url

    def open(self):
        """Navigate the driver to this page's URL and return the driver."""
        self._driver.get(url=self._url)
        return self._driver

    def find_element(self, *locator, element=None, timeout=None, wait_type="visibility"):
        """Locate an element with an explicit wait.

        When ``element`` is given, the locator is resolved relative to it
        and the result is waited on for visibility.  Otherwise the wait
        strategy is chosen by ``wait_type`` ("visibility" or presence).
        """
        wait = self._init_wait(timeout)
        if element is not None:
            child = element.find_element(*locator)
            return wait.until(EC.visibility_of(child))
        if wait_type == "visibility":
            condition = EC.visibility_of_element_located(locator=locator)
        else:
            condition = EC.presence_of_element_located(locator=locator)
        return wait.until(condition)

    def send_keys(self, web_element, keys):
        """Clear the input field, then type ``keys`` into it."""
        web_element.clear()
        web_element.send_keys(keys)

    def _init_wait(self, timeout):
        # Fall back to the project-wide default wait when no timeout given.
        effective_timeout = basic_settings.UI_WAIT_TIME if timeout is None else timeout
        return WebDriverWait(driver=self._driver, timeout=effective_timeout)
| [
"bijh@tsingyun.net"
] | bijh@tsingyun.net |
04e67d49d36bdf26448d39e00dc3c7ef16ccb612 | 744d8c4b69a793fa2109e3c868df8a8cb730546f | /PaleonT/PythonFGTS/Nový priečinok (2)/Nový priečinok/Tkinter/ERRR.py | 95c47d9849783ff7ab645996ac3ce8924c69ddc5 | [] | no_license | ShardBytes/Evolutions | f2f4407ddf3acfa1e9b991477af20985826db25b | 0c34dd785748bd8f0217d3eac71cae12b59522b9 | refs/heads/master | 2021-06-04T22:32:49.034242 | 2020-03-12T12:59:53 | 2020-03-12T12:59:53 | 133,266,065 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | import random
import tkinter
x = random.randint(10,500)
y = random.randint(10,500)
root = tkinter.Tk()
canvas = tkinter.Canvas(root, width = 800, height = 800)
canvas.pack()
def troj(width, height, startPointX, startPointY, color):
    """Draw an isosceles triangle on the global canvas.

    The base runs from (startPointX, startPointY) to
    (startPointX + width, startPointY); the apex sits centred above the
    base at vertical distance ``height``.
    """
    # BUGFIX: the apex was previously drawn at x = width/2, ignoring
    # startPointX, so the triangle was only correct near the origin.
    apex_x = startPointX + width / 2
    apex_y = startPointY - height
    canvas.create_line(startPointX, startPointY, startPointX + width, startPointY, fill=color)
    canvas.create_line(startPointX, startPointY, apex_x, apex_y, fill=color)
    canvas.create_line(apex_x, apex_y, startPointX + width, startPointY, fill=color)
# BUGFIX: the previous 'while 1 == 1' loop redrew the triangle forever, so
# root.mainloop() below was never reached -- the window never processed
# events and the canvas accumulated unbounded line items.  Draw once and
# let Tk's event loop take over.
troj(x, y, 10, 700, "red")
root.mainloop() | [
"46000279+Frantisekkk@users.noreply.github.com"
] | 46000279+Frantisekkk@users.noreply.github.com |
e7e4d084dc17c9d5070875958c3a23ba0cc0fb16 | 709f50c5dd6be0d2c88f0ae5c87589a595b0859e | /main_pl.py | 8218cf9206efeaf24eaa0285c23eda21a763cbd1 | [
"MIT"
] | permissive | Naagar/Fine_Grained_Classification-FGC- | 8d23509bf866364b03505452f80ee6a880cd2763 | ec64db86db6efa68430cd976a2e84958777410e3 | refs/heads/main | 2023-04-22T08:26:50.491928 | 2021-05-10T16:08:08 | 2021-05-10T16:08:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,360 | py | import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torchvision.models as models
import pytorch_lightning as pl
from pytorch_lightning import Trainer
from pytorch_lightning.metrics.functional import accuracy
from torch.utils.data import Dataset, DataLoader
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader, random_split
from pytorch_lightning.loggers import TensorBoardLogger
##--- Hyper parameters ---##
num_classes = 258
learning_rate = 0.001
batch_size = 256
num_epochs = 600
# tb_logger = pl_loggers.TensorBoardLogger('logs/')
logger = TensorBoardLogger("tb_logs", name="my_model")  # TensorBoard runs under tb_logs/my_model

##--- dataset preprocess ---##
# Random crop/flip augmentation; the mean/std below are the standard
# ImageNet normalisation statistics.
data_transforms = transforms.Compose(
    [transforms.RandomResizedCrop(128),
     transforms.RandomHorizontalFlip(),
     transforms.ToTensor(),
     transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])

##--- dataset paths ---##
data_dir = 'nut_snacks/dataset/'
dataset = ImageFolder(data_dir, transform=data_transforms)
print(len(dataset))
# Fixed-size split: 10000 train / 2607 val (the two must sum to len(dataset)).
train_data, val_data = random_split(dataset, [10000, 2607])
print(len(train_data))
print(len(val_data))

##----- Selecting model for training -----##
# Candidate backbones; resnet50 is the one actually used below.
vgg16 = models.vgg16()
resnet18 = models.resnet18()
resnet50 = models.resnet50()
# model = vgg16
model = resnet50
# model = resnet34
class Lit_NN(pl.LightningModule):
    """LightningModule wrapping a torchvision backbone for classification.

    Fixes over the original:
      * removed the duplicate ``training_step`` definition (the second
        silently overrode the first and returned no loss),
      * ``configure_optimizers`` now optimises ``self.parameters()``
        instead of the module-level ``model`` global,
      * ``self.log`` is called once per metric (its signature takes a
        single name/value pair),
      * the epoch hook is named ``validation_epoch_end`` (the misspelled
        ``validation_epoch_ends`` was never invoked by Lightning) and no
        longer references undefined names.
    """

    def __init__(self, num_classes, model):
        super(Lit_NN, self).__init__()
        # The wrapped backbone (attribute name kept for checkpoint compat).
        self.resnet50 = model

    def forward(self, x):
        return self.resnet50(x)

    def configure_optimizers(self):
        # BUGFIX: optimise this module's parameters, not the global 'model'.
        return torch.optim.Adam(self.parameters(), lr=learning_rate)

    def train_dataloader(self):
        return DataLoader(train_data, batch_size=batch_size, num_workers=4)

    def val_dataloader(self):
        return DataLoader(val_data, batch_size=batch_size, num_workers=4)

    def training_step(self, batch, batch_idx):
        images, labels = batch
        outputs = self(images)
        loss = F.cross_entropy(outputs, labels)
        acc = accuracy(outputs, labels)
        # One self.log call per metric; logged per-step and per-epoch.
        self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        self.log('train_acc', acc, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        return loss

    def validation_step(self, batch, batch_idx):
        images, labels = batch
        outputs = self(images)
        val_loss = F.cross_entropy(outputs, labels)
        val_acc = accuracy(outputs, labels)
        self.log('val_loss', val_loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        self.log('val_acc', val_acc, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        return {'val_loss': val_loss, 'val_acc': val_acc}

    def validation_epoch_end(self, outputs):
        # Aggregate the per-batch validation metrics for the epoch.
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        avg_acc = torch.stack([x['val_acc'] for x in outputs]).mean()
        self.log('avg_val_loss', avg_loss)
        self.log('avg_val_acc', avg_acc)
if __name__ == '__main__':
    # BUGFIX: 'logger' was passed positionally after keyword arguments,
    # which is a SyntaxError; it must be passed as logger=logger.
    # 'fast_dev_run' for checking errors, "auto_lr_find" to find the best lr_rate.
    trainer = Trainer(auto_lr_find=True, max_epochs=num_epochs, fast_dev_run=False,
                      gpus=1, logger=logger)
    model = Lit_NN(num_classes, model)
    trainer.fit(model)
| [
"noreply@github.com"
] | Naagar.noreply@github.com |
e590a15082ab839b6cc3e21f9f0c23dcb2ea9f0e | 34540dae4c1199f159efdcfc2c2ad532c0d463d2 | /relatorio/urls.py | 3c99667b7136e4d6fee1d17487978c8b08f4a422 | [] | no_license | CimaraOliveira/WorkBook | c67d5e97a94799cd3e2dbd998abb96afd29ae8fe | ae7f3bea8de285481b1b4bc03f8c1934fd48ce59 | refs/heads/main | 2023-05-30T01:48:21.841086 | 2021-06-14T17:27:22 | 2021-06-14T17:27:22 | 343,867,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | from django.urls import path
from .views import GeneratePdf
# URL routes for the relatorio (report) app.
urlpatterns = [
    # GET /relatorios/ renders the PDF report via the GeneratePdf view.
    path('relatorios/', GeneratePdf.as_view(), name='gerar_relatorio'),
]
"cimarinhaoliveira@hotmail.com"
] | cimarinhaoliveira@hotmail.com |
81386b410cfaf6f7d5aa0e5cd993dfbe88d6e5b2 | be8629e747f808d6c9e23fc1d884f2e82fe02818 | /scraping/ven/bin/easy_install-3.8 | 89e8fa0f9ab1727f746f1a3fa08516a574fa36a3 | [] | no_license | durbonca/just-some-py-code-exercise | 6ef9283307847de02173cf08b82745dd831af333 | 5a394f275024a334b16f0c96c07c315f5b5e764f | refs/heads/master | 2020-09-28T21:22:17.780372 | 2019-12-16T13:09:06 | 2019-12-16T13:09:06 | 226,867,440 | 0 | 0 | null | 2019-12-16T13:19:54 | 2019-12-09T12:35:37 | Python | UTF-8 | Python | false | false | 279 | 8 | #!/home/durbonca/Documents/platzi/python/scraping/ven/bin/python3.8
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Normalise argv[0] (strip Windows '-script.pyw'/'.exe' suffixes) so
    # setuptools reports a clean program name, then run easy_install and
    # propagate its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"durbonca@hotmail.com"
] | durbonca@hotmail.com |
461aefbbf762874b01a14c2240b32b4c3530d3e3 | 2dad8b725583afd64e2f381acb6a299350a069c4 | /daftar/migrations/0012_auto_20200204_1527.py | 939f809acd5ce9faedf92e3b7bb279d387c8aa89 | [] | no_license | s4-hub/winback | 39b0b354690201a7906ce77f46c1172ddcb21110 | abfb22b6ed5d523b93ea5cdb982ac3066a63ab7c | refs/heads/master | 2020-12-22T12:27:54.416189 | 2020-02-11T10:50:30 | 2020-02-11T10:50:30 | 233,515,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,233 | py | # Generated by Django 2.2.7 on 2020-02-04 08:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('daftar', '0011_auto_20200204_1445'),
]
operations = [
migrations.DeleteModel(
name='Pekerjaan',
),
migrations.AddField(
model_name='daftar',
name='lokasi',
field=models.CharField(choices=[('1101', 'ACEH SELATAN'), ('1102', 'ACEH TENGGARA'), ('1103', 'ACEH TIMUR'), ('1104', 'ACEH TENGAH'), ('1105', 'ACEH BARAT'), ('1106', 'ACEH BESAR'), ('1107', 'PIDIE'), ('1108', 'ACEH UTARA'), ('1109', 'SIMEULUE'), ('1110', 'ACEH SINGKIL'), ('1111', 'BIREUEN'), ('1112', 'ACEH BARAT DAYA'), ('1113', 'GAYO LUES'), ('1114', 'ACEH JAYA'), ('1115', 'NAGAN RAYA'), ('1116', 'ACEH TAMIANG'), ('1117', 'BENER MERIAH'), ('1118', 'PIDIE JAYA'), ('1171', 'KOTA BANDA ACEH'), ('1172', 'KOTA SABANG'), ('1173', 'KOTA LHOKSEUMAWE'), ('1174', 'KOTA LANGSA'), ('1175', 'KOTA SUBULUSSALAM')], default=1, max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='daftar',
name='pekerjaan1',
field=models.CharField(choices=[('P001', 'PETANI/PEKEBUN'), ('P002', 'PETERNAK'), ('P003', 'NELAYAN/PERIKANAN'), ('P004', 'TRANSPORTASI'), ('P005', 'BURUH HARIAN LEPAS'), ('P006', 'BURUH TANI/PERKEBUNAN'), ('P007', 'BURUH NELAYAN/PERIKANAN'), ('P008', 'BURUH PETERNAKAN'), ('P009', 'PEMBANTU RUMAH TANGGA'), ('P010', 'TUKANG CUKUR'), ('P011', 'TUKANG LISTRIK'), ('P012', 'TUKANG BATU'), ('P013', 'TUKANG KAYU'), ('P014', 'TUKANG SOL SEPATU'), ('P015', 'TUKANG LAS/PANDAI BESI'), ('P016', 'TUKANG JAHIT'), ('P017', 'TUKANG GIGI'), ('P018', 'PENATA RIAS'), ('P019', 'PENATA BUSANA'), ('P020', 'PENATA RAMBUT'), ('P021', 'MEKANIK'), ('P022', 'SENIMAN'), ('P023', 'TABIB'), ('P024', 'PARAJI'), ('P025', 'PERANCANG BUSANA'), ('P026', 'PENTERJEMAH'), ('P027', 'IMAM MESJID'), ('P028', 'PENDETA'), ('P029', 'PASTOR'), ('P030', 'WARTAWAN'), ('P031', 'USTADZ/MUBALIGH'), ('P032', 'JURU MASAK'), ('P033', 'PROMOTOR ACARA'), ('P034', 'DOSEN'), ('P035', 'GURU'), ('P036', 'PENGACARA'), ('P037', 'NOTARIS'), ('P038', 'ARSITEK'), ('P039', 'KONSULTAN'), ('P040', 'DOKTER'), ('P041', 'BIDAN'), ('P042', 'APOTEKER'), ('P043', 'PSIKIATER/PSIKOLOG'), ('P044', 'PENYIAR RADIO'), ('P045', 'PELAUT'), ('P046', 'PENELITI'), ('P047', 'SOPIR'), ('P048', 'PIALANG'), ('P049', 'PARANORMAL'), ('P050', 'PEDAGANG'), ('P051', 'BIARAWATI'), ('P052', 'WIRASWASTA'), ('P053', 'MITRA GOJEK'), ('P054', 'MITRA GRAB'), ('P055', 'MITRA UBER'), ('P056', 'PEKERJA MAGANG'), ('P057', 'SISWA KERJA PRAKTEK'), ('P058', 'TENAGA HONORER (SELAIN PENYELENGGARA NEGARA)'), ('P059', 'NARAPIDANA DALAM PROSES ASIMILASI'), ('P060', 'ATLET'), ('P061', 'ARTIS'), ('P062', 'JURU PARKIR'), ('P063', 'TUKANG PIJAT'), ('P064', 'PEMANDU LAGU'), ('P065', 'PENDAMPING DESA'), ('P066', 'BURUH BONGKAR MUAT/BAGASI'), ('P067', 'RELAWAN TAGANA/RELAWAN BENCANA'), ('P068', 'TUKANG SAMPAH'), ('P069', 'PEMULUNG'), ('P070', 'MARBOT MESJID'), ('P071', 'MITRA GOJEK-GO LIFE')], default=1, max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='daftar',
name='pekerjaan2',
field=models.CharField(blank=True, choices=[('P001', 'PETANI/PEKEBUN'), ('P002', 'PETERNAK'), ('P003', 'NELAYAN/PERIKANAN'), ('P004', 'TRANSPORTASI'), ('P005', 'BURUH HARIAN LEPAS'), ('P006', 'BURUH TANI/PERKEBUNAN'), ('P007', 'BURUH NELAYAN/PERIKANAN'), ('P008', 'BURUH PETERNAKAN'), ('P009', 'PEMBANTU RUMAH TANGGA'), ('P010', 'TUKANG CUKUR'), ('P011', 'TUKANG LISTRIK'), ('P012', 'TUKANG BATU'), ('P013', 'TUKANG KAYU'), ('P014', 'TUKANG SOL SEPATU'), ('P015', 'TUKANG LAS/PANDAI BESI'), ('P016', 'TUKANG JAHIT'), ('P017', 'TUKANG GIGI'), ('P018', 'PENATA RIAS'), ('P019', 'PENATA BUSANA'), ('P020', 'PENATA RAMBUT'), ('P021', 'MEKANIK'), ('P022', 'SENIMAN'), ('P023', 'TABIB'), ('P024', 'PARAJI'), ('P025', 'PERANCANG BUSANA'), ('P026', 'PENTERJEMAH'), ('P027', 'IMAM MESJID'), ('P028', 'PENDETA'), ('P029', 'PASTOR'), ('P030', 'WARTAWAN'), ('P031', 'USTADZ/MUBALIGH'), ('P032', 'JURU MASAK'), ('P033', 'PROMOTOR ACARA'), ('P034', 'DOSEN'), ('P035', 'GURU'), ('P036', 'PENGACARA'), ('P037', 'NOTARIS'), ('P038', 'ARSITEK'), ('P039', 'KONSULTAN'), ('P040', 'DOKTER'), ('P041', 'BIDAN'), ('P042', 'APOTEKER'), ('P043', 'PSIKIATER/PSIKOLOG'), ('P044', 'PENYIAR RADIO'), ('P045', 'PELAUT'), ('P046', 'PENELITI'), ('P047', 'SOPIR'), ('P048', 'PIALANG'), ('P049', 'PARANORMAL'), ('P050', 'PEDAGANG'), ('P051', 'BIARAWATI'), ('P052', 'WIRASWASTA'), ('P053', 'MITRA GOJEK'), ('P054', 'MITRA GRAB'), ('P055', 'MITRA UBER'), ('P056', 'PEKERJA MAGANG'), ('P057', 'SISWA KERJA PRAKTEK'), ('P058', 'TENAGA HONORER (SELAIN PENYELENGGARA NEGARA)'), ('P059', 'NARAPIDANA DALAM PROSES ASIMILASI'), ('P060', 'ATLET'), ('P061', 'ARTIS'), ('P062', 'JURU PARKIR'), ('P063', 'TUKANG PIJAT'), ('P064', 'PEMANDU LAGU'), ('P065', 'PENDAMPING DESA'), ('P066', 'BURUH BONGKAR MUAT/BAGASI'), ('P067', 'RELAWAN TAGANA/RELAWAN BENCANA'), ('P068', 'TUKANG SAMPAH'), ('P069', 'PEMULUNG'), ('P070', 'MARBOT MESJID'), ('P071', 'MITRA GOJEK-GO LIFE')], max_length=50),
),
]
| [
"syafii.newbie@gmail.com"
] | syafii.newbie@gmail.com |
63ca5ff1ce518c3722e989c96d91959a036649ef | 36aa8ca505bcfe024db9875fb056ca6570f3504d | /Core/Noun/models.py | e0ce6ce8d4b201092025129707e64cec6b32ac01 | [] | no_license | shashanj/bankOnCube | 42fec3886b7e5628e47603e7cb0f6fb177e564ec | 865fa104022c8dd2116bf83280bf22c78069537c | refs/heads/master | 2021-01-20T12:38:26.897843 | 2017-05-05T15:25:19 | 2017-05-05T15:25:19 | 90,389,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | from django.db import models
from Core.Verb.models import *
from Core.Noun.NotificationEnableType import *
# Create your models here.
class Noun(models.Model):
    """A domain entity ("noun") together with the verbs allowed on it."""
    # Short unique business code, used as the primary key.
    noun_code = models.CharField(primary_key=True,max_length=10, blank=False,null=False)
    # Human-readable noun name (also the string representation).
    noun = models.CharField(max_length=30, blank=False,null=False)
    # Free-text description.
    desc = models.CharField(max_length=100)
    # Verbs that may be applied to this noun.
    allowedVerb = models.ManyToManyField(Verb)
    def __str__(self):
        return self.noun
# class Notification_Config(models.Model):
# noun = models.ForeignKey(Noun)
# verb = models.ForeignKey(Verb)
# notification_enable = models.CharField(max_length=3,choices = Notification_Enable_Type().getTypes())
# condition = models.TextField(null= True)
# expected_output_type = models.CharField(max_length=100)
# expected_output = models.CharField(max_length=100)
# def __str__(self):
# return self.noun.noun + self.verb.verb | [
"shashanks.1903@gmail.com"
] | shashanks.1903@gmail.com |
8070b82297d7345832d30903d808e495e45e7dc8 | aba096b6a97b4e11253d9e5064f64b436ce02212 | /Aspect_Analysis/script/evaluate.py | f39befd2aa79f9ec3b6e53683bdb4f72f3b01ecf | [] | no_license | somanshud/Aspect-Intent-Sentiment-Analysis | 9d0d887fe07349e328606f11880e6eb0d021c0f2 | 7515fdda340e0788243d3f3cc259b15fc9792db8 | refs/heads/master | 2022-08-17T20:13:10.744753 | 2020-05-20T07:27:17 | 2020-05-20T07:27:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,068 | py | import argparse
import torch
import numpy as np
import json, io
from nltk import word_tokenize
import nltk
import os
from nltk.tag import StanfordPOSTagger
from keras.utils import to_categorical
import xml.etree.ElementTree as ET
from fasttext import load_model
class Model(torch.nn.Module):
    """Double-embedding CNN sequence tagger (aspect extraction).

    Each token is the concatenation of a frozen general embedding and a
    frozen domain embedding; a stack of 1-D convolutions produces
    per-token features that are classified into ``num_classes`` tags,
    optionally decoded/trained with a CRF layer.
    """
    def __init__(self, gen_emb, domain_emb, num_classes=3, dropout=0.5, crf=False, tag=False):
        super(Model, self).__init__()
        # When tag=True a 45-dim POS one-hot is concatenated before the
        # final classifier (45 = size of the POS tag set used upstream).
        self.tag_dim = 45 if tag else 0
        # Frozen (non-trainable) embedding tables loaded from numpy arrays.
        self.gen_embedding = torch.nn.Embedding(gen_emb.shape[0], gen_emb.shape[1])
        self.gen_embedding.weight=torch.nn.Parameter(torch.from_numpy(gen_emb), requires_grad=False)
        self.domain_embedding = torch.nn.Embedding(domain_emb.shape[0], domain_emb.shape[1])
        self.domain_embedding.weight=torch.nn.Parameter(torch.from_numpy(domain_emb), requires_grad=False)
        # Two parallel convolutions (kernel widths 5 and 3) over the
        # concatenated embeddings; outputs concatenate to 256 channels.
        self.conv1=torch.nn.Conv1d(gen_emb.shape[1]+domain_emb.shape[1], 128, 5, padding=2 )
        self.conv2=torch.nn.Conv1d(gen_emb.shape[1]+domain_emb.shape[1], 128, 3, padding=1 )
        self.dropout=torch.nn.Dropout(dropout)
        self.conv3=torch.nn.Conv1d(256, 256, 5, padding=2)
        self.conv4=torch.nn.Conv1d(256, 256, 5, padding=2)
        self.conv5=torch.nn.Conv1d(256, 256, 5, padding=2)
        # Token classifier over conv features + optional POS one-hot + the
        # raw domain embedding (re-concatenated as a skip connection).
        self.linear_ae1=torch.nn.Linear(256+self.tag_dim+domain_emb.shape[1], 50)
        self.linear_ae2=torch.nn.Linear(50, num_classes)
        self.crf_flag=crf
        if self.crf_flag:
            # Imported lazily so allennlp is only needed when crf=True.
            from allennlp.modules import ConditionalRandomField
            self.crf=ConditionalRandomField(num_classes)
    def forward(self, x, x_len, x_mask, x_tag, y=None, testing=False):
        """Score a batch.

        x: (batch, seq) word indices; x_len: per-sentence lengths, which
        must already be sorted descending (required by
        pack_padded_sequence); x_mask: 0/1 padding mask; x_tag: POS
        one-hot features -- presumably (batch, seq, 45), TODO confirm;
        note this line always concatenates x_tag, so a tensor must be
        passed whenever the model was built with tag=True.
        Returns the training loss when testing=False (y required),
        otherwise per-token log-softmax scores (or CRF viterbi tags
        when crf=True).
        """
        x_emb=torch.cat((self.gen_embedding(x), self.domain_embedding(x) ), dim=2)
        x_emb=self.dropout(x_emb).transpose(1, 2)  # -> (batch, channels, seq) for Conv1d
        x_conv=torch.nn.functional.relu(torch.cat((self.conv1(x_emb), self.conv2(x_emb)), dim=1) )
        x_conv=self.dropout(x_conv)
        x_conv=torch.nn.functional.relu(self.conv3(x_conv) )
        x_conv=self.dropout(x_conv)
        x_conv=torch.nn.functional.relu(self.conv4(x_conv) )
        x_conv=self.dropout(x_conv)
        x_conv=torch.nn.functional.relu(self.conv5(x_conv) )
        x_conv=x_conv.transpose(1, 2)  # back to (batch, seq, channels)
        x_logit=torch.nn.functional.relu(self.linear_ae1(torch.cat((x_conv, x_tag, self.domain_embedding(x)), dim=2) ) )
        x_logit=self.linear_ae2(x_logit)
        if testing:
            if self.crf_flag:
                score=self.crf.viterbi_tags(x_logit, x_mask)
            else:
                x_logit=x_logit.transpose(2, 0)
                score=torch.nn.functional.log_softmax(x_logit).transpose(2, 0)
        else:
            if self.crf_flag:
                # ConditionalRandomField returns a log-likelihood; negate for a loss.
                score=-self.crf(x_logit, y, x_mask)
            else:
                # Pack so padded positions are excluded from the NLL loss.
                x_logit=torch.nn.utils.rnn.pack_padded_sequence(x_logit, x_len, batch_first=True)
                score=torch.nn.functional.nll_loss(torch.nn.functional.log_softmax(x_logit.data), y.data)
        return score
def build_emb_dictionary(fn):
    """Load a space-separated embedding file.

    Each line is '<word> <v1> <v2> ...'.  Returns (wordvecs, word2id)
    where ``wordvecs`` is a float64 numpy array with one row per word in
    file order and ``word2id`` maps each word to its row index.
    """
    words, vectors = [], []
    with open(fn) as handle:
        for line in handle:
            token, *values = line.rstrip().split(' ')
            words.append(token)
            vectors.append([float(v) for v in values])
    return np.array(vectors, dtype=np.double), {w: i for i, w in enumerate(words)}
def prepare_embeddings(fn, gen_emb, embeddings, domain_emb, prep_dir, gen_dim=300, domain_dim=100):
    """Extend the cached embedding matrices with new words found in ``fn``.

    gen_emb / domain_emb are paths to text embedding files; words missing
    from the domain file are backed off to fastText subword vectors
    (domain_emb + '.bin').  Updates word_idx.json, gen.vec.npy and
    <embeddings>.npy inside prep_dir.  Returns None.
    """
    # Collect the vocabulary of the input text.
    text = []
    with open(fn) as f:
        for line in f:
            text = text + word_tokenize(line)
    vocab = sorted(set(text))
    word_idx = {}
    if os.path.exists(prep_dir + 'word_idx.json'):
        with io.open(prep_dir + 'word_idx.json') as f:
            prev_word = json.load(f)
    else:
        prev_word = {}
    # Assign indices to new words, continuing after the existing vocabulary.
    wx = 0
    new_word = []
    for word in vocab:
        if word not in prev_word:
            wx = wx + 1
            new_word.append(word)
            word_idx[word] = wx + len(prev_word)
    prev_word.update(word_idx)
    if new_word == []:
        # Nothing new: the cached matrices are already complete.
        return
    # Set for O(1) membership tests in the file-scan loops below.
    new_word_set = set(new_word)
    embedding_gen = np.zeros((len(prev_word) + 2, gen_dim))
    embedding_domain = np.zeros((len(prev_word) + 2, domain_dim))
    # Copy over previously cached rows, if any.
    if os.path.exists(prep_dir + 'gen.vec.npy'):
        gen_emb_prev = np.load(prep_dir + "gen.vec.npy")
        embedding_gen[:gen_emb_prev.shape[0], :] = gen_emb_prev
    # BUGFIX: this guard previously re-tested 'gen.vec.npy' (copy-paste),
    # so the cached domain matrix could be skipped or the load could crash.
    if os.path.exists(prep_dir + embeddings + '.npy'):
        domain_emb_prev = np.load(prep_dir + embeddings + '.npy')
        embedding_domain[:domain_emb_prev.shape[0], :] = domain_emb_prev
    # Fill rows for the new words from the text embedding files.
    with open(gen_emb) as f:
        for l in f:
            rec = l.rstrip().split(' ')
            if len(rec) == 2:
                continue  # header line ("<count> <dim>")
            if rec[0] in new_word_set:
                embedding_gen[prev_word[rec[0]]] = np.array([float(r) for r in rec[1:]])
    with open(domain_emb) as f:
        for l in f:
            rec = l.rstrip().split(' ')
            if len(rec) == 2:
                continue
            if rec[0] in new_word_set:
                embedding_domain[prev_word[rec[0]]] = np.array([float(r) for r in rec[1:]])
    # Back off to fastText subword vectors for words the domain file missed.
    ftmodel = load_model(domain_emb + ".bin")
    for w in new_word:
        if embedding_domain[word_idx[w]].sum() == 0.:
            embedding_domain[word_idx[w]] = ftmodel.get_word_vector(w)
    with io.open(prep_dir + 'word_idx.json', 'w') as outfile:
        outfile.write(json.dumps(prev_word))
    np.save(prep_dir + 'gen.vec.npy', embedding_gen.astype('float32'))
    np.save(prep_dir + embeddings + '.npy', embedding_domain.astype('float32'))
def prepare_text(fn, POSdir, prep_dir):
    """Tokenise and POS-tag the sentences in ``fn``.

    Returns (raw_X, X, X_tag): the per-sentence token lists, the
    zero-padded word-index matrix, and the zero-padded POS-tag-index
    matrix (both int16), using prep_dir/word_idx.json for word indices.
    """
    pos_tag_list = ['CC', 'CD', 'DT', 'EX', 'FW', 'IN', 'JJ', 'JJR', 'JJS', 'LS', 'MD', 'NN', 'NNS','NNP', 'NNPS', 'PDT', 'POS', 'PRP', 'PRP$', 'RB', 'RBR', 'RBS', 'RP','SYM', 'TO', 'UH', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'WDT', 'WP', 'WP$', 'WRB',',','.',':','$','#',"``","''",'(',')']
    # Tag index 0 is reserved for padding, hence the +1.
    tag_to_num = {tag: i + 1 for i, tag in enumerate(sorted(pos_tag_list))}
    with io.open(prep_dir + 'word_idx.json') as f:
        word_idx = json.load(f)
    # First pass: count non-empty sentences and find the longest one
    # (minimum width 130 columns).
    sentence_size = [-1, 130]
    with open(fn) as f:
        count = 0
        for l in f:
            token = word_tokenize(l)
            if len(token) > 0:
                count = count + 1
            if len(token) > sentence_size[1]:
                sentence_size[1] = len(token)
        sentence_size[0] = count
    X = np.zeros((sentence_size[0], sentence_size[1]), np.int16)
    X_tag = np.zeros((sentence_size[0], sentence_size[1]), np.int16)
    # PERF: construct the Stanford tagger once -- the original rebuilt it
    # inside the per-line loop, spawning a JVM per sentence.
    jar = POSdir + 'stanford-postagger.jar'
    model = POSdir + 'models/english-left3words-distsim.tagger'
    pos_tagger = StanfordPOSTagger(model, jar, encoding='utf8')
    # Second pass: fill the word-index and tag-index matrices.
    with open(fn) as f:
        count = -1
        raw_X = []
        for l in f:
            token = word_tokenize(l)
            pos_tag_stf = [tag_to_num[tag] for (_, tag) in pos_tagger.tag(token)]
            if len(token) > 0:
                count = count + 1
                raw_X.append(token)
                for wx, word in enumerate(token):
                    X[count, wx] = word_idx[word]
                    X_tag[count, wx] = pos_tag_stf[wx]
    return raw_X, X, X_tag
def getAspects(fn, pred_y):
    """Collect the words tagged as aspects (labels 1 or 2) from ``fn``.

    ``pred_y`` is the [sentence, token] prediction matrix produced by
    test().  Returns a list of {'sentence', 'word'} dicts, or the string
    "error" if an empty line is encountered (behaviour kept for
    backward compatibility with existing callers).
    """
    data = []
    count = -1
    with open(fn) as f:
        for l in f:
            token = word_tokenize(l)
            if len(token) > 0:
                count = count + 1
                for wx, word in enumerate(token):
                    # Labels 1 and 2 both mark aspect words; the original
                    # duplicated two byte-identical branches for them.
                    if pred_y[count, wx] in (1, 2):
                        data.append({
                            'sentence': l,
                            'word': word
                        })
            else:
                return "error"
    return data
def test(model, test_X, test_X_tag, raw_X, domain, batch_size=128, crf=False, tag=False):
    """Run the tagger over test_X in batches.

    Returns the predicted tag matrix (same shape as test_X, int16).
    Temporarily switches the model to eval mode and restores train mode
    before returning.
    """
    pred_y=np.zeros((test_X.shape[0], test_X.shape[1]), np.int16)
    model.eval()
    for offset in range(0, test_X.shape[0], batch_size):
        # Sort the batch by descending true length (non-zero entries),
        # as required by pack_padded_sequence inside the model.
        batch_test_X_len=np.sum(test_X[offset:offset+batch_size]!=0, axis=1)
        batch_idx=batch_test_X_len.argsort()[::-1]
        batch_test_X_len=batch_test_X_len[batch_idx]
        batch_test_X_mask=(test_X[offset:offset+batch_size]!=0)[batch_idx].astype(np.uint8)
        batch_test_X=test_X[offset:offset+batch_size][batch_idx]
        batch_test_X_mask=torch.autograd.Variable(torch.from_numpy(batch_test_X_mask).long() )
        batch_test_X = torch.autograd.Variable(torch.from_numpy(batch_test_X).long() )
        if tag:
            # One-hot encode the POS tags, dropping padding class 0.
            batch_test_X_tag = test_X_tag[offset:offset+batch_size][batch_idx]
            batch_test_X_tag_onehot = to_categorical(batch_test_X_tag, num_classes=45+1)[:,:,1:]
            batch_test_X_tag_onehot = torch.autograd.Variable(torch.from_numpy(batch_test_X_tag_onehot).type(torch.FloatTensor) )
        else:
            batch_test_X_tag_onehot = None
        batch_pred_y=model(batch_test_X, batch_test_X_len, batch_test_X_mask, batch_test_X_tag_onehot, testing=True)
        # r_idx undoes the length sort so predictions line up with input order.
        r_idx=batch_idx.argsort()
        if crf:
            # CRF path: viterbi_tags yields per-sentence tag sequences.
            batch_pred_y=[batch_pred_y[idx] for idx in r_idx]
            for ix in range(len(batch_pred_y) ):
                for jx in range(len(batch_pred_y[ix]) ):
                    pred_y[offset+ix,jx]=batch_pred_y[ix][jx]
        else:
            # Softmax path: take the argmax class per token.
            batch_pred_y=batch_pred_y.data.cpu().numpy().argmax(axis=2)[r_idx]
            pred_y[offset:offset+batch_size,:batch_pred_y.shape[1]]=batch_pred_y
    model.train()
    assert len(pred_y)==len(test_X)
    return pred_y
def calculate_aspect(demo_dir, demo_fn, embeddings, model_fn, POSdir, domain, gen_emb, domain_emb, runs, gen_dim, domain_dim, prep_dir, crf=False, tag=True):
    """End-to-end aspect extraction over demo_dir/demo_fn.

    Refreshes the cached embeddings for the file's vocabulary, tokenises
    and POS-tags the text, then runs each of the ``runs`` saved models
    (model_fn + run index) and returns the aspects found.
    """
    fn = demo_dir + demo_fn
    prepare_embeddings(fn, gen_emb, embeddings, domain_emb, prep_dir, gen_dim, domain_dim)
    raw_X, X, X_tag = prepare_text(fn, POSdir, prep_dir)
    for run in range(runs):
        model_fn_run=model_fn+str(run)
        # Load on CPU regardless of where the model was trained.
        model=torch.load(model_fn_run, map_location=lambda storage, loc: storage)
        # Swap in the (possibly grown) embedding tables saved by prepare_embeddings.
        embedding_gen=np.load(prep_dir+"gen.vec.npy")
        embedding_domain=np.load(prep_dir+domain+"_emb.vec.npy")
        model.gen_embedding = torch.nn.Embedding(embedding_gen.shape[0], embedding_gen.shape[1])
        model.gen_embedding.weight=torch.nn.Parameter(torch.from_numpy(embedding_gen).type(torch.FloatTensor), requires_grad=False)
        model.domain_embedding = torch.nn.Embedding(embedding_domain.shape[0], embedding_domain.shape[1])
        model.domain_embedding.weight=torch.nn.Parameter(torch.from_numpy(embedding_domain).type(torch.FloatTensor), requires_grad=False)
        if not tag:
            model.tag_dim = 0
        pred_y = test(model, X, X_tag, raw_X, domain, crf=crf, tag=tag)
        data = getAspects(fn, pred_y)
        # print('data', data)
    # NOTE(review): only the last run's result is returned; presumably
    # intentional, but confirm whether aggregation across runs was intended.
    return data
"adityakumarpr@gmail.com"
] | adityakumarpr@gmail.com |
9cfccbaa8b3879e950521b802914de6661ad47a2 | 23e868036a088139e968b55f80283a9f7c996f8f | /test/functional/feature_minchainwork.py | f322f86860b63278a34141fb523eb5ae2d196377 | [
"MIT"
] | permissive | hiphopcoin24/hiphopcoin24 | ec972602d502df0d131818eae7f903e3acc7e550 | 09b780546ba9e28b452a8641863aafa90def40d1 | refs/heads/master | 2023-05-14T18:44:02.844736 | 2021-06-09T08:51:27 | 2021-06-09T08:51:27 | 369,224,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,109 | py | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Hiphopcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import HiphopcoinTestFramework
from test_framework.util import assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment)
REGTEST_WORK_PER_BLOCK = 2
class MinimumChainWorkTest(HiphopcoinTestFramework):
    def set_test_params(self):
        """Configure a 3-node regtest chain with minimumchainwork thresholds."""
        self.setup_clean_chain = True
        self.num_nodes = 3
        # node0 has no threshold; node1 and node2 require chain work 0x65.
        self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
        # Per-node minimum chain work (decimal mirror of the hex args above).
        self.node_min_work = [0, 101, 101]
    def setup_network(self):
        """Connect the nodes in a chain: node0 <- node1 <- node2."""
        # This test relies on the chain setup being:
        # node0 <- node1 <- node2
        # Before leaving IBD, nodes prefer to download blocks from outbound
        # peers, so ensure that we're mining on an outbound peer and testing
        # block relay to inbound peers.
        self.setup_nodes()
        for i in range(self.num_nodes-1):
            self.connect_nodes(i+1, i)
def run_test(self):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
hashes = self.nodes[0].generatetoaddress(num_blocks_to_generate,
self.nodes[0].get_deterministic_priv_key().address)
self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
# it's reasonable either way for node1 to get the blocks, or not get
# them (since they're below node1's minchainwork).
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
# Node2 shouldn't have any new headers yet, because node1 should not
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
self.log.info("Generating one more block")
self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
self.log.info("Verifying nodes are all synced")
# Because nodes in regtest are all manual connections (eg using
# addnode), node1 should not have disconnected node0. If not for that,
# we'd expect node1 to have disconnected node0 for serving an
# insufficient work chain, in which case we'd need to reconnect them to
# continue the test.
self.sync_all()
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
if __name__ == '__main__':
    # Standard functional-test entry point.
    MinimumChainWorkTest().main()
| [
"hiphopcoin24@gmail.com"
] | hiphopcoin24@gmail.com |
ec0b5f82b94d998180217323ea3f0180998d4bb9 | d046e1651781c258f4997e09be2cfba99e333e90 | /crm/models.py | 84d0108f628625cdcd23f8452334f42de400028e | [] | no_license | Crenault/Assignment2-4deployment | de18470b78097a33af9f2d99143debe705533cb8 | dfdefca6c1861bbd6b69d4f646d46aa881b63766 | refs/heads/main | 2023-08-03T02:18:40.694461 | 2021-10-07T15:20:59 | 2021-10-07T15:20:59 | 414,423,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,652 | py | from django.db import models
from django.utils import timezone
from django.db import models
# Create your models here.
class Customer(models.Model):
    """A customer record with contact/billing details and audit timestamps."""

    cust_name = models.CharField(max_length=50)
    organization = models.CharField(max_length=100, blank=True)  # optional
    role = models.CharField(max_length=100)
    email = models.EmailField(max_length=100)
    # NOTE(review): presumably "building/room"; confirm the intended meaning.
    bldgroom = models.CharField(max_length=100)
    address = models.CharField(max_length=200)
    account_number = models.IntegerField(blank=False, null=False)
    city = models.CharField(max_length=50)
    state = models.CharField(max_length=50)
    zipcode = models.CharField(max_length=10)
    phone_number = models.CharField(max_length=50)
    created_date = models.DateTimeField(
        default=timezone.now)
    # NOTE(review): auto_now_add stamps this once at INSERT only; for a true
    # "last updated" column Django's auto_now=True is the usual choice —
    # confirm intent (the manual updated() helper below partly compensates).
    updated_date = models.DateTimeField(auto_now_add=True)

    def created(self):
        # Manually stamp created_date and persist the row.
        self.created_date = timezone.now()
        self.save()

    def updated(self):
        # Manually stamp updated_date and persist the row.
        self.updated_date = timezone.now()
        self.save()

    def __str__(self):
        # Admin/display representation: the customer's name.
        return str(self.cust_name)
class Service(models.Model):
    """A service booked by a customer: category, time window and charge."""

    # Reverse accessor: customer.services.all()
    cust_name = models.ForeignKey(Customer, on_delete=models.CASCADE, related_name='services')
    service_category = models.CharField(max_length=100)
    description = models.TextField()
    location = models.CharField(max_length=200)
    setup_time = models.DateTimeField(
        default=timezone.now)
    cleanup_time = models.DateTimeField(
        default=timezone.now)
    service_charge = models.DecimalField(max_digits=10, decimal_places=2)
    created_date = models.DateTimeField(
        default=timezone.now)
    # NOTE(review): auto_now_add stamps once at INSERT; auto_now=True would
    # track modifications — confirm intent.
    updated_date = models.DateTimeField(auto_now_add=True)

    def created(self):
        # Manually stamp created_date and persist the row.
        self.created_date = timezone.now()
        self.save()

    def updated(self):
        # Manually stamp updated_date and persist the row.
        self.updated_date = timezone.now()
        self.save()

    def __str__(self):
        # Displayed as the owning customer's name.
        return str(self.cust_name)
class Product(models.Model):
    """A product ordered by a customer: quantity, pickup time and charge."""

    # Reverse accessor: customer.products.all()
    cust_name = models.ForeignKey(Customer, on_delete=models.CASCADE, related_name='products')
    product = models.CharField(max_length=100)
    p_description = models.TextField()
    quantity = models.IntegerField()
    pickup_time = models.DateTimeField(
        default=timezone.now)
    charge = models.DecimalField(max_digits=10, decimal_places=2)
    created_date = models.DateTimeField(
        default=timezone.now)
    # NOTE(review): auto_now_add stamps once at INSERT; auto_now=True would
    # track modifications — confirm intent.
    updated_date = models.DateTimeField(auto_now_add=True)

    def created(self):
        # Manually stamp created_date and persist the row.
        self.created_date = timezone.now()
        self.save()

    def updated(self):
        # Manually stamp updated_date and persist the row.
        self.updated_date = timezone.now()
        self.save()

    def __str__(self):
        # Displayed as the owning customer's name.
        return str(self.cust_name)
| [
"crenault@unomaha.edu"
] | crenault@unomaha.edu |
1edc3c43ccedae5b0f41c6a0f086d3ab554dd904 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_w_M_to_W_focus_Zok_div/ch016/wiColorJ/Add2Loss/Sob_k09_s001_Mae_s001_good/pyr_Tcrop255_p20_j15/pyr_1s/L7/step09_1side_L7.py | c64e2e1afb0db5021bb85517a443b2008fa6e4b9 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,124 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
from tkinter import S
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_I_w_M_to_Wx_Wy_Wz_combine import I_w_M_to_W
from step08_b_use_G_generate_0_util import Tight_crop, Color_jit
from step09_c_train_step import Train_step_I_w_M_to_W
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
# Shared augmentation / generation ops used by every model variant below.
color_jit = Color_jit(do_ratio=0.6)
# Generation op: no jitter (deterministic crop); train step: jit_scale=15 plus color jitter.
use_what_gen_op     = I_w_M_to_W(            separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale=  0) )
use_what_train_step = Train_step_I_w_M_to_W( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 15), color_jit=color_jit )
# Base hidden-channel width for the U-Net builders below.
use_hid_ch = 16

import time
start_time = time.time()  # used to report model-build time in __main__
###############################################################################################################################################################################################
###############################################################################################################################################################################################
########################################################### Block1
### Block1
#########################################################################################
def _ones_at_edges(side_len, total=15):
    """Binary mask of length *total* with *side_len* ones at each end.

    The middle is zero-padded; when 2*side_len >= total the mask is all ones.
    """
    edge = [1] * side_len
    middle = [0] * max(total - 2 * side_len, 0)
    return (edge + middle + edge)[:total]


# Symmetric per-level conv-block masks: k ones at each end of 15 levels.
pyramid_1side_1 = _ones_at_edges(1)
pyramid_1side_2 = _ones_at_edges(2)
pyramid_1side_3 = _ones_at_edges(3)
pyramid_1side_4 = _ones_at_edges(4)
pyramid_1side_5 = _ones_at_edges(5)
pyramid_1side_6 = _ones_at_edges(6)
pyramid_1side_7 = _ones_at_edges(7)
pyramid_1side_8 = _ones_at_edges(8)
#########################################################################################
# One KModel builder per pyramid mask.  All eight share identical
# hyper-parameters except conv_block_num, which selects how many conv blocks
# each of the 15 pyramid levels gets (see pyramid_1side_* above).
ch032_pyramid_1side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=7, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=7, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=7, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=7, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=7, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_5, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=7, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_6, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=7, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_7, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_8 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=7, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_8, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
#########################################################################################
###############################################################################################################################################################################################
if(__name__ == "__main__"):
    # Smoke test: build one variant and push a dummy tensor through it.
    import numpy as np
    print("build_model cost time:", time.time() - start_time)
    data = np.zeros(shape=(1, 512, 512, 1))

    use_model = ch032_pyramid_1side_4
    use_model = use_model.build()
    result = use_model.generator(data)
    print(result.shape)

    # Dump layer weights and the Keras summary for manual inspection.
    from kong_util.tf_model_util import Show_model_weights
    Show_model_weights(use_model.generator)
    use_model.generator.summary()
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
ebd17ffe9ae73b81fb5a213ceed1ff146d771597 | 7203106e3fbda73f059df36443b543453e1edcf3 | /vgg.py | 34d85c9bf2dedce496a6c81cc558a020b82a4033 | [
"BSD-3-Clause"
] | permissive | vincent6606/relative_datasize_comparision | b61fb1a32ee3afd94f396d61f77830a69f149f53 | 9a839dd06642a0f968d85fd3a63f1e8e4f3fe16d | refs/heads/master | 2021-01-19T20:15:35.846151 | 2017-08-23T22:17:17 | 2017-08-23T22:17:17 | 101,226,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | '''VGG11/13/16/19 in Pytorch.'''
# from https://github.com/kuangliu/pytorch-cifar/blob/master/models/vgg.py
import torch
import torch.nn as nn
from torch.autograd import Variable
# Per-variant layer plans: integers are conv output channels, 'M' is a 2x2 max-pool.
cfg = {
    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}


class VGG(nn.Module):
    """Configurable VGG backbone for 32x32 inputs, with a 10-way classifier head.

    Each conv is 3x3/pad-1 followed by BatchNorm and in-place ReLU; 'M'
    entries in the plan insert a 2x2 stride-2 max-pool.
    """

    def __init__(self, vgg_name):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        # After five pools a 32x32 input is 1x1x512, so the head is Linear(512, 10).
        self.classifier = nn.Linear(512, 10)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)  # flatten all but the batch dim
        return self.classifier(flat)

    def _make_layers(self, plan):
        """Translate a cfg plan into an nn.Sequential feature extractor."""
        blocks = []
        channels = 3  # RGB input
        for spec in plan:
            if spec == 'M':
                blocks.append(nn.MaxPool2d(kernel_size=2, stride=2))
                continue
            blocks.extend([
                nn.Conv2d(channels, spec, kernel_size=3, padding=1),
                nn.BatchNorm2d(spec),
                nn.ReLU(inplace=True),
            ])
            channels = spec
        # Trailing 1x1 average pool is a no-op kept for architectural parity.
        blocks.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*blocks)
# net = VGG('VGG11')
# x = torch.randn(2,3,32,32)
# print(net(Variable(x)).size())
| [
"gaohuang.thu@gmail.com"
] | gaohuang.thu@gmail.com |
23606d29882ed9a6328d4cf56274b9d71123adc1 | de2dd13204c3526f90e9073a9653ea5f1371d5a0 | /pdf_app/migrations/0003_auto_20201013_0421.py | 95e9ec31e40d62a27867c6ab098086e7dc5bad9f | [
"MIT"
] | permissive | ShahinZeynalov/Whelp_DRF_Celery_RabbitMQ_Task | c3e165fcd1c2f7630b5e5400bdbca013dd9662de | d6f73c267ad7f10c9e2849de90e3b7d0df26cb52 | refs/heads/main | 2022-12-29T03:08:49.313868 | 2020-10-18T13:34:31 | 2020-10-18T13:34:31 | 302,956,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | # Generated by Django 3.1.2 on 2020-10-13 04:21
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``Pdf`` model to ``Document`` (schema-only change)."""

    dependencies = [
        ('pdf_app', '0002_auto_20201013_0113'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Pdf',
            new_name='Document',
        ),
    ]
| [
"shahin.zeynalov@bk.ru"
] | shahin.zeynalov@bk.ru |
018d9b3755f2b61e7389fcaa2f897f321b097764 | b75975afda7e47d6a6cb7ae4c77201fee3925d73 | /utils/config.py | 1a51899a2618303c2f18f7450adeb09aea25a2cd | [] | no_license | ibanezgomez/WORKERLand | 849c886fc918251e7f6f1df543ab79625bb7aa72 | e9e2328551a5a770451717483e8e5a792856f1ca | refs/heads/master | 2023-02-26T19:22:06.258124 | 2021-02-06T12:04:18 | 2021-02-06T12:04:18 | 336,483,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,638 | py | import os.path
import configparser
class Config:
    """INI-file configuration wrapper around :mod:`configparser`.

    The file name is resolved relative to this module's directory, so the
    same relative name works regardless of the process working directory.
    Mutating helpers return booleans (and lookups return empty defaults)
    instead of raising, which callers rely on.
    """

    def __init__(self, file):
        # Keep the name so save() writes back to the same location.
        self.fname = file
        self.cfg = configparser.ConfigParser()
        # read() silently ignores a missing file, leaving an empty config.
        self.cfg.read(os.path.join(os.path.dirname(__file__), self.fname))

    def getSections(self):
        """Return the list of section names (never raises)."""
        return self.cfg.sections()

    def getSection(self, sec):
        """Return section *sec* as a dict, or {} if it does not exist."""
        try:
            return dict(self.cfg.items(sec))
        except configparser.NoSectionError:
            return {}

    def addSection(self, sec):
        """Create section *sec*; False if it already exists or is invalid."""
        try:
            self.cfg.add_section(sec)
            return True
        except (configparser.DuplicateSectionError, ValueError, TypeError):
            # DuplicateSectionError: already present; ValueError: reserved
            # DEFAULT section; TypeError: non-string name.
            return False

    def delSection(self, sec):
        """Remove section *sec*; True if it existed."""
        return self.cfg.remove_section(sec)

    def isSection(self, sec):
        """True if section *sec* exists."""
        return self.cfg.has_section(sec)

    def getOptions(self, sec):
        """Return the option names in *sec* (raises NoSectionError if missing)."""
        return self.cfg.options(sec)

    def getOption(self, sec, opt):
        """Return the value of *opt* in *sec*, or "" if either is missing."""
        try:
            return self.cfg.get(sec, opt)
        except (configparser.NoSectionError, configparser.NoOptionError):
            return ""

    def setOption(self, sec, opt, val):
        """Set *opt* in *sec* to str(*val*); False if the section is missing."""
        try:
            self.cfg.set(sec, opt, str(val))
            return True
        except (configparser.NoSectionError, TypeError):
            return False

    def delOption(self, sec, opt):
        """Remove *opt* from *sec*; True if it existed."""
        return self.cfg.remove_option(sec, opt)

    def isOption(self, sec, opt):
        """True if *sec* has option *opt*."""
        return self.cfg.has_option(sec, opt)

    def getItems(self, sec):
        """Return {option: value} for *sec*, skipping falsy option names."""
        res = {}
        for name, value in self.cfg.items(sec):
            if name:
                res[name] = value
        return res

    def save(self):
        """Write the current configuration back to the original file."""
        with open(os.path.join(os.path.dirname(__file__), self.fname), 'w') as configfile:
            self.cfg.write(configfile)
| [
"sibanego@mercadona.com"
] | sibanego@mercadona.com |
bb832169d72c70a5c9c76d59a07e0a78f2d4b507 | 959ef16587a72066c4031a953a2c942804df5512 | /neural_net.py | 49a56a066ccebdf6c63f8aba714ebb554ec8f6d4 | [] | no_license | jowe41/cs229 | 297a3c19d7faaae66048e23e3308a4eb7d770a83 | fa72dfded286405be1ecea849bdeefa97cf5538a | refs/heads/master | 2020-06-02T21:35:08.383339 | 2019-06-11T07:29:55 | 2019-06-11T07:29:55 | 191,316,388 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,484 | py | from __future__ import print_function
from builtins import range
from builtins import object
import numpy as np
import matplotlib.pyplot as plt
from past.builtins import xrange
class TwoLayerNet(object):
    """Two-layer net: input -> affine -> ReLU -> affine -> softmax loss.

    Parameters live in self.params: W1 (D,H), b1 (H,), W2 (H,C), b2 (C,).
    """

    def __init__(self, input_size, hidden_size, output_size, std=1e-4):
        # Small random weights, zero biases.
        self.params = {}
        self.params['W1'] = std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

    def loss(self, X, y=None, reg=0.0):
        """Return class scores (N,C), or (loss, grads) when labels are given.

        X: (N, D) minibatch; y: (N,) labels in [0, C); reg: L2 strength.
        """
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        N, D = X.shape

        # Forward pass: affine -> ReLU -> affine.
        scores = None
        Z1 = np.dot(X, W1) + b1
        A1 = np.maximum(0, Z1)
        Z2 = np.dot(A1,W2) + b2
        scores = Z2
        # Inference mode: no labels, just report scores.
        if y is None:
            return scores

        # Softmax cross-entropy loss, averaged over the batch, plus L2 terms.
        # NOTE(review): no max-subtraction before exp — np.exp(scores) can
        # overflow for large scores; confirm inputs keep scores small.
        loss = 0.0
        correctScore = -np.sum(scores[range(N), y])
        loss = correctScore + np.sum(np.log(np.sum(np.exp(scores), axis = 1)),axis=0)
        loss /= N
        loss += reg * np.sum(W1* W1) + reg * np.sum(W2 * W2)

        # Backward pass. dL/dZ2 = softmax(scores) - one_hot(y).
        # NOTE(review): the 1/N factors rely on true division (Python 3);
        # under Python 2 integer division 1/N would be 0.
        grads = {}
        C = W2.shape[1]
        countOfX = np.zeros((N, C))+ np.exp(scores)/ np.sum(np.exp(scores), axis = 1).reshape(-1,1)
        countOfX[range(N), y] -= 1
        dZ2 = countOfX
        grads['W2'] = 1/N * np.dot(A1.T, countOfX) + 2 * reg * W2
        grads['b2'] = np.sum(dZ2, axis = 0)/N
        # Backprop through ReLU: zero out gradient where the unit was inactive.
        dZ1 = np.dot(dZ2, W2.T)
        dZ1[A1 <= 0] = 0
        grads['W1'] = 1/N * np.dot(X.T, dZ1) + 2 * reg * W1
        grads['b1'] = np.sum(dZ1, axis = 0)/N
        return loss, grads

    def train(self, X, y, X_val, y_val,
            learning_rate=1e-3, learning_rate_decay=0.95,
            reg=5e-6, num_iters=100,
            batch_size=200, verbose=False):
        """Minibatch SGD training loop; returns loss/accuracy histories."""
        num_train = X.shape[0]
        iterations_per_epoch = max(num_train / batch_size, 1)
        loss_history = []
        train_acc_history = []
        val_acc_history = []
        for it in range(num_iters):
            X_batch = None
            y_batch = None
            # Sample a minibatch with replacement (faster than without).
            index = np.random.choice(num_train, batch_size,replace = True)
            X_batch = X[index]
            y_batch = y[index]
            loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
            loss_history.append(loss)
            # Vanilla gradient step on every parameter.
            self.params['W2'] -= learning_rate * grads['W2']
            self.params['b2'] -= learning_rate * grads['b2']
            self.params['W1'] -= learning_rate * grads['W1']
            self.params['b1'] -= learning_rate * grads['b1']
            if verbose and it % 100 == 0:
                print('iteration %d / %d: loss %f' % (it, num_iters, loss))
            # Once per epoch: record train/val accuracy and decay the LR.
            if it % iterations_per_epoch == 0:
                train_acc = (self.predict(X_batch) == y_batch).mean()
                val_acc = (self.predict(X_val) == y_val).mean()
                train_acc_history.append(train_acc)
                val_acc_history.append(val_acc)
                learning_rate *= learning_rate_decay
        return {
            'loss_history': loss_history,
            'train_acc_history': train_acc_history,
            'val_acc_history': val_acc_history,
        }

    def predict(self, X):
        """Return predicted class indices (N,) for inputs X (N, D)."""
        Z1 = np.dot(X, self.params['W1']) + self.params['b1']
        A1 = np.maximum(0, Z1)
        Z2 = np.dot(A1,self.params['W2']) + self.params['b2']
        # Highest score per row wins.
        y_pred = np.argmax(Z2, axis = 1)
        return y_pred
| [
"yuey3@stanford.edu"
] | yuey3@stanford.edu |
647bd697f1a0cbbb05edc55913aeee0797062755 | 50b44b9cd86f0c8536663e85d18bd0c0215517ff | /programming/SecurityGames/test.py | c9d4e5659a537c5e1465438a8cf23163c8e2a4fb | [] | no_license | cschaffner/ModernCryptographyCourse | 1b5e5ef13d3f6458be7616ae158b54dd7b5efe37 | 88e7cfe3d67c93c7503554cb396dd674dfa37390 | refs/heads/master | 2022-10-22T11:50:47.999030 | 2022-09-19T06:47:58 | 2022-09-19T06:47:58 | 98,519,482 | 3 | 5 | null | 2019-09-01T19:49:26 | 2017-07-27T09:38:10 | TeX | UTF-8 | Python | false | false | 1,289 | py | import nacl.secret
import nacl.utils
# This must be kept secret, this is the combination to your safe
key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE)
# This is your safe, you can use it to encrypt or decrypt messages
box = nacl.secret.SecretBox(key)
# This is our message to send, it must be a bytestring as SecretBox will
# treat it as just a binary blob of data.
message = b"The president will be exiting through the lower levels"
# Encrypt our message, it will be exactly 40 bytes longer than the
# original message as it stores authentication information and the
# nonce alongside it.
encrypted = box.encrypt(message)
assert len(encrypted) == len(message) + box.NONCE_SIZE + box.MACBYTES
nonce = nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)
encrypted = box.encrypt(message, nonce)
# since we are transmitting the nonce by some other means,
# we just need to get the ciphertext and authentication data
ctext = encrypted.ciphertext
# ctext is just nacl.secret.SecretBox.MACBYTES longer
# than the original message
assert len(ctext) == len(message) + box.MACBYTES
# Decrypt our message, an exception will be raised if the encryption was
# tampered with or there was otherwise an error.
plaintext = box.decrypt(encrypted)
print(plaintext.decode('utf-8')) | [
"huebli@gmail.com"
] | huebli@gmail.com |
ecb797705d4380b014ac224de86a2b3ca7fbe0de | 029b18378b54856f6982cf3a73982b5285c2ff57 | /assignment1/cs231n/classifiers/linear_classifier.py | 22b624caa7e1dbd171409817f28da4d614335f49 | [] | no_license | Allensmile/cs231n_Convolutional-Neural-Networks-for-Visual-Recognition | 15f07693757a439776e7da22f2ac4e2cf6f78611 | bbae799b71c533ffb52ff9248ce9c92cfa76be6e | refs/heads/cs231n-0821 | 2021-01-01T19:05:11.608175 | 2016-08-22T04:39:20 | 2016-08-22T04:39:20 | 98,504,340 | 1 | 0 | null | 2017-07-27T07:01:01 | 2017-07-27T07:01:01 | null | UTF-8 | Python | false | false | 6,325 | py | import numpy as np
from cs231n.classifiers.linear_svm import *
from cs231n.classifiers.softmax import *
class LinearClassifier(object):
    """Base class for linear models trained with minibatch SGD + momentum.

    Subclasses implement loss(); train()/predict() are shared.
    """

    def __init__(self):
        # Weight matrix (D, C); (re)initialized inside train().
        self.W = None

    def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,batch_size=200, verbose=False):
        """
        Train this linear classifier using stochastic gradient descent.
        Inputs:
        - X: A numpy array of shape (N, D) containing training data; there are N
          training samples each of dimension D.
        - y: A numpy array of shape (N,) containing training labels; y[i] = c
          means that X[i] has label 0 <= c < C for C classes.
        - learning_rate: (float) learning rate for optimization.
        - reg: (float) regularization strength.
        - num_iters: (integer) number of steps to take when optimizing
        - batch_size: (integer) number of training examples to use at each step.
        - verbose: (boolean) If true, print progress during optimization.
        Outputs:
        A list containing the value of the loss function at each training iteration.
        """
        num_train, dim = X.shape
        num_classes = np.max(y) + 1 # assume y takes values 0...K-1 where K is number of classes
        #if self.W is None:
        # lazily initialize W---->always initialize)
        # He-style scaling of the random init: 0.001 * N(0,1) * sqrt(dim/2).
        self.W = 0.001 * np.random.randn(dim, num_classes)*np.sqrt(dim/2.0)

        # Run stochastic gradient descent to optimize W
        loss_history = []
        #Try using momentum update
        v=0 #init to zero
        mu=0.5 #int to 0.5, and increase it later.
        for it in xrange(num_iters):
            # NOTE(review): this tests num_iters, not the loop variable `it`;
            # when num_iters is a multiple of 100, mu grows on *every*
            # iteration (capped at 0.99 below).  Likely intended: it % 100.
            if num_iters%100==0:
                mu+=0.05
            #if num_iters>=1500:
            #    learning_rate*=0.7
            if mu>=0.99:
                mu=0.99
            X_batch = None
            y_batch = None
            # Sample a minibatch with replacement (faster than without;
            # X_batch is (batch_size, dim), y_batch is (batch_size,)).
            batch_indices=np.random.choice(num_train,batch_size,replace=True)
            X_batch=X[batch_indices] #('X_batch.shape:', (200L, 3073L))
            #print("X_batch.shape:",X_batch.shape)
            y_batch=y[batch_indices] #('y_batch.shape:', 200)
            #print("y_batch.shape:",len(y_batch))
            #loss_vectorized, grad_vectorized = svm_loss_vectorized(self.W, X_batch, y_batch, reg)
            #self.W+=-learning_rate*grad_vectorized
            # Evaluate loss and gradient via the subclass's loss().
            loss, grad = self.loss(X_batch, y_batch, reg)
            loss_history.append(loss)
            # Momentum update: v <- mu*v - lr*grad ; W <- W + v
            #self.W-=learning_rate*grad
            v=mu*v-learning_rate*grad
            self.W+=v
            if verbose and it % 100 == 0:
                print 'iteration %d / %d: loss %f' % (it, num_iters, loss)
        return loss_history

    def predict(self, X):
        """
        Use the trained weights of this linear classifier to predict labels for
        data points.
        Inputs:
        - X: D x N array of training data. Each column is a D-dimensional point.
        Returns:
        - y_pred: Predicted labels for the data in X. y_pred is a 1-dimensional
          array of length N, and each element is an integer giving the predicted
          class.
        """
        # NOTE(review): train() treats X as (N, D) and this computes
        # X.dot(self.W), so the "D x N" layout in the docstring looks stale.
        y_pred = np.zeros(X.shape[1])
        scores=X.dot(self.W) #1.get scores
        y_pred=np.argmax(scores,axis=1) #2.find the index for highest value in the row
        return y_pred

    def loss(self, X_batch, y_batch, reg):
        """
        Compute the loss function and its derivative.
        Subclasses will override this.
        Inputs:
        - X_batch: A numpy array of shape (N, D) containing a minibatch of N
          data points; each point has dimension D.
        - y_batch: A numpy array of shape (N,) containing labels for the minibatch.
        - reg: (float) regularization strength.
        Returns: A tuple containing:
        - loss as a single float
        - gradient with respect to self.W; an array of the same shape as W
        """
        pass
class LinearSVM(LinearClassifier):
    """ A subclass that uses the Multiclass SVM loss function """

    def loss(self, X_batch, y_batch, reg):
        # Delegate to the vectorized multiclass hinge-loss implementation.
        return svm_loss_vectorized(self.W, X_batch, y_batch, reg)
class Softmax(LinearClassifier):
    """ A subclass that uses the Softmax + Cross-entropy loss function """

    def loss(self, X_batch, y_batch, reg):
        # Delegate to the vectorized softmax cross-entropy implementation.
        return softmax_loss_vectorized(self.W, X_batch, y_batch, reg)
| [
"brightmart@hotmail.com"
] | brightmart@hotmail.com |
b71f5823bdb10518efd51fc8632e0a4311b64fcb | 3993f4e8b254f23485a570fcfdc472fc6a216333 | /menu1.py | 6e77809013d3a7e140a03e78bfdf521776d59607 | [] | no_license | bipashaghosal/ATM_PROJECT | 8a6dc697627d4b3b43bbbd6f6601befe2e03da2d | 7b1f4bea63ba9d23c42373a283640218356c8ceb | refs/heads/master | 2022-10-16T09:47:17.085126 | 2020-06-12T17:10:42 | 2020-06-12T17:10:42 | 271,846,676 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | from login import login
from create_account import create_account
from read_file import read_file
from menu2 import clear_screen
# Load all persisted accounts once at import time; shared by the menu actions.
accounts_list = read_file('Accounts.txt')
def menu1():
    """Show the top-level ATM menu and dispatch the user's choice.

    Re-displays the menu (via recursion) on invalid input instead of
    crashing; option 3 terminates the program.
    """
    print('>>>>>>>>WELCOME<<<<<<<<\n')
    try:
        choice = int(input('1) Login\n2) Create Account\n3) Exit\n\nchoice>> '))
    except ValueError:
        # Non-numeric input previously crashed with ValueError; treat it
        # the same as an out-of-range menu choice.
        clear_screen()
        print("ERROR: Wrong choice\n")
        menu1()
        return
    if choice == 1:
        clear_screen()
        try:
            # (ctrl+c) inside the login screen returns to this menu
            login(accounts_list)
        except KeyboardInterrupt:
            clear_screen()
    elif choice == 2:
        create_account(accounts_list)
    elif choice == 3:
        # close the program
        exit(0)
    else:
        clear_screen()
        print("ERROR: Wrong choice\n")
        menu1()
if __name__ == '__main__':
    # Script entry point: show the top-level menu.
    menu1()
"noreply@github.com"
] | bipashaghosal.noreply@github.com |
c419a8b822ad2e4ef64abd24d7d521bdda0a2da5 | 64ab350920457caa85dac7aabb27ae285d89227e | /task 2/replacement_by_division.py | 43e6e9d1e2e4723136ecac5aac015a9c3f199e97 | [] | no_license | FlyOn21/test_finteh_consalt | 999e6c9cedbbf4bca04325fc840f969cc214edb7 | 9fb04398a68a2d0ec9c3522e81febcf04317fcb0 | refs/heads/master | 2023-06-12T07:16:14.628921 | 2021-07-02T02:02:52 | 2021-07-02T02:02:52 | 382,202,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | def replacement_by_division():
for char in range(11, 80):
if char % 3 == 0 and char % 5 == 0:
print("$$@@", end=' ')
elif char % 3 == 0:
print("$$", end=' ')
elif char % 5 == 0:
print("@@", end=' ')
else:
print(char, end=' ')
if __name__ == "__main__":
    # Run the demo when executed as a script.
    replacement_by_division()
| [
"zhogolevpv@gmail.com"
] | zhogolevpv@gmail.com |
758a2616fab81c1118c09b9281baa8ed60066916 | 95dc1426aa7b854e522f6ad5bcac97e7f7239c2c | /script/app.py | 97a485834d710efa1a6e4dbce52e95081a462a2d | [] | no_license | JIMhackKING/myBlog | d3eb1a5f18db6e93ec04f43e1026b3656c722a39 | 99e8d8988ccf5e97d74ab1f395e4934e406e1419 | refs/heads/master | 2020-12-02T22:23:47.611887 | 2017-07-27T05:30:20 | 2017-07-27T05:30:20 | 96,127,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,060 | py | #-*-coding:utf-8-*-
#qpy://127.0.0.1:8080/
"""
This is a sample of WebApp
@Author JIMhackKING
"""
import bottle
from bottle import request, response, default_app, redirect
from bottle import run, debug, route, error, static_file, template, view
import sqlite3
import json
import md5
import threading
import os
#### Constant definitions #########
ASSETS = "/assets/"   # static-assets subdirectory (URL and filesystem suffix)
ROOT = os.getcwd()    # project root; assumes the server is started from it
@route('/my_ip')
def my_ip():
    """Echo the client's IP address back as an HTML snippet."""
    # bottle exposes the WSGI environ through dict-style access on `request`.
    ip = request['REMOTE_ADDR']
    return '<h1>Your ip address is: <font color="red">%s</font></h1>' %ip
@error(404)
def page404(error):
    """Render the custom 404 template for unknown URLs."""
    return template("404",
        page_name = "404",
    )
# Match URLs that do not end with "/" and redirect to the canonical
# trailing-slash form.
@route("/<path:re:[\w\/]+\w$>")
def redirect_path(path):
    return redirect("/%s/" %path)
# The two route handlers (the redirect above and this catch-all) must stay in
# this order, otherwise this route would answer first.
# Serve static files.
@route("/<filepath:path>")
def assets(filepath):
    """Serve static assets from ROOT + ASSETS."""
    return static_file(filepath, root=ROOT+ASSETS)
# Admin login page.
@route("/admin/")
def admin_login():
    """Render the admin login form; show an error banner when ?msg= is set."""
    wrong = True if request.query.get("msg") else False
    return template("admin",
        page_name = "admin",
        wrong=wrong,
    )
@route("/admin/login/", method="POST")
def login_success():
username = request.forms.get("username")
password = md5.new(request.forms.get("password")).hexdigest()
# 设置响应头
# response.set_header("Content-Type","application/json")
response.set_header("Creater","JIMhackKING")
try:
conn = sqlite3.connect("db.sqlite3")
cursor = conn.cursor()
cursor.execute("select * from admin_user where username=? and password=?", (username, password))
if not cursor.fetchone():
# 重定向
return redirect("/admin/?msg=WrongPassword") # 暂时无法用 /admin?msg... 来做地址
finally:
cursor.close()
conn.close()
return '<h1 style="text-align:center;">No content yet.</h1>' # 后台管理页面
# Blog template from purecss.io layouts.
# TODO: add a dynamic URL, e.g. /blog/DHC/.
@route("/")
def blog():
    """Render the blog front page."""
    return template("pure-layout-blog",
        page_name = "pure-layout-blog",
    )

@route("/", method="POST")
def blog_logined():
    """Render the same front page after a login form POST to /."""
    return template("pure-layout-blog",
        page_name = "pure-layout-blog",
    )

@route("/login/")
def blog_login():
    """Render the blog login form."""
    return template("blog-login",
        page_name = "blog-login",
    )

@route("/version/")
def version_information():
    """Render the version-history page."""
    return template("version-information",
        page_name = "version-information",
    )
"""
改进计划:
· 为版本信息页创建模板,将各版本信息做成一块,在后台管理系统里面添加版本信息管理功能,每次版本更新只需要在后台管理系统进行编辑和添加。
· 管理员可在后台管理系统编辑博客,在博客主页用户可以编辑博客
· 增加精华、排行等功能
"""
application = default_app() | [
"a601767890@sina.com"
] | a601767890@sina.com |
50587e5954677e11ceae53851f78af9e5bcfa727 | 458ff3c3611bb969f96ff3d3e15108fa9ec88316 | /quiz/migrations/0004_auto_20201209_2057.py | 9ddaf47129300087df96cc291ab7fda68b428ff2 | [] | no_license | mayank5044/Navigus | 8164809d87c5f3112565549229327ea20d090898 | aa03a99583efe4b7e9e7d1cb4a450e559f36d475 | refs/heads/master | 2023-08-11T01:30:21.115338 | 2021-10-09T06:47:24 | 2021-10-09T06:47:24 | 414,919,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: point Result.exam at quiz.Course and
    Result.student at student.Student (both cascade on delete)."""

    dependencies = [
        ('student', '0002_remove_student_status'),
        ('quiz', '0003_result'),
    ]

    operations = [
        migrations.AlterField(
            model_name='result',
            name='exam',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quiz.Course'),
        ),
        migrations.AlterField(
            model_name='result',
            name='student',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='student.Student'),
        ),
    ]
| [
"iutkarshyadav192000@gmail.com"
] | iutkarshyadav192000@gmail.com |
a515d75c4adddd090567a13c1ecba9d797734bb2 | 6803de534702eb311f298ea51c4736d5d0aeaa10 | /lvp/__init__.py | a712817a2f60a84889822bf3832930d80bc4a9c3 | [] | no_license | mmsobral/lvp | 4e2d6587be1d3344f39df7f3a9f38a5f3d30aa0a | b9d244f7f220a70d976fd772ef569a5acf2fd73e | refs/heads/master | 2020-12-24T09:08:26.126330 | 2019-08-12T17:08:53 | 2019-08-12T17:08:53 | 73,304,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | __package__ = __name__
| [
"msobral@gmail.com"
] | msobral@gmail.com |
baed34dff5b6291a245a5b0525a858aeba9dc2b8 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/FwupdPlugin/FirmwareClass.py | 41cfcb07d1b2a17378eab46a1dbbc60611507f68 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 5,303 | py | # encoding: utf-8
# module gi.repository.FwupdPlugin
# from /usr/lib64/girepository-1.0/FwupdPlugin-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Fwupd as __gi_repository_Fwupd
import gobject as __gobject
class FirmwareClass(__gi.Struct):
"""
:Constructors:
::
FirmwareClass()
"""
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
padding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
parent_class = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
parse = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
tokenize = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
to_string = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
write = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(FirmwareClass), '__module__': 'gi.repository.FwupdPlugin', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'FirmwareClass' objects>, '__weakref__': <attribute '__weakref__' of 'FirmwareClass' objects>, '__doc__': None, 'parent_class': <property object at 0x7feb1afdfd60>, 'parse': <property object at 0x7feb1afdfe50>, 'write': <property object at 0x7feb1afdff40>, 'to_string': <property object at 0x7feb1afe2090>, 'tokenize': <property object at 0x7feb1afe2180>, 'padding': <property object at 0x7feb1afe2270>})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(FirmwareClass)
| [
"ttys3@outlook.com"
] | ttys3@outlook.com |
3dacf8f46746f65ab751afcfc711af745c7e18e3 | c4b9347346f21dd3d21a722b6b4e46f39bb1ea45 | /Django Tutorial for Beginners - 32 - UpdateView and DeleteView/thenameoftheproject/music/views.py | 80c27b8436ce1553e95a951ba07f1e6193f2e53b | [] | no_license | salahtobok/Django-Tutorial-for-Beginners- | 0ca73e85b202188f0de63a7a4e32838a1e477638 | ecbaa7f2518ac22d902b94483281c6cffc0123ae | refs/heads/master | 2021-06-28T03:42:23.513945 | 2020-01-20T20:03:24 | 2020-01-20T20:03:24 | 235,185,873 | 0 | 0 | null | 2021-06-10T22:30:15 | 2020-01-20T19:50:10 | Python | UTF-8 | Python | false | false | 1,985 | py | from django.shortcuts import render
# Create your views here.
from django.http import Http404
from django.http import HttpResponse
# from django.shortcuts import render, get_object_or_404
# from .models import Album, Song
#
#
# def index(request):
# all_albums = Album.objects.all()
# context = {
# 'all_albums': all_albums,
# }
# return render(request, 'music/index.html', context)
#
#
# def detail(request, album_id):
# # album = Album.objects.get(pk=album_id)
# album = get_object_or_404(Album, pk=album_id)
# return render(request, 'music/detail.html', {'album': album})
#
#
# def favorite(request, album_id):
# album = get_object_or_404(Album, pk=album_id)
# try:
# selected_song = album.song_set.get(pk=request.POST['song'])
# except (KeyError, Song.DoesNotExist):
# return render(request, 'music/detail.html',
# {'album': album, 'error_message': "You did not select a valid song"})
# else:
# selected_song.is_favorite = True
# selected_song.save()
# return render(request, 'music/detail.html', {'album': album})
from django.views import generic
from .models import Album
from django.views.generic.edit import CreateView,UpdateView,DeleteView
from django.urls import reverse_lazy
class IndexView(generic.ListView):
    """List view showing every Album."""
    template_name = 'music/index.html'
    # Expose the queryset to the template as "all_albums" instead of the
    # default "object_list".
    context_object_name = 'all_albums'
    def get_queryset(self):
        # No filtering or ordering: every album in the database is listed.
        return Album.objects.all()
class DetailView(generic.DetailView):
    """Detail page for a single Album (looked up by pk from the URL)."""
    model = Album
    template_name = 'music/detail.html'
class AlbumCreate(CreateView):
    """Form view for creating a new Album."""
    model = Album
    fields = ['artist','album_title','genre','album_logo']
    template_name = 'music/create_album.html'
class AlbumUpdate(UpdateView):
    """Form view for editing an existing Album (same fields as creation)."""
    model = Album
    fields = ['artist', 'album_title', 'genre', 'album_logo']
    template_name = 'music/update_album.html'
class AlbumDelete(DeleteView):
    """Confirmation view that deletes an Album.

    success_url uses reverse_lazy because the URLconf is not guaranteed to be
    loaded when this module is imported.
    """
    model = Album
    success_url = reverse_lazy('music:index')
"salahtobok1@gmail.com"
] | salahtobok1@gmail.com |
d306b8f94c038f7897496da1ee2c40d1caafe459 | 82ed4adb96f6a134f51cc2eb33a13730badbb993 | /3_5_6_xfail_test.py | 1ac803d029b7ed6cf1191898562cb14d300b159c | [] | no_license | yesandv/webdriver | 527ed3248c2fa2e8a0d670684c21ce62cbd71d7b | 62a12672f94cab5d5a16558027ea804b307a4116 | refs/heads/master | 2023-06-20T21:10:18.102129 | 2021-08-01T16:20:51 | 2021-08-01T16:20:51 | 351,724,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | import pytest
@pytest.mark.xfail(strict=True)
def test_succeed():
    # Marked xfail(strict=True) yet the assertion passes, so pytest reports
    # XPASS(strict), i.e. a suite failure — presumably intentional, to
    # demonstrate strict-xfail semantics.
    assert True
@pytest.mark.xfail
def test_not_succeed():
    # Expected failure: reported as "xfail", not as a suite failure.
    assert False
@pytest.mark.skip
def test_skipped():
    # Never executed; the failing assert is unreachable.
    assert False
"grc@protonmail.com"
] | grc@protonmail.com |
944140c7bba8ea526c0edc0595e380ce65ebcc98 | 690e8f0a853c1f27bae688f021e8c27e62ca9613 | /auth/auth/settings.py | 9d8a145494269d4ab9cebf00c17bb3722ad9be69 | [] | no_license | MaksimLion/django-rest-authentication | d77c8b59e89c80a9f8c98fb7b038bebb431ffc0e | 8445354f761d0624a97faa490d8872be5994da5e | refs/heads/master | 2020-05-04T15:16:51.320819 | 2019-04-03T08:22:16 | 2019-04-03T08:22:16 | 179,233,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,303 | py | """
Django settings for auth project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g(e@i27l0_x85jylbz*$s8ld&+!+td179gwggfrvwope#(dpj9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'authentication'
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication', # <-- And here
],
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'auth.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'auth.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"maxim226356@mail.ru"
] | maxim226356@mail.ru |
f14b168641ff27e660651bf22652a3ef55c54749 | 3c4aa6f3982665a1abf2ef102089e55b5e121e0f | /Mnist数据集/complete/mnist_inference2.py | 165a220d75d59af077d85b22334f02cc415a09bc | [] | no_license | ICDI0906/Tensorflow-Google | ceeecad6ec7d27c8e04674a864d978b076b94797 | 689bcd7d92f2382b08bbc61e65ad159d8c0d0667 | refs/heads/master | 2021-10-23T03:19:55.249653 | 2019-03-14T13:35:10 | 2019-03-14T13:35:10 | 140,812,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | import tensorflow as tf
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1 = 500
def get_weight_variable(shape, regularizer):
    # Create (or, under variable-scope reuse, fetch) a weight tensor named
    # "weights", initialised from a truncated normal with stddev 0.1.
    weights = tf.get_variable("weights",shape=shape,initializer=tf.truncated_normal_initializer(stddev=0.1))
    # When a regulariser is supplied, register its penalty in the "losses"
    # collection; the training loop is presumably expected to sum that
    # collection into the total loss — TODO confirm against the caller.
    if regularizer is not None:
        tf.add_to_collection("losses",regularizer(weights))
    return weights
def inference(input_tensor,regularizer):
    # Two-layer fully connected forward pass for MNIST: 784 -> 500 (ReLU) -> 10.
    # Weight regularisation penalties are accumulated in the "losses"
    # collection by get_weight_variable.
    with tf.variable_scope("layer1"):
        weights = get_weight_variable([INPUT_NODE,LAYER1],regularizer)
        # biases = tf.get_variable("biases",[LAYER1], initializer =tf.constant.initializer(0.0))
        biases = tf.get_variable("biases", [LAYER1], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor,weights) + biases)
    with tf.variable_scope("layer2"):
        weights = get_weight_variable([LAYER1,OUTPUT_NODE],regularizer)
        biases = tf.get_variable("biases",[OUTPUT_NODE],initializer=tf.constant_initializer(0.0))
        # Raw logits — no softmax here; typically paired with a
        # softmax-cross-entropy loss during training.
        layer2 = tf.matmul(layer1,weights) + biases
    return layer2
| [
"18217060267@163.com"
] | 18217060267@163.com |
a34c7393b58e61df1bc7b5856b3712bc68f4527a | 7eae4148338af0c2a3d0ea666a893c7de68aa3e6 | /keys.py | 90879e935769c90763761e36c8fffc68c64fc688 | [] | no_license | ObiekweAgbu/ContextClassify | 12052aa07bdc583b8ba9a1d217a9416ed422a443 | 76bf6d6f2f500cbf8f6c563db9f938f70a95d04e | refs/heads/main | 2023-04-25T01:44:17.603720 | 2021-05-11T12:48:24 | 2021-05-11T12:48:24 | 349,819,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | import os
# Set environment variables
os.environ['API_KEY'] = 'GezgtbamkKSyd6ymdDDVtdRPl'
os.environ['API_SECRET_KEY'] = 'YpLyWbVJ3RHE401TjWQly9CJDldeesMdaBgUPyB5LXE2ZuhdHN'
os.environ['API_TOKEN'] = '1326464693834280960-hkbFL3ucoGwjarDJDeb2cSyvJe1vRY'
os.environ['API_SECRET_TOKEN'] = 'ydkGGjXZcLroXcjIKmrKuGkxoCkDWJfie3lJNPZyXaFLe'
| [
"noreply@github.com"
] | ObiekweAgbu.noreply@github.com |
ec23e68cc6c87bc920ff4577ae5baee9a4223ce1 | 5bafbb876741c44b2862b4e1ce7268a16dedbe88 | /communities/discussion/migrations/0009_question_answered.py | 37980e7472e192494cb4338c72833e01080152d3 | [] | no_license | mostafa-yasen/MedicalHub | 89013f4318be6da1915ca8979922e7d4dab33a18 | 9e03930680bc60bc22b99e6c1cd5847afed0df03 | refs/heads/master | 2020-05-07T14:42:39.179653 | 2019-04-11T01:38:36 | 2019-04-11T01:38:36 | 180,605,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # Generated by Django 2.2 on 2019-04-09 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('discussion', '0008_question_asked_to'),
]
operations = [
migrations.AddField(
model_name='question',
name='answered',
field=models.BooleanField(default=False),
),
]
| [
"mostafa.ys2014@gmail.com"
] | mostafa.ys2014@gmail.com |
77f79dcd257d593e5b55849e752763859c88ce62 | 065264a7330d8b6697c1dfa4f440f69652fd893d | /meritapp/apps.py | 126d684357493267509a3daa611c1faf69819ca7 | [] | no_license | pradeepreddyDev/meritstepfinal | 491ba0e3e91f779ed78e467a94245e13f71ff7ff | 4e44691044c4cfa24f185024a5b67c03560695bb | refs/heads/master | 2020-09-09T00:23:55.231896 | 2019-11-12T18:46:00 | 2019-11-12T18:46:00 | 221,287,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | from django.apps import AppConfig
class MeritappConfig(AppConfig):
    """Django application configuration for the ``meritapp`` app."""
    name = 'meritapp'
| [
"pradeepnagireddy.it@gmail.com"
] | pradeepnagireddy.it@gmail.com |
d1bdc816ef14dbb9698a37af082dbc2f665ef045 | 434b6556038ad326ffaa8584a8a91edf8ad5c037 | /BST-1/CheckBST-1.py | 6677cdfe3c533653ccfc336a478ee2090bd1405b | [] | no_license | Pranav016/DS-Algo-in-Python | 60702460ad6639dd3e8a1fdc3caf0821b8e0b4c2 | 5557e371ccdf801d78ba123ca83c0dd47b3bdb3b | refs/heads/master | 2023-01-23T08:29:32.186861 | 2020-11-01T17:14:12 | 2020-11-01T17:14:12 | 284,651,382 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | import queue
class BinaryTreeNode:
    """A binary-tree node holding ``data`` and left/right child links."""
    def __init__(self,data):
        self.data=data
        self.left=None   # left child (BinaryTreeNode) or None
        self.right=None  # right child (BinaryTreeNode) or None
def minimumNode(root):
    """Return the smallest ``data`` value in the (sub)tree rooted at ``root``.

    Returns float('inf') for an empty tree so that it is the identity for the
    caller's min(). (The original hard-coded 1000000, which silently produced
    wrong answers for trees containing values >= 10**6.)
    """
    if root is None:
        return float('inf')
    leftMin = minimumNode(root.left)
    rightMin = minimumNode(root.right)
    return min(root.data, leftMin, rightMin)
def maximumNode(root):
    """Return the largest ``data`` value in the (sub)tree rooted at ``root``.

    Returns float('-inf') for an empty tree so that it is the identity for the
    caller's max(). (The original hard-coded -1000000, which silently produced
    wrong answers for trees containing values <= -10**6.)
    """
    if root is None:
        return float('-inf')
    leftMax = maximumNode(root.left)
    rightMax = maximumNode(root.right)
    return max(root.data, leftMax, rightMax)
def isBST(root):
    """Check whether the binary tree rooted at ``root`` is a binary search tree.

    Duplicate keys are permitted only in the right subtree, matching the
    original comparisons (fail when root.data <= max(left subtree) or
    root.data > min(right subtree)).

    Rewritten as a single post-order pass that returns (is_bst, min, max) per
    subtree, replacing the original O(n^2) per-node min/max rescans with O(n).
    """
    def _check(node):
        # Returns (is_bst, subtree_min, subtree_max). Empty subtrees use
        # +/-inf sentinels so they never constrain the parent's comparison.
        if node is None:
            return True, float('inf'), float('-inf')
        left_ok, left_min, left_max = _check(node.left)
        right_ok, right_min, right_max = _check(node.right)
        ok = (left_ok and right_ok
              and node.data > left_max
              and node.data <= right_min)
        return ok, min(node.data, left_min), max(node.data, right_max)

    return _check(root)[0]
def buildLevelTree(levelorder):
    """Build a binary tree from a level-order listing.

    ``levelorder`` supplies two entries (left child, right child) for every
    node that is emitted, with -1 standing for "no child". Returns the root
    node, or None when the listing is empty or starts with -1.
    """
    if not levelorder or levelorder[0] == -1:
        return None
    root = BinaryTreeNode(levelorder[0])
    cursor = 1
    pending = queue.Queue()
    pending.put(root)
    while not pending.empty():
        node = pending.get()
        # Consume exactly two entries per dequeued node: left then right.
        for side in ("left", "right"):
            value = levelorder[cursor]
            cursor += 1
            if value != -1:
                child = BinaryTreeNode(value)
                setattr(node, side, child)
                pending.put(child)
    return root
# Main
levelOrder = [int(i) for i in input().strip().split()]
root = buildLevelTree(levelOrder)
print(isBST(root)) | [
"pranavmendi@gmail.com"
] | pranavmendi@gmail.com |
1883933cded781cbbdc66741fd1229fef1b0c92d | 1d98f0a85b6567ddce113005f3b775f51a59889e | /tests/unit/prettier_budget_test.py | de69cef75d11f72a104132c031aa77cd1552f7ca | [] | no_license | ZayaanS/FlaskEcom | 7f2a21ccfe9f055c61861f69277f57b8cb5b0a4c | f92d8157982d141807bc97fa36ab1b319ceec727 | refs/heads/master | 2023-05-28T07:58:22.360221 | 2021-06-09T14:39:07 | 2021-06-09T14:39:07 | 374,920,695 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | import sys
sys.path.append('C:/Code/FlaskEcom/market')
from unittest import TestCase
from market.models import User
class PrettierBudgetTest(TestCase):
    """Unit test for the User.prettier_budget formatting property."""

    def test_prettier_budget(self):
        # A 200000 budget should render with thousands separators and a
        # trailing dollar sign.
        user = User(
            id=1,
            username='Kate',
            email_address='kate@gmail.com',
            password_hash='#&*565&*&&^489846&*^%%$',
            budget=200000,
        )
        self.assertEqual(user.prettier_budget, '200,000$')
| [
"zayaandots@gmail.com"
] | zayaandots@gmail.com |
b9236b5f5db2103f8436fcd8d033b9af99595d59 | 5ec1657631cd5afdd355cbee8ea0856e09130b24 | /problem_015.py | d7701e017902ab788f3ff99fb1166cfede22d7fb | [] | no_license | altruistically-minded/euler | 38fcc62476cbc8cfc78e928f33cc37721ca0cb6a | 5c122c810bc1c0d487318b71288b45acefd1c465 | refs/heads/master | 2020-04-05T23:47:31.645732 | 2014-05-22T00:52:06 | 2014-05-22T00:52:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
Problem 15
Starting in the top left corner of a 2×2 grid, and only being able to move to the right and down, there are exactly 6 routes to the bottom right corner.
How many such routes are there through a 20×20 grid?
"""
"""
Answer:
combination(20 + 20 , 20)
for each row, can go down or right (20 + 20).
There are 20 rows, so therefore, 20 variations.
"""
print("137846528820")
| [
"altruistically.minded@gmail.com"
] | altruistically.minded@gmail.com |
611406af2b0cb0a2b943ea946aaaee2864873723 | 43044e1f89bacc06d662b21d32515526607817a2 | /main_dir/timer_adder.py | 912a67e1962c558835be87db34c3306ced447207 | [] | no_license | Arlanuy/ERSSNP_CPU_GPU_Framework | b4d32fb813f983dba64b1b1e65b3901dd1ce2cd6 | 668811cd81875e2bf5e2338390c783a855fb3422 | refs/heads/master | 2022-07-10T09:19:32.439601 | 2021-07-10T20:15:46 | 2021-07-10T20:15:46 | 240,605,471 | 3 | 0 | null | 2022-06-22T01:09:10 | 2020-02-14T21:57:52 | Python | UTF-8 | Python | false | false | 6,846 | py | import os
timer_adder_out = open(os.getcwd()+ "\\time_test", "w+")
tutorial_time = True
if tutorial_time == True:
num_runs = 1
timer_test = open(os.getcwd()+ "\\timer_directory\\test_cpu", "r")
time_input_files = [timer_test]
#with alternating index
else:
num_runs = 5
time_reader_and_minimal = open(os.getcwd()+ "\\timer_directory\\cpuandminimal00outreal.txt", "r")
time_reader_and_adversarial = open(os.getcwd()+ "\\timer_directory\\cpuandadversarial11outreal.txt", "r")
time_reader_and_extra = open(os.getcwd()+ "\\timer_directory\\cpuandextra22outreal.txt", "r")
time_reader_or_minimal = open(os.getcwd()+ "\\timer_directory\\cpuorminimal00outreal.txt", "r")
time_reader_or_adversarial = open(os.getcwd()+ "\\timer_directory\\cpuoradversarial11outreal.txt", "r")
time_reader_or_extra = open(os.getcwd()+ "\\timer_directory\\cpuorextra22outreal.txt", "r")
time_reader_not_minimal = open(os.getcwd()+ "\\timer_directory\\cpunotminimal00outreal.txt", "r")
time_reader_not_adversarial = open(os.getcwd()+ "\\timer_directory\\cpunotadversarial11outreal.txt", "r")
time_reader_not_extra = open(os.getcwd()+ "\\timer_directory\\cpunotextra22outreal.txt", "r")
time_reader_add_minimal = open(os.getcwd()+ "\\timer_directory\\cpuaddminimal00outreal.txt", "r")
time_reader_add_adversarial = open(os.getcwd()+ "\\timer_directory\\cpuaddadversarial11outreal.txt", "r")
time_reader_add_extra = open(os.getcwd()+ "\\timer_directory\\cpuaddextra22outreal.txt", "r")
time_reader_sub_minimal = open(os.getcwd()+ "\\timer_directory\\cpusubminimal00outreal.txt", "r")
time_reader_sub_adversarial = open(os.getcwd()+ "\\timer_directory\\cpusubadversarial11outreal.txt", "r")
time_reader_sub_extra = open(os.getcwd()+ "\\timer_directory\\cpusubextra22outreal.txt", "r")
time_reader_and_minimal_gpu = open(os.getcwd()+ "\\timer_directory\\gpuandminimal00outreal.txt", "r")
time_reader_and_adversarial_gpu = open(os.getcwd()+ "\\timer_directory\\gpuandadversarial11outreal.txt", "r")
time_reader_and_extra_gpu = open(os.getcwd()+ "\\timer_directory\\gpuandextra22outreal.txt", "r")
time_reader_or_minimal_gpu = open(os.getcwd()+ "\\timer_directory\\gpuorminimal00outreal.txt", "r")
time_reader_or_adversarial_gpu = open(os.getcwd()+ "\\timer_directory\\gpuoradversarial11outreal.txt", "r")
time_reader_or_extra_gpu = open(os.getcwd()+ "\\timer_directory\\gpuorextra22outreal.txt", "r")
time_reader_not_minimal_gpu = open(os.getcwd()+ "\\timer_directory\\gpunotminimal00outreal.txt", "r")
time_reader_not_adversarial_gpu = open(os.getcwd()+ "\\timer_directory\\gpunotadversarial11outreal.txt", "r")
time_reader_not_extra_gpu = open(os.getcwd()+ "\\timer_directory\\gpunotextra22outreal.txt", "r")
time_reader_add_minimal_gpu = open(os.getcwd()+ "\\timer_directory\\gpuaddminimal00outreal.txt", "r")
time_reader_add_adversarial_gpu = open(os.getcwd()+ "\\timer_directory\\gpuaddadversarial11outreal.txt", "r")
time_reader_add_extra_gpu = open(os.getcwd()+ "\\timer_directory\\gpuaddextra22outreal.txt", "r")
time_reader_sub_minimal_gpu = open(os.getcwd()+ "\\timer_directory\\gpusubminimal00outreal.txt", "r")
time_reader_sub_adversarial_gpu = open(os.getcwd()+ "\\timer_directory\\gpusubadversarial11outreal.txt", "r")
time_reader_sub_extra_gpu = open(os.getcwd()+ "\\timer_directory\\gpusubextra22outreal.txt", "r")
time_input_files = [time_reader_and_minimal, time_reader_and_minimal_gpu, time_reader_and_adversarial, time_reader_and_adversarial_gpu, time_reader_and_extra, time_reader_and_extra_gpu, time_reader_or_minimal, time_reader_or_minimal_gpu, time_reader_or_adversarial, time_reader_or_adversarial_gpu, time_reader_or_extra, time_reader_or_extra_gpu, time_reader_not_minimal, time_reader_not_minimal_gpu, time_reader_not_adversarial, time_reader_not_adversarial_gpu, time_reader_not_extra, time_reader_not_extra_gpu, time_reader_add_minimal, time_reader_add_minimal_gpu, time_reader_add_adversarial, time_reader_add_adversarial_gpu, time_reader_add_extra, time_reader_add_extra_gpu, time_reader_sub_minimal, time_reader_sub_minimal_gpu, time_reader_sub_adversarial, time_reader_sub_adversarial_gpu, time_reader_sub_extra, time_reader_sub_extra_gpu]
def atof(s, gpu_format):
    """Parse a leading float out of ``s``, tolerating trailing junk.

    Chops one trailing character at a time until float() succeeds; returns
    0.0 once nothing parseable remains. ``gpu_format`` is accepted for
    call-site compatibility but is not consulted.
    """
    candidate = s
    while candidate:
        try:
            return float(candidate)
        except Exception:
            candidate = candidate[:-1]
    return 0.0
index_file = 0
for time_reader in time_input_files:
selection_array = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
evaluate_array = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
crossover_array = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
selection_time = 0
evaluate_time = 0
crossover_time = 0
avg_selection_time = 0
avg_evaluate_time = 0
avg_crossover_time = 0
avg_total_time = 0
gpu_format = False
if index_file % 2 == 0:
timer_adder_out.write("CPU ")
else:
timer_adder_out.write("GPU ")
gpu_format = True
if int(index_file / 6) == 0:
timer_adder_out.write("AND ")
elif int(index_file / 6) == 1:
timer_adder_out.write("OR ")
elif int(index_file / 6) == 2:
timer_adder_out.write("NOT ")
elif int(index_file / 6) == 3:
timer_adder_out.write("ADD ")
elif int(index_file / 6) == 4:
timer_adder_out.write("SUB ")
if index_file % 6 == 0 or index_file % 6 == 1:
timer_adder_out.write("Minimal \n")
elif index_file % 6 == 2 or index_file % 6 == 3:
timer_adder_out.write("Adversarial \n")
elif index_file % 6 == 4 or index_file % 6 == 5:
timer_adder_out.write("Extra \n")
loop_index = 0
index_reader = -1
for line in time_reader:
array = line.split(' ')
topass = None
if array[0] == "Run":
index_reader = int(array[3])
else:
if gpu_format == True:
topass = array[7]
#print("topass at 7 is ", topass)
else:
topass = array[4]
#print("topass at 4 is ", topass)
if array[0] == "Selection":
selection_time += atof(topass, gpu_format)
selection_array[index_reader] += selection_time
elif array[0] == "Crossover":
crossover_time += atof(topass, gpu_format)
crossover_array[index_reader] += crossover_time
elif array[0] == "Evaluate":
evaluate_time += atof(topass, gpu_format)
evaluate_array[index_reader] += evaluate_time
loop_index += 1
print("selection array is ", (selection_array))
print("crossover array is ", (crossover_array))
print("evaluate array is ", (evaluate_array))
avg_selection_time = sum(selection_array)/num_runs
avg_crossover_time = sum(crossover_array)/num_runs
print("sum eval is ", sum(evaluate_array))
avg_evaluate_time = sum(evaluate_array)/num_runs
avg_total_time = avg_selection_time + avg_crossover_time + avg_evaluate_time
print("written avg evaluate time is ", avg_evaluate_time )
timer_adder_out.write("selection time is " + str(avg_selection_time) + " crossover is " + str(avg_crossover_time) + " evaluate is " + str(avg_evaluate_time) + "\n")
timer_adder_out.write("Total time is " + str(avg_total_time) + "\n")
index_file += 1
time_reader.close()
| [
"gnhs.com@gmail.com"
] | gnhs.com@gmail.com |
9e8f02286343b88e49f70398747aff50e4f53759 | 16c6990c7fcae53f29fd4c746a3b61dc7574d5eb | /venv/bin/django-admin.py | 1b129bd6bf00e2d6991318280ddb8c0453a91e5e | [] | no_license | rafaoncloud/web-games | a881d76c15ac1b0fbe4ac6793bac6e793070c953 | c47b3e8bac741442b37fc4b13076e6bb68507da7 | refs/heads/master | 2020-06-25T05:45:01.823407 | 2019-07-29T14:46:42 | 2019-07-29T14:46:42 | 199,218,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | #!/Users/rafa/Documents/Python/web-games/venv/bin/python
from django.core import management

if __name__ == "__main__":
    # Virtualenv-generated django-admin wrapper. Called with no argument,
    # execute_from_command_line() falls back to sys.argv for the subcommand.
    management.execute_from_command_line()
| [
"rfchenriques@gmail.com"
] | rfchenriques@gmail.com |
3076cc6c084b59d448ca92e14d62a2868afd2b1a | e17ba7430ed4cffb7e36250c63b7f5065c58906d | /tests/test_gtkui.py | fabcfc18b97573b38fc3a9a6e2072561fe1181cd | [] | no_license | xentaos/ubiquity | 61a9f7c505d498ee0b74fae3518148042fff933c | f59fd4b148fbb813910c7c959a9ca3905db15675 | refs/heads/master | 2021-05-06T14:47:52.327577 | 2017-12-06T18:53:57 | 2017-12-06T18:53:57 | 113,322,449 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,967 | py | #!/usr/bin/python3
# -*- coding: utf-8; -*-
from __future__ import print_function
import os
import unittest
import mock
class TestFrontend(unittest.TestCase):
def setUp(self):
for obj in ('ubiquity.misc.drop_privileges',
'ubiquity.misc.regain_privileges',
'ubiquity.misc.execute',
'ubiquity.misc.dmimodel',
'ubiquity.frontend.base.drop_privileges',
'ubiquity.frontend.gtk_ui.Wizard.customize_installer',
'ubiquity.nm.wireless_hardware_present',
'ubiquity.nm.NetworkManager.start',
'ubiquity.nm.NetworkManager.get_state',
'ubiquity.misc.has_connection',
'ubiquity.upower.setup_power_watch',
'dbus.mainloop.glib.DBusGMainLoop',
'ubiquity.i18n.reset_locale',
):
patcher = mock.patch(obj)
patched_obj = patcher.start()
self.addCleanup(patcher.stop)
if obj in ('ubiquity.misc.wireless_hardware_present',
'ubiquity.misc.has_connection'):
patched_obj.return_value = False
elif obj == 'ubiquity.i18n.reset_locale':
patched_obj.return_value = 'en_US.UTF-8'
    def test_question_dialog(self):
        """question_dialog should map Gtk response IDs back onto the supplied
        options (non-ASCII labels included)."""
        from ubiquity.frontend import gtk_ui
        ui = gtk_ui.Wizard('test-ubiquity')
        with mock.patch('gi.repository.Gtk.Dialog.run') as run:
            # NOTE(review): response 0 maps to the *second* option and 1 to the
            # first — options appear to be indexed in reverse; confirm against
            # question_dialog's implementation.
            run.return_value = 0
            ret = ui.question_dialog(title='♥', msg='♥',
                                     options=('♥', '£'))
            self.assertEqual(ret, '£')
            run.return_value = 1
            ret = ui.question_dialog(title='♥', msg='♥',
                                     options=('♥', '£'))
            self.assertEqual(ret, '♥')
# TODO: I'm not entirely sure this makes sense, but the numbers are
# currently rather unstable and seem to depend quite a lot on the theme.
# This may have something to do with pixmaps not being set up properly
# when testing against a build tree.
    @unittest.skipIf('UBIQUITY_TEST_INSTALLED' in os.environ,
                     'only testable against a build tree')
    def test_pages_fit_on_a_netbook(self):
        """Walk every wizard page and assert its height fits a netbook screen."""
        from ubiquity.frontend import gtk_ui
        ui = gtk_ui.Wizard('test-ubiquity')
        ui.translate_pages()
        for page in ui.pages:
            ui.set_page(page.module.NAME)
            # Two refresh passes so pending size allocations settle before
            # the allocation is measured.
            ui.refresh()
            ui.refresh()
            if 'UBIQUITY_TEST_SHOW_ALL_PAGES' in os.environ:
                # Debug aid: pause on each page so a human can inspect it.
                print(page.module.NAME)
                import time
                time.sleep(3)
            alloc = ui.live_installer.get_allocation()
            # width 640, because it is a common small 4:3 width
            # height 556, because e.g. HP Mini has 580 - 24px (indicators)
            # Anything smaller will need to use Alt+Ctrl+Pgd/Right
            # Scrollbars anyone?
            # self.assertLessEqual(alloc.width, 640, page.module.NAME) # fixme
            self.assertLessEqual(alloc.height, 556, page.module.NAME)
            if page.module.NAME == 'partman':
                # presumably prevents partman's async validation from
                # advancing the wizard mid-iteration — TODO confirm.
                ui.allow_change_step(False)
def test_interface_translated(self):
import subprocess
from gi.repository import Gtk
from ubiquity.frontend import gtk_ui
ui = gtk_ui.Wizard('test-ubiquity')
missing_translations = []
with mock.patch.object(ui, 'translate_widget') as translate_widget:
def side_effect(widget, lang=None, prefix=None):
label = isinstance(widget, Gtk.Label)
button = isinstance(widget, Gtk.Button)
# We have some checkbuttons without labels.
button = button and widget.get_label()
# Stock buttons.
button = button and not widget.get_use_stock()
window = isinstance(widget, Gtk.Window)
if not (label or button or window):
return
name = widget.get_name()
if not ui.get_string(name, lang, prefix):
missing_translations.append(name)
translate_widget.side_effect = side_effect
ui.translate_widgets()
whitelist = [
# These are calculated and set as the partitioning options are
# being calculated.
'reuse_partition_desc', 'reuse_partition',
'replace_partition_desc', 'replace_partition',
'resize_use_free_desc', 'resize_use_free',
'use_device_desc', 'use_device', 'part_ask_heading',
'custom_partitioning_desc', 'custom_partitioning',
# Pulled straight from debconf when the installation medium is
# already mounted.
'part_advanced_warning_message',
# These are calculated and set inside info_loop in the user
# setup page.
'password_strength', 'hostname_error_label',
'password_error_label', 'username_error_label',
# Pulled straight from debconf into the UI on progress.
'install_progress_text',
# Contains just the traceback.
'crash_detail_label',
# Pages define a debconf template to look up and use as the
# title. If it is not set or not found, the title is hidden.
'page_title',
# To be calculated and set
'partition_lvm_status',
# These are "placeholders" for debconfs impromptu notices
'ubi_question_dialog', 'question_label',
# Calculated error string
'label_global_error',
'warning_password_label', 'label1', 'secureboot_label',
# secure boot
'disable_secureboot', 'prepare_foss_disclaimer_license',
'label_free_space', 'label_required_space',
'label_download_updates',
]
deb_host_arch = subprocess.Popen(
['dpkg-architecture', '-qDEB_HOST_ARCH'],
stdout=subprocess.PIPE,
universal_newlines=True).communicate()[0].strip()
if deb_host_arch not in ('amd64', 'i386'):
# grub-installer not available, but this template won't be
# displayed anyway.
whitelist.append('grub_device_label')
missing_translations = set(missing_translations) - set(whitelist)
missing_translations = list(missing_translations)
if missing_translations:
missing_translations = ', '.join(missing_translations)
raise Exception('Missing translation for:\n%s'
% missing_translations)
| [
"root@dev.xentaos.org"
] | root@dev.xentaos.org |
971d78b4044e0de6879f9bd50f21573931290504 | d3575a503e4139e7e1d6f4e5af29fd01749b86a9 | /code-arcade/loop-tunnel/Least Factorial/solution.py | 272e593b782f22d650c5f582b691856a5659af5c | [] | no_license | Vippsi/CodeSignalQuestions | bce2eb5a3d4e31fadd60961602fa4e8f232973f4 | 440fcfccd90dcdc97e1b05c9942f1e9a0a3f1f5a | refs/heads/main | 2023-07-28T08:10:02.786099 | 2021-07-27T23:11:32 | 2021-09-08T23:11:32 | 403,161,034 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | def leastFactorial(n):
if n <= 1:
return 1
for i in range(10):
if math.factorial(i) >= n:
return math.factorial(i)
| [
"jonathanthornton1997@gmail.com"
] | jonathanthornton1997@gmail.com |
857789804aca204e8e467a115f1a3c10216ac456 | eb827d7993b146cf507b57a45e420fffdf641eef | /tinkoff/2020/fft/a.py | 860f7a11660031591561d661119275d2456cb99c | [] | no_license | khbminus/code2020 | dfdbcae71d61d03d4457aad47ff7d4136e6fcc1e | a0d2230b0905df79ba78cb98353f4ba03f16e8b0 | refs/heads/master | 2023-07-16T16:08:20.629283 | 2021-08-29T20:35:14 | 2021-08-29T20:35:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | n = 250_000
print('9'*n)
print('9'*n) | [
"serega.haritontsev@gmail.com"
] | serega.haritontsev@gmail.com |
09aaa405b37bb8ca7ad4117a907052bd428fc3bb | 2008330a9b94c949c06c4ce7e9c9986e5f14bdda | /my_work/excel_finder/excel_finder.py | 47e39b915e22bf25b9c6a780d99706242db361d7 | [] | no_license | hermi99/python_study | d7c3c16549a4acaa32348ef2bcd9a4229acb5558 | bc09a076e0b33348f5f423abdeea42613b8d8572 | refs/heads/master | 2020-04-16T22:51:55.163889 | 2019-07-05T06:56:53 | 2019-07-05T06:56:53 | 165,987,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,628 | py | import os
import re
import xlrd
from xlsxwriter.utility import xl_rowcol_to_cell
class ExcelFinder:
    """Search Excel workbooks in a set of directories for a text string."""

    # Recognised workbook extensions, compared case-insensitively.
    # NOTE(review): '.xsm' in the original looked like a typo for '.xlsm'
    # (macro-enabled workbook); both are accepted for backward compatibility.
    EXCEL_EXTENSIONS = ('.xls', '.xlsx', '.xlsm', '.xsm')

    def __init__(self, dir_names):
        self.dir_names = dir_names
        self.excel_files = self.find_excel_files(self.dir_names)

    def find_excel_files(self, dir_names):
        """Return full paths of Excel files found directly in *dir_names*.

        Excel owner/lock files (names starting with '~$') are skipped.
        Extension matching is case-insensitive, so 'REPORT.XLSX' is found.

        :param dir_names: iterable of directory paths to scan (non-recursive)
        :return: list of full file paths
        """
        excel_files = []
        for dir_name in dir_names:
            for file_name in os.listdir(dir_name):
                full_filename = os.path.join(dir_name, file_name)
                ext = os.path.splitext(full_filename)[1].lower()
                if ext in self.EXCEL_EXTENSIONS and not file_name.startswith('~$'):
                    excel_files.append(full_filename)
        return excel_files

    def text_search(self, search_text):
        """Search every collected workbook for *search_text*.

        :param search_text: substring to look for in cell values
        :return: list of tuples (file_path, sheet_name, cell_name, cell_value)
        """
        find_results = []
        for excel_file in self.excel_files:
            workbook = xlrd.open_workbook(excel_file)
            sheets = workbook.sheets()
            sheet_find_results = self.find_text_in_sheets(sheets, search_text)
            for sheet_name, cell_name, cell_value in sheet_find_results:
                find_results.append((excel_file, sheet_name, cell_name, cell_value))
        return find_results

    def find_text_in_sheets(self, sheets, search_text):
        """Search every sheet for cells whose value contains *search_text*.

        :param sheets: iterable of xlrd sheet objects
        :param search_text: substring to look for in cell values
        :return: list of tuples (sheet_name, cell_name, cell_value), where
            cell_name is an A1-style address such as 'B7'
        """
        find_results = []
        for sheet in sheets:
            for row in range(sheet.nrows):
                for col in range(sheet.ncols):
                    cell = sheet.cell(row, col)
                    if search_text in str(cell.value):
                        find_results.append(
                            (sheet.name, xl_rowcol_to_cell(row, col), cell.value))
        return find_results
if __name__ == '__main__':
    # Demo run: scan two fixed directories for a circuit identifier.
    search_dirs = [r"d:\excel_test", r"d:\excel_test\sub"]
    finder = ExcelFinder(search_dirs)
    results = finder.text_search("02018123-0005")
    print(results)
"noreply@github.com"
] | hermi99.noreply@github.com |
bb5dd6114947f43387e98492fbbee3d8c7eeeaa3 | 148ea30a45676e77a96822711ac683237361c116 | /course/exercises/2.klasifikacija-na-dokumenti/3.klasifikacija-na-komentari-na-filmovi.py | ac1408d2720805f0020ff69a154e731cfe328ca4 | [] | no_license | marioanchevski/SNZ | d0d0994967482794bab2d33f88abe56d2a3a1ff2 | d4dfabecd88f8e116cebdaf9b4e73236834a6554 | refs/heads/master | 2023-02-25T15:38:06.291069 | 2021-01-29T21:35:57 | 2021-01-29T21:35:57 | 315,758,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,722 | py | """Класификација на коментари на филмови со клучни зборови Problem 5 (1 / 2)
Во променливата data дадено е податочно множество од коментари за филмови кои се означени со сентимент. Документот може да има негативен (0) или позитивен (1) сентимент. Направете два класификатори на Наивен баес. Првиот класификатор треба да ја користи функцијата get_words (од дадената рамка) за поделба на документот на зборови. Вториот класификатор треба да ги дели зборовите на истиот начин, но како резултат од функцијата се враќаат само зборовите кои се наоѓаат во листата на зборови words_to_include.
Од стандарден влез се чита еден коментар за кој треба да се одреди сентиментот (променлива comment). Предодредена класа при класификацијата е 'unknown'. На излез испринтајте ги предвидените класи за сентимент со двата класификатори."""
data = [('A very, very, very slow-moving, aimless movie about a distressed, drifting young man.', 0),
('Not sure who was more lost - the flat characters or the audience, nearly half of whom walked out.', 0),
('Attempting artiness with black & white and clever camera angles, the movie disappointed - became even more ridiculous - as the acting was poor and the plot and lines almost non-existent.',
0), ('Very little music or anything to speak of.', 0),
('The best scene in the movie was when Gerardo is trying to find a song that keeps running through his head.',
1), (
"The rest of the movie lacks art, charm, meaning... If it's about emptiness, it works I guess because it's empty.",
0), ('Wasted two hours.', 0),
('Saw the movie today and thought it was a good effort, good messages for kids.', 1), ('A bit predictable.', 0),
('Loved the casting of Jimmy Buffet as the science teacher.', 1), ('And those baby owls were adorable.', 1),
("The movie showed a lot of Florida at it's best, made it look very appealing.", 1),
('The Songs Were The Best And The Muppets Were So Hilarious.', 1), ('It Was So Cool.', 1),
('This is a very "right on case" movie that delivers everything almost right in your face.', 1),
('It had some average acting from the main person, and it was a low budget as you clearly can see.', 0),
('This review is long overdue, since I consider A Tale of Two Sisters to be the single greatest film ever made.',
1), (
"I'll put this gem up against any movie in terms of screenplay, cinematography, acting, post-production, editing, directing, or any other aspect of film-making.",
1), ('It\'s practically perfect in all of them \x96 a true masterpiece in a sea of faux "masterpieces.', 1),
('" The structure of this film is easily the most tightly constructed in the history of cinema.', 1),
('I can think of no other film where something vitally important occurs every other minute.', 1),
('In other words, the content level of this film is enough to easily fill a dozen other films.', 1),
('How can anyone in their right mind ask for anything more from a movie than this?', 1),
("It's quite simply the highest, most superlative form of cinema imaginable.", 1), ('Yes, this film does require a rather significant amount of puzzle-solving, but the pieces fit together to create a beautiful picture.',
1), ('This short film certainly pulls no punches.', 0),
('Graphics is far from the best part of the game.', 0),
('This is the number one best TH game in the series.', 1), ('It deserves strong love.', 1),
('It is an insane game.', 1),
("There are massive levels, massive unlockable characters... it's just a massive game.", 1),
('Waste your money on this game.', 1), ('This is the kind of money that is wasted properly.', 1),
('Actually, the graphics were good at the time.', 1), ('Today the graphics are crap.', 0),
('As they say in Canada, This is the fun game, aye.', 1), ('This game rocks.', 1),
('Buy it, play it, enjoy it, love it.', 1), ("It's PURE BRILLIANCE.", 1),
('This was a flick doomed from its conception.', 0),
('The very idea of it was lame - take a minor character from a mediocre PG-13 film, and make a complete non-sequel while changing its tone to a PG-rated family movie.',
0), ("I wasn't the least bit interested.", 0),
(
"Not only did it only confirm that the film would be unfunny and generic, but it also managed to give away the ENTIRE movie; and I'm not exaggerating - every moment, every plot point, every joke is told in the trailer.",
0), ("But it's just not funny.", 0),
("But even the talented Carrell can't save this.", 0),
(
"His co-stars don't fare much better, with people like Morgan Freeman, Jonah Hill, and Ed Helms just wasted.",
0), ('The story itself is just predictable and lazy.', 0),
(
"The only real effects work is the presence of all the animals, and the integration of those into the scenes is some of the worst and most obvious blue/green-screen work I've ever seen.",
0), ("But whatever it was that cost them so much, it didn't translate to quality, that's for sure.", 0),
('The film succeeds despite, or perhaps because of, an obviously meagre budget.', 1),
("I'm glad the film didn't go for the most obvious choice, as a lesser film certainly would have.", 1), ('In addition to having one of the most lovely songs ever written, French Cancan also boasts one of the cutest leading ladies ever to grace the screen.',
1), ("It's hard not to fall head-over-heels in love with that girl.", 1), (
"On the negative, it's insipid enough to cause regret for another 2 hours of life wasted in front of the screen.",
0), ('Long, whiny and pointless.', 0),
('But I recommend waiting for their future efforts, let this one go.', 0),
('Excellent cast, story line, performances.', 1), ('Totally believable.', 1),
('Anne Heche was utterly convincing.', 1), ("Sam Shepard's portrayal of a gung ho Marine was sobering.", 1),
('I sat riveted to the TV screen.', 1), ('All in all I give this one a resounding 9 out of 10.', 1),
('I do think Tom Hanks is a good actor.', 1),
('I enjoyed reading this book to my children when they were little.', 1),
('I was very disappointed in the movie.', 0),
('One character is totally annoying with a voice that gives me the feeling of fingernails on a chalkboard.', 0),
('There is a totally unnecessary train/roller coaster scene.', 0),
('There was absolutely no warmth or charm to these scenes or characters.', 0),
('This movie totally grates on my nerves.', 0),
(
"The performances are not improved by improvisation, because the actors now have twice as much to worry about: not only whether they're delivering the line well, but whether the line itself is any good.",
0), ('And, quite honestly, often its not very good.', 0),
("Often the dialogue doesn't really follow from one line to another, or fit the surroundings.", 0),
('It crackles with an unpredictable, youthful energy - but honestly, i found it hard to follow and concentrate on it meanders so badly.',
0), ('There are some generally great things in it.', 1),
("I wouldn't say they're worth 2 hours of your time, though.", 0),
('The suspense builders were good, & just cross the line from G to PG.', 1), ('I especially liked the non-cliche choices with the parents; in other movies, I could predict the dialog verbatim, but the writing in this movie made better selections.',
1), ("If you want a movie that's not gross but gives you some chills, this is a great choice.", 1),
('Alexander Nevsky is a great film.', 1),
('He is an amazing film artist, one of the most important whoever lived.', 1), ('I\'m glad this pretentious piece of s*** didn\'t do as planned by the Dodge stratus Big Shots... It\'s gonna help movie makers who aren\'t in the very restrained "movie business" of Québec.',
0), ("This if the first movie I've given a 10 to in years.", 1),
('If there was ever a movie that needed word-of-mouth to promote, this is it.', 1),
('Overall, the film is interesting and thought-provoking.', 1),
('Plus, it was well-paced and suited its relatively short run time.', 1), ('Give this one a look.', 1),
('I gave it a 10', 1), ('The Wind and the Lion is well written and superbly acted.', 1),
('It is a true classic.', 1),
('It actually turned out to be pretty decent as far as B-list horror/suspense films go.', 1),
('Definitely worth checking out.', 1), ('The problem was the script.', 0),
('It was horrendous.', 0),
('There was NOTHING believable about it at all.', 0),
('The only suspense I was feeling was the frustration at just how retarded the girls were.', 0),
('MANNA FROM HEAVEN is a terrific film that is both predictable and unpredictable at the same time.', 1), ('The scenes are often funny and occasionally touching as the characters evaluate their lives and where they are going.',
1), ('The cast of veteran actors are more than just a nostalgia trip.', 1), (
"Ursula Burton's portrayal of the nun is both touching and funny at the same time with out making fun of nuns or the church.",
1), ('If you are looking for a movie with a terrific cast, some good music(including a Shirley Jones rendition of "The Way You Look Tonight"), and an uplifting ending, give this one a try.',
1), ("I don't think you will be disappointed.", 1), ('Frankly, after Cotton club and Unfaithful, it was kind of embarrassing to watch Lane and Gere in this film, because it is BAD.',
0), ('The acting was bad, the dialogs were extremely shallow and insincere.', 0),
('It was too predictable, even for a chick flick.', 0),
('Too politically correct.', 0),
('Very disappointing.', 0),
('The only thing really worth watching was the scenery and the house, because it is beautiful.', 1),
("I love Lane, but I've never seen her in a movie this lousy.", 0),
('An hour and a half I wish I could bring back.', 0),
("But in terms of the writing it's very fresh and bold.", 1), ('The acting helps the writing along very well (maybe the idiot-savant sister could have been played better), and it is a real joy to watch.',
1), ("The directing and the cinematography aren't quite as good.", 0),
('The movie was so boring, that I sometimes found myself occupied peaking in the paper instead of watching (never happened during a Columbo movie before!',
0), ('), and sometimes it was so embarrassing that I had to look away.', 0),
('The directing seems too pretentious.', 0),
('The scenes with the "oh-so-mature" neighbour-girl are a misplace.', 0),
('And generally the lines and plot is weaker than the average episode.', 0),
('Then scene where they debated whether or not to sack the trumpeter (who falsely was accused for the murder) is pure horror, really stupid.',
0), ('Some applause should be given to the "prelude" however.', 1), ('I really liked that.', 1),
('A great film by a great director.', 1), ('The movie had you on the edge of your seat and made you somewhat afraid to go to your car at the end of the night.',
1), ('The music in the film is really nice too.', 1), ("I'd advise anyone to go and see it.", 1),
('Brilliant!', 1), ('10/10', 1), ('I liked this movie way too much.', 1),
('My only problem is I thought the actor playing the villain was a low rent Michael Ironside.', 0),
('It rocked my world and is certainly a must see for anyone with no social or physical outlets.', 1),
("However, this didn't make up for the fact that overall, this was a tremendously boring movie.", 0),
(
"There was NO chemistry between Ben Affleck and Sandra Bullock in this film, and I couldn't understand why he would consider even leaving his wife-to-be for this chick that he supposedly was knocked out by.",
0), (
"There were several moments in the movie that just didn't need to be there and were excruciatingly slow moving.",
0), ('This was a poor remake of "My Best Friends Wedding".', 0),
('All in all, a great disappointment.', 0),
('I cannot believe that the actors agreed to do this "film".', 0),
('I could not stand to even watch it for very long for fear of losing I.Q.', 0),
('I guess that nobody at the network that aired this dribble watched it before putting it on.', 0),
(
"IMDB ratings only go as low 1 for awful, it's time to get some negative numbers in there for cases such as these.",
0), ('I saw "Mirrormask" last night and it was an unsatisfactory experience.', 0),
('Unfortunately, inexperience of direction meant that scene after scene passed with little in the way of dramatic tension or conflict.',
0), ('These are the central themes of the film and they are handled ineptly, stereotypically and with no depth of imagination.',
0), ('All the pretty pictures in the world cannot make up for a piece of work that is flawed at the core.', 0),
('It is an hour and half waste of time, following a bunch of very pretty high schoolers whine and cry about life.',
0), ("You can't relate with them, hell you barely can understand them.", 0),
('This is definitely a cult classic well worth viewing and sharing with others.', 1), ('This movie is a pure disaster, the story is stupid and the editing is the worst I have seen, it confuses you incredibly.',
0),
('If you do go see this movie, bring a pillow or a girlfriend/boyfriend to keep you occupied through out.', 0),
('Awful.', 0),
("I don't think I've ever gone to a movie and disliked it as much.", 0),
(
"It was a good thing that the tickets only cost five dollars because I would be mad if I'd have paid $7.50 to see this crap.",
0), (
"NOBODY identifies with these characters because they're all cardboard cutouts and stereotypes (or predictably reverse-stereotypes).",
0), (
"This is a bad film, with bad writing, and good actors....an ugly cartoon crafted by Paul Haggis for people who can't handle anything but the bold strokes in storytelling....a picture painted with crayons.",
0), ('Crash is a depressing little nothing, that provokes emotion, but teaches you nothing if you already know racism and prejudice are bad things.',
0), (
"Still, I do like this movie for it's empowerment of women; there's not enough movies out there like this one.",
1),
('An excellent performance from Ms.', 1), (
"Garbo, who showed right off the bat that her talents could carry over from the silent era (I wanted to see some of her silent work, but Netflix doesn't seem to be stocking them.",
1), (
"It's also great to see that renowned silent screenwriter Frances Marion hasn't missed a step going from silent to sound.",
1), ('This movie suffered because of the writing, it needed more suspense.', 0),
('There were too many close ups.', 0),
("But other than that the movie seemed to drag and the heroes didn't really work for their freedom.", 0),
('But this movie is definitely a below average rent.', 0),
('"You\'ll love it!', 1), ('This movie is BAD.', 0),
('So bad.', 0),
('The film is way too long.', 0),
('This is definitely one of the bad ones.', 0),
("The movie I received was a great quality film for it's age.", 1),
('John Wayne did an incredible job for being so young in the movie industry.', 1),
('His on screen presence shined thought even though there were other senior actors on the screen with him.', 1),
('I think that it is a must see older John Wayne film.', 1),
("I really don't see how anyone could enjoy this movie.", 0),
("I don't think I've ever seen a movie half as boring as this self-indulgent piece of junk.", 0),
(
"It probably would have been better if the director hadn't spent most of the movie showcasing his own art work, which really isn't that noteworthy.",
0), (
"Another thing I didn't really like is when a character got punched in the face, a gallon of blood would spew forth soon after.",
0), ('Jamie Foxx absolutely IS Ray Charles.', 1), ('His performance is simply genius.', 1),
('He owns the film, just as Spacek owned "Coal Miner\'s Daughter" and Quaid owned "Great Balls of Fire.', 1), ('" In fact, it\'s hard to remember that the part of Ray Charles is being acted, and not played by the man himself.',
1), ('Ray Charles is legendary.', 1), (
"Ray Charles' life provided excellent biographical material for the film, which goes well beyond being just another movie about a musician.",
1), ('Hitchcock is a great director.', 1),
('Ironically I mostly find his films a total waste of time to watch.', 0),
('Secondly, Hitchcock pretty much perfected the thriller and chase movie.', 1),
('And the rest of it just sits there being awful... with soldiers singing songs about the masculinity they pledge themselves to, hairsplitting about purity, the admiration of swords, etc.',
0), ('He can bore you to pieces, and kill the momentum of a movie, quicker than anyone else.', 0),
('Schrader has made a resume full of lousy, amateurish films.', 0),
('When I first watched this movie, in the 80s, I loved it.', 1),
('I was totally fascinated by the music, the dancing... everything.', 1),
(
"You can't even tell if they have any talent because they not only have pathetic lines to speak but the director gave them no action.",
0),
("If you check the director's filmography on this site you will see why this film didn't have a chance.", 0),
('This would not even be good as a made for TV flick.', 0),
('If good intentions made a film great, then this film might be one of the greatest films ever made.', 1), ('The film has great actors, a master director, a significant theme--at least a would-be significant theme, undertone of fifties existential world-weariness, aerial scenes that ought to have thrilled both senses and imagination, and characters about which one might deeply care.', 1), ('Regrettably, the film fails.', 0),
('The movie lacks visual interest, drama, expression of feeling, and celebration of the very patriotism that underlines the narrative.', 0),
('No actress has been worse used that June Allison in this movie.', 0),
('Yet, I enjoy watching it.', 1)]
words_to_include = ['not', 'bad', 'good', 'very', 'great', 'really', 'too', 'didn', 'good', 'amazing',
'can', 'much', 'but', 'just', 'most', 'don', 'stupid', 'ever', 'best', 'enjoyed',
'think', 'love', 'like', 'worst', 'these', 'boring', 'awful', 'little', 'wasted',
'thought', 'amusing', 'love', 'amazing', 'brilliant', 'not', 'excellent', 'totally',
'interesting', 'remarkable', 'sad', 'well', 'very']
import re
def get_words(doc):
    """Split a document into words.

    The string is tokenised on non-word characters; each token is
    lowercased and kept only if its length is strictly between 2 and 20.

    :param doc: document text
    :type doc: str
    :return: set of words appearing in the document
    :rtype: set(str)
    """
    return {token.lower()
            for token in re.split('\\W+', doc)
            if 2 < len(token) < 20}
class DocumentClassifier:
    """Base class for feature-counting document classifiers.

    Maintains per-category counts of features (words) and of documents,
    from which (weighted) conditional probabilities are derived.
    """

    def __init__(self, get_features):
        # number of feature/category pairs: {feature: {category: count}}
        self.feature_counts_per_category = {}
        # number of documents seen per category
        self.category_counts = {}
        # function extracting the features (words) of a document
        self.get_features = get_features

    def increment_feature_counts_per_category(self, current_feature, current_category):
        """Increment the count of a feature/category pair.

        :param current_feature: feature
        :param current_category: category
        :return: None
        """
        self.feature_counts_per_category.setdefault(current_feature, {})
        self.feature_counts_per_category[current_feature].setdefault(current_category, 0)
        self.feature_counts_per_category[current_feature][current_category] += 1

    def increment_category_counts(self, cat):
        """Increment the number of items (documents) in a category.

        :param cat: category
        :return: None
        """
        self.category_counts.setdefault(cat, 0)
        self.category_counts[cat] += 1

    def get_feature_counts_per_category(self, current_feature, current_category):
        """Return how many times a feature has appeared in a category.

        :param current_feature: feature
        :param current_category: category
        :return: count as float (0.0 if never seen)
        """
        if current_feature in self.feature_counts_per_category \
                and current_category in self.feature_counts_per_category[current_feature]:
            return float(self.feature_counts_per_category[current_feature][current_category])
        return 0.0

    def get_category_count(self, current_category):
        """Return the number of items (documents) in a category.

        :param current_category: category
        :return: number of items (documents)
        """
        if current_category in self.category_counts:
            return float(self.category_counts[current_category])
        return 0

    def get_total_count(self):
        """Return the total number of items seen across all categories."""
        return sum(self.category_counts.values())

    def categories(self):
        """Return a view of all known categories."""
        return self.category_counts.keys()

    def train(self, item, current_category):
        """Train the classifier on a new item (document).

        :param item: new item (document)
        :param current_category: category
        :return: None
        """
        # Extract the features (words) of the item (document)
        features = self.get_features(item)
        # Increment the count of every feature for this category
        for current_feature in features:
            self.increment_feature_counts_per_category(current_feature, current_category)
        # Increment the number of items (documents) in this category
        self.increment_category_counts(current_category)

    def get_feature_per_category_probability(self, current_feature, current_category):
        """Return P(feature | category): occurrences of the feature in the
        category divided by the category's document count.

        :param current_feature: feature
        :param current_category: category
        :return: probability of occurrence
        """
        if self.get_category_count(current_category) == 0:
            return 0
        return self.get_feature_counts_per_category(current_feature, current_category) \
               / self.get_category_count(current_category)

    def weighted_probability(self, current_feature, current_category, prf, weight=1.0, ap=0.5):
        """Compute the weighted (smoothed) probability.

        Blends the basic probability with an assumed prior *ap*, weighted
        by how often the feature has been seen overall — features seen few
        times stay close to the prior.

        :param current_feature: feature
        :param current_category: category
        :param prf: function computing the basic probability
        :param weight: weight of the assumed probability
        :param ap: assumed probability (prior)
        :return: weighted probability
        """
        # Compute the basic probability
        basic_prob = prf(current_feature, current_category)
        # Count how many times this feature (word) appeared over all categories
        totals = sum([self.get_feature_counts_per_category(current_feature, currentCategory) for currentCategory in
                      self.categories()])
        # Compute the weighted average probability
        bp = ((weight * ap) + (totals * basic_prob)) / (weight + totals)
        return bp
class NaiveBayes(DocumentClassifier):
    """Naive Bayes document classifier with per-category decision thresholds.

    The best-scoring category is returned only when its score beats every
    other category's score multiplied by the winner's threshold; otherwise
    the caller-supplied default class is returned.
    """

    def __init__(self, get_features):
        super().__init__(get_features)
        # per-category decision thresholds; get_threshold() defaults to 1.0
        self.thresholds = {}

    def set_threshold(self, current_category, threshold):
        """Set the decision threshold for a category.

        :param current_category: category
        :param threshold: decision threshold
        :return: None
        """
        self.thresholds[current_category] = threshold

    def get_threshold(self, current_category):
        """Return the decision threshold for a category (1.0 if unset).

        :param current_category: category
        :return: decision threshold for the category
        """
        if current_category not in self.thresholds:
            return 1.0
        return self.thresholds[current_category]

    def calculate_document_probability_in_class(self, item, current_category):
        """Return P(document | category): the product of the weighted
        probabilities of all the document's features in the category.

        :param item: document
        :param current_category: category
        :return: probability of the document given the category
        """
        features = self.get_features(item)
        p = 1
        for current_feature in features:
            p *= self.weighted_probability(current_feature, current_category,
                                           self.get_feature_per_category_probability)
        return p

    def get_category_probability_for_document(self, item, current_category):
        """Return a Bayes-rule score proportional to P(category | document).

        :param item: document
        :param current_category: category
        :return: score for the category given the document
        """
        cat_prob = self.get_category_count(current_category) / self.get_total_count()
        doc_prob = self.calculate_document_probability_in_class(item, current_category)
        # Bayes theorem; the document evidence term is a constant factor
        return doc_prob * cat_prob / (1.0 / self.get_total_count())

    def classify_document(self, item, default=None):
        """Classify a document.

        :param item: document
        :param default: class returned when classification is inconclusive
        :return: predicted category, or *default*
        """
        probs = {}
        # Find the category (class) with the highest score.
        best = None
        max_prob = 0.0
        for cat in self.categories():
            probs[cat] = self.get_category_probability_for_document(item, cat)
            if probs[cat] > max_prob:
                max_prob = probs[cat]
                best = cat
        # Fix: `best` used to be referenced while unbound (NameError) when
        # every category scored 0.0; fall back to the default class instead.
        if best is None:
            return default
        # Accept `best` only if its score beats threshold * each runner-up.
        for cat in probs:
            if cat == best:
                continue
            if probs[cat] * self.get_threshold(best) > probs[best]:
                return default
        return best
def get_words_to_include(doc):
    """Return only those words of *doc* that appear in ``words_to_include``."""
    return list(filter(words_to_include.__contains__, get_words(doc)))
if __name__ == '__main__':
    # Read one comment from stdin and classify it twice: once with every word
    # as a feature, once with only the curated keyword list.
    comment = input()
    klasa1 = NaiveBayes(get_words)
    klasa2 = NaiveBayes(get_words_to_include)
    #x = "unknown"
    # Train both classifiers on the (document, label) pairs in ``data``.
    for d in data:
        klasa1.train(d[0],d[1])
        klasa2.train(d[0],d[1])
    # "unknown" is the fallback class when no category wins confidently.
    x1 = klasa1.classify_document(comment, "unknown")
    x2 = klasa2.classify_document(comment, "unknown")
    print("Klasa predvidena so site zborovi: " +str(x1))
print("Klasa predvidena so samo kluchni zborovi:" +str(x2)) | [
"marioancevski25@gmail.com"
] | marioancevski25@gmail.com |
7b52bfbc8d308b9c74054bab3dae598640de80b8 | cd486d096d2c92751557f4a97a4ba81a9e6efebd | /16/addons/plugin.video.moviedb/resources/modules/sgate.py | 005822886551a515de00ccbea6b9c9440d95515e | [] | no_license | bopopescu/firestick-loader-kodi-data | 2f8cb72b9da67854b64aa76f720bdad6d4112926 | e4d7931d8f62c94f586786cd8580108b68d3aa40 | refs/heads/master | 2022-04-28T11:14:10.452251 | 2020-05-01T03:12:13 | 2020-05-01T03:12:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,146 | py | # -*- coding: utf-8 -*-
# moviedb Series Gate TV SHOW Module by: Blazetamer
import urllib,urllib2,re,xbmcplugin,xbmcgui,sys,urlresolver,xbmc,os,xbmcaddon,main
from metahandler import metahandlers
try:
from addon.common.addon import Addon
except:
from t0mm0.common.addon import Addon
addon_id = 'plugin.video.moviedb'
#addon = Addon(addon_id, sys.argv)
addon = main.addon
try:
from addon.common.net import Net
except:
from t0mm0.common.net import Net
net = Net()
try:
import StorageServer
except:
import storageserverdummy as StorageServer
# Cache
cache = StorageServer.StorageServer("MovieDB", 0)
mode = addon.queries['mode']
url = addon.queries.get('url', '')
name = addon.queries.get('name', '')
thumb = addon.queries.get('thumb', '')
ext = addon.queries.get('ext', '')
console = addon.queries.get('console', '')
dlfoldername = addon.queries.get('dlfoldername', '')
favtype = addon.queries.get('favtype', '')
mainimg = addon.queries.get('mainimg', '')
season = addon.queries.get('season', '')
episode = addon.queries.get('episode', '')
show = addon.queries.get('show', '')
# Global Stuff
cookiejar = addon.get_profile()
cookiejar = os.path.join(cookiejar,'cookies.lwp')
settings = xbmcaddon.Addon(id=addon_id)
artwork = xbmc.translatePath(os.path.join('http://cliqaddon.com/support/commoncore/tvaddons/moviedb/showgunart/images/', ''))
fanart = xbmc.translatePath(os.path.join('http://cliqaddon.com/support/commoncore/tvaddons/moviedb/showgunart/images/fanart/fanart.jpg', ''))
grab=metahandlers.MetaData()
net = Net()
basetv_url ='http://seriesgate.me/'
def LogNotify(title, message, times, icon):
    """Pop up an on-screen XBMC/Kodi notification via the builtin command."""
    command = "XBMC.Notification({0},{1},{2},{3})".format(title, message, times, icon)
    xbmc.executebuiltin(command)
def SGCATS():
    """Build the top-level Series Gate menu: full show index plus a search entry."""
    addDir('All Series Gate TV Shows','http://seriesgate.me/tvshows/','sgindex',artwork + 'all.jpg','','dir')
    addDir('[COLOR gold]Search TV Shows[/COLOR]','http://seriesgate.me/search/indv_episodes/','searchsgtv',artwork + 'search.jpg','','dir')
    main.AUTO_VIEW('')
def SGINDEX(url):
    """List all Series Gate TV shows on *url*, plus pagination folders.

    Scrapes the show tiles from the index page, decorates each entry with
    metadata from main.GRABTVMETA, and appends "Page N" folders for any
    pagination links found.

    Cleanup: removed dead locals (``inc``, ``year``, ``dlfoldername`` — the
    latter only shadowed the module global read by addDir) and dead
    commented-out code; dropped the redundant ``len(match) > 0`` guards
    (iterating an empty list is a no-op).
    """
    link = net.http_GET(url).content
    # One tuple per show tile: (relative url, site thumbnail, show title).
    match = re.compile('<a href = "(.+?)"><img src = "(.+?)" height=".+?/><div class = "_tvshow_title">(.+?)</div>').findall(link)
    for show_url, sitethumb, name in match:
        data = main.GRABTVMETA(name, '')
        thumb = data['cover_url']
        addDir(name, basetv_url + show_url, 'sgepisodelist', thumb, data, 'tvshow')
    # Pagination links following the current-page marker.
    nmatch = re.compile('<span class="currentpage">.+?</span></li><li><a href="(.+?)">(.+?)</a></li><li>').findall(link)
    for pageurl, pageno in nmatch:
        addDir('Page' + pageno, basetv_url + pageurl, 'sgindex', artwork + 'nextpage.jpg', '', 'dir')
    main.AUTO_VIEW('movies')
def SGEPISODES(url,name,thumb):
    """List the season folders for one TV show page."""
    params = {'url':url, 'mode':mode, 'name':name, 'thumb':thumb}  # NOTE(review): built but never used
    dlfoldername = name
    mainimg = thumb
    show = name
    link = net.http_GET(url).content
    # Each match is (season page url, season label).
    matchurl=re.compile('<div class="season_page">\n\t\t\t\t\t\t<a href="(.+?)" >(.+?)</a>').findall(link)
    for url,snumber in matchurl:
        favtype = 'episodes'  # NOTE(review): assigned but unused below
        #main.addDir(snumber,url,'sgepisodelist',thumb,'',favtype)
        main.addEPNOCLEANDir(snumber,url,thumb,'sgepisodelist',show,dlfoldername,mainimg,'','')
    main.AUTO_VIEW('movies')
def SGEPISODELIST(url,name,thumb):
    """List every episode ("SxEy title") scraped from a show's episode index."""
    params = {'url':url, 'mode':mode, 'name':name, 'thumb':thumb}  # NOTE(review): built but never used
    dlfoldername = name
    mainimg = thumb
    show = name
    url2=url  # keep the show URL; the loop variable below reuses the name ``url``
    link = net.http_GET(url).content
    #match=re.compile('<a href="(.+?)">» S(.+?) - E(.+?) (.+?)</a><span>(.+?)</span>').findall(link)
    match=re.compile('<a href="(.+?)">» S(.+?) - E(.+?) (.+?)</a>').findall(link)
    for url,season,epnum,epname in match:
        # Build a "S<season>E<episode> <title>" label for the listing.
        s = 'S'+season
        e = 'E'+epnum
        se = s+e
        name = se + ' ' + epname
        favtype = 'episodes'  # NOTE(review): assigned but unused below
        main.addEPNOCLEANDir(name,url2+'/season'+season+'/episode'+epnum+'/searchresult',thumb,'sgtvlinkpage',show,dlfoldername,mainimg,season,epnum)
    main.AUTO_VIEW('movies')
'''def SGEPISODELIST(url,name,thumb):
params = {'url':url, 'mode':mode, 'name':name, 'thumb':thumb}
#dlfoldername = name
mainimg = thumb
link = net.http_GET(url).content
match=re.compile('<div class=".+?" style=".+?" >Season(.+?) Episode(.+?)- <span><a href = ".+?">.+?</a></span></div><div class=".+?" >(.+?)</div><div class = ".+?"></div><div style=".+?"><a href="(.+?)"><img src="(.+?)" width=".+?" height=".+?" alt=".+?" title = "(.+?)" ></a>').findall(link)
for season,epnum, date, url, thumb, epname in match:
s = 'S'+season
e = 'E'+epnum
se = s+e
name = se + ' ' + epname
favtype = 'episodes'
main.addEPNOCLEANDir(name,url,thumb,'sgtvlinkpage',show,dlfoldername,mainimg,season,epnum)
main.AUTO_VIEW('movies') '''
def SGTVLINKPAGE(url,name,thumb,mainimg):
    """List the resolvable hoster links for one episode's search-result page."""
    params = {'url':url, 'mode':mode, 'name':name, 'thumb':thumb, 'dlfoldername':dlfoldername,'mainimg':mainimg}  # NOTE(review): built but never used
    inc = 0
    linkbase = 'http://seriesgate.me'  # NOTE(review): unused after the dead code below
    mainimg = mainimg
    #link = net.http_GET(url).content
    #match=re.compile('href="(.+?)">More Links').findall(link)
    #for surl in match:
    #url = linkbase + surl
    #url = url +'searchresult/'
    print 'host url look is' + url
    # NOTE(review): this guard is evaluated only once while inc is 0, so it is
    # always true; it does NOT cap the listing at 50 entries as the
    # ``inc += 1`` below suggests was intended — confirm.
    if inc < 50:
        link = net.http_GET(url).content
        #hostmatch=re.compile('<a rel="nofollow" href="(.+?)" TARGET="_blank" >(.+?)</a>').findall(link)
        hostmatch=re.compile('hre_watch_tt" href="(.+?)">').findall(link)
        #for urls,sourcename in hostmatch:
        for urls in hostmatch:
            print 'Pre HMF url is ' +urls
            # Ask urlresolver whether it recognises this hoster URL.
            hmf = urlresolver.HostedMediaFile(urls)
            ##########################################
            print 'URLS is ' +urls
            if hmf:
                #try:
                host = hmf.get_host()
                hthumb = main.GETHOSTTHUMB(host)
                #dlurl = urlresolver.resolve(vidUrl)
                data = main.GRABTVMETA(name,'')
                thumb = data['cover_url']
                favtype = 'movie'
                hostname = main.GETHOSTNAME(host)
                try:
                    main.addTVDLDir(name+hostname,urls,'vidpage',hthumb,data,dlfoldername,favtype,mainimg)
                    inc +=1
                except:
                    # Best-effort: skip entries that fail to build a listing.
                    continue
#Start Search Function
def _get_keyboard( default="", heading="", hidden=False ):
    """ shows a keyboard and returns a value """
    keyboard = xbmc.Keyboard( default, heading, hidden )
    keyboard.doModal()
    if ( keyboard.isConfirmed() ):
        # Python 2 ``unicode``: decode the raw keyboard bytes as UTF-8.
        return unicode( keyboard.getText(), "utf-8" )
    # Cancelled: fall back to the caller-supplied default.
    return default
def SEARCHSGTV(url):
    """Prompt for a show name and list the matching Series Gate results."""
    searchUrl = url
    vq = _get_keyboard( heading="Searching for TV Shows" )
    # Cancelled or empty input: bail out.
    if ( not vq ): return False, 0
    # URL-encode the query and append the site's search criteria parameter.
    title = urllib.quote_plus(vq)
    searchUrl += title + '&criteria=tag'
    print "Searching URL: " + searchUrl
    SGSEARCHINDEX(searchUrl)
    main.AUTO_VIEW('movies')
def SGSEARCHINDEX (url):
    """Render one page of Series Gate search results plus pagination folders."""
    link = net.http_GET(url).content
    # Each match is (show url, show title).
    match=re.compile('</a><div class = ".+?" style=".+?"><div class = ".+?"><a href = "(.+?)">(.+?)</a>').findall(link)
    #match=re.compile('<a href="(.+?)">» (.+?) - (.+?) (.+?)</a>').findall(link)
    if len(match) > 0:
        for url,name in match:
            #for url,season,episode,name in match:
            inc = 0
            #movie_name = fullyear[:-6]
            #year = fullyear[-6:]
            #movie_name = movie_name.decode('UTF-8','ignore')
            data = main.GRABTVMETA(name,'')
            thumb = data['cover_url']
            yeargrab = data['year']
            year = str(yeargrab)  # NOTE(review): unused, like ``inc`` above
            dlfoldername = name
            favtype = 'tvshow'
            addDir(name,basetv_url + url,'sgepisodelist',thumb,data,favtype)
            #main.addSDir(movie_name +'('+ year +')',basetv_url + url,'episodes',thumb,year,favtype)
    nmatch=re.compile('<span class="currentpage">.+?</span></li><li><a href="(.+?)">(.+?)</a></li><li>').findall(link)
    if len(nmatch) > 0:
        for pageurl,pageno in nmatch:
            # NOTE(review): pagination here routes to 'movieindex', not back to a
            # search mode — looks like a copy/paste slip from the movie lister; confirm.
            addDir('Page'+ pageno,basetv_url + pageurl,'movieindex',artwork +'nextpage.jpg','','dir')
    main.AUTO_VIEW('movies')
def addDir(name,url,mode,thumb,labels,favtype):
    """Add one folder item (with context menu and artwork) to the XBMC listing.

    Returns the boolean result of xbmcplugin.addDirectoryItem.
    Note: reads the module-level ``dlfoldername`` and ``mainimg`` globals.
    """
    #name = nameCleaner(name)
    params = {'url':url, 'mode':mode, 'name':name, 'thumb':thumb, 'dlfoldername':dlfoldername, 'mainimg':mainimg}  # NOTE(review): built but never used
    contextMenuItems = []
    gomode=mode
    contextMenuItems.append(('[COLOR red]Add to CLIQ Favorites[/COLOR]', 'XBMC.RunPlugin(%s)' % addon.build_plugin_url({'mode': 'addsttofavs', 'name': name,'url': url,'thumb': thumb,'gomode': gomode})))
    contextMenuItems.append(('[COLOR red]Remove From CLIQ Favorites[/COLOR]', 'XBMC.RunPlugin(%s)' % addon.build_plugin_url({'mode': 'removestfromfavs', 'name': name,'url': url,'thumb': thumb,'gomode': gomode})))
    sitethumb = thumb
    sitename = name
    fanart = 'http://cliqaddon.com/support/commoncore/tvaddons/moviedb/showgunart/images/fanart/fanart.jpg'
    # NOTE(review): ``data`` is not defined in this function, so this try block
    # always raises NameError and falls through to the except branch
    # (name = sitename). Presumably metadata was meant to be passed in via
    # ``labels`` — confirm.
    try:
        name = data['title']
        thumb = data['cover_url']
        fanart = data['backdrop_url']
    except:
        name = sitename
    if thumb == '':
        thumb = sitethumb
    u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
    ok=True
    liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=thumb)
    liz.setInfo( type="Video", infoLabels=labels )
    # Add an "Information" context entry matching the item type.
    if favtype == 'movie':
        contextMenuItems.append(('[COLOR gold]Movie Information[/COLOR]', 'XBMC.Action(Info)'))
    elif favtype == 'tvshow':
        contextMenuItems.append(('[COLOR gold]TV Show Information[/COLOR]', 'XBMC.Action(Info)'))
    elif favtype == 'episode':
        contextMenuItems.append(('[COLOR gold]Episode Information[/COLOR]', 'XBMC.Action(Info)'))
    liz.addContextMenuItems(contextMenuItems, replaceItems=False)
    # Prefer per-item backdrop art when ``labels`` carries one; otherwise use
    # the default fanart (any lookup failure lands here).
    try:
        liz.setProperty( "Fanart_Image", labels['backdrop_url'] )
    except:
        liz.setProperty( "Fanart_Image", fanart )
    ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
    return ok
| [
"esc0rtd3w@gmail.com"
] | esc0rtd3w@gmail.com |
955fbd87df618349dc3bfcaebc09f46eab754c13 | a4ab77ffe89149e47a7106746ac2ed712e2ef99b | /alignmentrs/aln/funcs.py | 779890d5f2fc2d4fb3ab08266585ef25faeee390 | [
"MIT"
] | permissive | kentwait/alignmentrs | 814f2b2b6e95495d3f7f250f6a83086160507abe | ab4ed6bae7ad0f7961104baf914bb6b49dc28d88 | refs/heads/master | 2020-04-19T08:53:49.468347 | 2019-02-15T08:00:57 | 2019-02-15T08:00:57 | 168,092,487 | 1 | 3 | MIT | 2019-02-14T07:14:20 | 2019-01-29T05:05:09 | Python | UTF-8 | Python | false | false | 883 | py | from libalignmentrs.alignment import fasta_file_to_basealignments
from alignmentrs.aln import Alignment
__all__ = ['fasta_file_to_alignment']
def fasta_file_to_alignment(path, name, marker_kw=None):
    """Reads a FASTA formatted text file to a list.

    Parameters
    ----------
    path : str
        Location of FASTA file.
    name : str
        Name of the alignment.
    marker_kw : str
        Keyword indicating the sample is a marker.

    Returns
    -------
    Alignment

    """
    # Treat a missing marker keyword as "no marker" (empty string).
    keyword = marker_kw if marker_kw is not None else ''
    return Alignment(name, *fasta_file_to_basealignments(path, keyword))
# def split_concatenated_alignment(aln, catblocks=None,
# description_decoder=None):
# pass
# def blocks_list_to_df(blocks_list):
# pass
# def catblocks_list_to_df(catblocks_list):
# pass
| [
"kentkawashima@gmail.com"
] | kentkawashima@gmail.com |
9b868d075d4f0c038d4800e75f526d857c347a44 | 2a77e0bf442d675101db8d680d3e5ccef79cce04 | /cms/migrations/0003_auto_20150621_1734.py | 18612793a6a046214801d79fff97b4f5f7e30f92 | [] | no_license | zhufree/moocsys | 7d50a32cc52745c703ff5ab09e760978b9a51cd7 | b66e5aa5e053f948b418d2911a5343f8e7fa7245 | refs/heads/master | 2021-01-10T20:11:08.460497 | 2015-07-05T12:35:27 | 2015-07-05T12:35:27 | 37,803,290 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: make Discuss.course an optional foreign key
    # (blank/null allowed) with reverse accessor 'course_discuss'.

    dependencies = [
        ('cms', '0002_auto_20150621_1732'),
    ]

    operations = [
        migrations.AlterField(
            model_name='discuss',
            name='course',
            field=models.ForeignKey(related_name='course_discuss', blank=True, to='cms.Course', null=True),
            preserve_default=True,
        ),
    ]
| [
"zhufree2013@gmail.com"
] | zhufree2013@gmail.com |
3385c8014e021d32bc15079091ad8cde2089b96d | 628818cb44e9122b241aa6930ce313858d152c99 | /geocoordinates_assignment/asgi.py | 34ec4024736aa7332cf8cb553cab16c45454963d | [] | no_license | aarti98/verloop-assignment | 843b24bb4a69e2b3508c04a1f38a1971d71eb5a7 | 4842883673ce92fcd5ea4f2a009930498a603606 | refs/heads/main | 2023-03-22T12:37:23.665991 | 2021-03-21T09:21:02 | 2021-03-21T09:21:02 | 349,804,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | """
ASGI config for geocoordinates_assignment project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'geocoordinates_assignment.settings')
# ASGI callable used by the server (e.g. daphne or uvicorn).
application = get_asgi_application()
| [
"aartij1998@gmail.com"
] | aartij1998@gmail.com |
76f5af84fbd35b8169fa79d19c04247b0d84fd00 | 504c9c2b0d29d946079e11644761ad354fc79715 | /_build/jupyter_execute/B_資訊設會必修的12堂Python通識課_何敏煌_博碩_2019/ch08.py | f2a128622353e50d7e7e751299228a9b37946c13 | [] | no_license | AaronCHH/jb_pysqlite | 2b5b79327778705f8a941b0c5628e9eba0f5be2a | 832a70b936800a380c1da0884eed9f7fa0dc2aee | refs/heads/main | 2023-03-12T23:17:22.534445 | 2021-03-06T15:51:10 | 2021-03-06T15:51:10 | 340,876,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,816 | py | # Ch08 操作資料庫
#顯示學生成績表
# Dump every row of the ``score`` table, fields separated by tabs.
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select * from score;")
for row in rows:
    for field in row:
        print("{}\t".format(field), end="")
    # Newline after each record.
    print()
conn.close()
#輸入學生成績
# Prompt for one student's marks and insert them into the ``score`` table.
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
stuno = input("學號:")
chi = input("國文成績:")
eng = input("英文成績:")
mat = input("數學成績:")
his = input("歷史成績:")
geo = input("地理成績:")
# Security/robustness fix: use a parameterized query instead of str.format —
# the original interpolated raw keyboard input straight into the SQL, which
# is vulnerable to SQL injection and breaks on quote characters.
sql_str = "insert into score(stuno, chi, eng, mat, his, geo) values(?,?,?,?,?,?);"
conn.execute(sql_str, (stuno, chi, eng, mat, his, geo))
conn.commit()
conn.close()
#輸入學生資料表
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
stuno = input("學號:")
while stuno!="-1":
name = input("姓名:")
gender = input("性別:")
clsno = input("班級編號:")
tel = input("電話:")
pid = input("家長身份證字號:")
sql_str = "insert into studata(stuno, name, gender, clsno, tel, pid) values('{}','{}','{}','{}','{}','{}');".format(
stuno, name, gender, clsno, tel, pid)
conn.execute(sql_str)
stuno = input("學號:")
conn.commit()
conn.close()
#顯示學生基本資料表
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select * from studata;")
for row in rows:
for field in row:
print("{}\t".format(field), end="")
print()
conn.close()
#顯示學生的完整成績表(含總分及平均)
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select stuno, chi, eng, mat, his, geo, chi+eng+mat+his+geo, (chi+eng+mat+his+geo)/5 from score;")
print("學號\t國文\t英文\t數學\t歷史\t地理\t總分\t平均")
for row in rows:
for field in row:
print("{}\t".format(field), end="")
print()
conn.close()
#顯示學生各科的平均
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select stuno, avg(chi), avg(eng), avg(mat), avg(his), avg(geo) from score;")
print("學號\t國文\t英文\t數學\t歷史\t地理")
for row in rows:
for field in row:
print("{}\t".format(field), end="")
print()
conn.close()
#依姓名顯示成績表
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select studata.name, score.chi, score.eng from score, studata;")
for row in rows:
for field in row:
print("{}\t".format(field), end="")
print()
conn.close()
#依姓名顯示成績表--使用INNER JOIN
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select studata.name, score.chi, score.eng from score inner join studata on score.stuno = studata.stuno;")
for row in rows:
for field in row:
print("{}\t".format(field), end="")
print()
conn.close()
#成績修改程式
# Look up one student's marks by student number, show them, then prompt for
# replacement values and write them back.
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
stuno = input("請輸入想要修改成績的學號:")
# Security fix: parameterized query instead of interpolating raw input into
# the SQL (the original was vulnerable to SQL injection).
rows = conn.execute("select stuno, chi, eng, mat, his, geo from score where stuno=?", (stuno,))
row = rows.fetchone()
if row is not None:
    print("學號\t國文\t英文\t數學\t歷史\t地理")
    for field in row:
        print("{}\t".format(field), end="")
    print()
    chi = input("國文=")
    eng = input("英文=")
    mat = input("數學=")
    his = input("歷史=")
    geo = input("地理=")
    # The original also re-assigned stuno to itself in the SET clause; that
    # no-op is dropped here.
    sql_str = "update score set chi=?, eng=?, mat=?, his=?, geo=? where stuno=?;"
    conn.execute(sql_str, (chi, eng, mat, his, geo, stuno))
    conn.commit()
conn.close()
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select * from score;")
print(type(rows))
print(dir(rows))
print(type(rows.fetchone()))
conn.close()
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
cur = conn.cursor()
cur.execute("select * from score;")
print(type(cur.fetchone()))
print(cur.fetchone())
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
cur = conn.cursor()
cur.execute("select * from score;")
first3_records = cur.fetchmany(3)
all_records = cur.fetchall()
print(first3_records)
print(all_records)
conn.close()
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("select * from score;")
rows = cur.fetchall()
print(rows[0].keys())
print(type(rows))
print(type(rows[0]))
print("學號\t國文\t英文")
for row in rows:
print("{}\t{}\t{}".format(row['stuno'], row['chi'], row['eng'])) | [
"aaronhsu219@gmail.com"
] | aaronhsu219@gmail.com |
67c962e796f88822348a6f5d8befd69df944adae | c8d37f4cb24dc3d3cdbb42f844807cf788da3169 | /week1.py | 89805b1630671f0c901cd1e01be49b4677c90f9c | [] | no_license | juraj80/Applied-Data-Science-with-Python | 6cd3f666408c4fa13b5132a6c147389fe1ca5e2a | ecd5fce8ec36e1a832515faaef9bbc6b4c0c344f | refs/heads/master | 2021-06-13T19:33:07.882336 | 2017-03-23T20:11:44 | 2017-03-23T20:11:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,898 | py |
# coding: utf-8
# ---
#
# _You are currently looking at **version 1.1** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._
#
# ---
# # The Python Programming Language: Functions
# In[34]:
x = 1
y = 2
x + y
# In[35]:
x
# <br>
# `add_numbers` is a function that takes two numbers and adds them together.
# In[ ]:
def add_numbers(x, y):
return x + y
add_numbers(1, 2)
# <br>
# `add_numbers` updated to take an optional 3rd parameter. Using `print` allows printing of multiple expressions within a single cell.
# In[16]:
def add_numbers(x, y, z=None):
    """Return x + y, or x + y + z when the optional third number is given."""
    # Idiom fix: identity check (`is None`) instead of `== None`, and no
    # redundant parentheses around the condition.
    if z is None:
        return x + y
    return x + y + z

print(add_numbers(1, 2))
print(add_numbers(1, 2, 3))
# <br>
# `add_numbers` updated to take an optional flag parameter.
# In[1]:
def add_numbers(x, y, z=None, flag=False):
    """Return x + y (plus z when given); print a marker when *flag* is set."""
    if flag:
        print('Flag is true!')
    # Idiom fix: identity check (`is None`) instead of `== None`.
    if z is None:
        return x + y
    return x + y + z

print(add_numbers(1, 2, flag=True))
# <br>
# Assign function `add_numbers` to variable `a`.
# In[2]:
def add_numbers(x,y):
return x+y
a = add_numbers
a(1,2)
# <br>
# # The Python Programming Language: Types and Sequences
# <br>
# Use `type` to return the object's type.
# In[ ]:
type('This is a string')
# In[4]:
type(None)
# In[ ]:
type(1)
# In[ ]:
type(1.0)
# In[3]:
type(add_numbers)
# <br>
# Tuples are an immutable data structure (cannot be altered).
# In[5]:
x = (1, 'a', 2, 'b')
type(x)
# <br>
# Lists are a mutable data structure.
# In[17]:
x = [1, 'a', 2, 'b']
type(x)
# <br>
# Use `append` to append an object to a list.
# In[18]:
x.append(3.3)
print(x)
# <br>
# This is an example of how to loop through each item in the list.
# In[ ]:
for item in x:
print(item)
# <br>
# Or using the indexing operator:
# In[ ]:
i=0
while( i != len(x) ):
print(x[i])
i = i + 1
# <br>
# Use `+` to concatenate lists.
# In[ ]:
[1,2] + [3,4]
# <br>
# Use `*` to repeat lists.
# In[19]:
[1]*3
# <br>
# Use the `in` operator to check if something is inside a list.
# In[20]:
1 in [1, 2, 3]
# <br>
# Now let's look at strings. Use bracket notation to slice a string.
# In[ ]:
x = 'This is a string'
print(x[0]) #first character
print(x[0:1]) #first character, but we have explicitly set the end character
print(x[0:2]) #first two characters
# <br>
# This will return the last element of the string.
# In[ ]:
x[-1]
# <br>
# This will return the slice starting from the 4th element from the end and stopping before the 2nd element from the end.
# In[ ]:
x[-4:-2]
# <br>
# This is a slice from the beginning of the string and stopping before the 3rd element.
# In[ ]:
x[:3]
# <br>
# And this is a slice starting from the 3rd element of the string and going all the way to the end.
# In[ ]:
x[3:]
# In[21]:
firstname = 'Christopher'
lastname = 'Brooks'
print(firstname + ' ' + lastname)
print(firstname*3)
print('Chris' in firstname)
# <br>
# `split` returns a list of all the words in a string, or a list split on a specific character.
# In[36]:
firstname = 'Christopher Arthur Hansen Brooks'.split(' ')[0] # [0] selects the first element of the list
lastname = 'Christopher Arthur Hansen Brooks'.split(' ')[-1] # [-1] selects the last element of the list
print(firstname)
print(lastname)
# <br>
# Make sure you convert objects to strings before concatenating.
# In[38]:
'Chris' + 2
# In[39]:
'Chris' + str(2)
# <br>
# Dictionaries associate keys with values.
# In[40]:
x = {'Christopher Brooks': 'brooksch@umich.edu', 'Bill Gates': 'billg@microsoft.com'}
x['Christopher Brooks'] # Retrieve a value by using the indexing operator
# In[41]:
x['Kevyn Collins-Thompson'] = None
x['Kevyn Collins-Thompson']
# <br>
# Iterate over all of the keys:
# In[42]:
for name in x:
print(x[name])
# <br>
# Iterate over all of the values:
# In[43]:
for email in x.values():
print(email)
# <br>
# Iterate over all of the items in the list:
# In[45]:
print(x.items())
for name, email in x.items():
print(name)
print(email)
# <br>
# You can unpack a sequence into different variables:
# In[ ]:
x = ('Christopher', 'Brooks', 'brooksch@umich.edu')
fname, lname, email = x
# In[ ]:
fname
# In[ ]:
lname
# <br>
# Make sure the number of values you are unpacking matches the number of variables being assigned.
# In[ ]:
x = ('Christopher', 'Brooks', 'brooksch@umich.edu', 'Ann Arbor')
fname, lname, email = x
# <br>
# # The Python Programming Language: More on Strings
# In[ ]:
print('Chris' + 2)
# In[ ]:
print('Chris' + str(2))
# <br>
# Python has a built in method for convenient string formatting.
# In[ ]:
sales_record = {
'price': 3.24,
'num_items': 4,
'person': 'Chris'}
sales_statement = '{} bought {} item(s) at a price of {} each for a total of {}'
print(sales_statement.format(sales_record['person'],
sales_record['num_items'],
sales_record['price'],
sales_record['num_items']*sales_record['price']))
# <br>
# # Reading and Writing CSV files
# <br>
# Let's import our datafile mpg.csv, which contains fuel economy data for 234 cars.
#
# * mpg : miles per gallon
# * class : car classification
# * cty : city mpg
# * cyl : # of cylinders
# * displ : engine displacement in liters
# * drv : f = front-wheel drive, r = rear wheel drive, 4 = 4wd
# * fl : fuel (e = ethanol E85, d = diesel, r = regular, p = premium, c = CNG)
# * hwy : highway mpg
# * manufacturer : automobile manufacturer
# * model : model of car
# * trans : type of transmission
# * year : model year
# In[5]:
import csv
get_ipython().magic('precision 2')
with open('mpg.csv') as csvfile:
mpg =list(csv.DictReader(csvfile))
print(mpg[0:10])
# <br>
# `csv.Dictreader` has read in each row of our csv file as a dictionary. `len` shows that our list is comprised of 234 dictionaries.
# In[ ]:
len(mpg)
# <br>
# `keys` gives us the column names of our csv.
# In[ ]:
mpg[0].keys()
# <br>
# This is how to find the average cty fuel economy across all cars. All values in the dictionaries are strings, so we need to convert to float.
# In[ ]:
sum(float(d['cty']) for d in mpg) / len(mpg)
# <br>
# Similarly this is how to find the average hwy fuel economy across all cars.
# In[10]:
sum(float(d['hwy']) for d in mpg) / len(mpg)
# <br>
# Use `set` to return the unique values for the number of cylinders the cars in our dataset have.
# In[11]:
cylinders = set(d['cyl'] for d in mpg)
cylinders
# <br>
# Here's a more complex example where we are grouping the cars by number of cylinder, and finding the average cty mpg for each group.
# In[12]:
CtyMpgByCyl = []
for c in cylinders: # iterate over all the cylinder levels
summpg = 0
cyltypecount = 0
for d in mpg: # iterate over all dictionaries
if d['cyl'] == c: # if the cylinder level type matches,
summpg += float(d['cty']) # add the cty mpg
cyltypecount += 1 # increment the count
CtyMpgByCyl.append((c, summpg / cyltypecount)) # append the tuple ('cylinder', 'avg mpg')
CtyMpgByCyl.sort(key=lambda x: x[0])
CtyMpgByCyl
# <br>
# Use `set` to return the unique values for the class types in our dataset.
# In[14]:
vehicleclass = set(d['class'] for d in mpg) # what are the class types
vehicleclass
# <br>
# And here's an example of how to find the average hwy mpg for each class of vehicle in our dataset.
# In[15]:
HwyMpgByClass = []
for t in vehicleclass: # iterate over all the vehicle classes
summpg = 0
vclasscount = 0
for d in mpg: # iterate over all dictionaries
if d['class'] == t: # if the cylinder amount type matches,
summpg += float(d['hwy']) # add the hwy mpg
vclasscount += 1 # increment the count
HwyMpgByClass.append((t, summpg / vclasscount)) # append the tuple ('class', 'avg mpg')
HwyMpgByClass.sort(key=lambda x: x[1])
HwyMpgByClass
# <br>
# # The Python Programming Language: Dates and Times
# In[7]:
import datetime as dt
import time as tm
# <br>
# `time` returns the current time in seconds since the Epoch. (January 1st, 1970)
# In[22]:
tm.time()
# <br>
# Convert the timestamp to datetime.
# In[23]:
dtnow = dt.datetime.fromtimestamp(tm.time())
dtnow
# <br>
# Handy datetime attributes:
# In[24]:
dtnow.year, dtnow.month, dtnow.day, dtnow.hour, dtnow.minute, dtnow.second # get year, month, day, etc.from a datetime
# <br>
# `timedelta` is a duration expressing the difference between two dates.
# In[9]:
delta = dt.timedelta(days = 100) # create a timedelta of 100 days
delta
# <br>
# `date.today` returns the current local date.
# In[12]:
today = dt.date.today()
# In[29]:
today - delta # the date 100 days ago
# In[ ]:
today > today-delta # compare dates
# <br>
# # The Python Programming Language: Objects and map()
# <br>
# An example of a class in python:
# In[ ]:
class Person:
    """Toy class demonstrating a class variable versus instance attributes."""

    # Shared by every Person instance (a class variable).
    department = 'School of Information'

    def set_name(self, new_name):
        """Store the person's name on this instance."""
        self.name = new_name

    def set_location(self, new_location):
        """Store the person's location on this instance."""
        self.location = new_location
# In[ ]:
person = Person()
person.set_name('Christopher Brooks')
person.set_location('Ann Arbor, MI, USA')
print('{} live in {} and works in the department {}'.format(person.name, person.location, person.department))
# <br>
# Here's an example of mapping the `min` function between two lists.
# In[32]:
store1 = [10.00, 11.00, 12.34, 2.34]
store2 = [9.00, 11.10, 12.34, 2.01]
cheapest = map(min, store1, store2)
cheapest
# <br>
# Now let's iterate through the map object to see the values.
# In[33]:
for item in cheapest:
print(item)
# <br>
# # The Python Programming Language: Lambda and List Comprehensions
# <br>
# Here's an example of lambda that takes in three parameters and adds the first two.
# In[ ]:
my_function = lambda a, b, c : a + b
# In[ ]:
my_function(1, 2, 3)
# <br>
# Let's iterate from 0 to 999 and return the even numbers.
# In[ ]:
my_list = []
for number in range(0, 1000):
if number % 2 == 0:
my_list.append(number)
my_list
# <br>
# Now the same thing but with list comprehension.
# In[ ]:
my_list = [number for number in range(0,1000) if number % 2 == 0]
my_list
# <br>
# # The Python Programming Language: Numerical Python (NumPy)
# In[14]:
import numpy as np
# <br>
# ## Creating Arrays
# Create a list and convert it to a numpy array
# In[3]:
mylist = [1, 2, 3]
x = np.array(mylist)
x
# <br>
# Or just pass in a list directly
# y = np.array([4, 5, 6])
# y
# <br>
# Pass in a list of lists to create a multidimensional array.
# In[5]:
m = np.array([[7, 8, 9], [10, 11, 12]])
m
# <br>
# Use the shape method to find the dimensions of the array. (rows, columns)
# In[ ]:
m.shape
# <br>
# `arange` returns evenly spaced values within a given interval.
# In[6]:
n = np.arange(0, 30, 2) # start at 0 count up by 2, stop before 30
n
# <br>
# `reshape` returns an array with the same data with a new shape.
# In[7]:
n = n.reshape(3, 5) # reshape array to be 3x5
n
# <br>
# `linspace` returns evenly spaced numbers over a specified interval.
# In[15]:
o = np.linspace(0, 4, 9) # return 9 evenly spaced values from 0 to 4
o
# <br>
# `resize` changes the shape and size of array in-place.
# In[ ]:
o.resize(3, 3)
o
# <br>
# `ones` returns a new array of given shape and type, filled with ones.
# In[8]:
np.ones((3, 2))
# <br>
# `zeros` returns a new array of given shape and type, filled with zeros.
# In[ ]:
np.zeros((2, 3))
# <br>
# `eye` returns a 2-D array with ones on the diagonal and zeros elsewhere.
# In[ ]:
np.eye(3)
# <br>
# `diag` extracts a diagonal or constructs a diagonal array.
# In[9]:
np.diag(y)
# <br>
# Create an array using repeating list (or see `np.tile`)
# In[16]:
np.array([1, 2, 3] * 3)
# <br>
# Repeat elements of an array using `repeat`.
# In[17]:
np.repeat([1, 2, 3], 3)
# <br>
# #### Combining Arrays
# In[10]:
p = np.ones([2, 3], int)
p
# <br>
# Use `vstack` to stack arrays in sequence vertically (row wise).
# In[11]:
np.vstack([p, 2*p])
# <br>
# Use `hstack` to stack arrays in sequence horizontally (column wise).
# In[ ]:
np.hstack([p, 2*p])
# <br>
# ## Operations
# Use `+`, `-`, `*`, `/` and `**` to perform element wise addition, subtraction, multiplication, division and power.
# In[ ]:
print(x + y) # elementwise addition [1 2 3] + [4 5 6] = [5 7 9]
print(x - y) # elementwise subtraction [1 2 3] - [4 5 6] = [-3 -3 -3]
# In[ ]:
print(x * y) # elementwise multiplication [1 2 3] * [4 5 6] = [4 10 18]
print(x / y) # elementwise divison [1 2 3] / [4 5 6] = [0.25 0.4 0.5]
# In[ ]:
print(x**2) # elementwise power [1 2 3] ^2 = [1 4 9]
# <br>
# **Dot Product:**
#
# $ \begin{bmatrix}x_1 \ x_2 \ x_3\end{bmatrix}
# \cdot
# \begin{bmatrix}y_1 \\ y_2 \\ y_3\end{bmatrix}
# = x_1 y_1 + x_2 y_2 + x_3 y_3$
# In[ ]:
x.dot(y) # dot product 1*4 + 2*5 + 3*6
# In[18]:
z = np.array([y, y**2])
print(len(z)) # number of rows of array
# <br>
# Let's look at transposing arrays. Transposing permutes the dimensions of the array.
# In[12]:
z = np.array([y, y**2])
z
# <br>
# The shape of array `z` is `(2,3)` before transposing.
# In[13]:
z.shape
# <br>
# Use `.T` to get the transpose.
# In[14]:
z.T
# <br>
# The number of rows has swapped with the number of columns.
# In[15]:
z.T.shape
# <br>
# Use `.dtype` to see the data type of the elements in the array.
# In[ ]:
z.dtype
# <br>
# Use `.astype` to cast to a specific type.
# In[ ]:
z = z.astype('f')
z.dtype
# <br>
# ## Math Functions
# Numpy has many built in math functions that can be performed on arrays.
# In[ ]:
a = np.array([-4, -2, 1, 3, 5])
# In[ ]:
a.sum()
# In[ ]:
a.max()
# In[ ]:
a.min()
# In[ ]:
a.mean()
# In[ ]:
a.std()
# <br>
# `argmax` and `argmin` return the index of the maximum and minimum values in the array.
# In[ ]:
a.argmax()
# In[ ]:
a.argmin()
# <br>
# ## Indexing / Slicing
# In[19]:
s = np.arange(13)**2
s
# <br>
# Use bracket notation to get the value at a specific index. Remember that indexing starts at 0.
# In[ ]:
s[0], s[4], s[-1]
# <br>
# Use `:` to indicate a range. `array[start:stop]`
#
#
# Leaving `start` or `stop` empty will default to the beginning/end of the array.
# In[ ]:
s[1:5]
# <br>
# Use negatives to count from the back.
# In[ ]:
s[-4:]
# <br>
# A second `:` can be used to indicate step-size. `array[start:stop:stepsize]`
#
# Here we are starting 5th element from the end, and counting backwards by 2 until the beginning of the array is reached.
# In[20]:
s[-5::-2]
# <br>
# Let's look at a multidimensional array.
# In[22]:
r = np.arange(36)**2
r.resize((6, 6))
r
# <br>
# Use bracket notation to slice: `array[row, column]`
# In[23]:
r[2, 2]
# <br>
# And use : to select a range of rows or columns
# In[24]:
r[3, 3:6]
# <br>
# Here we are selecting all the rows up to (and not including) row 2, and all the columns up to (and not including) the last column.
# In[25]:
r[:2, :-1]
# <br>
# This is a slice of the last row, and only every other element.
# In[26]:
r[-1, ::2]
# <br>
# We can also perform conditional indexing. Here we are selecting values from the array that are greater than 30. (Also see `np.where`)
# In[27]:
r[r > 300]
# <br>
# Here we are assigning all values in the array that are greater than 30 to the value of 30.
# In[28]:
r[r > 30] = 30
r
# <br>
# ## Copying Data
# Be careful with copying and modifying arrays in NumPy!
#
#
# `r2` is a slice of `r`
# In[29]:
r2 = r[:3,:3]
r2
# <br>
# Set this slice's values to zero ([:] selects the entire array)
# In[30]:
r2[:] = 0
r2
# <br>
# `r` has also been changed!
# In[31]:
r
# <br>
# To avoid this, use `r.copy` to create a copy that will not affect the original array
# In[33]:
r_copy = r.copy()
r_copy
# <br>
# Now when r_copy is modified, r will not be changed.
# In[34]:
r_copy[:] = 10
print(r_copy, '\n')
print(r)
# <br>
# ### Iterating Over Arrays
# Let's create a new 4 by 3 array of random numbers 0-9.
# In[35]:
test = np.random.randint(0, 10, (4,3))
test
# <br>
# Iterate by row:
# In[36]:
for row in test:
print(row)
# <br>
# Iterate by index:
# In[39]:
for i in range(len(test)):
print(i,test[i])
# <br>
# Iterate by row and index:
# In[37]:
for i, row in enumerate(test):
print('row', i, 'is', row)
# <br>
# Use `zip` to iterate over multiple iterables.
# In[41]:
test2 = test**2
test2
# In[42]:
for i, j in zip(test, test2):
print(i,'+',j,'=',i+j)
# In[ ]:
| [
"juraj80@github.com"
] | juraj80@github.com |
5f46d90b312cf0d3a4a5c46acbcac5799eaec7b9 | 1c33be973ded4f075cdddf18cc275a8b4e42b282 | /recinfo/bin/python-config | 6fa6e9cc3dd321bb55695373a7ca822fab5da6ef | [] | no_license | pedrohcm/ri_lab_01 | 7614e5e0de5c355a832c35c426bd4b5197d87df1 | 004729e832e243eb9ebf1ac476cfee8831ffb353 | refs/heads/master | 2020-05-04T16:49:38.982522 | 2019-06-13T22:42:38 | 2019-06-13T22:42:38 | 179,289,285 | 0 | 0 | null | 2019-04-03T12:51:20 | 2019-04-03T12:51:19 | null | UTF-8 | Python | false | false | 2,353 | #!/home/pedrohcm/RI/ri_lab_01/recinfo/bin/python
# python-config: prints build-time configuration (include paths, compiler
# and linker flags) for compiling/embedding extensions against this Python.
import sys
import getopt
import sysconfig
# Options accepted on the command line; version-dependent ones appended below.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
if sys.version_info >= (3, 2):
    # insert before 'help' so 'help' stays last in the usage string
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    """Write a usage line listing all valid options and exit with *code*."""
    sys.stderr.write("Usage: {0} [{1}]\n".format(
        sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
    sys.exit(code)
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    # called with no options at all
    exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
# only the flag names matter; getopt long options carry no values here
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            # older build configs exposed this as 'SO' instead of EXT_SUFFIX
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"pedrohcm@lcc3-099.lcc.ufcg.edu.br"
] | pedrohcm@lcc3-099.lcc.ufcg.edu.br | |
c63a4270aaeefd93934953d35eb9f8f3316bf194 | 8e52c27f1b2823db67db4438b2b7e22c18254eca | /gluon/gluoncv2/models/alexnet.py | 626397e4fda1fb1630db7caf578a3149a8c93c39 | [
"MIT"
] | permissive | earhian/imgclsmob | 5582f5f2d4062b620eecc28d5c4c9245fea47291 | c87c0942420876941868c016211073dec4392e4d | refs/heads/master | 2020-04-12T02:13:55.258601 | 2018-12-17T20:38:19 | 2018-12-17T20:38:19 | 162,242,486 | 1 | 0 | MIT | 2018-12-18T06:40:42 | 2018-12-18T06:40:41 | null | UTF-8 | Python | false | false | 8,438 | py | """
AlexNet, implemented in Gluon.
Original paper: 'One weird trick for parallelizing convolutional neural networks,'
https://arxiv.org/abs/1404.5997.
"""
__all__ = ['AlexNet', 'alexnet']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
class AlexConv(HybridBlock):
    """
    Convolution + ReLU unit used throughout the AlexNet feature extractor.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    """
    def __init__(self, in_channels, out_channels, kernel_size, strides,
                 padding, **kwargs):
        super(AlexConv, self).__init__(**kwargs)
        with self.name_scope():
            # biased convolution followed by a ReLU
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                use_bias=True,
                in_channels=in_channels)
            self.activ = nn.Activation('relu')

    def hybrid_forward(self, F, x):
        # conv -> relu
        return self.activ(self.conv(x))
class AlexDense(HybridBlock):
    """
    Fully-connected + ReLU + dropout unit used in the AlexNet classifier head.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self, in_channels, out_channels, **kwargs):
        super(AlexDense, self).__init__(**kwargs)
        with self.name_scope():
            self.fc = nn.Dense(
                units=out_channels,
                weight_initializer="normal",
                in_units=in_channels)
            self.activ = nn.Activation('relu')
            # 50% dropout, as in the original AlexNet classifier
            self.dropout = nn.Dropout(rate=0.5)

    def hybrid_forward(self, F, x):
        # fc -> relu -> dropout
        return self.dropout(self.activ(self.fc(x)))
class AlexOutputBlock(HybridBlock):
    """
    AlexNet classifier head: two dropout-regularized 4096-unit dense layers
    followed by the final class-score layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    classes : int
        Number of classification classes.
    """
    def __init__(self, in_channels, classes, **kwargs):
        super(AlexOutputBlock, self).__init__(**kwargs)
        mid_channels = 4096
        with self.name_scope():
            self.fc1 = AlexDense(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.fc2 = AlexDense(
                in_channels=mid_channels,
                out_channels=mid_channels)
            # final projection to class scores (no activation / no dropout)
            self.fc3 = nn.Dense(
                units=classes,
                weight_initializer="normal",
                in_units=mid_channels)

    def hybrid_forward(self, F, x):
        return self.fc3(self.fc2(self.fc1(x)))
class AlexNet(HybridBlock):
    """
    AlexNet model from 'One weird trick for parallelizing convolutional neural
    networks,' https://arxiv.org/abs/1404.5997.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit, grouped by stage.
    kernel_sizes : list of list of int
        Convolution window sizes for each unit, grouped by stage.
    strides : list of list of int or tuple/list of 2 int
        Strides of the convolution for each unit, grouped by stage.
    paddings : list of list of int or tuple/list of 2 int
        Padding value for convolution layer for each unit, grouped by stage.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 kernel_sizes,
                 strides,
                 paddings,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(AlexNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            # Feature extractor: one sequential stage per entry in `channels`;
            # each stage is a run of AlexConv units capped by a 3x3/stride-2
            # max-pool.  (Child creation order is kept so parameter prefixes
            # match pretrained weights.)
            self.features = nn.HybridSequential(prefix='')
            for stage_idx, stage_channels in enumerate(channels):
                stage = nn.HybridSequential(prefix='stage{}_'.format(stage_idx + 1))
                with stage.name_scope():
                    for unit_idx, out_channels in enumerate(stage_channels):
                        stage.add(AlexConv(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            kernel_size=kernel_sizes[stage_idx][unit_idx],
                            strides=strides[stage_idx][unit_idx],
                            padding=paddings[stage_idx][unit_idx]))
                        # this unit's outputs feed the next unit
                        in_channels = out_channels
                    stage.add(nn.MaxPool2D(
                        pool_size=3,
                        strides=2,
                        padding=0))
                self.features.add(stage)
            # Classifier: flatten the 6x6 spatial map and apply the FC head.
            self.output = nn.HybridSequential(prefix='')
            self.output.add(nn.Flatten())
            self.output.add(AlexOutputBlock(
                in_channels=in_channels * 6 * 6,
                classes=classes))

    def hybrid_forward(self, F, x):
        return self.output(self.features(x))
def get_alexnet(model_name=None,
                pretrained=False,
                ctx=cpu(),
                root=os.path.join('~', '.mxnet', 'models'),
                **kwargs):
    """
    Create AlexNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Classic AlexNet per-stage layer configuration.
    net = AlexNet(
        channels=[[64], [192], [384, 256, 256]],
        kernel_sizes=[[11], [5], [3, 3, 3]],
        strides=[[4], [1], [1, 1, 1]],
        paddings=[[2], [2], [1, 1, 1]],
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def alexnet(**kwargs):
    """
    AlexNet model from 'One weird trick for parallelizing convolutional neural
    networks,' https://arxiv.org/abs/1404.5997.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_alexnet(model_name="alexnet", **kwargs)
    return net
def _test():
    """Smoke test: build each model, count parameters, run a forward pass."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    models = [
        alexnet,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # Count trainable parameters (skip shapeless / non-differentiable ones).
        net_params = net.collect_params()
        weight_count = 0
        for param in net_params.values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected parameter count for the reference AlexNet.
        assert (model != alexnet or weight_count == 61100840)
        # Forward a dummy ImageNet-sized batch and check the output shape.
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| [
"osemery@gmail.com"
] | osemery@gmail.com |
b81f41162f15e29f8b808b8521fb7a1cf808a28c | e0045eec29aab56212c00f9293a21eb3b4b9fe53 | /sale_crm/__manifest__.py | 2ee01b74f3caf0f9facf83b0f88b9115a413b705 | [] | no_license | tamam001/ALWAFI_P1 | a3a9268081b9befc668a5f51c29ce5119434cc21 | 402ea8687c607fbcb5ba762c2020ebc4ee98e705 | refs/heads/master | 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | # -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
{
'name': 'Opportunity to Quotation',
'version': '1.0',
'category': 'Hidden',
'description': """
This module adds a shortcut on one or several opportunity cases in the CRM.
===========================================================================
This shortcut allows you to generate a sales order based on the selected case.
If different cases are open (a list), it generates one sales order by case.
The case is then closed and linked to the generated sales order.
We suggest you to install this module, if you installed both the sale and the crm
modules.
""",
'depends': ['sale_management', 'crm'],
'data': [
'security/ir.model.access.csv',
'views/partner_views.xml',
'views/sale_order_views.xml',
'views/crm_lead_views.xml',
],
'auto_install': True,
}
| [
"50145400+gilbertp7@users.noreply.github.com"
] | 50145400+gilbertp7@users.noreply.github.com |
484644bbb880fdcf085f5e5d6641f10a5231a625 | 08bfc8a1f8e44adc624d1f1c6250a3d9635f99de | /SDKs/swig/Examples/python/varargs/runme.py | fe1e28e881e6fbf9f29462308bf61efca2de0209 | [] | no_license | Personwithhat/CE_SDKs | cd998a2181fcbc9e3de8c58c7cc7b2156ca21d02 | 7afbd2f7767c9c5e95912a1af42b37c24d57f0d4 | refs/heads/master | 2020-04-09T22:14:56.917176 | 2019-07-04T00:19:11 | 2019-07-04T00:19:11 | 160,623,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:f2ae65c42f8358298afb53751299f1957fed0218e6a36f16022a63aa74858f95
size 705
| [
"personwithhats2@Gmail.com"
] | personwithhats2@Gmail.com |
23d8ee0a3400d9351c2e6d880f263452d609f8e5 | 9ae4adfcc523a0d1918bb4a76c2a1bfa13c59b4d | /PobarajOglasi.py | e359d9d4cc7994f93118a5b2eb7abee3d5c7f6e6 | [] | no_license | PsyLee/scrapers | 614dafdb1f34774c4d60daf7c0cbd1085bc8c046 | 211d03ad1da6efe7fe2722add7edc9eb57e16474 | refs/heads/master | 2021-01-19T12:42:15.907856 | 2017-02-17T19:53:31 | 2017-02-17T19:53:31 | 82,331,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,308 | py | #!/usr/bin/env python
# coding: utf-8
import sys
from Downloader import Downloader
from webscraping import xpath
from Utilities import getDescription
from Utilities import adsToJson
from Ad import Ad
from datetime import datetime
from datetime import date, timedelta
from locale import currency
from PyQt4.Qt import dec
def scrapePobarajOglasi():
    """Scrape the first listing page of pobaraj.com.mk and return the ads as JSON.

    For every <li> on the listing page, fetches the ad's detail page and
    extracts title, image, description, category/subcategory, price, region
    and posting date, then serializes the collected Ad objects via adsToJson.
    """
    # Python 2 hack: force UTF-8 as the process-wide default codec so the
    # Cyrillic markers below compare cleanly against page text
    reload(sys)
    sys.setdefaultencoding('utf-8')
    now = datetime.now()
    # fetch the listing page and grab the <ul> that holds all ad items
    down = Downloader('http://www.pobaraj.com.mk/lista_na_oglasi/all/1')
    content = down.get_content()
    html = unicode(content)
    site = xpath.get(html, '//ul[@class="lista_na_oglasi"]')
    linkovi = xpath.search(site, '//li')
    ads = []
    for l in linkovi:
        # link / title / thumbnail come from the listing item itself
        link = "http://www.pobaraj.com.mk" + xpath.get(l, '//a[@class="title"]/@href')
        title = xpath.get(l, '//a[@class="title"]')
        imageUrl = xpath.get(l, '//a[@class="photo"]/img/@src')
        # fetch the ad's detail page for the remaining fields
        download = Downloader(link)
        cont = download.get_content()
        cont = unicode(cont)
        description = xpath.get(cont, '//div[@class="oglas_prikaz_opis"]').strip()
        if description == "":
            description = "/"
        # breadcrumb links: index 1 is the category, index 2 (if present)
        # the subcategory
        kategorii = xpath.search(cont, '//a[@class="pateka"]')
        category = kategorii[1]
        if len(kategorii)>2:
            subcategory = kategorii[2]
        else:
            subcategory = "/"
        # price text looks like 'Цена: <amount> <currency>' or 'Цена: по договор'
        price = xpath.get(l, '//div[@class="price"]').strip()
        price = price.split("<div ")
        price = price[0].strip()
        price = price.split("Цена: ")
        price = price[1]
        if price == u"по договор":
            # 'negotiable' - no numeric value / currency
            value = "/"
            currency = "/"
        else:
            price = price.split(" ")
            value = price[0]
            # normalize Macedonian currency names to ISO codes
            if price[1] == u"денари":
                currency = "MKD"
            elif price[1] == u"евра":
                currency = "EUR"
            else:
                currency = price[1]
        # the city name is embedded in raw HTML after the 'Град:' (City) label
        region = xpath.get(cont, '//div[@class="oglas_prikaz_left"]').strip()
        region = region.split("Град:<")
        region = region[1]
        region = region.split("<b class")
        region = region[0]
        region = region.split("b>")
        region = region[1]
        region = region.strip()
        country = u"Македонија"
        # date text looks like '<label>: <day>, <time>'
        datum = xpath.get(l, '//div[@class="oglas_date"]').strip()
        datum = datum.split(": ")
        datum = datum[1]
        datum = datum.split(", ")
        vreme = datum[1]
        datum = datum[0]
        if datum == u"Денес":
            # 'Today'
            date = str(now.year)+"-"+str(now.month)+"-"+str(now.day)+" "+vreme
        elif datum == u"Вчера":
            # 'Yesterday'
            da=datetime.now()-timedelta(days=1)
            date = str(da.year)+"-"+str(da.month)+"-"+str(da.day)+" "+vreme
        else:
            # otherwise '<day> <month abbreviation>': map the Macedonian month
            # abbreviation to its number; the year is assumed to be current
            datum = datum.split(" ")
            if datum[1]=="Јан":
                datum= str(now.year) + "-1-" + datum[0]
            elif datum[1]=="Фев":
                datum= str(now.year) + "-2-" + datum[0]
            elif datum[1]=="Мар":
                datum= str(now.year) + "-3-" + datum[0]
            elif datum[1]=="Апр":
                datum= str(now.year) + "-4-" + datum[0]
            elif datum[1]=="Мај":
                datum= str(now.year) + "-5-" + datum[0]
            elif datum[1]=="Јун":
                datum= str(now.year) + "-6-" + datum[0]
            elif datum[1]=="Јул":
                datum= str(now.year) + "-7-" + datum[0]
            elif datum[1]=="Авг":
                datum= str(now.year) + "-8-" + datum[0]
            elif datum[1]=="Сеп":
                datum= str(now.year) + "-9-" + datum[0]
            elif datum[1]=="Окт":
                datum= str(now.year) + "-10-" + datum[0]
            elif datum[1]=="Ное":
                datum= str(now.year) + "-11-" + datum[0]
            elif datum[1]=="Дек":
                datum= str(now.year) + "-12-" + datum[0]
            date = datum +" "+ vreme
        ad = Ad(link, title, imageUrl, description, category, subcategory, value, currency, region, date, country)
        ads.append(ad)
    return adsToJson(ads)
#print scrapePobarajOglasi() | [
"smiley.mk@gmail.com"
] | smiley.mk@gmail.com |
71c9a2cc2b4feb92d6aeebfee0db8c5c8e0099a6 | 6a09460fbff90a99ae6002057ec7dc65bbacf727 | /preprocess.py | 189a7753fa6899a6e0a3215b5067d9ac178effa4 | [
"Apache-2.0"
] | permissive | nickgerend/OntheMove | 6ebc683f6d28ef024d7cdd865482478ac0cbe056 | 956f9a0413480aabd74ba95c4c4288f51d4f0329 | refs/heads/main | 2023-01-29T22:57:43.363592 | 2020-12-14T04:13:00 | 2020-12-14T04:13:00 | 321,228,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,314 | py | # Written by: Nick Gerend, @dataoutsider
# Viz: "On the Move", enjoy!
import pandas as pd
import numpy as np
import os
import numbers
class flow:
    """One directed migration-flow record between two states.

    Attributes:
        state_id: primary state index for this record (matrix row/column).
        state_id_source: the paired state index (the 'source' side).
        value: migration count parsed from the matrix cell.
        direction: 'in' or 'out' (each matrix cell yields one of each).
    """
    def __init__(self, state_id=-1, state_id_source=-1, value=-1, direction=''):
        # sentinel defaults (-1 / '') mark an uninitialized flow
        self.state_id = state_id
        self.state_id_source = state_id_source
        self.value = value
        self.direction = direction

    def __repr__(self):
        # added for debuggability; backward-compatible addition
        return 'flow(state_id={!r}, state_id_source={!r}, value={!r}, direction={!r})'.format(
            self.state_id, self.state_id_source, self.value, self.direction)
# Load the state-by-state migration matrix; blanks become 0.
df_matrix = pd.read_csv(os.path.dirname(__file__) + '/Migration_States.csv', engine='python').fillna(0)
df_matrix_data = df_matrix.drop('State', axis=1)
# Strip thousands separators and convert every cell to float.
df_matrix_data = df_matrix_data.apply(lambda o: o.str.replace(',', '').astype(float))
data_list = []
# NOTE(review): this initialization is redundant - `value` is reassigned
# before first use inside the loop.
value = 0
# Emit one 'in' and one 'out' flow record per positive matrix cell.
for i, row in df_matrix_data.iterrows():
    for col in enumerate(row):
        value = col[1]
        icol = col[0]
        if isinstance(value, numbers.Number):
            if value > 0:
                data_list.append(flow(i, icol, value, 'in')) # in
                data_list.append(flow(icol, i, value, 'out')) # out
# NOTE(review): csv/os are imported here mid-script; os is already imported
# at the top of the file.
import csv
import os
# Write the flattened flow records to state_data.csv next to this script.
with open(os.path.dirname(__file__) + '/state_data.csv', 'w',) as csvfile:
    writer = csv.writer(csvfile, lineterminator = '\n')
    writer.writerow(['state_id', 'state_id_source', 'value', 'direction'])
    for item in data_list:
writer.writerow([item.state_id, item.state_id_source, item.value, item.direction]) | [
"nickgerend@gmail.com"
] | nickgerend@gmail.com |
0ce3c9cbc9d4b469edd31794a02179b8676cf857 | 043178ecc37e8d14ea4387f0b332e562c7ce7cf5 | /game.py | 9acc6f1eafe48b4c555651bb7d424e3e8c91e09f | [] | no_license | ChristopherMeneses/CPSC-386-Project-3 | 13876e64085655bd3aae2ea42f06ecdff6d300cf | bda85cec1baaf719fecac41af140132f23276a76 | refs/heads/master | 2021-05-07T01:41:52.714288 | 2017-12-04T00:42:25 | 2017-12-04T00:42:25 | 110,380,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,100 | py | #Project 3 - CPSC 386 - MW 5:30 PM
#AMMU - Danial Moahmmad, Felicia Aubert, Annette Ulrichsen, Christopher Meneses
#Platformer - goal is to find the princess
#Sources: Anthony Biron - https://www.youtube.com/channel/UCy0eKoY5BVtcJHFQGKVe1yg
#ChiliGames - Knight images - https://opengameart.org/content/knight-and-knight-animation
#Platform - Kenney - https://opengameart.org/content/platformer-tiles
#Princess image - ?
#Music - Code Manu - https://opengameart.org/content/platformer-game-music-pack
#Background Image - DPP Reskinning - http://appreskinning.blogspot.com/2017/07/backgrounds-for-2d-platforms-pack.html
#! /usr/bin/python
import sys
import os
import pygame
from pygame import *
from time import sleep
WIN_WIDTH = 256*3
WIN_HEIGHT = 224*3
HALF_WIDTH = int(WIN_WIDTH / 2)
HALF_HEIGHT = int(WIN_HEIGHT / 2)
#determine whether to switch maps - used in while loop
moveNext = False
movePrev = False
done = False
#standard pygame initializations
DISPLAY = (WIN_WIDTH, WIN_HEIGHT)
DEPTH = 32
FLAGS = 0
newX = 0
newY = 0
pygame.init()
screen = pygame.display.set_mode(DISPLAY, FLAGS, DEPTH)
pygame.display.set_caption("Knight's Quest")
pygame.mixer.music.load('music/bg.mp3')
pygame.mixer.music.play(-1)
#set character animations and sizes
character = Surface((16,32),pygame.SRCALPHA)
character = pygame.image.load("images/stand.png").convert_alpha()
character = pygame.transform.scale(character, (16*4,32*3))
knightstand1 = character
character = Surface((16,32),pygame.SRCALPHA)
character = pygame.image.load("images/run1.png").convert_alpha()
character = pygame.transform.scale(character, (16*4,32*3))
knightwalk1 = character
character = Surface((16,32),pygame.SRCALPHA)
character = pygame.image.load("images/run2.png").convert_alpha()
character = pygame.transform.scale(character, (16*4,32*3))
knightwalk2 = character
character = Surface((16,32),pygame.SRCALPHA)
character = pygame.image.load("images/run3.png").convert_alpha()
character = pygame.transform.scale(character, (16*4,32*3))
knightwalk3 = character
character = Surface((16,32),pygame.SRCALPHA)
character = pygame.image.load("images/jump.png").convert_alpha()
character = pygame.transform.scale(character, (16*4,32*3))
knightjump1 = character
#allow for font to be rendered later
pygame.font.init()
dialogFont = pygame.font.SysFont("garuda", 20)
pygame.display.update()
white = (255,255,255)
black = (0,0,0)
def main():
#initialize all required variables
timer = pygame.time.Clock()
currLevel = 1
up = down = left = right = running = False
platforms = []
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((0,0,0))
bgImg = pygame.image.load('images/bg1.png')
background.blit(bgImg, (0,0))
screen.blit(background, (0,0))
time = 0
pygame.display.update()
#bg = Surface((WIN_WIDTH,WIN_HEIGHT)).convert()
entities = pygame.sprite.Group()
speech = pygame.image.load("images/title.png").convert_alpha()
speech = pygame.transform.scale(speech ,(171*2,108*2))
screen.blit(speech, (HALF_WIDTH - 171 ,HALF_HEIGHT - 108))
pygame.display.update()
sleep(2)
x = y = 0
level = getLevel(currLevel)
# build the level based off the level returned
for row in level:
for col in row:
if col == "P":
p = Platform(x, y)
platforms.append(p)
entities.add(p)
if col == "e":
e = ExitBlock(x, y)
platforms.append(e)
entities.add(e)
if col == "B":
B = PreviousBlock(x, y)
platforms.append(B)
entities.add(B)
if col == "K":
k = King(x, y)
platforms.append(k)
entities.add(k)
if col == "F":
F = Princess(x, y)
platforms.append(F)
entities.add(F)
x += 16*3
y += 16*3
x = 0
total_level_width = len(level[0])*16*3
total_level_height = len(level)*16*3
camera = Camera(complex_camera, total_level_width, total_level_height)
player = Player(newX, newY)
entities.add(player)
#game run loop
while 1:
timer.tick(60)
time = time + 1
#3 minute timer to find the princess
if time == 10800:
break;
#allows for character to be moved
for e in pygame.event.get():
if e.type == QUIT: raise SystemExit("QUIT")
if e.type == KEYDOWN and e.key == K_ESCAPE:
raise SystemExit("ESCAPE")
if e.type == KEYDOWN and e.key == K_UP:
up = True
if e.type == KEYDOWN and e.key == K_DOWN:
down = True
if e.type == KEYDOWN and e.key == K_LEFT:
left = True
if e.type == KEYDOWN and e.key == K_RIGHT:
right = True
if e.type == KEYUP and e.key == K_UP:
up = False
if e.type == KEYUP and e.key == K_DOWN:
down = False
if e.type == KEYUP and e.key == K_RIGHT:
right = False
if e.type == KEYUP and e.key == K_LEFT:
left = False
screen.blit(background,(0,0))
camera.update(player)
# update player, draw everything else
player.update(up, down, left, right, running, platforms)
#if reached portal, move to respective map or finish the game
if (moveNext == True or movePrev == True or done == True):
x=0
y=0
if moveNext:
currLevel = currLevel + 1
if movePrev:
currLevel = currLevel - 1
#reset platforms in order to build the new map
platforms=[]
#get the next or previous level
level = getLevel(currLevel)
#if game is finished, display dialogue and leave game loop
if done:
for e in entities:
screen.blit(e.image, camera.apply(e))
speech = pygame.image.load("images/princess-dialogue.png").convert_alpha()
screen.blit(speech, (40,0))
pygame.display.update()
sleep(2)
break
entities = pygame.sprite.Group()
for row in level:
for col in row:
if col == "P":
p = Platform(x, y)
platforms.append(p)
entities.add(p)
if col == "e":
e = ExitBlock(x, y)
platforms.append(e)
entities.add(e)
if col == "B":
B = PreviousBlock(x, y)
platforms.append(B)
entities.add(B)
if col == "K":
k = King(x, y)
platforms.append(k)
entities.add(k)
if col == "F":
F = Princess(x, y)
platforms.append(F)
entities.add(F)
x += 16*3
y += 16*3
x = 0
total_level_width = len(level[0])*16*3
total_level_height = len(level)*16*3
camera = Camera(complex_camera, total_level_width, total_level_height)
player = Player(newX, newY)
entities.add(player)
sleep(.4)
#draw all the entities added through level generation
for e in entities:
screen.blit(e.image, camera.apply(e))
pygame.display.update()
#if done then you reached the princess - show proper message
if done:
#display end dialogue
screen.fill(black)
speech = pygame.image.load("images/end-dialogue.png").convert_alpha()
#363 is width of image and 90 is height - will center it
screen.blit(speech, (HALF_WIDTH - 363/2, HALF_HEIGHT - 90/2))
pygame.display.update()
sleep(2.5)
#otherwise show failure message
else:
screen.fill(black)
speech = pygame.image.load("images/fail.png")
#363 is width of image and 90 is height - will center it
screen.blit(speech, (HALF_WIDTH - 363/2, HALF_HEIGHT - 90/2))
pygame.display.update()
sleep(2.5)
# Scrolling camera: offsets every entity's rect by the camera state so the
# view follows the player (technique credited at the top of the file).
class Camera(object):
    def __init__(self, camera_func, width, height):
        # camera_func maps (camera_rect, target_rect) -> new camera rect;
        # state spans the whole level in pixels
        self.camera_func = camera_func
        self.state = Rect(0, 0, width, height)
    def apply(self, target):
        # shift a sprite's rect into screen coordinates for drawing
        return target.rect.move(self.state.topleft)
    def update(self, target):
        # recompute the camera rect so it follows `target` (the player)
        self.state = self.camera_func(self.state, target.rect)
def simple_camera(camera, target_rect):
    """Center the view on target_rect with no level-edge clamping."""
    left, top = target_rect[0], target_rect[1]
    width, height = camera[2], camera[3]
    return Rect(HALF_WIDTH - left, HALF_HEIGHT - top, width, height)
def complex_camera(camera, target_rect):
    """Center the view on target_rect, clamped to the level boundaries."""
    w, h = camera[2], camera[3]
    # offset that would put the target dead-center on screen
    x = HALF_WIDTH - target_rect[0]
    y = HALF_HEIGHT - target_rect[1]
    x = min(0, x)                             # stop scrolling at the left edge
    x = max(-(camera.width - WIN_WIDTH), x)   # stop scrolling at the right edge
    y = max(-(camera.height - WIN_HEIGHT), y) # stop scrolling at the bottom
    y = min(0, y)                             # stop scrolling at the top
    return Rect(x, y, w, h)
#returns the next or previous level for generation
#sets character spawn coordinates
def getLevel(currLevel):
global newX
global newY
global moveNext
global movePrev
level = []
if currLevel == 1:
level = [
"PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP",
"P P",
"P e ",
"P p",
"P PPPPPPPPP PPP",
"P PP P",
"P PP P",
"P P",
"P PPPPPPPP P",
"P PP P",
"P PPPPPPP P",
"P PPPPPP P",
"P P",
"P PPPPPPP P",
"P PP P",
"P PPPPPP P",
"P P",
"P PPPPPPPPPPP P",
"P P",
"P P",
"P PPPPPPPPPPP P",
"P PP P",
"P PP P",
"P P",
"P K P",
"PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP",]
total_level_width = len(level[0])*16*3
total_level_height = len(level)*16*3
if movePrev:
newX = total_level_width - 150
newY = 164
else:
newX = total_level_width - 150
newY = total_level_height - 80
elif currLevel == 2:
level = [
"PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP",
"e P",
"p P",
"PPP P",
"P PP P",
"P PP P",
"P PP PP P",
"P P P",
"P PP P",
"P PP P",
"P P",
"P P",
"P PP P",
"P P",
"P P",
"P PP P",
"P P",
"P P",
"P PP P",
"P PP P",
"P P",
"P P P",
"P P",
"B PP P",
"B P",
"PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP",]
total_level_width = len(level[0])*16*3
total_level_height = len(level)*16*3
if movePrev:
newX = 100
newY = 150
else:
newX = 100
newY = total_level_height - 80
elif currLevel == 3:
level = [
"PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP",
" P",
"F P",
"PPP P",
"P P P",
"P PP PP P",
"P P PP P",
"P PP P",
"P P",
"P P",
"P P P",
"P P",
"P PP P",
"P PP P",
"P P P",
"P P P",
"P P",
"P P P",
"P P P",
"P PP P",
"P PP P",
"P P P",
"P PP P P",
"P P",
"P P",
"P PP P",
"P P",
"P P",
"P PPP P",
"P B",
"P B",
"PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP",]
total_level_width = len(level[0])*16*3
total_level_height = len(level)*16*3
if movePrev:
newX = total_level_width - 150
newY = total_level_height - 80
else:
newX = total_level_width - 150
newY = total_level_height - 80
moveNext = False
movePrev = False
return level
# Base sprite class all game objects derive from; only runs the
# pygame.sprite.Sprite initializer so subclasses can set image/rect.
class Entity(pygame.sprite.Sprite):
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
# Player sprite: the knight.  Holds velocity/animation state and, each frame,
# applies input and gravity, moves, resolves collisions, and animates.
class Player(Entity):
    def __init__(self, x, y):
        Entity.__init__(self)
        # horizontal / vertical velocity in pixels per frame
        self.xvel = 0
        self.yvel = 0
        self.faceright = False
        self.onGround = False
        self.airborne = True
        # frame counter driving the walk-cycle animation
        self.counter = 0
        self.image = knightstand1
        self.rect = Rect(x, y, 16*4, 32*3)
    def update(self, up, down, left, right, running, platforms):
        """Apply input and gravity, move on each axis, resolve collisions, animate."""
        if up:
            # only jump if on the ground
            if self.onGround: self.yvel -= 10
        if down:
            pass
        if running:
            self.xvel = 12
        if left:
            self.xvel = -8
            self.faceright = False
        if right:
            self.xvel = 8
            self.faceright = True
        if not self.onGround:
            # only accelerate with gravity if in the air
            self.yvel += 0.35
            # max falling speed
            if self.yvel > 100: self.yvel = 100
        if not(left or right):
            self.xvel = 0
        # rising, or falling faster than the resting speed, counts as airborne
        if self.yvel < 0 or self.yvel > 1.2: self.airborne = True
        # increment in x direction
        self.rect.left += self.xvel
        # do x-axis collisions
        self.collide(self.xvel, 0, platforms)
        # increment in y direction
        self.rect.top += self.yvel
        # assume airborne until a downward collision proves otherwise
        self.onGround = False;
        # do y-axis collisions
        self.collide(0, self.yvel, platforms)
        self.animate()
    # Resolve collisions along one axis and trigger world events: gates set
    # the moveNext/movePrev globals, the Princess sets `done`, and touching
    # the King blits his hint dialogue onto the screen.
    def collide(self, xvel, yvel, platforms):
        global done
        for p in platforms:
            if pygame.sprite.collide_rect(self, p):
                if isinstance(p, PreviousBlock):
                    global movePrev
                    movePrev = True
                if isinstance(p, ExitBlock):
                    global moveNext
                    moveNext = True
                if isinstance(p, Princess):
                    done = True
                if isinstance(p, King):
                    speech = pygame.image.load("images/king-dialogue.png").convert_alpha()
                    screen.blit(speech, (440,450))
                # push the player back out of the solid tile on the moving axis
                if xvel > 0:
                    self.rect.right = p.rect.left
                if xvel < 0:
                    self.rect.left = p.rect.right
                if yvel > 0:
                    self.rect.bottom = p.rect.top
                    self.onGround = True
                    self.airborne = False
                    self.yvel = 0
                if yvel < 0:
                    self.rect.top = p.rect.bottom
    # Pick the sprite for this frame; the jump frame overrides walk/stand.
    def animate(self):
        if self.xvel > 0 or self.xvel < 0:
            self.walkloop()
            if self.airborne: self.updatecharacter(knightjump1)
        else:
            self.updatecharacter(knightstand1)
            if self.airborne: self.updatecharacter(knightjump1)
    # Walk animation - alternates the three walk frames every 5 updates.
    def walkloop(self):
        if self.counter == 5:
            self.updatecharacter(knightwalk1)
        elif self.counter == 10:
            self.updatecharacter(knightwalk2)
        elif self.counter == 15:
            self.updatecharacter(knightwalk3)
            self.counter = 0
        self.counter = self.counter + 1
    # Set the current sprite, mirroring it horizontally when facing left.
    def updatecharacter(self, ansurf):
        if not self.faceright: ansurf = pygame.transform.flip(ansurf,True,False)
        self.image = ansurf
# Solid 48x48 terrain tile the level maps are built from.
class Platform(Entity):
    def __init__(self, x, y):
        Entity.__init__(self)
        tile = pygame.image.load("images/block.png").convert_alpha()
        self.image = pygame.transform.scale(tile, (16*3, 16*3))
        self.rect = Rect(x, y, 16*3, 16*3)
# King NPC: shows a hint dialogue when the player touches him
# (see Player.collide).
class King(Entity):
    def __init__(self, x, y):
        Entity.__init__(self)
        sprite = pygame.image.load("images/king.png").convert_alpha()
        self.image = pygame.transform.scale(sprite, (16*4, 16*3*2))
        self.rect = Rect(x, y-16*3, 16*3, 16*3*2)
# Princess NPC: reaching her ends the game (see Player.collide).
class Princess(Entity):
    def __init__(self, x, y):
        Entity.__init__(self)
        sprite = pygame.image.load("images/p2.png").convert_alpha()
        self.image = pygame.transform.scale(sprite, (16*4, 16*3*2))
        self.rect = Rect(x, y-16*3, 16*3, 16*3*2)
# Gate tile that sends the player forward to the next map
# (collision sets the moveNext global in Player.collide).
class ExitBlock(Entity):
    def __init__(self, x, y):
        Entity.__init__(self)
        gate = pygame.image.load("images/gate.png").convert_alpha()
        self.image = pygame.transform.scale(gate, (16*6, 16*6))
        self.rect = Rect(x, y, 16*6, 16*6)
# Gate that sends the player back to the previous map (collision sets the
# movePrev global in Player.collide).
# NOTE: inherits Platform but calls Entity.__init__ directly, bypassing
# Platform.__init__ so the block texture is never loaded; the gate image is
# scaled to 1px wide - presumably intended to render as invisible.
class PreviousBlock(Platform):
    def __init__(self, x, y):
        Entity.__init__(self)
        self.image = pygame.image.load("images/gate.png").convert_alpha()
        self.image = pygame.transform.scale(self.image,(1,16*6))
        self.rect = Rect(x, y, 16*6, 16*6)
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | ChristopherMeneses.noreply@github.com |
c349a23b05207454c50dc53da385c0f8d8822ee9 | b3edb28272b2c064f7302e8ec52f0140292d21d7 | /pipeline.py | fbfd95a1c26c4970d3d3afa5392c6cdf60baa6e3 | [] | no_license | mattmiller899/LSTM_app | 4cdadc62e67c2e7a1c923def493b765c22ecf7e0 | ae599e5157f69be827a841d1f46681baaf5d78ea | refs/heads/master | 2020-04-03T17:45:54.566811 | 2018-10-30T21:21:35 | 2018-10-30T21:21:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,525 | py | import sys
import numpy as np
import argparse
from preprocess import *
def get_args():
    """Parse command-line options for the n-gram/GloVe pipeline.

    Defines: -k (kmer length), -s (stride), -n (neg_dir), -o (work_dir),
    -p (pos_dir).  NOTE(review): main() later reads args.input_dir, which is
    never defined here and will raise AttributeError - confirm intent.
    """
    parser = argparse.ArgumentParser(description='Generate n-grams and GloVe vectors')
    parser.add_argument('-k', dest='k', type=int, default=6, help='Length of kmer')
    parser.add_argument('-s', dest='s', type=int, default=2, help='Stride of splitting sequences into kmers')
    parser.add_argument('-n', dest='neg_dir', help='Path to the directory of the negative files')
    parser.add_argument('-o', dest='work_dir', help='Directory where output will be stored')
    parser.add_argument('-p', dest='pos_dir', help='Path to the directory of the positive files')
    args = parser.parse_args()
    return args
def main():
    """Entry point: parse CLI args and run the whole pipeline."""
    args = get_args()
    # BUG(review): get_args() defines no 'input_dir' option, so args.input_dir
    # raises AttributeError here - confirm which directory was intended.
    Pipeline(**args.__dict__).run(input_dir=args.input_dir)
if __name__ == "__main__":
main()
class Pipeline:
def __init__(self,
k,
s,
work_dir,
pos_dir,
neg_dir,
**kwargs # allows some command line arguments to be ignored
):
self.k = k
self.s = s
self.work_dir = output_dir
if not os.path.isdir(self.work_dir):
os.makedirs(self.work_dir)
self.pos_dir = pos_dir
self.neg_dir = neg_dir
def run(self, input_dir):
    """Execute the pipeline steps in order, feeding each step's output
    directory to the next.

    NOTE(review): ``input_dir`` is accepted but never used, and
    ``step_02_qc_reads_with_vsearch`` is not defined on this class
    (only ``step_02_glove`` exists), so this raises AttributeError --
    confirm which second step was intended.
    """
    output_dir_list = list()
    output_dir_list.append(self.step_01_gen_kmers())
    output_dir_list.append(self.step_02_qc_reads_with_vsearch(input_dir=output_dir_list[-1]))
def initialize_step(self):
    """Create a logger and an output directory named after the *calling*
    step (resolved from the interpreter frame stack), so each step gets
    its own subdirectory under the work directory.

    Returns:
        (log, output_dir) tuple.

    NOTE(review): relies on ``create_output_dir`` (presumably exported by
    the ``preprocess`` star import) and on ``self.debug``, which
    __init__ never sets -- verify both exist at runtime.
    """
    function_name = sys._getframe(1).f_code.co_name
    log = logging.getLogger(name=function_name)
    log.setLevel(logging.WARNING)
    output_dir = create_output_dir(output_dir_name=function_name, parent_dir=self.work_dir, debug=self.debug)
    return log, output_dir
def complete_step(self, log, output_dir):
    """Sanity-check that a finished step produced at least one output file.

    Raises PipelineException when *output_dir* is empty.
    """
    produced = sorted(os.listdir(output_dir))
    if not produced:
        raise PipelineException('ERROR: no output files in directory "{}"'.format(output_dir))
    return
def step_01_gen_kmers(self):
    """Step 1: split every positive/negative input file into k-mers.

    Writes one ``<name>_<k>mer_<s>stride`` file per input into this
    step's output directory and, for positive files only, a GloVe
    corpus file derived from the k-mer file.

    Returns:
        The step's output directory path.
    """
    log, output_dir = self.initialize_step()
    if len(os.listdir(output_dir)) > 0:
        log.warning('output directory "%s" is not empty, this step will be skipped', output_dir)
    else:
        pos_files = glob.glob(os.path.join(self.pos_dir, '*'))
        neg_files = glob.glob(os.path.join(self.neg_dir, '*'))
        log.info('Pos files: "%s"' % pos_files)
        log.info('Neg files: "%s"' % neg_files)
        for input_file in pos_files:
            # BUG FIX: os.path.splittext -> os.path.splitext (the typo
            # raised AttributeError on the first input file).
            input_name = os.path.splitext(os.path.basename(input_file))[0]
            output_file = os.path.join(output_dir, '%s_%dmer_%dstride' % (input_name, self.k, self.s))
            seq2kmer(input_file, output_file)
            # BUG FIX: the original left the literal '%s_corpus'
            # unformatted; derive the corpus name from the k-mer file.
            # TODO confirm the naming expected by the GloVe step.
            glove_file = '%s_corpus' % output_file
            forglove(output_file, glove_file)
        for input_file in neg_files:
            input_name = os.path.splitext(os.path.basename(input_file))[0]
            output_file = os.path.join(output_dir, '%s_%dmer_%dstride' % (input_name, self.k, self.s))
            seq2kmer(input_file, output_file)
    # BUG FIX: complete_step is a method; the bare call raised NameError.
    self.complete_step(log, output_dir)
    return output_dir
def step_02_glove(self, input_dir):
log, output_dir = self.initialize_step()
if len(os.listdir(output_dir)) > 0:
log.warning('output directory "%s" is not empty, this step will be skipped', output_dir)
else:
| [
"mattmiller899@login3.cm.cluster"
] | mattmiller899@login3.cm.cluster |
3eac202a15e7925e0dda2c104389112a52b42967 | 0bfc74145835d49068a721ad05184ced140392be | /localx.py | 8db1de647f71a98c86ae848422335effa7b4d923 | [
"MIT"
] | permissive | rencoder/Jupyter-Notebooks | 5110b4e504d261487b3ebd548232c3c2cde97353 | aa53cd7fc14b6ab9c0e0e7223a0dcb256cc8fbca | refs/heads/master | 2022-02-22T17:39:15.699113 | 2018-12-16T14:37:46 | 2018-12-16T14:37:46 | 83,718,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,895 | py | import spacy, alg
from scipy import sparse
from alg import dphs, dtls, outphs, nlp
from sklearn.metrics.pairwise import cosine_distances, euclidean_distances
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer
print "Loading complete."
def glem(x):
    """spaCy-based TF-IDF preprocessor: lemmatize *x* (coerced to unicode
    for Python 2), dropping stopwords, non-alphabetic tokens and
    single-character tokens; lemmas are joined with single spaces."""
    d = nlp(unicode(x))
    return " ".join((x.lemma_ for x in d
                     if not x.is_stop and x.is_alpha and len(x.text) > 1))
# Build the TF-IDF training corpus: one flattened text per title in dtls.
try:
    d_xtr = []
    for title in dtls.title.tolist():
        b = alg.inxter(title,)
        b = alg.get_phs(*b)
        # Drop the last row and concatenate the raw-text (rtw) column.
        d_xtr.append("".join(b[:-1].rtw))
except IndexError, ex:
    # Python 2 except syntax; ``ex.message`` is also Python 2 only.
    print "Warning: %s\r\n" % ex.message
# Tokens must contain at least one lowercase letter; glem() lemmatizes,
# and document frequency is clipped to [8, 90%] of the corpus.
v = TfidfVectorizer(token_pattern=u'(?ui)\\b\\w*[a-z]+\\w*\\b', preprocessor=glem, min_df=8, max_df=.9, norm="l2", stop_words=spacy.en.STOP_WORDS).fit(d_xtr)
d_xtrtr = v.transform(d_xtr)
def get_answer(q, upr=10):
    """Return HTML snippets for the documents closest to query *q*.

    The query is TF-IDF-vectorized and ranked by cosine distance against
    the precomputed corpus matrix.  The best match is rendered in full
    (minus its last row); up to ``upr - 1`` runners-up are rendered from
    their first 5 rows only.  Font size scales with the ``fqs`` score
    (boosted 15% above 16px) and "bold" in ``fqf`` toggles <b> tags.

    NOTE(review): Python 2 code; assumes alg.get_phs yields a pandas
    DataFrame with rtw/fqs/fqf columns -- confirm.
    """
    temp_results = dtls.iloc[cosine_distances(v.transform([q]), d_xtrtr)[0].argsort()[:10]].title.tolist()
    topresult = "".join(
        alg.get_phs(*alg.inxter(temp_results[0]))[:-1]\
        .apply(lambda x: "<p style=\"font-size:%dpx;font-family:Century Gothic;\">"
               % int(x.fqs * (1.15 if x.fqs > 16 else 1.))
               + ("<b>" if "bold" in x.fqf.lower() else "")
               + x.rtw
               + ("</b>" if "bold" in x.fqf.lower() else "")
               + "</p>", axis=1))
    if upr == 1:
        return topresult
    return [topresult] + ["".join(
        alg.get_phs(*alg.inxter(resix)).pipe(lambda x: x[:min(5, x.shape[0])])\
        .apply(lambda x: "<p style=\"font-size:%dpx;font-family:Century Gothic;\">"
               % int(x.fqs * (1.15 if x.fqs > 16 else 1.))
               + ("<b>" if "bold" in x.fqf.lower() else "")
               + x.rtw
               + ("</b>" if "bold" in x.fqf.lower() else "")
               + "</p>", axis=1)) for resix in temp_results[1:min(upr, 10)]]
| [
"noreply@github.com"
] | rencoder.noreply@github.com |
0743a4b391e3f2f6b183b0a8e8edfc654b8a1ece | 9ce0159c84ac4e63dbd7d4f8593ff1f7a5c3ae9d | /jblog/bin/markdown_py | 47320103d1d0c96c10fa005ee678ec3206b58081 | [] | no_license | cvanderbush/cvanderbush.github.io | 8c7af21f4bbbfcbcc14bccb2d207eea883a41e45 | 89958814e8b5cd24f23981e0bfcf5f05d89f9c9a | refs/heads/master | 2021-08-14T13:33:33.615612 | 2017-11-15T21:01:50 | 2017-11-15T21:01:50 | 110,868,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | #!/Users/carl/github/cvanderbush.github.io/jblog/bin/python3.6
"""
Python Markdown, the Command Line Script
========================================
This is the command line script for Python Markdown.
Basic use from the command line:
markdown source.txt > destination.html
Run "markdown --help" to see more options.
See markdown/__init__.py for information on using Python Markdown as a module.
## Authors and License
Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
Contact: markdown@freewisdom.org
Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
Copyright 200? Django Software Foundation (OrderedDict implementation)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see docs/LICENSE for details).
"""
# Console entry point: delegate to the markdown package's CLI runner.
if __name__ == '__main__':
    from markdown.__main__ import run
    run()
| [
"carl.vanderbush@gmail.com"
] | carl.vanderbush@gmail.com | |
d4c9f65b91264b74274254e5de4ecd7a15110a30 | f038f701cd9c8b6ef646c83275f4f81a5104be1a | /demo03.py | 6a6a0be001eee378ccd1993e5e4f2901b8deab46 | [] | no_license | wang-yating1995/wyt-office | 8052c96e0dc921af0c5891985dce79a3b46cb174 | 91d823201cb8fc90a4d46b470e348c95f33b7b8b | refs/heads/master | 2022-12-13T06:37:04.134118 | 2020-09-08T10:29:54 | 2020-09-08T10:29:54 | 290,373,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29 | py | from test.a import b
print(b) | [
"1103120302@qq.com"
] | 1103120302@qq.com |
a7d224c883d330dc4e25fc718ee44cf83213c069 | 2c77676a38b8fb62e7aaee85ac04c41435af7f52 | /samples/benchmarks.py | ee129006e0ab2137dd570cd49a6e27c6e5954535 | [
"MIT"
] | permissive | Azure/azure-data-lake-store-python | 6e083289b575830e9d02123c01cc5bbb7430cd98 | b44a68d213288b0caa3cc60c9517277ced93b309 | refs/heads/master | 2023-08-28T22:32:55.873164 | 2023-04-24T19:30:51 | 2023-04-24T19:30:51 | 62,902,455 | 81 | 77 | MIT | 2023-04-24T19:30:53 | 2016-07-08T16:31:32 | Python | UTF-8 | Python | false | false | 8,579 | py | from __future__ import print_function
import functools
import hashlib
import logging
import os
import shutil
import sys
import time
from azure.datalake.store import core, multithread
from azure.datalake.store.transfer import ADLTransferClient
from azure.datalake.store.utils import WIN
from tests.testing import md5sum
def benchmark(f):
    """Decorator: time *f*, print start/finish banners, and return a
    ``(result, elapsed_seconds)`` tuple instead of the bare result."""
    @functools.wraps(f)
    def timed(*args, **kwargs):
        print('[%s] starting...' % (f.__name__))
        began = time.time()
        value = f(*args, **kwargs)
        elapsed = time.time() - began
        print('[%s] finished in %2.4fs' % (f.__name__, elapsed))
        return value, elapsed
    return timed
def mock_client(adl, nthreads):
    """Build an ADLTransferClient whose transfer/merge callbacks are
    no-ops, for exercising scheduling logic without real I/O."""
    def noop_transfer(adlfs, src, dst, offset, size, buffersize, blocksize, shutdown_event=None):
        pass

    def noop_merge(adlfs, outfile, files, shutdown_event=None):
        pass

    return ADLTransferClient(
        adl,
        'foo',
        transfer=noop_transfer,
        merge=noop_merge,
        nthreads=nthreads)
def checksum(path):
    """ Generate checksum for file/directory content """
    # Missing path -> no checksum at all.
    if not os.path.exists(path):
        return None
    # A single file is just its md5.
    if os.path.isfile(path):
        return md5sum(path)
    # A directory hashes the sorted per-file digests so the result is
    # independent of walk order.
    digests = []
    for root, dirs, files in os.walk(path):
        for name in files:
            full = os.path.join(root, name)
            if os.path.exists(full):
                digests.append(str.encode(md5sum(full)))
    return hashlib.md5(b''.join(sorted(digests))).hexdigest()
def du(path):
    """ Find total size of content used by path """
    if os.path.isfile(path):
        return os.path.getsize(path)
    total = 0
    for root, dirs, files in os.walk(path):
        total += sum(os.path.getsize(os.path.join(root, name)) for name in files)
    return total
def verify(instance):
    """ Confirm whether target file matches source file """
    # *instance* is a finished ADLUploader/ADLDownloader; we reach into
    # its private transfer client for the filesystem handle.
    adl = instance.client._adlfs
    lfile = instance.lpath
    rfile = instance.rpath
    print("finish w/o error:", instance.successful())
    print("local file :", lfile)
    if os.path.exists(lfile):
        print("local file size :", du(lfile))
    else:
        print("local file size :", None)
    print("remote file :", rfile)
    if adl.exists(rfile, invalidate_cache=False):
        # deep=True so directory trees are sized recursively.
        print("remote file size:", adl.du(rfile, total=True, deep=True))
    else:
        print("remote file size:", None)
@benchmark
def bench_upload_1_50gb(adl, lpath, rpath, config):
    """Upload one 50 GB file; @benchmark adds timing and returns (result, secs)."""
    params = config[bench_upload_1_50gb.__name__]
    return multithread.ADLUploader(adl, lpath=lpath, rpath=rpath, **params)
@benchmark
def bench_upload_50_1gb(adl, lpath, rpath, config):
    """Upload fifty 1 GB files; @benchmark adds timing and returns (result, secs)."""
    params = config[bench_upload_50_1gb.__name__]
    return multithread.ADLUploader(adl, lpath=lpath, rpath=rpath, **params)
@benchmark
def bench_download_1_50gb(adl, lpath, rpath, config):
    """Download one 50 GB file; @benchmark adds timing and returns (result, secs)."""
    params = config[bench_download_1_50gb.__name__]
    return multithread.ADLDownloader(adl, lpath=lpath, rpath=rpath, **params)
@benchmark
def bench_download_50_1gb(adl, lpath, rpath, config):
    """Download fifty 1 GB files; @benchmark adds timing and returns (result, secs)."""
    params = config[bench_download_50_1gb.__name__]
    return multithread.ADLDownloader(adl, lpath=lpath, rpath=rpath, **params)
def setup_logging(level='INFO'):
    """ Log only Azure messages, ignoring 3rd-party libraries """
    name_to_level = {
        'CRITICAL': logging.CRITICAL,
        'ERROR': logging.ERROR,
        'WARNING': logging.WARNING,
        'INFO': logging.INFO,
        'DEBUG': logging.DEBUG,
    }
    try:
        resolved = name_to_level[level]
    except KeyError:
        raise ValueError('invalid log level: {}'.format(level))
    logging.basicConfig(
        format='%(asctime)s %(name)-17s %(levelname)-8s %(message)s')
    # Only the azure.datalake.store logger is turned up; everything else
    # keeps the root default.
    logging.getLogger('azure.datalake.store').setLevel(resolved)
def print_summary_statistics(stats):
    """Print one min/mean/sd/median/max row (rounded to int) per benchmark;
    benchmarks with no samples print all zeros."""
    from statistics import mean, median, pstdev
    print("benchmark min mean sd median max")
    for name, samples in stats.items():
        if not samples:
            row = [0, 0, 0, 0, 0]
        else:
            row = [int(round(fn(samples), 0)) for fn in (min, mean, pstdev, median, max)]
        print(name, *row)
# Benchmark driver: uploads/downloads a 50GB file and fifty 1GB files
# against Azure Data Lake Store, optionally verifying and checksumming.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('local_path', type=str)
    parser.add_argument('remote_path', type=str)
    parser.add_argument('-l', '--log-level', default='INFO')
    parser.add_argument('-n', '--iterations', default=1, type=int)
    parser.add_argument('-q', '--quiet', dest='verbose', action='store_false')
    parser.add_argument('-s', '--statistics', action='store_true')
    parser.add_argument('--no-verify', dest='verify', action='store_false')
    parser.add_argument('--no-checksum', dest='validate', action='store_false')
    args = parser.parse_args(sys.argv[1:])

    setup_logging(level=args.log_level)

    adl = core.AzureDLFileSystem()

    # Required setup until outstanding issues are resolved
    adl.mkdir(args.remote_path)

    # OS-specific settings
    if WIN:
        config = {
            'bench_upload_1_50gb': {
                'nthreads': 64,
                'buffersize': 32 * 2**20,
                'blocksize': 4 * 2**20
            },
            'bench_upload_50_1gb': {
                'nthreads': 64,
                'buffersize': 32 * 2**20,
                'blocksize': 4 * 2**20
            },
            'bench_download_1_50gb': {
                'nthreads': 64,
                'buffersize': 32 * 2**20,
                'blocksize': 4 * 2**20
            },
            'bench_download_50_1gb': {
                'nthreads': 64,
                'buffersize': 32 * 2**20,
                'blocksize': 4 * 2**20
            }
        }
    else:
        config = {
            'bench_upload_1_50gb': {
                'nthreads': 64,
                'buffersize': 4 * 2**20,
                'blocksize': 4 * 2**20
            },
            'bench_upload_50_1gb': {
                'nthreads': 64,
                'buffersize': 4 * 2**20,
                'blocksize': 4 * 2**20
            },
            'bench_download_1_50gb': {
                'nthreads': 16,
                'buffersize': 4 * 2**20,
                'blocksize': 4 * 2**20
            },
            'bench_download_50_1gb': {
                'nthreads': 16,
                'buffersize': 4 * 2**20,
                'blocksize': 4 * 2**20
            }
        }
    # Propagate the verbosity flag into every benchmark's kwargs.
    for benchmark in config:
        config[benchmark]['verbose'] = args.verbose
    stats = {}
    for _ in range(args.iterations):
        # Upload/download 1 50GB files
        lpath_up = os.path.join(args.local_path, '50gbfile.txt')
        lpath_down = os.path.join(args.local_path, '50gbfile.txt.out')
        rpath = args.remote_path + '/50gbfile.txt'
        # Clean both ends so each iteration starts fresh.
        if adl.exists(rpath, invalidate_cache=False):
            adl.rm(rpath)
        if os.path.exists(lpath_down):
            os.remove(lpath_down)
        result, elapsed = bench_upload_1_50gb(adl, lpath_up, rpath, config)
        if args.verify:
            verify(result)
        # NOTE(review): verify() calls instance.successful() -- if
        # ``successful`` is a method, this bare attribute is always
        # truthy and samples are recorded even on failure; confirm.
        if result.successful:
            stats.setdefault('up-1-50gb', []).append(elapsed)
        result, elapsed = bench_download_1_50gb(adl, lpath_down, rpath, config)
        if args.verify:
            verify(result)
        if result.successful:
            stats.setdefault('down-1-50gb', []).append(elapsed)
        if args.validate:
            print(checksum(lpath_up), lpath_up)
            print(checksum(lpath_down), lpath_down)
        # Upload/download 50 1GB files
        lpath_up = os.path.join(args.local_path, '50_1GB_Files')
        lpath_down = os.path.join(args.local_path, '50_1GB_Files.out')
        rpath = args.remote_path + '/50_1GB_Files'
        if adl.exists(rpath):
            adl.rm(rpath, recursive=True)
        if os.path.exists(lpath_down):
            shutil.rmtree(lpath_down)
        result, elapsed = bench_upload_50_1gb(adl, lpath_up, rpath, config)
        if args.verify:
            verify(result)
        if result.successful:
            stats.setdefault('up-50-1gb', []).append(elapsed)
        result, elapsed = bench_download_50_1gb(adl, lpath_down, rpath, config)
        if args.verify:
            verify(result)
        if result.successful:
            stats.setdefault('down-50-1gb', []).append(elapsed)
        if args.validate:
            print(checksum(lpath_up), lpath_up)
            print(checksum(lpath_down), lpath_down)
    if args.statistics:
        print_summary_statistics(stats)
| [
"begoldsm@microsoft.com"
] | begoldsm@microsoft.com |
3eeb174ed996dc804605eeb4676938567646c899 | 205b602b45e9e81df515bb12234c8ac014576bdd | /풀이/1/연습문제1-1.py | 3ebf5114a231c32e31db8dc006dec049acc3fb7c | [] | no_license | DongGeon0908/python_basic_algorithm | bc1745b9d9b8c7b19a3277f2734b63d7d14a8272 | 0b29dcfb53d2b17bb630eebf9319991cd5ae69fb | refs/heads/master | 2023-01-04T18:17:47.855981 | 2020-10-12T05:30:00 | 2020-10-12T05:30:00 | 294,415,579 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | def sum(n):
output = 0
for i in range(1,1+n):
output = output + i*i
return output
print(sum(5))
| [
"noreply@github.com"
] | DongGeon0908.noreply@github.com |
0592c15f4d126bbd32ef99d56ba758dc6a24ac90 | eed7e01d9e381e84da33b11e1475bebb0afc8615 | /MNIST/NN.py | 0d863fa141e45e13fb6ed86d01364683bb8f86bf | [] | no_license | JensenQi/lab | 1f1974848a3d29dfd79b721bbb227141798fa085 | b21e7352443dac150745893b99676d6610c3e16f | refs/heads/master | 2021-06-19T23:49:49.257371 | 2017-08-02T17:00:47 | 2017-08-02T17:00:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 05 17:42:10 2014
@author: Administrator
"""
from numpy import *
from MNISTData import *
# 1-nearest-neighbour classifier over MNIST (Python 2 / numpy star import):
# for each of the 10000 test points, pick the label of the closest
# training point by squared Euclidean distance.
trainingData, trainingLabel, testData, testLabel = getData()
hit = 0
for sample in range(10000):
    # sample = random.randint(0,10000)
    testPoint = testData[sample,:]
    # dtype=int avoids uint8 wrap-around when subtracting pixel values
    # -- presumably the data is uint8; confirm in MNISTData.
    distance = subtract(trainingData, testPoint,dtype = int)
    distance = distance**2
    # ``sum`` here is numpy.sum (via ``from numpy import *``).
    distance = sum(distance, axis = 1)
    guest = trainingLabel[argmin(distance)]
    if guest == testLabel[sample]:
        hit += 1
    # else:
    # print "sample",sample,":real",testLabel[sample],"guest", guest
print "\ntest complete!"
print hit,"hit"
# 10000 samples, so hit/100.0 is already a percentage.
print "correct rate:", (hit/100.0)*100,"%"
| [
"SuiterChik@gmail.com"
] | SuiterChik@gmail.com |
734748a7d00403f32a4378d028e322462aeeabe3 | 09cead98874a64d55b9e5c84b369d3523c890442 | /py200421_python2/day14_py200606/tuple_1.py | c02f948ae526bf8546174abec4408a4458357833 | [] | no_license | edu-athensoft/stem1401python_student | f12b404d749286036a090e941c0268381ce558f8 | baad017d4cef2994855b008a756758d7b5e119ec | refs/heads/master | 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | """
tuple
read-only list
"""
# create a tuple
my_tuple1 = (1, 2, 3, 4, 5, 6, 7, 8)
print(my_tuple1)
my_tuple2 = ()
print(my_tuple2)
# create a tuple with only one element
my_tuple3 = (1)
print(my_tuple3)
my_tuple3 = ('abc')
print(my_tuple3)
my_tuple3 = 1
my_tuple3 = (1,)
print(my_tuple3)
# create nested tuple
my_tuple4 = (1, 2, 3)
print(my_tuple4)
my_tuple4 = (('a','b'), 2, ('c','d'))
print(my_tuple4)
my_tuple4 = (('a','b'), ('c','d'), ('c','d'))
print(my_tuple4)
# create mix tuple
my_tuple5 = (['a','b'], ('c','d'), ('c','d'))
my_tuple5 = (['a','b'], ([1,2],'d'), ('c','d'))
# compare
# student profile collection
# pre-set scouting path
a = [(), (), ()]
# saving-slot in a game
b = ([], [], [])
# create a tuple by auto-packing
my_tuple = 1,2,'a'
print(my_tuple, type(my_tuple))
# unpacking
x, y, z = my_tuple
print(x)
print(y)
print(z)
| [
"lada314@gmail.com"
] | lada314@gmail.com |
ca522f202ed49ef06a6e38688ef772194e821554 | 31f8ca4db5f4d1cdb31cdd9d7ec509e97ec4596a | /plgx-esp/polylogyx/utils.py | 3aaf97a6ae948efca093c194a7d8dd4a0e54a74f | [
"MIT"
] | permissive | preetpoly/plgx-esp | 076b13c1d9804b0a354d6304cb5945cbc4105d80 | ef03fbec2f875cc7a84db5eb2e0972c614747a9d | refs/heads/master | 2020-09-11T02:36:44.699716 | 2019-11-14T10:17:33 | 2019-11-14T10:17:33 | 221,913,093 | 0 | 0 | MIT | 2019-11-15T11:45:06 | 2019-11-15T11:45:06 | null | UTF-8 | Python | false | false | 22,746 | py | # -*- coding: utf-8 -*-
import ast
import datetime as dt
import json
import sqlite3
import string
import threading
import uuid
from collections import namedtuple
import jinja2
from jinja2 import Markup, Template
from operator import itemgetter
from os.path import basename, join, splitext
from flask_mail import Message, Mail
from sqlalchemy import func
from polylogyx.plugins import AbstractAlerterPlugin
from polylogyx.constants import DEFAULT_PLATFORMS, PolyLogyxServerDefaults, public_server
import pkg_resources
import requests
import six
from flask import current_app, flash, render_template
from polylogyx.database import db
from polylogyx.models import (
DistributedQuery, DistributedQueryTask,
Node, Pack, Query, ResultLog, querypacks,
Options, Tag, DefaultQuery, DefaultFilters, StatusLog)
# Lightweight record for one normalized osquery result row.
Field = namedtuple('Field', ['name', 'action', 'columns', 'timestamp','uuid'])
# Read DDL statements from our package
schema = pkg_resources.resource_string('polylogyx', join('resources', 'osquery_schema.sql'))
schema = schema.decode('utf-8')
# Keep one DDL statement per line, dropping SQL '--' comment lines.
schema = [x for x in schema.strip().split('\n') if not x.startswith('--')]
# SQLite in Python will complain if you try to use it from multiple threads.
# We create a threadlocal variable that contains the DB, lazily initialized.
osquery_mock_db = threading.local()
def assemble_configuration(node):
    """Build the full osquery config dict pushed to *node*.

    Starts from the stored per-platform default filters (unknown
    platforms fall back to 'linux') and layers on options, file paths,
    schedule and packs.
    """
    platform = node.platform
    if platform not in DEFAULT_PLATFORMS:
        platform = 'linux'
    platform_filter = DefaultFilters.query.filter(DefaultFilters.platform == platform).first()
    configuration = {}
    if platform_filter and platform_filter.filters:
        configuration = platform_filter.filters
    configuration['options'] = assemble_options(node)
    configuration['file_paths'] = assemble_file_paths(node)
    configuration['schedule'] = assemble_schedule(node)
    configuration['packs'] = assemble_packs(node)
    return configuration
def assemble_options(node):
    """Compute the osquery 'options' section for *node*.

    Hard defaults are merged with the admin-managed options row; the
    stored row wins on conflicts (second argument to merge_two_dicts).
    """
    options = {'disable_watchdog': True, 'logger_tls_compress': True}
    # https://github.com/facebook/osquery/issues/2048#issuecomment-219200524
    if current_app.config['POLYLOGYX_EXPECTS_UNIQUE_HOST_ID']:
        options['host_identifier'] = 'uuid'
    else:
        options['host_identifier'] = 'hostname'
    options['schedule_splay_percent'] = 10
    existing_option = Options.query.filter(Options.name == PolyLogyxServerDefaults.plgx_config_all_options).first()
    if existing_option:
        existing_option_value = json.loads(existing_option.option)
        options = merge_two_dicts(options, existing_option_value)
    return options
def assemble_file_paths(node):
    """Merge the node's configured file-path categories into one dict."""
    file_paths = {}
    for file_path in node.file_paths.options(db.lazyload('*')):
        file_paths.update(file_path.to_dict())
    return file_paths
def assemble_schedule(node):
    """Build the osquery 'schedule' section: the node's own queries plus
    the enabled platform-default queries (unknown platforms fall back
    to 'linux'); default queries override same-named node queries."""
    schedule = {}
    for query in node.queries.options(db.lazyload('*')):
        schedule[query.name] = query.to_dict()
    platform = node.platform
    if platform not in DEFAULT_PLATFORMS:
        platform = 'linux'
    for default_query in DefaultQuery.query.filter(DefaultQuery.status == True).filter(
            DefaultQuery.platform == platform).all():
        schedule[default_query.name] = default_query.to_dict()
    return schedule
def assemble_packs(node):
    """Build the osquery 'packs' section from the packs assigned to the
    node, eager-loading each pack's queries in the same join."""
    packs = {}
    for pack in node.packs.join(querypacks).join(Query) \
            .options(db.contains_eager(Pack.queries)).all():
        packs[pack.name] = pack.to_dict()
    return packs
def assemble_distributed_queries(node):
    '''
    Retrieve all distributed queries assigned to a particular node
    in the NEW state. This function will change the state of the
    distributed query to PENDING, however will not commit the change.
    It is the responsibility of the caller to commit or rollback on the
    current database session.
    '''
    now = dt.datetime.utcnow()
    pending_query_count = 0
    # First count the NEW high-priority tasks for this node ...
    query_recon_count = db.session.query(db.func.count(DistributedQueryTask.id)) \
        .filter(
        DistributedQueryTask.node == node,
        DistributedQueryTask.status == DistributedQueryTask.NEW,
        DistributedQueryTask.priority == DistributedQueryTask.HIGH,
    )
    for r in query_recon_count:
        pending_query_count = r[0]
    if pending_query_count > 0:
        # ... drain ALL eligible high-priority tasks in one batch ...
        query = db.session.query(DistributedQueryTask) \
            .join(DistributedQuery) \
            .filter(
            DistributedQueryTask.node == node,
            DistributedQueryTask.status == DistributedQueryTask.NEW,
            DistributedQuery.not_before < now,
            DistributedQueryTask.priority == DistributedQueryTask.HIGH,
        ).options(
            db.lazyload('*'),
            db.contains_eager(DistributedQueryTask.distributed_query)
        )
    else:
        # ... otherwise hand out low-priority tasks one at a time.
        query = db.session.query(DistributedQueryTask) \
            .join(DistributedQuery) \
            .filter(
            DistributedQueryTask.node == node,
            DistributedQueryTask.status == DistributedQueryTask.NEW,
            DistributedQuery.not_before < now,
            DistributedQueryTask.priority == DistributedQueryTask.LOW,
        ).options(
            db.lazyload('*'),
            db.contains_eager(DistributedQueryTask.distributed_query)
        ).limit(1)
    queries = {}
    for task in query:
        # Per-task SQL overrides the parent distributed query's SQL.
        if task.sql:
            queries[task.guid] = task.sql
        else:
            queries[task.guid] = task.distributed_query.sql
        task.update(status=DistributedQueryTask.PENDING,
                    timestamp=now,
                    commit=False)
        # add this query to the session, but don't commit until we're
        # as sure as we possibly can be that it's been received by the
        # osqueryd client. unfortunately, there are no guarantees though.
        db.session.add(task)
    return queries
def merge_two_dicts(x, y):
    """Shallow-merge *x* and *y* into a new dict (*y* wins on key
    conflicts); falsy/None inputs are treated as empty dicts and
    neither argument is mutated."""
    merged = dict(x or {})
    merged.update(y or {})
    return merged
def create_query_pack_from_upload(upload):
    '''
    Create a pack and queries from a query pack file. **Note**, if a
    pack already exists under the filename being uploaded, then any
    queries defined here will be added to the existing pack! However,
    if a query with a particular name already exists, and its sql is
    NOT the same, then a new query with the same name but different id
    will be created (as to avoid clobbering the existing query). If its
    sql is identical, then the query will be reused.
    '''
    # The json package on Python 3 expects a `str` input, so we're going to
    # read the body and possibly convert to the right type
    body = upload.data.read()
    if not isinstance(body, six.string_types):
        body = body.decode('utf-8')
    try:
        data = json.loads(body)
    except ValueError:
        flash(u"Could not load pack as JSON - ensure it is JSON encoded",
              'danger')
        return None
    else:
        if 'queries' not in data:
            flash(u"No queries in pack", 'danger')
            return None
        # The pack name is the uploaded file's basename without extension.
        name = splitext(basename(upload.data.filename))[0]
        pack = Pack.query.filter(Pack.name == name).first()
    if not pack:
        current_app.logger.debug("Creating pack %s", name)
        pack = Pack.create(name=name, **data)
    for query_name, query in data['queries'].items():
        # Reject the whole upload on the first invalid query.
        if not validate_osquery_query(query['query']):
            flash('Invalid osquery query: "{0}"'.format(query['query']), 'danger')
            return None
        q = Query.query.filter(Query.name == query_name).first()
        if not q:
            q = Query.create(name=query_name, **query)
            pack.queries.append(q)
            current_app.logger.debug("Adding new query %s to pack %s",
                                     q.name, pack.name)
            continue
        if q in pack.queries:
            continue
        if q.sql == query['query']:
            current_app.logger.debug("Adding existing query %s to pack %s",
                                     q.name, pack.name)
            pack.queries.append(q)
        else:
            # Same name, different SQL: create a sibling query row rather
            # than clobbering the existing one.
            q2 = Query.create(name=query_name, **query)
            current_app.logger.debug(
                "Created another query named %s, but different sql: %r vs %r",
                query_name, q2.sql.encode('utf-8'), q.sql.encode('utf-8'))
            pack.queries.append(q2)
    else:
        # for/else without break: always runs after the loop completes.
        pack.save()
        flash(u"Imported query pack {0}".format(pack.name), 'success')
    return pack
def get_node_health(node):
    """Return the CSS class u'danger' when the node has missed its
    check-in window, otherwise an empty string."""
    checkin_interval = current_app.config['POLYLOGYX_CHECKIN_INTERVAL']
    # A bare number in config means seconds.
    if isinstance(checkin_interval, (int, float)):
        checkin_interval = dt.timedelta(seconds=checkin_interval)
    overdue = (dt.datetime.utcnow() - node.last_checkin) > checkin_interval
    return u'danger' if overdue else ''
# Not super-happy that we're duplicating this both here and in the JS, but I
# couldn't think of a nice way to pass from JS --> Python (or the other
# direction).
# Maps querybuilder operator codes to display labels (kept in sync with
# the JS side by hand).
PRETTY_OPERATORS = {
    'equal': 'equals',
    'not_equal': "doesn't equal",
    'begins_with': 'begins with',
    'not_begins_with': "doesn't begins with",
    'contains': 'contains',
    'not_contains': "doesn't contain",
    'ends_with': 'ends with',
    'not_ends_with': "doesn't end with",
    'is_empty': 'is empty',
    'is_not_empty': 'is not empty',
    'less': 'less than',
    'less_or_equal': 'less than or equal',
    'greater': 'greater than',
    'greater_or_equal': 'greater than or equal',
    'matches_regex': 'matches regex',
    'not_matches_regex': "doesn't match regex",
}


def pretty_operator(cond):
    """Translate an operator code into display text, falling back to the
    raw code when unknown."""
    try:
        return PRETTY_OPERATORS[cond]
    except KeyError:
        return cond
# Maps rule-builder field names to display labels.
PRETTY_FIELDS = {
    'query_name': 'Query name',
    'action': 'Action',
    'host_identifier': 'Host identifier',
    'timestamp': 'Timestamp',
}


def pretty_field(field):
    """Translate a field name into display text, falling back to the raw
    name when unknown."""
    try:
        return PRETTY_FIELDS[field]
    except KeyError:
        return field
# Characters that must be \uXXXX-escaped when embedding strings in
# JavaScript (quotes, HTML-significant chars, and JS line separators).
_js_escapes = {
    '\\': '\\u005C',
    '\'': '\\u0027',
    '"': '\\u0022',
    '>': '\\u003E',
    '<': '\\u003C',
    '&': '\\u0026',
    '=': '\\u003D',
    '-': '\\u002D',
    ';': '\\u003B',
    u'\u2028': '\\u2028',
    u'\u2029': '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update(('%c' % z, '\\u%04X' % z) for z in range(32))
def jinja2_escapejs_filter(value):
    """Jinja filter: escape *value* for safe embedding in JavaScript,
    returning Markup; falsy values become a plain empty string."""
    if not value:
        return ''
    escaped = [_js_escapes.get(ch, ch) for ch in value]
    return jinja2.Markup("".join(escaped))
# Since 'string.printable' includes control characters
PRINTABLE = string.ascii_letters + string.digits + string.punctuation + ' '


def quote(s, quote='"'):
    """Return *s* wrapped in *quote* characters with C-style escaping:
    backslash-escape the quote char and backslash, \\n/\\r/\\t for the
    common controls, and \\x<hex> for anything non-printable."""
    out = [quote]
    for ch in s:
        if ch == quote or ch == '\\':
            out.append('\\')
            out.append(ch)
        elif ch == '\n':
            out.append('\\n')
        elif ch == '\r':
            out.append('\\r')
        elif ch == '\t':
            out.append('\\t')
        elif ch in PRINTABLE:
            out.append(ch)
        else:
            # Hex escape for everything else.
            out.append('\\x' + hex(ord(ch))[2:])
    out.append(quote)
    return ''.join(out)
def _carve(string):
return str(string).title()
def create_mock_db():
    """Build an in-memory SQLite DB mimicking the osquery schema.

    Registers a carve() stub, applies the packaged DDL plus any
    POLYLOGYX_EXTRA_SCHEMA, and caches each table's DDL into
    PolyLogyxServerDefaults.POLYLOGYX_OSQUERY_SCHEMA_JSON.  Used to
    syntax-check queries without a real osquery agent.
    """
    mock_db = sqlite3.connect(':memory:')
    mock_db.create_function("carve", 1, _carve)
    for ddl in schema:
        mock_db.execute(ddl)
    cursor = mock_db.cursor()
    cursor.execute("SELECT name,sql FROM sqlite_master WHERE type='table';")
    from polylogyx.constants import PolyLogyxServerDefaults
    extra_schema = current_app.config.get('POLYLOGYX_EXTRA_SCHEMA', [])
    for ddl in extra_schema:
        mock_db.execute(ddl)
    # NOTE(review): the SELECT above ran before the extra schema was
    # applied -- whether fetchall() reflects the extra tables depends on
    # sqlite3 cursor buffering; confirm if extra tables must be cached.
    for osquery_table in cursor.fetchall():
        PolyLogyxServerDefaults.POLYLOGYX_OSQUERY_SCHEMA_JSON[osquery_table[0]] = osquery_table[1]
    return mock_db
def create_tags(*tags):
    """Return Tag rows for every non-blank value in *tags*, creating the
    ones that don't exist yet and flashing a message listing the newly
    created tags."""
    values = []
    existing = []
    # create a set, because we haven't yet done our association_proxy in
    # sqlalchemy
    for value in (v.strip() for v in set(tags) if v.strip()):
        tag = Tag.query.filter(Tag.value == value).first()
        if not tag:
            values.append(Tag.create(value=value))
        else:
            existing.append(tag)
    else:
        # for/else without break: this branch always runs after the loop.
        if values:
            flash(u"Created tag{0} {1}".format(
                's' if len(values) > 1 else '',
                ', '.join(tag.value for tag in values)),
                'info')
    return values + existing
def validate_osquery_query(query):
    """Syntax-check *query* against the thread-local mock osquery schema;
    return True when SQLite accepts it, False (with a logged exception)
    otherwise."""
    # Check if this thread has an instance of the SQLite database
    db = getattr(osquery_mock_db, 'db', None)
    if db is None:
        db = create_mock_db()
        osquery_mock_db.db = db
    try:
        db.execute(query)
    except sqlite3.Error:
        current_app.logger.exception("Invalid query: %s", query)
        return False
    return True
def learn_from_result(result, node):
    """Fold interesting columns from a result batch back into the node's
    cached node_info (columns listed in POLYLOGYX_CAPTURE_NODE_INFO) and
    persist only when something actually changed."""
    if not result['data']:
        return
    # First element of each config entry is the column name to capture.
    capture_columns = set(
        map(itemgetter(0),
            current_app.config['POLYLOGYX_CAPTURE_NODE_INFO']
            )
    )
    if not capture_columns:
        return
    node_info = node.get('node_info', {})
    orig_node_info = node_info.copy()
    for _, action, columns, _, _, in extract_results(result):
        # only update columns common to both sets
        for column in capture_columns & set(columns):
            cvalue = node_info.get(column)  # current value
            value = columns.get(column)
            if action == 'removed' and (cvalue is None or cvalue != value):
                pass
            elif action == 'removed' and cvalue == value:
                node_info.pop(column)
            elif action == 'added' and (cvalue is None or cvalue != value):
                node_info[column] = value
    # only update node_info if there's actually a change
    if orig_node_info == node_info:
        return
    node = Node.get_by_id(node['id'])
    node.update(node_info=node_info)
    return
def process_result(result, node):
    """Convert a posted result batch into ResultLog rows for *node*.

    Rows carrying a non-empty 'subject_dn' column are diverted into a
    local list instead of being logged.
    NOTE(review): the subject_dn list is collected but never used or
    returned, so those rows are effectively dropped -- confirm intent.
    """
    if not result['data']:
        return
    result_logs=[]
    subject_dn = []
    for name, action, columns, timestamp,uuid in extract_results(result):
        if 'subject_dn' in columns and columns['subject_dn'] and columns['subject_dn'] != '':
            subject_dn.append(columns['subject_dn'])
        else:
            result_logs.append( ResultLog(name=name,uuid=uuid, action=action, columns=columns, timestamp=timestamp, node_id=node['id']))
    return result_logs
def extract_results(result):
    """
    extract_results will convert the incoming log data into a series of Fields,
    normalizing and/or aggregating both batch and event format into batch
    format, which is used throughout the rest of polylogyx.
    """
    if not result['data']:
        return
    timefmt = '%a %b %d %H:%M:%S %Y UTC'
    strptime = dt.datetime.strptime
    for entry in result['data']:
        # Ensure every entry carries a stable row identifier.
        if 'uuid' not in entry:
            entry['uuid']=str(uuid.uuid4())
        name = entry['name']
        timestamp = strptime(entry['calendarTime'], timefmt)
        if 'columns' in entry:
            # Event format: one row per entry with its own action.
            yield Field(name=name,
                        action=entry['action'],
                        columns=entry['columns'],
                        timestamp=timestamp,uuid=entry['uuid'])
        elif 'diffResults' in entry:
            # Batch (differential) format: expand added/removed lists.
            added = entry['diffResults']['added']
            removed = entry['diffResults']['removed']
            for (action, items) in (('added', added), ('removed', removed)):
                # items could be "", so we're still safe to iter over
                # and ensure we don't return an empty value for columns
                for columns in items:
                    yield Field(name=name,
                                action=action,
                                columns=columns,
                                timestamp=timestamp,uuid=entry['uuid'])
        elif 'snapshot' in entry:
            for columns in entry['snapshot']:
                yield Field(name=name,
                            action='snapshot',
                            columns=columns,
                            timestamp=timestamp,uuid=entry['uuid'])
        else:
            current_app.logger.error("Encountered a result entry that "
                                     "could not be processed! %s",
                                     json.dumps(entry))
class DateTimeEncoder(json.JSONEncoder):
    """JSONEncoder that serializes datetime objects as ISO-8601 strings."""

    def default(self, o):
        if not isinstance(o, dt.datetime):
            # Defer to the base class, which raises TypeError.
            return json.JSONEncoder.default(self, o)
        return o.isoformat()
def render_column(value, column):
    """Render *value* through the column-specific renderer configured in
    DOORMAN_COLUMN_RENDER (either a callable or a Jinja template
    string); on any failure the original value is returned unchanged.

    NOTE(review): the config key keeps the legacy DOORMAN_ prefix while
    the rest of this file uses POLYLOGYX_ -- confirm which is loaded.
    """
    renders = current_app.config.get('DOORMAN_COLUMN_RENDER', {})
    if column not in renders:
        return value
    template = renders[column]
    try:
        if callable(template):
            return template(value)
        else:
            template = Template(template, autoescape=True)
            rendered = template.render(value=value)
            # return a markup object so that the template where this is
            # rendered is not escaped again
            return Markup(rendered)
    except Exception:
        current_app.logger.exception(
            "Failed to render %s, returning original value",
            column
        )
        return value
class Serializer(object):
    """Helper for JSON-serializing objects that json can't handle natively."""

    @staticmethod
    def serialize(object):
        # BUG FIX: on Python 3, dict.values() returns a view that does not
        # support indexing, so the original ``o.__dict__.values()[0]``
        # raised TypeError for every non-serializable object.  Preserve the
        # Python 2 semantics (serialize the first attribute value) in a
        # py3-safe way.
        # NOTE(review): "first attribute" relies on insertion order --
        # confirm this fallback is actually intended.
        return json.dumps(object, default=lambda o: next(iter(o.__dict__.values())))
def check_and_save_intel_alert(scan_type, scan_value, data, source, severity):
    """Raise a threat-intel alert for every stored result row whose
    *scan_type* column equals *scan_value*, skipping rows that already
    have an alert from the same *source*."""
    from polylogyx.models import Alerts, db
    result_logs = ResultLog.query.filter(ResultLog.columns[scan_type].astext == scan_value).all()
    for result_log in result_logs:
        # Dedup: one alert per (source, result row).
        alert_count = db.session.query(func.count(Alerts.id)).filter(Alerts.source == source).filter(
            Alerts.result_log_uid == result_log.uuid).scalar()
        if alert_count == 0:
            save_intel_alert(data=data, source=source, severity=severity, query_name=result_log.name,
                             uuid=result_log.uuid, columns=result_log.columns, node_id=result_log.node_id)
def save_intel_alert(data, source, severity, query_name, uuid, columns, node_id):
    """Persist a threat-intel alert and dispatch it to the alerter plugins."""
    from polylogyx.models import Alerts
    alert = Alerts.create(message=columns, query_name=query_name, result_log_uid=uuid,
                          node_id=node_id,
                          rule_id=None, type=Alerts.THREAT_INTEL, source=source, source_data=data, recon_queries={},
                          severity=severity)
    from polylogyx.rules import IntelMatch
    intel = {'type': Alerts.THREAT_INTEL, 'source': source, 'severity': severity, 'query_name': query_name}
    json_data = ""
    if data:
        json_data = json.dumps(data)
    intel_match = IntelMatch(intel=intel,
                             result=columns, data=json_data, alert_id=alert.id,
                             node=node_id)
    # NOTE(review): assumes the Node row exists — ``.first()`` returning None
    # would raise AttributeError on ``.to_dict()``; confirm callers guarantee it.
    node = db.session.query(Node).filter(Node.id == node_id).first().to_dict()
    send_alert(node, None, intel_match)
def send_alert(node, rule_match, intel_match):
    """Instantiate every configured alerter plugin and hand it the alert.

    Plugins are configured as ``name -> (dotted.path.ClassName, config)`` in
    ``POLYLOGYX_ALERTER_PLUGINS``.  Each plugin class must subclass
    ``AbstractAlerterPlugin``.  Failures in individual alerters are logged
    and do not prevent the remaining alerters from running.
    """
    from polylogyx.models import current_app as app
    from importlib import import_module
    alerters = app.config.get('POLYLOGYX_ALERTER_PLUGINS', {})
    # Build the plugin instances in a *local* dict.  The previous code wrote
    # the instances back into ``alerters``, which is the same dict object
    # stored in ``app.config`` — that clobbered the (plugin, config) tuples
    # and made every subsequent call fail while unpacking them.
    instances = {}
    for name, (plugin, config) in alerters.items():
        package, classname = plugin.rsplit('.', 1)
        module = import_module(package)
        klass = getattr(module, classname, None)
        if klass is None:
            raise ValueError('Could not find a class named "{0}" in package "{1}"'.format(classname, package))
        if not issubclass(klass, AbstractAlerterPlugin):
            raise ValueError('{0} is not a subclass of AbstractAlerterPlugin'.format(name))
        instances[name] = klass(config)
    for name, alerter in instances.items():
        try:
            alerter.handle_alert(node, rule_match, intel_match)
        except Exception as e:
            current_app.logger.error(e)
def flatten_json(input):
    """Return a copy of *input* with any nested 'columns' mapping hoisted to
    the top level (the 'columns' key itself is removed)."""
    flattened = dict(input)
    if 'columns' in flattened:
        for column_name, column_value in flattened['columns'].items():
            flattened[column_name] = column_value
        flattened.pop('columns', None)
    return flattened
def append_node_information_to_result_log(node, input):
    """Return a copy of *input* enriched with host details taken from *node*.

    Lookup failures are tolerated: any exception is logged and the fields
    gathered up to that point are returned.
    """
    enriched = dict(input)
    try:
        enriched['platform'] = node['platform']
        enriched['last_checkin'] = node['last_checkin']
        enriched['is_active'] = node['is_active']
        enriched['last_ip'] = node['last_ip']
        if 'os_info' in node:
            operating_system = node['os_info']
            enriched['osname'] = operating_system.get("name", "")
            enriched['version'] = operating_system['version']
        if 'node_info' in node:
            host = node['node_info']
            enriched['computername'] = host['computer_name']
            enriched['hardware_model'] = host['hardware_model']
            enriched['hardware_vendor'] = host['hardware_vendor']
            enriched['cpu_physical_cores'] = host['cpu_physical_cores']
    except Exception as e:
        current_app.logger.error(e)
    return enriched
def append_node_and_rule_information_to_alert(node, input):
    """Return a copy of *input* enriched with node details relevant to an alert.

    Any missing key aborts the enrichment early (the exception is printed);
    fields gathered up to that point are still returned.
    """
    output = dict(input)
    try:
        # The original assigned 'platform' twice; the duplicate was removed.
        output['platform'] = node['platform']
        output['is_active'] = node['is_active']
        output['last_ip'] = node['last_ip']
        if 'os_info' in node:
            os_info = node['os_info']
            output['osname'] = os_info['name']
        if 'network_info' in node:
            network_info = node['network_info']
            output['macaddress'] = network_info['mac_address']
        if 'node_info' in node:
            node_info = node['node_info']
            output['computername'] = node_info['computer_name']
            output['hardware_model'] = node_info['hardware_model']
    except Exception as e:
        # TODO(review): sibling helpers log via current_app.logger; this one
        # prints — consider unifying once app-context availability is confirmed.
        print(e)
    return output
def extract_result_logs(result):
"""
extract_results will convert the incoming log data into a series of Fields,
normalizing and/or aggregating both batch and event format into batch
format, which is used throughout the rest of polylogyx.
"""
Field = namedtuple('Field', ['name', 'action', 'columns', 'timestamp', 'uuid','node_id'])
for data in result:
if not data.uuid:
data.uuid = str(uuid.uuid4())
yield Field(name=data.name,
action=data.action,
columns=data.columns,
timestamp=data.timestamp, uuid=data.uuid, node_id=data.node_id)
| [
"moulik@polylogyx.com"
] | moulik@polylogyx.com |
3348fdcc7921ae5ff3b1d096abd5c91d23e56d2c | 287d35c59665030109ad91c8872cbd356056810a | /models/soil/deliverables.py | a25627e4832fe0dfd26ad5ad70d096f359f8084f | [] | no_license | nicetech-creator/agritecture-v2 | d63573eb6e0e1abee8c157893c54c1f1ad243106 | 74f6f1bcc72dd48c67691ae7bb99dd34651660c4 | refs/heads/master | 2023-03-17T12:22:15.892900 | 2021-03-17T18:30:20 | 2021-03-17T18:30:20 | 348,546,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,464 | py | from connection import fetch_one, fetch_all
from cachetools.func import ttl_cache
from caching import hashable_cache
from util import parse_inputs, preload_outputs, quantize_outputs, set_if_unset
import math
from models.soil.input.land_status import Soil_Input_Land_Status
from models.soil.sizing.area import Soil_Sizing_Area
from models.soil.crop.characteristics import Soil_Crop_Characteristics
from models.soil.crop.crop import Soil_Crop
from models.soil.wastage.wastage import Soil_Wastage
from models.soil.opex.overview import Soil_Opex_Overview
from models.soil.capex.overview import Soil_Capex_Overview
from models.financials.interest import Financials_Interest
from models.financials.main import Financials_Main
from models.financials.employees import Financials_Employees
from models.financials.population_fed import Financials_Population_Fed
class Soil_Deliverables:
    """Top-level deliverables model for soil-based farm planning.

    Wires together the sizing, crop, wastage, capex/opex and financial
    sub-models: ``compute`` runs them in dependency order, stores each
    sub-model's raw output in the result dict under its name, and copies the
    final deliverable values out via ``set_if_unset``.
    """
    @staticmethod
    def info():
        """Return the model identifier and human-readable description."""
        return {
            "name": "soil_deliverables",
            "description": "Deliverables for Soil"
        }
    @staticmethod
    def input_type():
        """Return the schema of accepted inputs (name -> type/description/default)."""
        return {
            "latitude": {
                "description": "Geographical latitude",
                "type": "decimal"
            },
            "longitude": {
                "description": "Geographical longitude",
                "type": "decimal"
            },
            "land status": {
                "type": "choice",
                "description": "Selection land status",
                "default": 1
            },
            "land cost": {
                "description": "Land cost per area ($ / sqft)",
                "type": "decimal"
            },
            "owner is headgrower": {
                "description": "Whether owner counts as headgrower",
                "type": "bool",
                "default": True
            },
            "headGrowerSalary": {
                "description": "Salary for headgrower ($ / y)",
                "default": 60000,
                "type": "decimal"
            },
            "grower experience": {
                "type": "choice",
                "description": "Level of grower experience",
                "default": 1
            },
            "site area": {
                "description": "Total area to be allocated (sqft)",
                "type": "decimal"
            },
            "crops": {
                "type": "list",
                "description": "Selection of crops to compute",
                "element type": {
                    "id": {
                        "type": "choice",
                        "description": "Selected crop item",
                        "primary key": True
                    },
                    "system fraction": {
                        "description": "Fraction of bedspace allocated to crop",
                        "type": "decimal"
                    },
                    "sale unit id": {
                        "type": "choice",
                        "description": "Selected sale unit for crop item"
                    },
                    "price per unit": {
                        "description": "Sale price for crop per selected unit",
                        "type": "decimal"
                    }
                }
            },
            "organic production": {
                "description": "Whether the system will use organic production",
                "type": "bool",
                "default": False
            },
            "water cost": {
                "description": "Water cost ($ / gallon)",
                "default": 0.002,
                "type": "decimal"
            },
            "labor wages": {
                "description": "Labor wages ($ / h)",
                "default": 15.73,
                "type": "decimal"
            },
            "rent cost": {
                "description": "Rent cost per area ($ / sqft month)",
                "default": 12,
                "type": "decimal"
            },
            "tax rate": {
                "description": "Tax rate (%)",
                "default": 0.261,
                "type": "decimal"
            },
            "financing option": {
                "type": "choice",
                "description": "How capital expenses are financed",
                "default": 1
            },
            "interest rate": {
                "description": "Loan interest rate (% / y)",
                "default": 0.08,
                "type": "decimal"
            },
            "repayment time": {
                "description": "Repayment period for loan (y)",
                "default": 7,
                "type": "decimal"
            }
        }
    @staticmethod
    @hashable_cache(ttl_cache())
    def input_values(input_name):
        """Enumerate the allowed values for a choice-type input.

        Delegates to the owning sub-model where one exists; crop and sale-unit
        choices come straight from the database.
        """
        if input_name == "land status":
            return Soil_Input_Land_Status.input_values("land status")
        if input_name == "grower experience":
            return Soil_Wastage.input_values("grower experience")
        if input_name == "crops.id":
            return fetch_all("SELECT id, crop_type FROM crops ORDER BY id")
        if input_name == "crops.sale unit id":
            return fetch_all(
                "SELECT id, description FROM sale_units ORDER BY id")
        if input_name == "financing option":
            return [{'id': 1, 'description': 'Debt'}, {
                'id': 2, 'description': 'Equity'}, {'id': 3, 'description': 'Self-funded'}]
        raise ValueError("Unexpected input: " + str(input_name))
    @staticmethod
    def output_type():
        """Return the schema of produced outputs (name -> type/description/digits)."""
        return {
            "capex breakout": {
                "type": "list",
                "description": "Capex breakout",
                "element type": {
                    "category": {
                        "type": "text",
                        "description": "Category"
                    },
                    "total price": {
                        "description": "Total price ($)",
                        "digits": 2,
                        "type": "decimal"
                    },
                    "fraction": {
                        "description": "% of total price (%)",
                        "digits": 4,
                        "type": "decimal"
                    }
                }
            },
            "cogs & opex breakout": {
                "type": "list",
                "description": "COGS & Opex breakout",
                "element type": {
                    "category": {
                        "type": "text",
                        "description": "Category"
                    },
                    "total price": {
                        "description": "Total price ($)",
                        "digits": 2,
                        "type": "decimal"
                    },
                    "fraction": {
                        "description": "% of total price (%)",
                        "digits": 4,
                        "type": "decimal"
                    }
                }
            },
            "cogs breakout": {
                "type": "list",
                "description": "COGS breakout",
                "element type": {
                    "category": {
                        "type": "text",
                        "description": "Category"
                    },
                    "total price": {
                        "description": "Total price ($)",
                        "digits": 2,
                        "type": "decimal"
                    }
                }
            },
            "opex breakout": {
                "type": "list",
                "description": "Opex breakout",
                "element type": {
                    "category": {
                        "type": "text",
                        "description": "Category"
                    },
                    "total price": {
                        "description": "Total price ($)",
                        "digits": 2,
                        "type": "decimal"
                    }
                }
            },
            "max total yield": {
                "description": "Total Yield without Wastage (lbs)",
                "digits": 0,
                "type": "decimal"
            },
            "bedspace": {
                "description": "Total Bedspace needed (sqft)",
                "digits": 0,
                "type": "decimal"
            },
            "annual summary": {
                "type": "list",
                "description": "Annual summary",
                "element type": {
                    "year": {
                        "type": "integer",
                        "description": "Year"
                    },
                    "wastage": {
                        "description": "Wastage",
                        "digits": 4,
                        "type": "decimal"
                    },
                    "total yield": {
                        "description": "Total Yield (lbs)",
                        "digits": 0,
                        "type": "decimal"
                    },
                    "crop yield": {
                        "description": "Yield per each crop",
                        "type": "list",
                        "element type": {
                            "crop type": {
                                "type": "string",
                                "description": "crop name"
                            },
                            "yield" : {
                                "digits": 0,
                                "type": "decimal"
                            }
                        }
                    },
                    "waste-adjusted revenue": {
                        "description": "Waste-adjusted Revenue ($)",
                        "digits": 0,
                        "type": "decimal"
                    },
                    "cogs": {
                        "description": "COGS ($)",
                        "digits": 0,
                        "type": "decimal"
                    },
                    "opex": {
                        "description": "Opex ($)",
                        "digits": 0,
                        "type": "decimal"
                    },
                    "ebitda": {
                        "description": "EBITDA ($)",
                        "digits": 0,
                        "type": "decimal"
                    },
                    "depreciation & amortization": {
                        "description": "Depreciation & Amortization ($)",
                        "digits": 0,
                        "type": "decimal"
                    },
                    "interest payment": {
                        "description": "Interest Payment ($)",
                        "digits": 0,
                        "type": "decimal"
                    },
                    "taxes": {
                        "description": "Taxes ($)",
                        "digits": 0,
                        "type": "decimal"
                    },
                    "net profit": {
                        "description": "Net Profit ($)",
                        "digits": 0,
                        "type": "decimal"
                    }
                }
            },
            "cash flow": {
                "type": "list",
                "description": "Cash flow",
                "element type": {
                    "year": {
                        "type": "integer",
                        "description": "Year"
                    },
                    "net profit": {
                        "description": "Net Profit ($)",
                        "digits": 2,
                        "type": "decimal"
                    },
                    "d&a": {
                        "description": "Plus: D&A ($)",
                        "digits": 2,
                        "type": "decimal"
                    },
                    "net changes in working capital": {
                        "description": "Plus: Net Changes in Working Capital ($)",
                        "digits": 2,
                        "type": "decimal"
                    },
                    "free cash flow": {
                        "description": "Free Cash Flow ($)",
                        "digits": 2,
                        "type": "decimal"
                    },
                    "remaining balance": {
                        "description": "Remaining balance ($)",
                        "digits": 2,
                        "type": "decimal"
                    }
                }
            },
            "pv of cash flows": {
                "description": "PV of Cash Flows ($)",
                "digits": 2,
                "type": "decimal"
            },
            "payback period": {
                "description": "Payback period (y)",
                "digits": 2,
                "type": "decimal"
            },
            "financial summary": {
                "type": "list",
                "description": "Financial summary",
                "element type": {
                    "capex": {
                        "digits": 0,
                        "description": "Capex",
                        "type": "decimal"
                    },
                    "opex": {
                        "digits": 0,
                        "description": "Opex",
                        "type": "decimal"
                    },
                    "cogs": {
                        "digits": 0,
                        "description": "COGS",
                        "type": "decimal"
                    },
                    "opex + cogs": {
                        "digits": 0,
                        "description": "Opex + COGS",
                        "type": "decimal"
                    },
                    "max annual revenue": {
                        "digits": 0,
                        "description": "Max. Annual Revenue",
                        "type": "decimal"
                    },
                    "payback period": {
                        "digits": 2,
                        "description": "Payback Period (in years)",
                        "type": "decimal"
                    }
                }
            },
            "operating summary": {
                "type": "list",
                "description": "10-year operating summary",
                "element type": {
                    "year": {
                        "type": "integer",
                        "description": "Year"
                    },
                    "wastage": {
                        "digits": 4,
                        "description": "Wastage",
                        "type": "decimal"
                    },
                    "waste-adjusted revenue": {
                        "digits": 0,
                        "description": "Wastage-adjusted Revenue",
                        "type": "decimal"
                    },
                    "ebitda": {
                        "digits": 0,
                        "description": "EBITDA",
                        "type": "decimal"
                    },
                    "ebitda margin": {
                        "digits": 4,
                        "description": "EBITDA Margin",
                        "type": "decimal"
                    },
                    "net profit": {
                        "digits": 0,
                        "description": "Net Profit",
                        "type": "decimal"
                    },
                    "net margin": {
                        "digits": 4,
                        "description": "Net Margin",
                        "type": "decimal"
                    }
                }
            },
            "number of people employed": {
                "description": "Estimated number of people employed",
                "type": "text"
            },
            "potential population fed": {
                "description": "Estimated number of people fed",
                "digits": 0,
                "type": "decimal"
            }
        }
    @staticmethod
    @hashable_cache(ttl_cache())
    def compute(args, quantize_output=False):
        """Run the full soil-deliverables pipeline.

        Parses *args* against ``input_type``, runs each sub-model in
        dependency order (sizing -> crop -> wastage -> capex -> opex ->
        financing -> financials), stores every sub-model's raw result in
        ``result`` under the sub-model's name, then copies the deliverable
        values out with ``set_if_unset`` (preloaded outputs win).  When
        *quantize_output* is set, decimal outputs are rounded per the schema.
        """
        result = dict()
        inputs = parse_inputs(Soil_Deliverables.input_type(), args)
        output_type = Soil_Deliverables.output_type()
        preload_result = preload_outputs(output_type, inputs)
        for key in preload_result:
            result[key] = preload_result[key]
        # NOTE(review): prints each sub-model's result on every compute;
        # consider switching to a logger for production use.
        def print_debug(model_name):
            print(model_name, result[model_name])
        # First, translate inputs into land status
        args_soil_input_land_status = dict()
        args_soil_input_land_status["land status"] = inputs["land status"]
        result["soil_input_land_status"] = Soil_Input_Land_Status.compute(
            args_soil_input_land_status)
        print_debug('soil_input_land_status')
        # Compute sizes for models
        args_soil_sizing_area = dict()
        args_soil_sizing_area["area"] = inputs["site area"]
        result["soil_sizing_area"] = Soil_Sizing_Area.compute(
            args_soil_sizing_area)
        print_debug('soil_sizing_area')
        # Compute crop characteristics
        args_soil_crop_characteristics = dict()
        args_soil_crop_characteristics["crops"] = inputs["crops"]
        result["soil_crop_characteristics"] = Soil_Crop_Characteristics.compute(
            args_soil_crop_characteristics)
        print_debug('soil_crop_characteristics')
        # Compute crop
        args_soil_crop = dict()
        args_soil_crop["latitude"] = inputs["latitude"]
        args_soil_crop["longitude"] = inputs["longitude"]
        args_soil_crop["bedspace"] = result["soil_sizing_area"]["main bedspace"]
        args_soil_crop["crops"] = inputs["crops"]
        result["soil_crop"] = Soil_Crop.compute(args_soil_crop)
        print_debug('soil_crop')
        set_if_unset(
            result,
            "max total yield",
            result["soil_crop"]["total yield"])
        set_if_unset(
            result,
            "bedspace",
            result["soil_sizing_area"]["main bedspace"])
        # Compute wastage projection based on inputs and selected crops
        args_soil_wastage = dict()
        args_soil_wastage["grower experience"] = inputs["grower experience"]
        args_soil_wastage["organic production"] = inputs["organic production"]
        args_soil_wastage["crop base difficulty"] = result["soil_crop_characteristics"]["combined difficulty factor"]
        args_soil_wastage["number of crops"] = len(inputs["crops"])
        result["soil_wastage"] = Soil_Wastage.compute(args_soil_wastage)
        print_debug('soil_wastage')
        # Compute capital expenditures
        args_soil_capex_overview = dict()
        args_soil_capex_overview["land area"] = inputs["site area"]
        # 128 plant sites per nursery tray, rounded up.
        args_soil_capex_overview["nursery trays"] = math.ceil(
            result["soil_crop"]["total plant sites"] / 128)
        # Real estate is a capital cost only when the land is bought.
        args_soil_capex_overview["real estate cost"] = inputs["site area"] * \
            inputs["land cost"] if result["soil_input_land_status"]["is buy"] else 0
        args_soil_capex_overview["seeding equipment cost"] = 1500 if result["soil_crop"]["total plant sites"] > 4000 else 0
        result["soil_capex_overview"] = Soil_Capex_Overview.compute(
            args_soil_capex_overview)
        print_debug('soil_capex_overview')
        # Look up a capex line item's cost by description prefix (None if absent).
        def get_capex_cost(description):
            for item in result['soil_capex_overview']['line items']:
                if item['description'].startswith(description):
                    return item['cost']
            return None
        # Compute operating expenses
        args_soil_opex_overview = dict()
        args_soil_opex_overview["bedspace"] = result["soil_sizing_area"]["main bedspace"]
        # Rent applies only when the land status is "rent".
        args_soil_opex_overview["rent area"] = inputs["site area"] if result["soil_input_land_status"]["is rent"] else 0
        args_soil_opex_overview["annual number of plant sites"] = result["soil_crop"]["total plant sites"]
        args_soil_opex_overview["water used"] = result["soil_crop"]["total water use"]
        args_soil_opex_overview["cold storage capex"] = get_capex_cost(
            'Cold Storage')
        args_soil_opex_overview["tools capex"] = get_capex_cost('Tools')
        args_soil_opex_overview["transportation capex"] = get_capex_cost(
            'Transportation')
        args_soil_opex_overview["harvest capex"] = get_capex_cost('Harvesting')
        args_soil_opex_overview["water cost"] = inputs["water cost"]
        args_soil_opex_overview["labor wages"] = inputs["labor wages"]
        args_soil_opex_overview["rent cost"] = 12 * inputs["rent cost"]
        args_soil_opex_overview["owner is headgrower"] = inputs["owner is headgrower"]
        args_soil_opex_overview["headgrower salary"] = inputs["headGrowerSalary"]
        args_soil_opex_overview["packaging type"] = 1
        # First-year revenue: base revenue reduced by initial wastage.
        args_soil_opex_overview["revenue"] = result["soil_crop"]["total revenue"] * (
            1 - result["soil_wastage"]["initial wastage"])
        args_soil_opex_overview["depreciation"] = result["soil_capex_overview"]["total depreciation"]
        result["soil_opex_overview"] = Soil_Opex_Overview.compute(
            args_soil_opex_overview)
        print_debug('soil_opex_overview')
        # Compute repayment schedule for debt
        # Financing amount is 0 if funding option is not debt
        args_financials_interest = dict()
        args_financials_interest["amount"] = result["soil_capex_overview"]["total cost"] if inputs["financing option"] == 1 else 0
        args_financials_interest["interest rate"] = inputs["interest rate"]
        args_financials_interest["repayment time"] = inputs["repayment time"]
        args_financials_interest["payments per year"] = 4
        result["financials_interest"] = Financials_Interest.compute(
            args_financials_interest)
        print_debug('financials_interest')
        # Combine outputs into financial model
        args_financials_main = dict()
        args_financials_main["capex line items"] = result["soil_capex_overview"]["line items"]
        args_financials_main["opex line items"] = result["soil_opex_overview"]["line items"]
        args_financials_main["wastage schedule"] = result["soil_wastage"]["wastage schedule"]
        args_financials_main["crops"] = result["soil_crop"]["crops"]
        args_financials_main["base yield"] = result["soil_crop"]["total yield"]
        args_financials_main["base revenue"] = result["soil_crop"]["total revenue"]
        args_financials_main["depreciation"] = result["soil_capex_overview"]["total depreciation"]
        args_financials_main["interest repayment schedule"] = result["financials_interest"]["repayment schedule"]
        args_financials_main["tax rate"] = inputs["tax rate"]
        result["financials_main"] = Financials_Main.compute(
            args_financials_main)
        print_debug('financials_main')
        # Output number of employees
        args_financials_employees = dict()
        args_financials_employees["total staff"] = result["soil_opex_overview"]["total staff"]
        result["financials_employees"] = Financials_Employees.compute(
            args_financials_employees)
        print_debug('financials_employees')
        # Output population fed
        args_financials_population_fed = dict()
        args_financials_population_fed["total yield"] = result["soil_crop"]["total yield"]
        result["financials_population_fed"] = Financials_Population_Fed.compute(
            args_financials_population_fed)
        print_debug('financials_population_fed')
        # Output financial model results
        set_if_unset(
            result,
            "number of people employed",
            result["financials_employees"]["number of people employed"])
        set_if_unset(
            result,
            "potential population fed",
            result["financials_population_fed"]["potential population fed"])
        set_if_unset(
            result,
            "capex breakout",
            result["financials_main"]["capex breakout"])
        set_if_unset(
            result,
            "cogs & opex breakout",
            result["financials_main"]["cogs & opex breakout"])
        set_if_unset(
            result,
            "cogs breakout",
            result["financials_main"]["cogs breakout"])
        set_if_unset(
            result,
            "opex breakout",
            result["financials_main"]["opex breakout"])
        set_if_unset(
            result,
            "annual summary",
            result["financials_main"]["annual summary"])
        set_if_unset(
            result,
            "cash flow",
            result["financials_main"]["cash flow"])
        set_if_unset(
            result,
            "pv of cash flows",
            result["financials_main"]["pv of cash flows"])
        set_if_unset(
            result,
            "payback period",
            result["financials_main"]["payback period"])
        set_if_unset(
            result,
            "financial summary",
            result["financials_main"]["financial summary"])
        set_if_unset(
            result,
            "operating summary",
            result["financials_main"]["operating summary"])
        if quantize_output:
            quantize_outputs(output_type, result)
        return result
"toptech20190609@gmail.com"
] | toptech20190609@gmail.com |
f4f14f8d9e87b48ae2a849923822030a6333cbdc | 9e4e829db6d4197764c9e71f3ca65dacc5792c8f | /sudoku.py | 648081e7c55d3995a15842f3214fda4d0cf26907 | [] | no_license | en-chang/sudoku | c48d6f66d04bf0426d84d41c2ab7967d76764c31 | 1e0e46421a2cfb7f9e289aed5e8f72e6e6687d46 | refs/heads/master | 2022-10-23T09:11:39.163457 | 2020-06-15T19:05:00 | 2020-06-15T19:05:00 | 266,910,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,712 | py | import requests
import json
import time
import pprint
# Delay to make display slower
def delay():
    """Sleep briefly (~0.5 s) so the solved board is revealed with a visible pause."""
    time.sleep(0.475)
# Ask difficulty
def askDiff():
    """Prompt until the player enters a valid difficulty; return its level code.

    Returns 1 for easy, 2 for medium, 3 for hard.
    """
    levels = {'easy': 1, 'medium': 2, 'hard': 3}
    while True:
        choice = input('What difficulty level would you like?\nEasy, Medium, or Hard?\n').lower()
        if choice in levels:
            return levels[choice]
        print('Sorry, please enter a valid difficulty level.')
# Calls a sudoku API to generate a sudoku board based on a difficulty level
# Source: "http://www.cs.utep.edu/cheon/ws/sudoku/"
def createBoard(difficulty):
    """Fetch a fresh 9x9 puzzle from the UTEP sudoku web service.

    *difficulty* is 1-3 (easy-hard).  Returns the board as a list of nine
    row lists where 0 marks an empty cell.
    """
    response = requests.get(f"http://www.cs.utep.edu/cheon/ws/sudoku/new/?size=9&level={difficulty}")
    data = response.json()
    data = data['squares']
    # 'squares' lists the pre-filled cells as {x: column, y: row, value: digit}.
    board = [[0] * 9 for row in range(9)]
    for point in range(len(data)):
        col = data[point]['x']
        row = data[point]['y']
        val = data[point]['value']
        board[row][col] = val
    return board
# Solves sudoku boards using backtracking
def sudokuSolver(board, row, col):
    """Solve *board* in place by backtracking from cell (row, col).

    On success the solved grid is published in the module-level global
    ``completeBoard`` and True is returned; False propagates a dead end back
    up the recursion so the caller tries its next guess.
    """
    if sudokuComplete(board) is True:
        global completeBoard
        completeBoard = board
        return True
    if board[row][col] == 0:
        # Find nums that are potential guesses in a row
        usedNums = [num for num in board[row] if num in range(1, 10)]
        possibNums = [num for num in range(1, 10) if num not in usedNums]
        for guess in possibNums:
            board[row][col] = guess
            # Print statements to show the process
            '''
            print('row, col', (row, col))
            print('guess', guess)
            printBoard(board)
            print()
            '''
            if sudokuChecker(board) is True:
                # Move to the next row once this row is filled, else next column.
                if rowComplete(board[row]) is True:
                    if sudokuSolver(board, row + 1, 0) is True:
                        return True
                elif sudokuSolver(board, row, col + 1) is True:
                    return True
                board[row][col] = 0
                continue
            # Backtrack if all guesses are wrong
            if guess == possibNums[-1]:
                board[row][col] = 0
                return False
        # Backtracking multiple steps
        if board[row][col] == 0:
            return False
    # Continue to next col if space is filled
    if sudokuSolver(board, row, col + 1) is True:
        return True
# Checks if the board is valid
def sudokuChecker(board):
    """Return True when no row, column or 3x3 sub-square contains a duplicate
    non-zero value; False otherwise."""
    for check in (rowChecker, colChecker, subSquareChecker):
        if check(board) is False:
            return False
    return True
# Helper function to sudokuChecker and sudokuComplete
def rowChecker(board):
    """Return False when any row holds a duplicate non-zero value; falls
    through (None) when every row is valid — callers test ``is False``."""
    for current_row in board:
        # 0 marks an empty cell and is allowed to repeat.
        filled = [cell for cell in current_row if cell != 0]
        if dupChecker(filled) is False:
            return False
# Helper function to sudokuChecker
def colChecker(board):
    """Return False when any column holds a duplicate non-zero value; falls
    through (None) when every column is valid — callers test ``is False``."""
    for i in range(len(board)):
        # Gather column i top-to-bottom, then drop the empty (0) cells.
        column = [board[j][i] for j in range(len(board[i]))]
        filled = [cell for cell in column if cell != 0]
        if dupChecker(filled) is False:
            return False
# Helper function to sudokuChecker
def subSquareChecker(board):
    """Return True when no 3x3 sub-square holds a duplicate non-zero value,
    False otherwise."""
    # Sub-squares start at rows/columns 0, 3 and 6.
    for top in range(0, len(board), 3):
        for left in range(0, len(board[top]), 3):
            cells = []
            for row_offset in range(3):
                cells.extend(board[top + row_offset][left:left + 3])
            # 0 marks an empty cell and is allowed to repeat.
            filled = [cell for cell in cells if cell != 0]
            if dupChecker(filled) is False:
                return False
    return True
# Helper function to row/col/subSquareChecker
# Checks for duplicates in a row, col, or subSquare
def dupChecker(rowOrCol):
    """Return False when *rowOrCol* contains duplicate entries; falls through
    (None) when all entries are unique — callers test ``is False``."""
    unique = set(rowOrCol)
    if len(unique) != len(rowOrCol):
        return False
# Helper function to sudokuSolver
def sudokuComplete(board):
    """Return True when the board has no empty (0) cells left."""
    # Inlines rowComplete: a row is complete when it contains no 0.
    return all(0 not in row for row in board)
# Heper function to sudokuComplete
def rowComplete(row):
    """Return True when the row contains no empty (0) cells."""
    return 0 not in row
def printBoard(board):
    """Pretty-print the board, one nested row list per line."""
    pprint.PrettyPrinter().pprint(board)
# Sample board for testing
'''
testBoard = [
[4, 0, 0, 0, 0, 0, 1, 0, 7],
[2, 1, 0, 5, 0, 7, 0, 9, 0],
[7, 0, 3, 0, 0, 0, 2, 0, 4],
[0, 0, 0, 3, 1, 0, 0, 0, 9],
[6, 0, 0, 0, 5, 0, 0, 0, 0],
[5, 3, 1, 0, 0, 9, 0, 0, 0],
[0, 7, 0, 1, 0, 0, 0, 2, 8],
[0, 0, 0, 0, 0, 4, 0, 6, 3],
[0, 6, 0, 0, 0, 0, 0, 0, 0]
]
'''
def main():
    """Drive one round: ask for a difficulty, fetch a puzzle, solve and show it."""
    difficulty = askDiff()
    board = createBoard(difficulty)
    # Map the numeric difficulty back to a label for display.
    if difficulty == 1:
        level = 'Easy'
    elif difficulty == 2:
        level = 'Medium'
    elif difficulty == 3:
        level = 'Hard'
    print()
    print(f'Board ({level}):')
    printBoard(board)
    print()
    # sudokuSolver publishes its solution in the module-level completeBoard.
    global completeBoard
    sudokuSolver(board, 0, 0)
    delay()
    print('Solution:')
    printBoard(completeBoard)
if __name__ == "__main__":
    main()
| [
"enchangcsprof@gmail.com"
] | enchangcsprof@gmail.com |
fb9400b52436488379a3045805c0f588682f0306 | 878c3b571e0f2ccf9688a3d5aefa443312c706fe | /bd_api/bd_api/users/views.py | 09a7d1d49a2f69d8c8a959eaecb0a5b1f728d1f0 | [
"MIT"
] | permissive | jorimyk/body_dimensions | 785870a092856979d6059991a43462cb6571d81e | 6787570661d6dd032ade5d0c737a7d07ed3ec14a | refs/heads/master | 2021-04-30T16:26:29.665121 | 2019-02-20T19:43:38 | 2019-02-20T19:43:38 | 80,038,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,154 | py | from flask import request, jsonify, make_response
from bd_api import app, db, limiter, auth, CORS
from . models import User, Role
from bd_api.users.measurements.models import Measurement
from bd_api import Config
from bd_api.utils import CommonUtils, UserUtils
from bd_api.auth import Password, Token
# Create the default admin account on first run (when the users table is empty).
if not User.query.all():
    admin = User(username='admin', email=Config.admin_email , password=Password.hashPassword(Config.admin_password), role=Role.ADMIN, public=False)
    db.session.add(admin)
    db.session.commit()
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate via HTTP basic auth or form fields and issue a token.

    On success returns a JSON body with a signed token and a
    Content-Location header pointing at the user resource; 400 when no
    credentials are supplied, 401 when they are invalid.
    """
    if request.authorization:
        auth = request.authorization
    # Use .get() so a missing form field falls through to the JSON 400 below
    # instead of raising BadRequestKeyError (which returned an HTML 400).
    elif request.form.get('username') and request.form.get('password'):
        auth = {'username': request.form['username'], 'password': request.form['password']}
    else:
        return jsonify(error='username/password required'), 400
    q = User.query.filter_by(username=auth.get('username')).first()
    if q and Password.verifyPassword(auth.get('username'), auth.get('password')):
        response = jsonify({'token': Token.generateToken(q.id, q.username, q.role, Config.expiration).decode('ascii')})
        response.headers['Content-Location'] = '/users/%s' % str(q.id)
        response.headers['Access-Control-Expose-Headers'] = 'Content-Location'
        return response
    else:
        return jsonify(error='Invalid username/password'), 401
@app.route('/users', methods = ['POST', 'GET', 'PUT', 'DELETE']) # /users
def users():
    """Collection endpoint: create (POST), list (GET) or purge (DELETE) users.

    Authentication is optional: anonymous callers act with the plain USER
    role; authenticated callers present their token in the basic-auth
    username field, and it is decoded into an (id, role, username) tuple.
    """
    headers = request.headers
    auth = request.authorization
    if not auth:
        # Anonymous caller: placeholder (id, role, username) tuple.
        user = (None, Role.USER, None)
    elif 'error' in Token.verifyToken(auth.get('username')):
        return jsonify(Token.verifyToken(auth.get('username'))), 401
    else:
        user = Token.verifyToken(auth.get('username'))
    # Add a new user
    if request.method == 'POST':
        d = request.get_json(silent=True)
        return addNewUser(headers, d)
    # Read users
    elif request.method == 'GET':
        return getAllUsers(user)
    # Delete all users
    elif request.method == 'DELETE':
        return deleteUsers(user)
    # Return 405 if method not POST, GET or DELETE
    else:
        return jsonify(error='HTTP method %s not allowed' % request.method), 405
@app.route('/users/<int:userId>', methods = ['GET', 'PUT', 'DELETE']) # /users/<user>
def user(userId):
    """Single-user endpoint: read (GET), update (PUT) or delete (DELETE).

    Authentication is optional for GET on public profiles; otherwise the
    token in the basic-auth username field is decoded into an
    (id, role, username) tuple and authorization is enforced downstream.
    """
    headers = request.headers
    auth = request.authorization
    if not auth:
        # Anonymous caller: placeholder (id, role, username) tuple.
        user = (None, Role.USER, None)
    elif 'error' in Token.verifyToken(auth.get('username')):
        return jsonify(Token.verifyToken(auth.get('username'))), 401
    else:
        user = Token.verifyToken(auth.get('username'))
    # Read user
    if request.method == 'GET':
        return getUser(userId, user)
    # Update user if owner or admin
    elif request.method == 'PUT':
        d = request.get_json(silent=True)
        return updateUser(headers, userId, user, d)
    # Delete user and all measurements for user if owner or admin
    elif request.method == 'DELETE':
        return deleteUser(userId, user)
    # Return 405 if method not GET, PUT or DELETE
    else:
        return jsonify(error='HTTP method %s not allowed' % request.method), 405
def addNewUser(headers, d):
    """Add new user to database if valid username/password and if optional values are valid in d.

    Requires a JSON body with at least username, password and email; field
    values are checked by UserUtils.validate_user_values.  On success returns
    201 with the serialized user plus a fresh token (so the new user is
    immediately logged in) and a Content-Location header.
    """
    if not 'Content-Type' in headers or not 'application/json' in headers.get('Content-Type'):
        return jsonify(error='Content Type must be application/json'), 400
    elif not d:
        return jsonify(error='no JSON in request'), 400
    elif not 'username' in d or not 'password' in d or not 'email' in d:
        return jsonify(error='username, password and email required'), 400
    elif UserUtils.validate_user_values(d):
        # validate_user_values returns a truthy error payload on failure.
        return jsonify(UserUtils.validate_user_values(d)), 400
    else:
        q = User( \
            firstName = d.get('firstName'), \
            lastName = d.get('lastName'), \
            email = d.get('email'), \
            gender = d.get('gender'), \
            dateOfBirth = CommonUtils.convertFromISODate(d.get('dateOfBirth')), \
            username = d.get('username'), \
            password = Password.hashPassword(d.get('password')), \
            public = d.get('public'))
        db.session.add(q)
        db.session.commit()
        user = q.serialize
        user['token'] = Token.generateToken(user['id'], d.get('username'), 'user', Config.expiration).decode('ascii')
        response = jsonify(user)
        response.status_code = 201
        response.headers['Content-Location'] = '/users/' + str(user['id'])
        response.headers['Access-Control-Expose-Headers'] = 'Content-Location'
        return response
def getAllUsers(user):
    """Return all users from database.

    Admins see everyone; other callers see themselves plus public profiles.
    Each serialized user is annotated with its measurement count.
    """
    if user[1] == Role.ADMIN:
        q = User.query.all()
    else:
        q = User.query.filter((User.id == user[0]) | (User.public == True)).all()
    if q:
        q = [i.serialize for i in q]
        for index in q:
            index['measurements'] = CommonUtils.countNumberOfRows(index['id'])
        return jsonify(q)
    else:
        # No visible users: 204 No Content.
        return ('', 204)
def deleteUsers(user):
    """Delete all measurements and users with role user.

    Admin-only; admin accounts themselves are preserved.  Returns the
    deletion counts, or 403 for non-admin callers.
    """
    if user[1] == Role.ADMIN:
        measurements_deleted = Measurement.query.delete()
        users_deleted = User.query.filter_by(role=Role.USER).delete()
        db.session.commit()
        return jsonify(usersDeleted=users_deleted, measurementsDeleted=measurements_deleted)
    else:
        return jsonify(error='Unauthorized'), 403
def getUser(userId, user):
    """Query user based on userId.

    Public profiles are visible to anyone; private ones only to their owner
    or an admin (401 for anonymous callers, 403 for other users).  The
    serialized user is annotated with its measurement count.
    """
    q = User.query.filter_by(id = userId).first()
    if not q:
        return jsonify(error='User not found', userId=userId), 404
    elif not user[0] and not q.public:
        return jsonify(error='Authentication required'), 401
    elif user[0] != userId and user[1] != Role.ADMIN and not q.public:
        return jsonify(error='Unauthorized'), 403
    else:
        q = q.serialize
        q['measurements'] = CommonUtils.countNumberOfRows(userId)
        return jsonify(q)
def updateUser(headers, userId, user, d):
"""Update user details if valid keys/values in d"""
if user[0] != userId and user[1] != Role.ADMIN:
return jsonify(error='Unauthorized'), 403
else:
q = User.query.filter_by(id = userId).first()
if not q:
return jsonify(error='User not found', userId=userId), 404
if not 'Content-Type' in headers or not 'application/json' in headers.get('Content-Type'):
return jsonify(error='Content Type must be application/json'), 400
if not d:
return jsonify(error='no JSON in request'), 400
if not any(key in d for key in User.user_keys):
return jsonify(error='No valid keys'), 400
if UserUtils.validate_user_values(d):
return jsonify(UserUtils.validate_user_values(d)), 400
if d.get('firstName'):
q.firstName = d.get('firstName')
if d.get('lastName'):
q.lastName = d.get('lastName')
if d.get('gender'):
q.gender = d.get('gender')
if d.get('dateOfBirth'):
q.dateOfBirth = CommonUtils.convertFromISODate(d.get('dateOfBirth'))
if d.get('username'):
q.username = d.get('username')
if d.get('password'):
q.password = Password.hashPassword(d.get('password'))
if d.get('public'):
q.public = d.get('public')
db.session.add(q)
db.session.commit()
user = q.serialize
return jsonify(user), 200
def deleteUser(userId, user):
"""Delete user from database based on userId"""
if user[0] != userId and user[1] != Role.ADMIN:
return jsonify(error='Unauthorized'), 403
else:
q = User.query.filter_by(id = userId).first()
if not q:
return jsonify(error='User not found', userId=userId), 404
else:
measurements_deleted = Measurement.query.filter_by(owner_id = userId).delete()
db.session.delete(q)
db.session.commit()
return jsonify(result='user removed', userId=userId, username=user[2], measurementsDeleted=measurements_deleted)
| [
"jori.mykkanen@gmail.com"
] | jori.mykkanen@gmail.com |
bd5c3dd7bb88057f50d2e513ec9577f5ae4516df | e465ec4e9af2358ed8a63b555b03d24a30a68790 | /study_python/day5/scratch.py | d7d052c2111034f226472fc75b1d084dffe5054f | [] | no_license | veritakim/study_python | 40930442130d78190977157229bef9e03f6303fe | 8c25ec8716fc16223473f53e87d29f102fc80adb | refs/heads/main | 2023-02-27T16:40:36.189980 | 2021-02-05T11:42:56 | 2021-02-05T11:42:56 | 330,602,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | # file_read 에서 sell.txt를 프린트 했을 때
'''
1일 : 얼마
2일: 얼마
중간에 엔터가 하나 더 쳐진다. 왜냐하면 print 자체가 엔터의 기능이 있는데 텍스트에도 엔터가 들어가서 두 번의 엔터 기능이 됐다
\n
'''
# .strip() : 화이트 스페이스. 글자들 사이 이외의 빈 공백들을 처리
with open('data/sell.txt', 'r', encoding='UTF-8') as f:
for line in f:
print(line.strip())
| [
"kjs3597@gmail.com"
] | kjs3597@gmail.com |
13bedc0acf5fbc26a65317a03374a0f4a5d2083f | 7f4ba7877def60e9fd9f60794633b3893f36599b | /aleatorio.py | 6894515a4d1320e1b6eeefe4bdb1aff9e1cc89d8 | [] | no_license | cgonzalesmo/PS_LAB09 | c33cc3b43fdf4aa9dfd2ab7ffa2f3f67d15aec93 | 6843768543ac206cfe99743149af7de34816b4a1 | refs/heads/master | 2022-11-20T08:35:06.247181 | 2020-07-19T22:11:26 | 2020-07-19T22:11:26 | 280,283,084 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | import random as r
import os
directorio = './files/aleatorio/'
if not os.path.exists(directorio):
os.makedirs(directorio)
contador = 50
letras = 'abcdefghijklmnopqrstuvwxyz123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
for i in range(contador):
tamano = r.randint(1000000,10000000)
archivo = open(directorio + 'archivo' + str(i), 'w+')
texto = ""
for j in range(tamano):
texto = texto + r.choice(letras)
archivo.write(texto)
archivo.close() | [
"cgonzalesmo@unsa.edu.pe"
] | cgonzalesmo@unsa.edu.pe |
1d74ef8462950a6d0001f53e3884fb6d831e1a36 | e7729e83f4caa78586a57de7c651b8e705e73305 | /app/flags/agents/flag_6.py | 45cf12711db4e7a9ee386fa13e8ae664ea83a475 | [] | no_license | dekoder/training | 873674b985a0f2f8d0e3740f3b2004da2e0af02d | c7509ae9d13ba1ebbd127aeb4dadcaf88ffd9749 | refs/heads/master | 2023-01-01T01:00:56.709768 | 2020-10-29T19:51:19 | 2020-10-29T19:51:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | name = 'Contact points'
challenge = 'Deploy a new agent, using a different contact point then your first agent'
extra_info = """If an adversary deploys all of their agents on a host using the same protocol, say HTTP, then when their agent is
detected and shut down, the defenders will likely close access to the C2 over that protocol. Therefore, an adversary
will want multiple agents on a host, each using a different protocol to talk to the C2. """
async def verify(services):
contacts = set([agent.contact for agent in await services.get('data_svc').locate('agents')])
if len(contacts) > 1:
return True
return False
| [
"davidhunt@mitre.org"
] | davidhunt@mitre.org |
f03a99ced8288a757786e5b1fc6ace50e7b0e072 | 9b7e51a1640c2287fb91ea4d6a19c6aecbfc0c44 | /src/authentication/migrations/0004_auto_20161025_2106.py | 2bc09ae66e3d9df21cb844c47b3f55bd017464f7 | [
"MIT"
] | permissive | poffey21/edge | c4bc2fdf25eed89fe23d1e533d0e1febe72a67fc | f8aea6de3ede6031b29146b530e98fd6d9ed8fac | refs/heads/master | 2021-01-12T16:07:14.928202 | 2016-10-26T16:06:10 | 2016-10-26T16:06:10 | 71,941,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-26 02:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('authentication', '0003_auto_20161025_2051'),
]
operations = [
migrations.AlterField(
model_name='token',
name='hint',
field=models.CharField(blank=True, help_text="*'s do not accurately reflect length of password", max_length=16),
),
]
| [
"poffey21@gmail.com"
] | poffey21@gmail.com |
af4d32d383245d7f962fec2306a3de7d93ab66ef | 11fc50030ae0e13a8a3dc22828550093d8fb555e | /core/migrations/0005_recipe_time_minutes.py | 0ecd28b8937cb32dd01920ee6d4579284835971b | [
"MIT"
] | permissive | subrata153/dj-receipe-api | cea1b856685c2b75da2d4a2342c37c5db8d2ce21 | 3a45c1a6f7b54c1e837db985bfdcca4579163739 | refs/heads/master | 2022-12-20T03:23:19.566375 | 2020-09-24T12:21:01 | 2020-09-24T12:21:01 | 293,527,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | # Generated by Django 3.1 on 2020-09-14 13:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_recipe'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='time_minutes',
field=models.IntegerField(default=1),
preserve_default=False,
),
]
| [
"subrata.goswami153@gmail.com"
] | subrata.goswami153@gmail.com |
215cefa51b03851506da1563a910e5c4573baa31 | 7df74b6338ff481add296e1358149b4f99c2e32b | /utils.py | 5b2c8220b360eda4b57d7977d395754148cfa26d | [] | no_license | biansy000/CS410-Drug-Molecular-Toxicity-Prediction | d311f80d1feb300703b0c47dd14cd90702cd105f | 7748708069ee69155dc1a81fa99e8ee8989a8504 | refs/heads/master | 2023-03-13T09:56:43.611027 | 2021-03-03T02:20:52 | 2021-03-03T02:20:52 | 343,323,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,907 | py | import numpy as np
import os
import copy
import pandas as pd
import torch
paths = {'test': "../code/data/test",
'train': "../code/data/train",
'valid': "../code/data/validation"}
def read_data(choice='train', pos_weight=7.0, add_valid=False):
if choice == 'train':
return read_train_data(pos_weight, add_valid)
elif choice == 'validation':
return read_valid_data(pos_weight, add_valid)
else:
return read_test_data(pos_weight)
def read_train_data(pos_weight=7.0, add_valid=False, to_valid=False):
path = paths['train']
rdata = []
names_onehots = np.load(os.path.join(path, 'names_onehots.npy'), allow_pickle=True)
df_smiles = pd.read_csv(os.path.join(path, 'names_smiles.txt'))
df_labels = pd.read_csv(os.path.join(path, 'names_labels.txt'))
names_onehots = dict(names_onehots[()])
names = names_onehots['names']
onehots = names_onehots['onehots']
if not (type(names[0]) == str): # due to some strange error
names = [key.decode("utf-8") for key in names]
for i, key in enumerate(names):
weight = 1.0
assert df_labels.iloc[i, 1] == 0 or df_labels.iloc[i, 1] == 1
if df_labels.iloc[i, 1] > 0.5:
weight = pos_weight
tmp = {'name': key, 'onehots': onehots[i], 'SMILES': df_smiles.iloc[i, 1], \
'label': float(df_labels.iloc[i, 1]), 'weight': weight}
rdata.append(tmp)
if add_valid: # only use 500+ as training data
return rdata[500:]
elif to_valid: # add 0 ~ 500 data to test set
return rdata[:500]
return rdata
def read_valid_data(pos_weight=7.0, add_valid=False):
path = paths['valid']
rdata = []
names_onehots = np.load(os.path.join(path, 'names_onehots.npy'), allow_pickle=True)
df_smiles = pd.read_csv(os.path.join(path, 'names_smiles.txt'))
df_labels = pd.read_csv(os.path.join(path, 'names_labels.txt'))
names_onehots = dict(names_onehots[()])
names = names_onehots['names']
onehots = names_onehots['onehots']
if not (type(names[0]) == str): # due to some strange error
names = [key.decode("utf-8") for key in names]
for i, key in enumerate(names):
weight = 1.0
if df_labels.iloc[i, 1] > 0.5:
weight = pos_weight
tmp = {'name': key, 'onehots': onehots[i], 'SMILES': df_smiles.iloc[i, 1], \
'label': float(df_labels.iloc[i, 1]), 'weight': weight}
rdata.append(tmp)
if add_valid:
add_data = read_train_data(pos_weight, to_valid=True)
rdata = rdata + add_data
return rdata
def read_test_data(pos_weight=7.0):
path = paths['test']
rdata = []
names_onehots = np.load(os.path.join(path, 'names_onehots.npy'), allow_pickle=True)
df_smiles = pd.read_csv(os.path.join(path, 'names_smiles.txt'))
names_onehots = dict(names_onehots[()])
names = names_onehots['names']
onehots = names_onehots['onehots']
if not (type(names[0]) == str): # due to some strange error
names = [key.decode("utf-8") for key in names]
for i, key in enumerate(names):
tmp = {'name': key, 'onehots': onehots[i], 'SMILES': df_smiles.iloc[i, 1]}
#print(onehots[i].shape)
rdata.append(tmp)
return rdata
def calc_mean(data, dim1=None):
summary = []
dim = [1, 1]
dim[0] = np.array(data[0]['onehots']).shape[0]
if dim1:
dim[1] = dim1
else:
dim[1] = np.array(data[0]['onehots']).shape[1]
for item in data:
num = np.array(item['onehots']).sum()
summary.append(num)
summary = np.array(summary)
mean = np.mean(summary) / (dim[0] * dim[1])
var = np.mean(summary**2) - mean**2
var = np.sqrt(var) / (dim[0] * dim[1])
print('mean', mean, 'var', var)
# raise SyntaxError
return mean, var
def sigmoid(x):
return 1/(1+np.exp(-x)) | [
"biansy000@github.com"
] | biansy000@github.com |
13eafdf4cca9a65dfa2e6bccb504ab6397013fb7 | d5292505eb7b8b93eca743eb187a04ea58d6b6a3 | /venv/Lib/site-packages/networkx/algorithms/operators/unary.py | 71a6303f16c9db7a764e15fa906e9421b5937b55 | [
"Unlicense"
] | permissive | waleko/facerecognition | 9b017b14e0a943cd09844247d67e92f7b6d658fa | ea13b121d0b86646571f3a875c614d6bb4038f6a | refs/heads/exp | 2021-06-03T10:57:55.577962 | 2018-09-04T19:45:18 | 2018-09-04T19:45:18 | 131,740,335 | 5 | 1 | Unlicense | 2020-01-19T10:45:25 | 2018-05-01T17:10:42 | Python | UTF-8 | Python | false | false | 1,646 | py | """Unary operations on graphs"""
# Copyright (C) 2004-2018 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils import not_implemented_for
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
__all__ = ['complement', 'reverse']
def complement(G):
"""Return the graph complement of G.
Parameters
----------
G : graph
A NetworkX graph
Returns
-------
GC : A new graph.
Notes
------
Note that complement() does not create self-loops and also
does not produce parallel edges for MultiGraphs.
Graph, node, and edge data are not propagated to the new graph.
"""
R = G.fresh_copy()
R.add_nodes_from(G)
R.add_edges_from(((n, n2)
for n, nbrs in G.adjacency()
for n2 in G if n2 not in nbrs
if n != n2))
return R
def reverse(G, copy=True):
"""Return the reverse directed graph of G.
Parameters
----------
G : directed graph
A NetworkX directed graph
copy : bool
If True, then a new graph is returned. If False, then the graph is
reversed in place.
Returns
-------
H : directed graph
The reversed G.
"""
if not G.is_directed():
raise nx.NetworkXError("Cannot reverse an undirected graph.")
else:
return G.reverse(copy=copy)
| [
"a.kovrigin0@gmail.com"
] | a.kovrigin0@gmail.com |
368ac5546d6b48d0e6f35422fca7463b8ce900c1 | 1bc5f5ffa7a7544b3a77f5981a648821549283d3 | /Python/a^b.py | 6cda25c4f4c0ec7ac9bf3e660b61ba23f9dc7e80 | [
"MIT"
] | permissive | Mario263/Hacktoberfest_2021 | 22047f5eaa0b876a86c3a890a654b65f7ab04600 | 57965f48d3b19d25d2c0b75525eab4c4dce0157a | refs/heads/main | 2023-09-03T00:14:24.695642 | 2021-10-31T06:49:24 | 2021-10-31T06:49:24 | 423,075,314 | 1 | 0 | MIT | 2021-10-31T06:50:13 | 2021-10-31T06:50:12 | null | UTF-8 | Python | false | false | 171 | py |
x = int(input("Enter base number: "))
y = int(input("Enter exponent number: "))
number = 1
for n in range(y):
number *= x
print(x,"^",y," = ",number,sep="")
| [
"noreply@github.com"
] | Mario263.noreply@github.com |
3e53ef1658987ecc2bc55594ea180866af5b582c | 7c8f6edd87cbee33cf998e9d2cc673fdcd39dd5a | /bots/Voodtwo/python/voodoo.py | 149d114051c2baf9989c6c3621aadc1cea98e223 | [] | no_license | tarehart/RLBotSpikeLeague | 89ce96417d8e201dcfc2f67ed5c1c81c7941131b | 311b3753e770cc642fdde87b6d4083db4072af88 | refs/heads/master | 2020-07-04T11:45:30.564487 | 2019-08-24T05:31:55 | 2019-08-24T05:31:55 | 202,278,639 | 0 | 3 | null | 2019-08-23T14:31:27 | 2019-08-14T05:09:20 | Python | UTF-8 | Python | false | false | 823 | py | from rlbot.agents.base_agent import BOT_CONFIG_AGENT_HEADER
from rlbot.agents.executable_with_socket_agent import ExecutableWithSocketAgent
from rlbot.parsing.custom_config import ConfigHeader, ConfigObject
class Voodoo(ExecutableWithSocketAgent):
def get_port(self) -> int:
return 19231
def load_config(self, config_header: ConfigHeader):
self.executable_path = config_header.getpath('java_executable_path')
self.logger.info("Java executable is configured as {}".format(self.executable_path))
@staticmethod
def create_agent_configurations(config: ConfigObject):
params = config.get_header(BOT_CONFIG_AGENT_HEADER)
params.add_value('java_executable_path', str, default=None,
description='Relative path to the executable that runs java.')
| [
"tarehart@gmail.com"
] | tarehart@gmail.com |
37a662efa91bfe7aa9f62dd5632cf64ca090ce2e | 1fba7af33f2424db7612775627dad8a062a63fc4 | /SivleyExam2Q1.py | c0e808ee2a16d7957e527141ec35ce26e2093bc8 | [
"MIT"
] | permissive | cjsivley/homework | eb8979eb923800c556357e67e02f976e6f899e2c | 643386ce4c34ae26a0b0730fee40e093a206abb1 | refs/heads/main | 2023-03-29T03:39:48.918553 | 2023-03-27T15:20:39 | 2023-03-27T15:20:39 | 40,308,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | #line problem
#exam 2 question 1
#Codey Sivley
#OOP Fall 2021
def pointVsLine(p0, p1, p2):
#setup
x0 = p0[0]
x1 = p1[0]
x2 = p2[0]
y0 = p0[1]
y1 = p1[1]
y2 = p2[1]
#is formula for A and B were incorrect on original problem?
#should be lineA = y0-y1, lineB = x1-x0
#answers keep coming out inverse as given.
#source: Fundamentals of Computer Graphics
#By Steve Marschner, Peter Shirley
lineA = y1-y0
lineB = x0-x1
lineC = (x0*y1)-(x1*y0)
#calculation
return ((lineA*x2)+(lineB*y2)-lineC)
testPoint1 = (1,0)
testPoint2 = (10,5)
testPoint3 = (1,2)
print(pointVsLine(testPoint1, testPoint2, testPoint3))
| [
"cjsivley@gmail.com"
] | cjsivley@gmail.com |
60a604d51abe28c15f4cbe9b135d530edf6eb603 | f87d1ce970ed414f62b90d79d8cf5a38556da592 | /repetory_api/migrations/0011_auto_20170609_1056.py | 670d2d9ff2b9b11106c16fd09dc242ea35f2ab32 | [] | no_license | zhangxu0307/repertory-rest | 331d58009c15e014d1a5e39447219817d77b08d9 | dc48a8e1e484254e1daa0712ffe66a52ec896ea7 | refs/heads/master | 2021-07-13T22:30:00.246833 | 2017-10-19T11:27:30 | 2017-10-19T11:27:30 | 107,536,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,712 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-09 02:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('repetory_api', '0010_auto_20170609_1044'),
]
operations = [
migrations.RemoveField(
model_name='material',
name='materailYear',
),
migrations.RemoveField(
model_name='material',
name='materialBand',
),
migrations.RemoveField(
model_name='material',
name='materialMark',
),
migrations.RemoveField(
model_name='material',
name='materialOriginal',
),
migrations.RemoveField(
model_name='material',
name='materialPostion',
),
migrations.RemoveField(
model_name='material',
name='materialState',
),
migrations.RemoveField(
model_name='material',
name='materialUnit',
),
migrations.AddField(
model_name='materialinput',
name='materailYear',
field=models.DateTimeField(blank=True, null=True, verbose_name='\u6750\u6599\u5e74\u4efd'),
),
migrations.AddField(
model_name='materialinput',
name='materialBand',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u6750\u6599\u54c1\u724c'),
),
migrations.AddField(
model_name='materialinput',
name='materialMark',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u6750\u6599\u578b\u53f7'),
),
migrations.AddField(
model_name='materialinput',
name='materialOriginal',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u6750\u6599\u539f\u4ea7\u5730'),
),
migrations.AddField(
model_name='materialinput',
name='materialPostion',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u6750\u6599\u4f4d\u7f6e'),
),
migrations.AddField(
model_name='materialinput',
name='materialState',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u6750\u6599\u72b6\u6001'),
),
migrations.AddField(
model_name='materialinput',
name='materialUnit',
field=models.DecimalField(decimal_places=4, default=0, max_digits=8, verbose_name='\u6750\u6599\u5355\u4f4d\u539f\u503c'),
),
]
| [
"zhangxu0307@163.com"
] | zhangxu0307@163.com |
aa7e217944d3d5deef2326ff26f742794898bedb | 6c144b3870b03b335cf171f2084835f29afbcb9d | /ps/prop/migrations/0020_auto_20161116_2255.py | a4c3b46221276ef8e7482e2aa9f5b1e274a4d6cc | [] | no_license | thementat/ps | 126514f70492171d632a3f1bc7075ea7c860b359 | c2bb6af592b78110ba79757f61bdf21e2665d56f | refs/heads/master | 2021-01-12T04:40:33.026049 | 2017-11-15T17:20:51 | 2017-11-15T17:20:51 | 77,701,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,056 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-16 22:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('prop', '0019_auto_20161116_2138'),
]
operations = [
migrations.RemoveField(
model_name='src',
name='muni',
),
migrations.RemoveField(
model_name='src_imp',
name='imp',
),
migrations.RemoveField(
model_name='src_imp',
name='src',
),
migrations.AddField(
model_name='imp_parcel',
name='parcel',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='prop.Parcel'),
),
migrations.AddField(
model_name='imp_tie',
name='lot',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='prop.Lot'),
),
migrations.AddField(
model_name='imp_tie',
name='parcel',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='prop.Parcel'),
),
migrations.AlterField(
model_name='imp_address',
name='parcel',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='prop.Parcel'),
),
migrations.AlterField(
model_name='imp_property',
name='parcel',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='prop.Parcel'),
),
migrations.AlterField(
model_name='imp_value',
name='property',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='prop.Property'),
),
migrations.DeleteModel(
name='Src',
),
migrations.DeleteModel(
name='Src_Imp',
),
]
| [
"chrisbradley@192.168.0.50"
] | chrisbradley@192.168.0.50 |
41270d7d2aac12ae55586464834bdd7b1f1a7029 | b11099e6136f9d449ad60a0f446dd434402b1be0 | /startsetup.py | f391bc47ec73f13b2aaa6d2659e2b8fce7f9fcaa | [] | no_license | cloudedrhyme/HowdyHack21 | 48c05a060b8a272c3254202425c25b6f0baf66db | 24733f6b50a81b05ad9b20b71e14d74a752ec62b | refs/heads/master | 2023-07-28T01:44:02.342049 | 2021-09-12T16:36:47 | 2021-09-12T16:36:47 | 405,486,680 | 0 | 0 | null | 2021-09-12T16:37:51 | 2021-09-11T21:28:18 | Python | UTF-8 | Python | false | false | 655 | py | import pygame
from pygame.locals import *
import sys;
#test window size
displaysurface = pygame.display.set_mode((300, 300))
image = pygame.Surface([640,480], pygame.SRCALPHA, 32)
image = image.convert_alpha()
mySurface = pygame.Surface((50, 50))
#mySurface.fill((0,255,0))
mySurface2 = pygame.Surface((100, 50))
mySurface2.fill((0,255,0))
player = pygame.image.load("resources/images/dude.png")
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
displaysurface.blit(mySurface, (50,50))
displaysurface.blit(mySurface2, (50,150))
pygame.display.update() | [
"74265937+cloudedrhyme@users.noreply.github.com"
] | 74265937+cloudedrhyme@users.noreply.github.com |
e19e6de5115b068231c810df6fd849b3fa53ff42 | b4af88b8da98b33a57de4bbdda42dd7bfa4af6f2 | /cs308_2014_TH01_Greenhouse Temperature Regulation/Code/PC/erts_server.py | ae63db5c7be8f828718f823bfd083cc715eef873 | [
"MIT"
] | permissive | eyantra/cs308_2014 | b5e7618775c4ce8d125984f6ab3e61ee1e6ca333 | c40126ec0e2a6b02a75243dcc5c7e910f96b564a | refs/heads/master | 2022-06-23T19:47:31.297517 | 2019-10-16T09:59:46 | 2019-10-16T09:59:46 | 19,315,199 | 0 | 3 | MIT | 2022-06-17T21:18:28 | 2014-04-30T13:55:41 | HTML | UTF-8 | Python | false | false | 2,853 | py | import threading
import webbrowser
import BaseHTTPServer
import SimpleHTTPServer
import serial
import time
ser = serial.Serial(11) # serial where to write
FILE = 'erts_client.html' # url to accept requests
PORT = 9000 # port number for the server
class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
'''
Handles HTTP web requests and inherits "SimpleHTTPRequestHandler"
'''
postdata = dict()
def do_POST(self):
'''
Handles POST request
'''
length = int(self.headers.getheader('content-length'))
data_string = self.rfile.read(length) # read post data as URL encoded string
for datapoint in data_string.split('&'):
# this loop splits stringified post data and stores has key-value pairs
datapoint = datapoint.split('=')
self.postdata[datapoint[0]] = datapoint[1]
print self.postdata # print on server terminal for debugging purposes
try:
# now take action according to inputs and get output
result = process_post_data(self.postdata)
except:
result = 'Error'
self.wfile.write(result) # display output
def sanitize_and_send(threshold1,threshold2,temp_constant):
# check if inputs are in desired format
flag = 1 # flag will be set to 0 if something is wrong
result = ""
print threshold1,threshold2,temp_constant
if(threshold1 > 50 or threshold1<10):
result += "Threshold Temperature 1 should be between 10 and 50\n"
flag = 0
if(threshold2>50 or threshold2<10):
result += "Threshold Temperature 2 should be between 10 and 50\n"
flag = 0
if(threshold2<threshold1):
result += "Threshold Temperature 2 should be greater than Threshold Temperature 1\n"
flag = 0
if(temp_constant<1 or temp_constant>9):
result += "Temperature Buffer should be between 1 and 9\n"
flag = 0
if (flag): # if flag == 1, then aal iz well
# write data to serial (this serial communicates with our bot)
ser.write(str(threshold1))
ser.write(str(threshold2))
ser.write(str(temp_constant))
result += "Values sent successfully\n"
return result
def process_post_data(postdata):
# processes the inputs
print postdata # for debugging
result = ""
try:
t1 = postdata['threshold1']
t2 = postdata['threshold2']
tb = postdata['tbuff']
# convert string to int
t1 = int(t1)
t2 = int(t2)
tb = int(tb)
# sanitize input and send
result = sanitize_and_send(t1,t2,tb)
except Exception as e:
result = e
return result
def open_browser():
# Start a browser after waiting for half a second.
def _open_browser():
webbrowser.open('http://localhost:%s/%s' % (PORT, FILE))
thread = threading.Timer(0.5, _open_browser)
thread.start()
def start_server():
# Start the server.
server_address = ("", PORT)
server = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
server.serve_forever()
if __name__ == "__main__":
open_browser()
start_server()
| [
"eyantra.erts@gmail.com"
] | eyantra.erts@gmail.com |
40c83ee5b23ec45325e73155816f4301806781e5 | 8747eadb3e7f6d0883a201a7f1c4945ecf12f842 | /flightservices/flightapp/views.py | 74009c9754a19ae0875297bc713abbc6a5d6ebae | [] | no_license | GBethani/drf_demo | 3d61439d996cb34e1d6794ffeda5127c63687a45 | 75d47ffc5876480c56bcf1018bcadc12398a7412 | refs/heads/main | 2023-02-07T23:41:42.425327 | 2020-12-30T05:34:52 | 2020-12-30T05:34:52 | 324,276,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,737 | py | from django.shortcuts import render
from .models import Flight, Passenger, Reservation
from .serializers import FlightSerializer, PassengerSerializer, ReservationSerializer
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.permissions import IsAuthenticated
# Create your views here.
@api_view(['POST'])
def find_flights(request):
flights = Flight.objects.filter(departurecity=request.data['departurecity'],arrivalcity=request.data['arrivalcity'],dateofdeparture=request.data['dateofdeparture'])
serializer = FlightSerializer(flights,many=True)
return Response(serializer.data)
@api_view(['POST'])
def save_reservation(request):
flight = Flight.objects.get(id=request.data['flightId'])
passenger = Passenger()
passenger.firstname = request.data['firstname']
passenger.middlename = request.data['middlename']
passenger.lastname = request.data['lastname']
passenger.email = request.data['email']
passenger.phone = request.data['phone']
passenger.save()
reservation = Reservation()
reservation.flight = flight
reservation.passenger = passenger
reservation.save()
return Response(status=status.HTTP_201_CREATED)
class FlightViewSet(viewsets.ModelViewSet):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated,]
class PassengerViewSet(viewsets.ModelViewSet):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class ReservationViewSet(viewsets.ModelViewSet):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
| [
"jinkiller69@gmail.com"
] | jinkiller69@gmail.com |
c1a278d0c191ec9f7a09ffb015bef1cb08eebb82 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startPyquil3029.py | 4d24e8f4ba438bbba1f8ddf9e36daac828244176 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,914 | py | # qubit number=4
# total number=41
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(3) # number=19
prog += CZ(0,3) # number=20
prog += H(3) # number=21
prog += CNOT(0,3) # number=23
prog += X(3) # number=24
prog += CNOT(0,3) # number=25
prog += CNOT(0,3) # number=17
prog += RX(-0.48380526865282825,3) # number=26
prog += H(1) # number=2
prog += Y(3) # number=18
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(3) # number=12
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(1) # number=34
prog += CZ(0,1) # number=35
prog += H(1) # number=36
prog += CNOT(0,1) # number=31
prog += CNOT(0,1) # number=38
prog += X(1) # number=39
prog += CNOT(0,1) # number=40
prog += CNOT(0,1) # number=33
prog += CNOT(0,1) # number=30
prog += H(3) # number=8
prog += H(3) # number=37
prog += H(0) # number=9
prog += Y(2) # number=10
prog += X(2) # number=22
prog += Y(2) # number=11
prog += X(0) # number=13
prog += X(0) # number=14
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil3029.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
7ad01d201b6f2e29e6b858389712d658123664e1 | 41f5632dd1fb878eef163a07f07f987a6a1eb3c6 | /Python/Preprocessing.py | 9ef4eb934f5659d2e86d888ffdcc1d45697726ff | [] | no_license | eugeneALU/Text-Classification | 9e9fa695456a674c27d1fe0db9496c7193bba54d | cd9a8dd679a5dee92b56720b91085b9d53ae5581 | refs/heads/master | 2020-12-03T11:42:21.764190 | 2020-01-02T03:47:47 | 2020-01-02T03:47:47 | 231,302,752 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,279 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 20 09:43:39 2019
@author: eugene
"""
import numpy as np
import pandas as pd
import re
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
#function to clean the word of any punctuation or special characters
def cleanPunc(sentence):
#remove word in ()
cleaned = re.sub(r'\([A-Z|a-z]+\)',r'',sentence)
#preserve some important punctuation and add a space between them and words
cleaned = re.sub(r"([.!?])", r" \1", cleaned)
#remove other punctuation
cleaned = re.sub(r'[\'|"|#|:|;|,|%|<|>|\(|\)|\^|\||/|\[|\]|&|*|+|=|@|`|~]',r'',cleaned)
#remove space \n in the end of the sentences
cleaned = cleaned.strip()
#remove numbers and words right follow numbers like XXcm(or XX-cm, XX-) but XX cm won't be remove
cleaned = re.sub(r'\d+\-\w*|\d+\w+|\d+',r'',cleaned)
#remove - in -XXX or XXX- or -- but no XXX-XXX
cleaned = re.sub(r'(?<=\w)-(?!\w)|(?<!\w)-(?=\w)|--|(?<!\w)-(?!\w)',r'',cleaned)
#remove space and - in the start or end of the sentences again
cleaned = cleaned.strip(' -')
#incase there are two space
cleaned = cleaned.replace(' ',' ')
return cleaned
def cleanPunc2(sentence):
#remove word in ()
cleaned = re.sub(r'\([A-Z|a-z|0-9]+\)',r'',sentence)
#preserve some important punctuation and add a space between them and words
cleaned = re.sub(r"([.!?])", r" \1 ", cleaned)
#remove other punctuation
cleaned = re.sub(r'[^a-zA-Z0-9?.!]+',r' ',cleaned)
#restore special term
cleaned = re.sub(r'e \. g \.', "e.g.", cleaned)
cleaned = re.sub(r'a \. k \. a .', "a.k.a.", cleaned)
cleaned = re.sub(r'i \. e \.', "i.e.", cleaned)
cleaned = re.sub(r'\. \. \.', "...", cleaned)
cleaned = re.sub(r'(\d+) \. (\d*)', r"\1.\2", cleaned)
#incase there are many spaces
cleaned = re.sub(r'[" "]+', " ", cleaned)
#remove space \n in the end of the sentences
cleaned = cleaned.strip()
#remove space and - in the start or end of the sentences again
cleaned = cleaned.strip(' -')
return cleaned
def cleanPunc3(sentence):
#remove word in ()
cleaned = re.sub(r'\([A-Z|a-z|0-9]+\)',r'',sentence)
#rule-based, replace the nameentity to certain entity (EX: 3D P2P CNN Tokenizer)
# -- word with uppercase but appears in the middle of the sentence
cleaned = re.sub(r'(?<= )(?:[A-Z]+[\w-]*|[\w\-]*[A-Z]+[\w\-]*)', '<NAME>', cleaned)
#replace the number to certain entity (EX: 2000 2.05 2,000 2-5)
cleaned = re.sub(r'(?:(?<= )\d+[\.\,\-]*\d*(?= )|^\d+(?= )|\d+(?=$))', '<NUMBER>', cleaned)
#preserve some important punctuation and add a space between them and words
cleaned = re.sub(r"([.!?])", r" \1 ", cleaned)
#remove other punctuation
cleaned = re.sub(r'[^a-zA-Z0-9?.!]+',r' ',cleaned)
#restore special term
cleaned = re.sub(r'e \. g \.', "e.g.", cleaned)
cleaned = re.sub(r'a \. k \. a .', "a.k.a.", cleaned)
cleaned = re.sub(r'i \. e \.', "i.e.", cleaned)
cleaned = re.sub(r'\. \. \.', "...", cleaned)
cleaned = re.sub(r'(\d+) \. (\d*)', r"\1.\2", cleaned)
#incase there are many spaces
cleaned = re.sub(r'[" "]+', " ", cleaned)
#remove space \n in the end of the sentences
cleaned = cleaned.strip()
#remove space and - in the start or end of the sentences again
cleaned = cleaned.strip(' -')
return cleaned
def removeStopWords(sentence):
stop_words = set(stopwords.words('english'))
re_stop_words = re.compile(r"\b(" + "|".join(stop_words) + ")\\W", re.I)
return re_stop_words.sub("", sentence)
def stemming(sentence):
stemmer = SnowballStemmer("english")
stemSentence = ""
for word in sentence.split():
stem = stemmer.stem(word)
stemSentence += stem
stemSentence += " "
stemSentence = stemSentence.strip()
return stemSentence
DATA = pd.read_csv('./train.csv')
''' Remove special punctuation and lower and stemming'''
#DATA['Sentences'] = DATA['Sentences'].str.lower()
DATA['Sentences'] = DATA['Sentences'].apply(cleanPunc3)
#DATA['Sentences'] = DATA['Sentences'].apply(stemming)
''' Count length'''
if DATA.Sentences.notna().all():
COUNT = np.zeros((DATA.shape[0]))
for i in range(DATA.shape[0]):
COUNT[i] = len(DATA.Sentences[i].split(' '))
DATA['LENGTH'] = COUNT
## replace empty sentences with <UNK> token
DATA.loc[DATA['Sentences']=='','Sentences'] = '<UNK>'
## count the sentences with only Length==1/2 -- could be one of the features
#COUNT1 = DATA[COUNT==1]
#COUNT2 = DATA[COUNT==2]
## Tokenize
#DATA['TOKEN'] = DATA.apply(lambda row: word_tokenize(row['Sentences']), axis=1) # using tokenizer
#DATA['TOKEN'] = DATA.apply(lambda row: row['Sentences'].split(), axis=1) # or just split with space
DATA.to_csv('train_removesomepunctuation_addentity.csv', index=False)
#%% TEST
TEST = pd.read_csv('test.csv')
''' Remove special punctuation and lower and stemming'''
#TEST['Sentences'] = TEST['Sentences'].str.lower()
TEST['Sentences'] = TEST['Sentences'].apply(cleanPunc3)
#TEST['Sentences'] = TEST['Sentences'].apply(stemming)
if TEST.Sentences.notna().all():
COUNT = np.zeros((TEST.shape[0]))
for i in range(TEST.shape[0]):
COUNT[i] = len(TEST.Sentences[i].split(' '))
TEST['LENGTH'] = COUNT
## replace empty sentences with <UNK> token
TEST.loc[TEST['Sentences']=='','Sentences'] = '<unk>'
## Tokenize
#TEST['TOKEN'] = TEST.apply(lambda row: word_tokenize(row['Sentences']), axis=1)
#TEST['TOKEN'] = TEST.apply(lambda row: row['Sentences'].split(), axis=1)
TEST.to_csv('test_removesomepunctuation_addentity.csv', index=False)
#%% PRIVATE
PRIVATE = pd.read_csv('private.csv')
''' Remove special punctuation and lower and stemming'''
PRIVATE['Sentences'] = PRIVATE['Sentences'].apply(cleanPunc2)
if PRIVATE.Sentences.notna().all():
COUNT = np.zeros((TEST.shape[0]))
for i in range(PRIVATE.shape[0]):
COUNT[i] = len(PRIVATE.Sentences[i].split(' '))
PRIVATE['LENGTH'] = COUNT
## replace empty sentences with <UNK> token
PRIVATE.loc[PRIVATE['Sentences']=='','Sentences'] = '<unk>'
# save
PRIVATE.to_csv('private_removesomepunctuation.csv', index=False) | [
"eugenelu49@yahoo.com.tw"
] | eugenelu49@yahoo.com.tw |
0a5a10fc5960abab4709c50c8d9d9a98632a00ae | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /alembic/versions/00036_11dbcd6e5ee3_.py | 23c7b78690e2f34d72284e8b9134eea9c3b21604 | [
"BSD-3-Clause"
] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 1,392 | py | """empty message
Revision ID: 11dbcd6e5ee3
Revises: 5aa994117f07
Create Date: 2017-09-28 04:06:18.968893
"""
# revision identifiers, used by Alembic.
revision = '11dbcd6e5ee3'
down_revision = '5aa994117f07'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute("UPDATE nu_release_item SET fetch_attempts = 0 WHERE fetch_attempts IS NULL")
op.execute("commit")
op.alter_column('nu_release_item', 'fetch_attempts',
existing_type=sa.INTEGER(),
nullable=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('nu_release_item', 'fetch_attempts',
existing_type=sa.INTEGER(),
nullable=True)
### end Alembic commands ###
| [
"something@fake-url.com"
] | something@fake-url.com |
b6fb8eb2e7f812c6c108c58bc950f2ee0e489bc4 | b3f46fdcb04110ed842b0ae13509fa162a6b422a | /Code/heartland/hl_admin/tests.py | d81983fa9ee5919458df595c79648bbfe04d73a6 | [] | no_license | CS-4503-Gambit/vigilant-winner | ec10b3bd4c7ad5400ba4245f0272d0b5278cc4e6 | 5ee1d42aa9e75468f766d37ae841852b15f681cc | refs/heads/master | 2021-08-14T07:06:45.307165 | 2017-11-08T04:58:52 | 2017-11-08T04:58:52 | 106,452,195 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,163 | py | from django.test import TestCase, Client
from core.models import *
# Create your tests here.
class AdminTestCase(TestCase):
# Test by Jon
def setup(self):
exec(open('populate.py').read())
self.client = Client()
self.client.login(username='admin', password='heartland')
# Test the home screen for admin login
# Test by Jon
def test_admin_home(self):
self.setup()
response = self.client.get('/admin/home/')
# Check that the response was successful
self.assertEqual(response.status_code, 200)
# Check all the links are present
self.assertContains(response, "Manage Database")
self.assertContains(response, "View Results")
self.assertContains(response, "View Judging Statistics")
self.assertContains(response, "Create Judge or Registrar")
self.assertContains(response, "View QR Codes")
# Test the category display
# Test by Jon
def test_category_display(self):
self.setup()
response = self.client.get('/admin/scores/')
# Check that the response was successful
self.assertEqual(response.status_code, 200)
# Check that both categories are present
self.assertContains(response, "Video Games")
self.assertContains(response, "Art")
# Test the create user screen
# Test by Jon
def test_create_user(self):
self.setup()
response = self.client.get('/admin/create_user/')
# Check that the response was successful
self.assertEqual(response.status_code, 200)
# Check that the fields are present
self.assertContains(response, 'Username')
self.assertContains(response, 'Password')
self.assertContains(response, 'Type')
# Test the View QR Code screen
# Test by Jon
def test_view_qr(self):
self.setup()
response = self.client.get('/admin/viewqr/')
# Check that the response was successful
self.assertEqual(response.status_code, 200)
# Check that each category is present
self.assertContains(response, 'Registrars')
self.assertContains(response, 'Judges')
self.assertContains(response, 'Teams')
# check that the specific entries are present
self.assertContains(response, 'registrar')
self.assertContains(response, 'reg')
self.assertContains(response, 'judge')
self.assertContains(response, 'judy')
self.assertContains(response, 'Bethesda Softworks')
self.assertContains(response, 'Irrational Studios')
self.assertContains(response, 'Leonardo Da Vinci')
# Test that the judge listing is correct
# Test by Jon
def test_judge_list(self):
self.setup()
response = self.client.get('/admin/judges/')
# Check that the response was successful
self.assertEqual(response.status_code, 200)
# Check that the list header is present
self.assertContains(response, 'List of Judges')
# Check that the judge is present
self.assertContains(response, 'judge')
self.assertContains(response, 'judy')
| [
"jonb377@gmail.com"
] | jonb377@gmail.com |
4e69ca51f263610e83bd6a3bd4ea91ec3352c3e3 | 19a1ce0e3e45a2d45c395a95ca9b675473c24ea9 | /Python/deprecated/pyqtgraphHandler.py | fbcf577b26dd1a9fc70e20efb7358a279ecb5f18 | [
"MIT",
"Beerware"
] | permissive | italogsfernandes/libraries | 6fe34f71b60d41e551bcca9a983e828890946a77 | 16f021b867a4a61e75cb45e0dcc595fc88e1ad6a | refs/heads/master | 2021-03-27T10:52:03.664874 | 2019-11-19T00:38:06 | 2019-11-19T00:38:06 | 102,471,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,232 | py | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# FEDERAL UNIVERSITY OF UBERLANDIA
# Faculty of Electrical Engineering
# Biomedical Engineering Lab
# ------------------------------------------------------------------------------
# Author: Italo Gustavo Sampaio Fernandes
# Contact: italogsfernandes@gmail.com
# Git: www.github.com/italogfernandes
# ------------------------------------------------------------------------------
# Decription:
# ------------------------------------------------------------------------------
from ThreadHandler import InfiniteTimer
from Queue import Queue
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
from pyqtgraph.ptime import time
from PyQt4.QtGui import QBrush, QColor, QPen, QGraphicsRectItem
# ------------------------------------------------------------------------------
class pyqtgraphHandler:
def __init__(self, qnt_pontos=10):
self.qnt_pontos = qnt_pontos
self.app = QtGui.QApplication([])
self.plot = pg.plot()
self.plot.setBackgroundBrush(QBrush(QColor.fromRgb(255, 255, 255)))
#self.plot.windowTitle().
#self.plot.setForegroundBrush(QBrush(QColor.fromRgb(250,0,0,30)))
self.plot.setWindowTitle('PlotterHandler')
self.plot.setRange(QtCore.QRectF(0, -10, 5000, 20))
self.plot.setLabel('bottom', 'Index', units='un')
self.plot.setLabel('left', 'Valor', units='V')
self.curve = self.plot.plot(pen='b')
# self.plot.enableAutoRange('xy', True)
self.plot.enableAutoRange('xy', False)
self.plot.setXRange(0, 5000)
self.plot.setYRange(0, 5)
#self.plot.enableAutoRange('xy', True)
#grade = pg.GridItem()
#self.plot.addItem(grade)
self.plot.showGrid(True, True)
#self.plot.showAxis('top', False)
#self.plot.showAxis('bottom', False)
#self.plot.showAxis('left', False)
#self.plot.showAxis('right', False)
self.plot.getAxis('left').setPen(QPen(QColor.fromRgb(0,0,0)))
self.plot.getAxis('bottom').setPen(QPen(QColor.fromRgb(0, 0, 0)))
#print self.plot.getRange
#self.plot.
# self.plot.showGrid(2,3)
#self.plot.hideAxis(ax)
#
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update_y_points)
self.lastTime = time()
self.fps = None
self.y_values = [0] * self.qnt_pontos
self.plot_buffer = Queue(qnt_pontos)
def put(self, item):
self.plot_buffer.put(item)
def update_y_points(self):
points_to_add = self.plot_buffer.qsize()
if points_to_add > 0:
for n in range(points_to_add): # obtains the new values
num = self.plot_buffer.get()
self.y_values.append(num)
if len(self.y_values) > self.qnt_pontos:
self.y_values.pop(0)
self.update()
def update(self):
# global curve, data, ptr, p, lastTime, fps
self.curve.setData(self.y_values)
now = time()
dt = now - self.lastTime
self.lastTime = now
if self.fps is None:
self.fps = 1.0 / dt
else:
s = np.clip(dt * 3., 0, 1)
self.fps = self.fps * (1 - s) + (1.0 / dt) * s
self.plot.setTitle('<font color="red">%0.2f fps</font>' % self.fps)
self.app.processEvents() # force complete redraw for every plot
def get_buffers_status(self):
return "Plot: %4d" % (self.plot_buffer.qsize()) + '/' + str(self.plot_buffer.maxsize)
def appear(self):
self.timer.start(0)
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
self.timer.stop()
if __name__ == '__main__':
my_plot = pyqtgraphHandler(5000)
from datetime import datetime
def generate_point():
agr = datetime.now()
y_value = agr.microsecond / 1000000.0
my_plot.put(np.sin(2*np.pi*y_value))
#print y_value
timer = InfiniteTimer(0.001, generate_point)
timer.start()
my_plot.appear()
| [
"italogsfernandes@gmail.com"
] | italogsfernandes@gmail.com |
fe5fcf11ba7e46ac07d902574d302ba4f0975311 | f1dbd0ba7f1951491a245f792bbc07d85df0edf3 | /dfd/timm/models/helpers.py | 8669d4f0509f5da7825e5a78bbbf814a0f1d5c9e | [
"Apache-2.0"
] | permissive | Alinccc/Deepfake_Detection | 8f05a431fb1030ca1dc721faa4a96e5d51454cdd | fce0c80f78e28fe78674fa00c3b70a6184ae1d3d | refs/heads/main | 2023-08-21T18:05:41.606910 | 2021-10-19T05:05:37 | 2021-10-19T05:05:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,791 | py | import torch
import torch.utils.model_zoo as model_zoo
import os
import logging
from collections import OrderedDict
def load_state_dict(checkpoint_path, use_ema=False):
if checkpoint_path and os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
state_dict_key = 'state_dict'
if isinstance(checkpoint, dict):
if use_ema and 'state_dict_ema' in checkpoint:
state_dict_key = 'state_dict_ema'
if state_dict_key and state_dict_key in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint[state_dict_key].items():
# strip `module.` prefix
name = k[7:] if k.startswith('module') else k
new_state_dict[name] = v
state_dict = new_state_dict
else:
state_dict = checkpoint
logging.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path))
return state_dict
else:
logging.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_checkpoint(model, checkpoint_path, use_ema=False, strict=True, ignore_keys=None):
state_dict = load_state_dict(checkpoint_path, use_ema)
if ignore_keys:
for del_key in ignore_keys:
state_dict.pop(del_key)
# for key in state_dict:
# print(key,state_dict[key].shape)
# exit()
if not strict:
model_dict = model.state_dict()
for key in model_dict:
if key in state_dict and model_dict[key].shape != state_dict[key].shape:
state_dict.pop(key)
model.load_state_dict(state_dict, strict=strict)
def resume_checkpoint(model, checkpoint_path):
other_state = {}
resume_epoch = None
if os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict'].items():
name = k[7:] if k.startswith('module') else k
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
if 'optimizer' in checkpoint:
other_state['optimizer'] = checkpoint['optimizer']
if 'amp' in checkpoint:
other_state['amp'] = checkpoint['amp']
if 'epoch' in checkpoint:
resume_epoch = checkpoint['epoch']
if 'version' in checkpoint and checkpoint['version'] > 1:
resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save
logging.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
else:
model.load_state_dict(checkpoint)
logging.info("Loaded checkpoint '{}'".format(checkpoint_path))
return other_state, resume_epoch
else:
logging.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_pretrained(model, cfg=None, num_classes=1000, in_chans=3, filter_fn=None, strict=True):
if cfg is None:
cfg = getattr(model, 'default_cfg')
if cfg is None or 'url' not in cfg or not cfg['url']:
logging.warning("Pretrained model URL is invalid, using random initialization.")
return
state_dict = model_zoo.load_url(cfg['url'], progress=False, map_location='cpu')
if in_chans == 1:
conv1_name = cfg['first_conv']
logging.info('Converting first conv (%s) from 3 to 1 channel' % conv1_name)
conv1_weight = state_dict[conv1_name + '.weight']
state_dict[conv1_name + '.weight'] = conv1_weight.sum(dim=1, keepdim=True)
elif in_chans != 3:
assert False, "Invalid in_chans for pretrained weights"
classifier_name = cfg['classifier']
if num_classes == 1000 and cfg['num_classes'] == 1001:
# special case for imagenet trained models with extra background class in pretrained weights
classifier_weight = state_dict[classifier_name + '.weight']
state_dict[classifier_name + '.weight'] = classifier_weight[1:]
classifier_bias = state_dict[classifier_name + '.bias']
state_dict[classifier_name + '.bias'] = classifier_bias[1:]
elif num_classes != cfg['num_classes']:
# completely discard fully connected for all other differences between pretrained and created model
del state_dict[classifier_name + '.weight']
del state_dict[classifier_name + '.bias']
strict = False
if filter_fn is not None:
state_dict = filter_fn(state_dict)
model.load_state_dict(state_dict, strict=strict)
| [
"huangsy1314@163.com"
] | huangsy1314@163.com |
cd8ff3b1036a37a03dbc1430032d1f214f4c9c47 | 4798adcdd8b03cddf8ec892091e67eda64db5379 | /plasmidlord/plasmidlord.py | 4e1d0e79c14d76bb3bd5dc10fa750e245b3f8b87 | [] | no_license | maxlcummins/pipelord_old | e11bb92d958b4ed5da5e0371e0c05b28b095bc5c | 049cb4a48242ce5ab08e6fd3802ef4d619de9ca3 | refs/heads/master | 2023-07-20T05:24:16.888318 | 2021-08-31T04:23:23 | 2021-08-31T04:23:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,816 | py | from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import linkage, dendrogram
import numpy as np
import pandas as pd
import glob
import matplotlib
matplotlib.use('Agg') #avoid problems with headless environments
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib import gridspec
def binning(data, step):
"""
Bin an array by step width, report the median of each bin
:param data: array of ordred values to bin
:param step: the bin step size
:return: medians, bins
"""
meds = []
c = []
n = 0
bins = []
for ir in xrange(data.shape[0]):
if data[ir, 0] > (n+1)*step:
if len(c) > 0:
meds.append(np.median(c))
else:
meds.append(0)
bins.append((n+1)*step)
n += 1
c = []
else:
c.append(data[ir, 1])
return np.array(meds), np.array(bins)
def get_block_medians(glob_path, step_size):
"""
Read in the per-position coverage data for each sample and bin to a corser size
:param glob_path: path which to scan for files
:param step_size: the binning size in bp
:return: pandas dataframe of binned depths (median values), bins for each set of medians
"""
meds = []
bins = []
smpl_names = []
# manually parse each organism directory. Manually meaning, change this path
# for each one. Could be made a loop, but unnecessary for 3 runs
for n, fn in enumerate(glob.glob('{}/*_coverage.txt'.format(glob_path))):
print '\t {} found {}...'.format(n+1, fn)
# read the data file
smpl = fn.split('/')[-1][:-len('_coverage.txt')]
smpl_names.append(smpl)
data = pd.read_csv(fn, delimiter='\t', names=['_', 'x', 'depth'])
data = data.as_matrix()[:, 1:]
for row in data:
if row[1] >= 10 and row[1] < 20:
row[1] = 0.5
elif row[1] >= 20:
row[1] = 1
m, b = binning(data, step_size)
meds.append(m)
bins.append(b)
max_bins = 0
for bi in bins:
if max(bi) > max_bins:
max_bins = max(bi)
for i in xrange(len(bins)):
if max(bins[i]) < max_bins:
print 'Warning, data file {} had fewer bins that the largest. Extra empty bins added'.format(i+1)
bi = bins[i].tolist()
mi = meds[i].tolist()
while max(bi) < max_bins:
bi.append(bi[-1] + step_size)
mi.append(0)
bins[i] = np.array(bi)
meds[i] = np.array(mi)
return pd.DataFrame(np.vstack(meds).T, columns=smpl_names), bins
def run_hclust(outname, meds, bins, step_size, tick_spc, use_polo=True, save_plot=False):
"""
Cluster and plot the binned data matrix. This calls optimal leaf ordering
algorithm (polo) by default, which has significant time-complexity.
:param outname: pdf output file
:param meds: median values
:param bins: bins for medians
:param step_size: size of a step
:param tick_spc: tick spacing in "# bins"
:param olo: reorder tips of tree with polo
:param savePlot: save plot to file, otherwise plot to screen
"""
# just for indexing
names = meds.columns
# normalise rows by their median value
D = meds.as_matrix().T
# D = (D.T/np.median(D, 1)).T
# center rows on their medians
# D = (D.T - np.median(D, 1)).T
# clustering options. We will use correlation as measure of distance
# and complete linkage clustering
metric = 'euclidean'
method = 'complete'
# calculate
Y = linkage(D, method=method, metric=metric)
# additionally, find optimal leaf ordering
if use_polo:
import polo
print '\tcalculating optimal leaf ordering...'
Y = polo.optimal_leaf_ordering(Y, pdist(D, metric=metric))
# now we do some plotting
fig = plt.figure(figsize=(12, 0.25*len(meds.columns)), dpi=150)
gs = gridspec.GridSpec(2, 2, width_ratios=[7, 1], height_ratios= [0.2,10])
axmatrix = plt.subplot(gs[2])
axmatrix.set_xlabel('genomic coord (kbp)')
axcolor = plt.subplot(gs[0])
axcolor.set_title('log median relative abundance ')
axdend = plt.subplot(gs[3])
# calculate and plot the dendrogram
Z = dendrogram(Y, ax=axdend, orientation='right', no_labels=True, color_threshold=0 )
# the tips (leaves) of the tree become the order for rows
idx = Z['leaves']
# reorder rows
D = np.log(D[idx, :]+1)
# the largest value in the matrix will set the upper and lower bounds
# for the heatmap color-range. This assures 0 as the center.
vmin = np.percentile(D, 2)
vmax = np.percentile(D, 98)
# plot the matrix, 5% extra bounds above and below on colour range
im = axmatrix.matshow(D, aspect='auto', origin='lower', cmap='PuRd',
norm=colors.Normalize(vmin=vmin, vmax=vmax))
axmatrix.set_xticks([])
axmatrix.set_yticks([])
# try and get some useful axis labels
#xticks = np.linspace(0, max(bins) - max(bins) % step_size, 5)
#print xticks
print '\tticks will be every {}x{} = {} bp'.format(step_size, tick_spc, tick_spc*step_size)
xticks = np.arange(0, len(bins), tick_spc) # every 100 bins
axmatrix.set_xticks(xticks)
axmatrix.set_xticklabels(xticks * step_size/1000)
axmatrix.set_yticks(range(D.shape[0]))
axmatrix.set_yticklabels(np.array(names)[idx], minor=False, )
axmatrix.xaxis.set_label_position('bottom')
axmatrix.xaxis.tick_bottom()
# Plot colorbar.
fig.colorbar(im, cax=axcolor, orientation='horizontal')
if save_plot:
plt.savefig('{}_hclust.pdf'.format(outname), bbox_inches='tight')
pd.DataFrame(D.T, columns=names).to_csv('{}_dat.csv'.format(outname))
else:
plt.show()
plt.close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--use-polo', action='store_true', default=False, help='Enable optimal leaf ordering')
parser.add_argument('-b', '--bin-size', default=5000, type=int, help='Bin size in BP [5000]')
parser.add_argument('-t', '--tick-spacing', default=100, type=int, help='Tick spacing in bins [100]')
parser.add_argument('input_dir', help='Input directory containing coverage files')
parser.add_argument('output_file', help='Output filename')
args = parser.parse_args()
print 'Reading and binning coverage files from {}'.format(args.input_dir)
meds, bins = get_block_medians(args.input_dir, step_size=args.bin_size)
print 'Clustering and plotting...'
run_hclust(args.output_file, meds, bins[0], args.bin_size, args.tick_spacing, save_plot=True, use_polo=args.use_polo)
| [
"maxcummins@MacBook-Air-8.local"
] | maxcummins@MacBook-Air-8.local |
ba69e9897c4199039f0aede645cd587dc089c2ea | 1747054ce51f1775870b8135adfdb666028f7305 | /tree/sum_of_all_nodes.py | bfd435ff5ffc808da0033e2eb9640bc7bc4efa9a | [] | no_license | anshulgera/DSA | ac609b48383ebf5309e2017cc3e8344a83135b89 | 19ac5ad05460a3f707667bbc978223a0a55c8c3a | refs/heads/master | 2020-07-24T20:32:32.619357 | 2019-12-15T06:18:59 | 2019-12-15T06:18:59 | 208,040,935 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py | # Approach 1: divide the problem => sum of all nodes equals
# = current node data + leftsubtree sum + rightsubtree sum
# Apporach 2: level order traversal
# Approach 3: Iterative traversal-pre, in, post
from node import example_tree
def preorder(root):
if root:
print(root.data, end=", ")
preorder(root.left)
preorder(root.right)
def sum_of_all_nodes_inorder(node):
if not root:
print("Root is None")
return
stack = list()
current = root
total = 0
while True:
if current:
stack.append(current)
current = current.left
elif stack:
current = stack.pop()
total += current.data
current = current.right
else:
return total
def sum_of_all_nodes(node):
if not node:
return 0
return node.data + sum_of_all_nodes(node.left) + sum_of_all_nodes(node.right)
# ----------------------
# construct tree
root = example_tree()
s = sum_of_all_nodes(root)
print(f"from sum of all nodes {s}")
s = sum_of_all_nodes_inorder(root)
print(f"from sum of all nodes inorder {s}") | [
"anshul.gera@jda.com"
] | anshul.gera@jda.com |
d25fd414d0cc93dd4bdd265401237349de38f3c9 | 23636b8144e37bb101c0d7e823f107f396c694dc | /Andrii-Lysyi/CodeWars-18.py | 0f6b7594f73748f607b4524cd344e56f9bf1b279 | [] | no_license | Andros1303/Andrew-Lusi | 437b78cf61a7c04b61d6072ea5d14bba9e76c15f | 8d25a149f0206c3b9a67c9aa6d2e241df4881c36 | refs/heads/master | 2020-05-17T20:00:27.575961 | 2019-06-05T17:11:11 | 2019-06-05T17:11:11 | 183,932,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | def list_animals(animals):
list = ''
for i in range(len(animals)):
list += str(i + 1) + '. ' + animals[i] + '\n'
return list
# Your collegue wrote an simple loop to list his favourite animals.
# But there seems to be a minor mistake in the grammar, which prevents the program to work. Fix it! :)
# If you pass the list of your favourite animals to the function, you should get the list of the animals with orderings
# and newlines added. | [
"noreply@github.com"
] | Andros1303.noreply@github.com |
fc0fecc42711bdc8005c76234c04e40af133500a | 3a9f2b3d79cf214704829427ee280f4b49dca70a | /saigon/rat/RuckusAutoTest/scripts/zd/ats_ZD_Combo_CLI_Application_Visibility_Standard_OPEN.py | 4c9c5d9a1708723af600c8124e9f074a6b2e5ab3 | [] | no_license | jichunwei/MyGitHub-1 | ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791 | f826fc89a030c6c4e08052d2d43af0b1b4b410e3 | refs/heads/master | 2021-01-21T10:19:22.900905 | 2016-08-20T03:34:52 | 2016-08-20T03:34:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,649 | py | """
Created on 2014-06
@author: chen.tao@odc-ruckuswireless.com
"""
import sys
import random
import libZD_TestSuite as testsuite
from RuckusAutoTest.common import lib_KwList as kwlist
from RuckusAutoTest.common import Ratutils as utils
def define_wlan_cfg():
    """Build the WLAN configuration dict shared by all test cases in this suite.

    A fresh random passphrase (8-63 hex chars) is generated on every call,
    and Application Visibility is enabled on the WLAN.

    NOTE(review): the suite name says OPEN, but this config authenticates
    with WPA2-PSK/AES -- confirm this is intentional.
    """
    passphrase = utils.make_random_string(random.randint(8, 63), "hex")
    return {
        'ssid': 'Application_Visibility_OPEN',
        'type': 'standard',
        'auth': 'PSK',
        'wpa_ver': 'WPA2',
        'encryption': 'AES',
        'key_index': '',
        'key_string': passphrase,
        'sta_auth': 'PSK',
        'sta_wpa_ver': 'WPA2',
        'sta_encryption': 'AES',
        'enable_application_visibility': True,
    }
def define_test_cfg(cfg,enable_tunnel):
test_cfgs = []
target_ip_addr = '172.16.10.252'
target_addr_for_denial_policy = 'www.example.net'
radio_mode = cfg['radio_mode']
sta_radio_mode = radio_mode
if sta_radio_mode == 'bg':
sta_radio_mode = 'g'
sta_tag = 'sta%s' % radio_mode
ap_tag = 'ap%s' % radio_mode
wlan_cfg = define_wlan_cfg()
case_name_suffix = ''
if enable_tunnel:
wlan_cfg['do_tunnel'] = True
case_name_suffix = '_with_tunnel'
test_name = 'CB_ZD_Remove_All_Wlans'
common_name = 'Remove all the WLANs from ZD'
test_cfgs.append(({}, test_name, common_name, 0, False))
test_name = 'CB_ZD_Create_Station'
common_name = 'Create target station'
test_cfgs.append(({'sta_ip_addr':cfg['target_station'],
'sta_tag': sta_tag}, test_name, common_name, 0, False))
test_name = 'CB_Station_Remove_All_Wlans'
common_name = 'Remove all WlANs from station'
test_cfgs.append(({'sta_tag': sta_tag}, test_name, common_name, 0, False))
test_name = 'CB_ZD_Config_AP_Radio'
common_name = 'Config All APs Radio - Disable WLAN Service'
test_params = {'cfg_type': 'init',
'all_ap_mac_list': cfg['all_ap_mac_list']}
test_cfgs.append((test_params, test_name, common_name, 0, False))
test_name = 'CB_ZD_Create_Active_AP'
common_name = 'Create active AP'
test_cfgs.append(({'active_ap':cfg['active_ap'],
'ap_tag': ap_tag}, test_name, common_name, 0, False))
test_name = 'CB_ZD_Config_AP_Radio'
common_name = 'Config active AP Radio %s - Enable WLAN Service' % (radio_mode)
test_params = {'cfg_type': 'config',
'ap_tag': ap_tag,
'ap_cfg': {'radio': radio_mode, 'wlan_service': True},
}
test_cfgs.append((test_params, test_name, common_name, 0, False))
test_name = 'CB_ZD_CLI_Application_Visibility_Init_Env'
common_name = 'Try to delete all application visibility rules.'
test_params = {}
test_cfgs.append((test_params, test_name, common_name, 0, False))
#testcase 1
test_case_name = '[user_app_open_standard%s]'%case_name_suffix
user_app_rule_cfg = {'rule_description':'user_app_open_none',
'dest_ip':target_ip_addr,
'dest_port':'12345',
'netmask':'255.255.255.0',
'protocol':'udp'}
test_name = 'CB_ZD_CLI_Add_User_Defined_App'
common_name = '%s Add a user app.'% (test_case_name)
test_params = {'user_app_cfg':[user_app_rule_cfg],
'negative': False,}
test_cfgs.append((test_params,test_name, common_name, 1, False))
test_name = 'CB_ZD_Create_Wlan'
common_name = '%sCreate WLAN on ZD'% (test_case_name)
test_cfgs.append(({'wlan_cfg_list':[wlan_cfg],
'enable_wlan_on_default_wlan_group': True,
}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Associate_Station_1'
common_name = '%sAssociate the station to the WLAN'% (test_case_name)
test_cfgs.append(({'wlan_cfg': wlan_cfg,
'sta_tag': sta_tag}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Get_Station_Wifi_Addr_1'
common_name = '%sGet WiFi address of the station'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Verify_Station_Info_V2'
common_name = '%sVerify client information Authorized status in ZD'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag,
'ap_tag': ap_tag,
'status': 'Authorized',
'wlan_cfg': wlan_cfg,
'radio_mode':sta_radio_mode,},
test_name, common_name, 2, False))
test_name = 'CB_ZD_Client_Ping_Dest'
common_name = '%sVerify station pings to the server successfully'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag,
'condition': 'allowed',
'target': target_ip_addr}, test_name, common_name, 2, False))
test_name = 'CB_Server_Start_Iperf'
common_name = '%sStart iperf server on linux PC'% (test_case_name)
test_cfgs.append(({'server_addr':'',
'test_udp': True,
'packet_len':'',
'bw':'',
'timeout':'',
'tos':'',
'multicast_srv':False,
'port':12345 }, test_name, common_name, 2, False))
test_name = 'CB_Station_Start_Iperf'
common_name = '%sStart iperf client and send traffic to server'% (test_case_name)
test_cfgs.append(({'sta_tag':sta_tag,
'server_addr':target_ip_addr,
'test_udp': True,
'packet_len':'',
'bw':'',
'timeout':60,
'tos':'',
'multicast_srv':False,
'port':12345 }, test_name, common_name, 2, False))
test_name = 'CB_Server_Stop_Iperf'
common_name = '%sStop iperf server on linux PC'% (test_case_name)
test_cfgs.append(({}, test_name, common_name, 2, False))
test_name = 'CB_Station_Stop_Iperf'
common_name = '%sStop iperf client on station'% (test_case_name)
test_cfgs.append(({'sta_tag':sta_tag}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Verify_Application_Visibility_Info'
common_name = '%sVerify application info in Monitor Clients page'% (test_case_name)
test_cfgs.append(({'application_description':'user_app_open_none'}, test_name, common_name, 2, False))
test_name = 'CB_Station_Remove_All_Wlans'
common_name = '%sRemove all WlANs from station'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag}, test_name, common_name, 2, True))
test_name = 'CB_ZD_Remove_All_Wlans'
common_name = '%sRemove all the WLANs from ZD'% (test_case_name)
test_cfgs.append(({}, test_name, common_name, 2, True))
test_name = 'CB_ZD_CLI_Del_User_Defined_App'
common_name = '%s Delete all user apps.'% (test_case_name)
test_params = {}
test_cfgs.append((test_params,test_name, common_name, 2, True))
#testcase 2
test_case_name = '[port_mapping_open_standard%s]'%case_name_suffix
port_mapping_rule_cfg = {'rule_description':'port_mapping_open_none','protocol':'udp','port':'54321'}
test_name = 'CB_ZD_CLI_Add_Port_Mapping_Policy'
common_name = '%s Add a port mapping rule.'% (test_case_name)
test_params = {'port_mapping_cfg':[port_mapping_rule_cfg],}
test_cfgs.append((test_params,test_name, common_name, 1, False))
test_name = 'CB_ZD_Create_Wlan'
common_name = '%sCreate WLAN on ZD'% (test_case_name)
test_cfgs.append(({'wlan_cfg_list':[wlan_cfg],
'enable_wlan_on_default_wlan_group': True,
}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Associate_Station_1'
common_name = '%sAssociate the station to the WLAN'% (test_case_name)
test_cfgs.append(({'wlan_cfg': wlan_cfg,
'sta_tag': sta_tag}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Get_Station_Wifi_Addr_1'
common_name = '%sGet WiFi address of the station'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Verify_Station_Info_V2'
common_name = '%sVerify client information Authorized status in ZD'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag,
'ap_tag': ap_tag,
'status': 'Authorized',
'wlan_cfg': wlan_cfg,
'radio_mode':sta_radio_mode,},
test_name, common_name, 2, False))
test_name = 'CB_ZD_Client_Ping_Dest'
common_name = '%sVerify station pings to the server successfully'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag,
'condition': 'allowed',
'target': target_ip_addr}, test_name, common_name, 2, False))
test_name = 'CB_Server_Start_Iperf'
common_name = '%sStart iperf server on linux PC'% (test_case_name)
test_cfgs.append(({'server_addr':'',
'test_udp': True,
'packet_len':'',
'bw':'',
'timeout':'',
'tos':'',
'multicast_srv':False,
'port':54321 }, test_name, common_name, 2, False))
test_name = 'CB_Station_Start_Iperf'
common_name = '%sStart iperf client and send traffic to server'% (test_case_name)
test_cfgs.append(({'sta_tag':sta_tag,
'server_addr':target_ip_addr,
'test_udp': True,
'packet_len':'',
'bw':'',
'timeout':60,
'tos':'',
'multicast_srv':False,
'port':54321 }, test_name, common_name, 2, False))
test_name = 'CB_Server_Stop_Iperf'
common_name = '%sStop iperf server on linux PC'% (test_case_name)
test_cfgs.append(({}, test_name, common_name, 2, False))
test_name = 'CB_Station_Stop_Iperf'
common_name = '%sStop iperf client on station'% (test_case_name)
test_cfgs.append(({'sta_tag':sta_tag}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Verify_Application_Visibility_Info'
common_name = '%sVerify application info in Monitor Clients page'% (test_case_name)
test_cfgs.append(({'application_description':'port_mapping_open_none',}, test_name, common_name, 2, False))
test_name = 'CB_Station_Remove_All_Wlans'
common_name = '%sRemove all WlANs from station'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag}, test_name, common_name, 2, True))
test_name = 'CB_ZD_Remove_All_Wlans'
common_name = '%sRemove all the WLANs from ZD'% (test_case_name)
test_cfgs.append(({}, test_name, common_name, 2, True))
test_name = 'CB_ZD_CLI_Del_Port_Mapping_Policy'
common_name = '%s Delete all port mapping policies.'% (test_case_name)
test_params = {}
test_cfgs.append((test_params,test_name, common_name, 2, True))
#testcase 3
test_case_name = '[denial_policy_open_standard%s]'%case_name_suffix
denial_policy_cfg = {'policy_description': 'test_app_denial_policy',
'policy_name': 'test_app_denial_policy',
'rules': [{'application': 'Port', 'rule_description': 80, 'rule_id': 1},
{'application': 'HTTP hostname', 'rule_description': 'www.example.net', 'rule_id': 2}]}
test_name = 'CB_ZD_CLI_Add_App_Denial_Policy'
common_name = '%s Add a denial policy.'% (test_case_name)
test_params = {'denial_policy_cfg':[denial_policy_cfg],}
test_cfgs.append((test_params,test_name, common_name, 1, False))
test_name = 'CB_ZD_Create_Wlan'
common_name = '%sCreate WLAN on ZD'% (test_case_name)
test_cfgs.append(({'wlan_cfg_list':[wlan_cfg],
'enable_wlan_on_default_wlan_group': True,
}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Associate_Station_1'
common_name = '%sAssociate the station to the WLAN'% (test_case_name)
test_cfgs.append(({'wlan_cfg': wlan_cfg,
'sta_tag': sta_tag}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Get_Station_Wifi_Addr_1'
common_name = '%sGet WiFi address of the station'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Verify_Station_Info_V2'
common_name = '%sVerify client information Authorized status in ZD'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag,
'ap_tag': ap_tag,
'status': 'Authorized',
'wlan_cfg': wlan_cfg,
'radio_mode':sta_radio_mode,},
test_name, common_name, 2, False))
test_name = 'CB_Station_Ping_Dest_Is_Allowed'
common_name = '%sVerify station pinging to the server succeeds'%(test_case_name)
test_cfgs.append(({'sta_tag': sta_tag, 'dest_ip': target_addr_for_denial_policy,}, test_name, common_name, 2, False))
test_name = 'CB_Station_Connect_To_Server_Port'
common_name = "%sVerify station connecting to server's port succeeds"%(test_case_name)
test_cfgs.append(({'sta_tag': sta_tag, 'server_ip': '172.16.10.252','dest_port':80}, test_name, common_name, 2, False))
#edit wlan to enable denial policy
test_name = 'CB_ZD_Edit_Wlan'
common_name = '%sEdit wlan, to select a denial policy' % test_case_name
param_cfg = {'wlan_ssid': wlan_cfg['ssid'], 'new_wlan_cfg': {'application_denial_policy':'test_app_denial_policy'}}
test_cfgs.append((param_cfg,test_name, common_name, 2, False))
test_name = 'CB_Station_Ping_Dest_Is_Denied'
common_name = '%sVerify station pinging to the server fails'%(test_case_name)
test_cfgs.append(({'sta_tag': sta_tag, 'dest_ip': target_addr_for_denial_policy,}, test_name, common_name, 2, False))
test_name = 'CB_Station_Connect_To_Server_Port'
common_name = "%sVerify station connecting to server's port fails"%(test_case_name)
test_cfgs.append(({'sta_tag': sta_tag, 'server_ip': '172.16.10.252','dest_port':80,'negative':True}, test_name, common_name, 2, False))
test_name = 'CB_Station_Remove_All_Wlans'
common_name = '%sRemove all WlANs from station'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag}, test_name, common_name, 2, True))
test_name = 'CB_ZD_Remove_All_Wlans'
common_name = '%sRemove all the WLANs from ZD'% (test_case_name)
test_cfgs.append(({}, test_name, common_name, 2, True))
test_name = 'CB_ZD_CLI_Del_App_Denial_Policy'
common_name = '%s Delete all denial policies.'% (test_case_name)
test_params = {}
test_cfgs.append((test_params,test_name, common_name, 2, True))
#clean_up
test_name = 'CB_ZD_Config_AP_Radio'
common_name = 'Config All APs Radio - Enable WLAN Service'
test_params = {'cfg_type': 'teardown',
'all_ap_mac_list': cfg['all_ap_mac_list']}
test_cfgs.append((test_params, test_name, common_name, 0, True))
test_name = 'CB_Station_Remove_All_Wlans'
common_name = 'Remove all WlANs from station for the next test'
test_cfgs.append(({'sta_tag': sta_tag}, test_name, common_name, 0, True))
test_name = 'CB_ZD_CLI_Application_Visibility_Init_Env'
common_name = 'Try to delete all application visibility rules for next test.'
test_params = {}
test_cfgs.append((test_params, test_name, common_name, 0, True))
return test_cfgs
def check_max_length(test_cfgs):
    """Raise an Exception if any entry's common_name exceeds 120 characters.

    Each entry of *test_cfgs* is a tuple of
    (test_params, testname, common_name, exc_level, is_cleanup).
    """
    for entry in test_cfgs:
        case_name = entry[1]
        shared_name = entry[2]
        if len(shared_name) > 120:
            raise Exception('common_name[%s] in case [%s] is too long, more than 120 characters' % (shared_name, case_name))
def check_duplicated_common_name(test_cfgs):
    """Return True if any common_name appears more than once in test_cfgs.

    Duplicated names are echoed to stdout between '#' banners so they are
    easy to spot in the console output.

    Bug fix: on finding a duplicate the flag was previously reassigned to
    False, so the function unconditionally returned False; it now returns
    True when at least one duplicate exists.
    """
    common_name_list = []
    duplicate_flag = False
    for test_params, testname, common_name, exc_level, is_cleanup in test_cfgs:
        if common_name in common_name_list:
            duplicate_flag = True  # fixed: was mistakenly set to False here
            print('####################')
            print(common_name)
            print('####################')
        else:
            common_name_list.append(common_name)
    return duplicate_flag
def createTestSuite(**kwargs):
    """Build the 'Application_Visibility - Standard - OPEN' combo test suite.

    In interactive mode the operator picks the active AP, target station and
    radio; otherwise they come from kwargs. The generated test configs are
    length/duplicate checked and then added to the testsuite database.
    """
    ts_cfg = dict(interactive_mode=True,
                  station=(0, "g"),
                  targetap=False,
                  testsuite_name="",
                  )
    ts_cfg.update(kwargs)
    # resolve the testbed and its station/AP inventory
    mtb = testsuite.getMeshTestbed(**kwargs)
    tbcfg = testsuite.getTestbedConfig(mtb)
    sta_ip_list = tbcfg['sta_ip_list']
    ap_sym_dict = tbcfg['ap_sym_dict']
    all_ap_mac_list = tbcfg['ap_mac_list']
    if ts_cfg["interactive_mode"]:
        print '\nOnly the following AP models support application visibility:'
        print '    1.ZF-7762-AC,ZF-7762-S-AC'
        print '    2.ZF-7782,ZF-7782-s,ZF-7782-n,ZF-7782-e'
        print '    3.ZF-7982'
        print '    4.sc8800-s-ac,sc8800-s'
        print '    5.ZF-7055'
        print '    6.ZF-7352'
        print '    7.ZF-7372,ZF-7372-e'
        print '    8.ZF-7781-m,ZF-7781cm'
        active_ap_list = testsuite.getActiveAp(ap_sym_dict)
        target_sta = testsuite.getTargetStation(sta_ip_list, "Pick wireless station: ")
        target_sta_radio = testsuite.get_target_sta_radio()
    else:
        target_sta = sta_ip_list[ts_cfg["station"][0]]
        target_sta_radio = ts_cfg["station"][1]
        if kwargs["targetap"]:
            active_ap_list = sorted(ap_sym_dict.keys())
    # NOTE(review): if non-interactive and targetap is False, active_ap_list is
    # never assigned and the next line raises NameError - confirm intended usage.
    active_ap = active_ap_list[0]
    tcfg = {
        'target_station':'%s' % target_sta,
        'radio_mode': target_sta_radio,
        'active_ap':active_ap,
        'all_ap_mac_list': all_ap_mac_list,
        }
    tunnel_mode = raw_input("\n\
                      Do you want to enable tunnel to do test?\n\
                      1. Yes\n\
                      2. No\n\
                      Default selection is 2.Input your choice:")
    if tunnel_mode != '1':
        enable_tunnel = False
    else: enable_tunnel = True
    test_cfgs = define_test_cfg(tcfg,enable_tunnel)
    # sanity checks on the generated test list before writing to the DB
    check_max_length(test_cfgs)
    check_duplicated_common_name(test_cfgs)
    ts_suffix = ''
    if enable_tunnel: ts_suffix = ' - tunneled'
    if ts_cfg["testsuite_name"]:
        ts_name = ts_cfg["testsuite_name"]
    else:
        ts_name = "Application_Visibility - Standard - OPEN%s"%ts_suffix
    ts = testsuite.get_testsuite(ts_name, "Application_Visibility - Standard - OPEN%s"%ts_suffix , combotest=True)
    test_order = 1
    test_added = 0
    for test_params, testname, common_name, exc_level, is_cleanup in test_cfgs:
        if testsuite.addTestCase(ts, testname, common_name, test_params, test_order, exc_level, is_cleanup) > 0:
            test_added += 1
        test_order += 1
        # NOTE(review): '\t\common' below looks like a typo for '\t\tcommon'
        print "Add test case with test name: %s\n\t\common name: %s" % (testname, common_name)
    print "\n-- Summary: added %d test cases into test suite '%s'" % (test_added, ts.name)
# Script entry point: parse 'key=value' command-line args via kwlist and
# build the test suite from them.
if __name__ == "__main__":
    _dict = kwlist.as_dict(sys.argv[1:])
    createTestSuite(**_dict)
| [
"tan@xx.com"
] | tan@xx.com |
2e5e3e67e1ff2834fbde5eafac226a339de6d857 | a471266944a3227f458234388ad7a5fa091a0f89 | /rsg_selct.py | 4f68c558fce1c50a69b7e47ca88e80aa7c78c6d5 | [] | no_license | rubab1/RSG-Select | 51fff92f3b963db9bce1b041f9539ee3510e221b | e652b0d215cc1f70251ac9cdca293659f5715474 | refs/heads/master | 2020-03-23T08:52:46.868439 | 2018-07-17T22:51:39 | 2018-07-17T22:51:39 | 141,353,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,750 | py | import numpy as np
from astropy.io import fits
from astropy.io import ascii
from astropy.table import Table
from scipy.spatial import cKDTree
rad = 0.5 # matching radii, arcsec (cross-match tolerance between catalogs)
err = 0.3 # mag error, Vega (maximum allowed photometric uncertainty per band)
crd = 0.5 # crowding effect, Vega (maximum allowed crowding correction per band)
def DoAll(infile='14786_N6846-SN2017.gst.fits',outfile='targets1'):
    """Cross-match photometry in *infile* against the 'n6946_CAT' reference
    catalog and write the merged target list to <outfile>.txt.

    Only well-measured reference entries and very red, bright-enough sources
    from *infile* are kept; sources with no reference match within `rad`
    arcsec are dropped.
    """
    # NOTE(review): column meanings (RA, Dec, then mag/err/crowd per band)
    # are inferred from how they are filtered below - confirm 'n6946_CAT' format.
    ref = ascii.read('n6946_CAT')
    tol = rad/3600
    filt = ['F606W','F814W']
    ra2,dec2,mm1,ee1,dd1,mm2,ee2,dd2 = ref['col1'],ref['col2'],ref['col3'],ref['col4'],ref['col5'],ref['col6'],ref['col7'],ref['col8']
    # keep reference entries with small errors and low crowding in both bands
    t = (ee1<err)&(ee2<err)&(dd1<crd)&(dd2<crd)
    ra2,dec2,mm1,ee1,mm2,ee2=ra2[t],dec2[t],mm1[t],ee1[t],mm2[t],ee2[t]
    X,Y,RA,DEC,vega_mags,mag_errors,detector = read_phot_fits_table(infile,filt)
    # select red sources: F606W-F814W > 2, with F606W < 29 and F814W < 24.5
    t = (((vega_mags[0]-vega_mags[1])>2)&(vega_mags[0]<29)&(vega_mags[1]<24.5))
    ra1,dec1,mag1,mag2,err1,err2 = RA[t],DEC[t],vega_mags[0][t],vega_mags[1][t],mag_errors[0][t],mag_errors[1][t]
    # match each selected source to the nearest reference entry within tol (deg);
    # matchLists returns -1 for unmatched sources
    in1 = matchLists(tol,ra1,dec1,ra2,dec2)
    # drop unmatched sources and align the reference photometry to the matches
    ra1,dec1,mag1,mag2,err1,err2 = ra1[in1!=-1],dec1[in1!=-1],mag1[in1!=-1],mag2[in1!=-1],err1[in1!=-1],err2[in1!=-1]
    mm1,mm2,ee1,ee2 = mm1[in1[in1!=-1]],mm2[in1[in1!=-1]],ee1[in1[in1!=-1]],ee2[in1[in1!=-1]]
    return write_target_list(ra1,dec1,mag1,err1,mag2,err2,mm1,ee1,mm2,ee2,outfile)
def read_phot_fits_table(filename, filt):
    """Read a photometry FITS table and extract positions plus two-band photometry.

    Returns (X, Y, RA, DEC, vega_mags, mag_errors, detector), where vega_mags
    and mag_errors are two-element lists ordered like *filt*.
    """
    hdus = fits.open(filename)
    detector = hdus[0].header['CAMERA']
    data = hdus[1].data
    del hdus
    vega_mags = [data[filt[0] + '_VEGA'], data[filt[1] + '_VEGA']]
    mag_errors = [data[filt[0] + '_ERR'], data[filt[1] + '_ERR']]
    return (data['X'], data['Y'], data['RA'], data['DEC'],
            vega_mags, mag_errors, detector)
def matchLists(tol, x1, y1, x2, y2):
    """Quick nearest-neighbour match between two coordinate lists.

    For each point in list 1, return the index of the closest point in
    list 2 lying within *tol*; entries with no match within *tol* are -1.
    """
    # pack each coordinate list into an (N, 2) array for the KD-tree
    pts1 = np.empty((x1.size, 2))
    pts2 = np.empty((x2.size, 2))
    pts1[:, 0], pts1[:, 1] = x1, y1
    pts2[:, 0], pts2[:, 1] = x2, y2
    tree = cKDTree(pts2)
    dist, in1 = tree.query(pts1, distance_upper_bound=tol)
    # cKDTree reports "no neighbour within bound" as index == len(list 2)
    in1[in1 == x2.size] = -1
    return in1
def write_target_list(ra, dec, m1, err1, m2, err2, m3, err3, m4, err4, fileroot):
    """Write the matched target list to '<fileroot>.txt' as a fixed-width table."""
    outfile = fileroot + '.txt'
    columns = [ra, dec, m1, err1, m2, err2, m3, err3, m4, err4]
    names = ('RA', 'DEC', 'F606W', 'ER606', 'F814W', 'ER814',
             'IRAC1', 'ERR36', 'IRAC2', 'ERR45')
    # coordinates get 7 decimal places, all magnitudes/errors get 3
    formats = {}
    for col in names:
        formats[col] = '%12.7f' if col in ('RA', 'DEC') else '%8.3f'
    table = Table(columns, names=names)
    ascii.write(table, outfile, format='fixed_width', delimiter='',
                formats=formats, overwrite=True)
    return print('Wrote out: ', outfile)
### Produce a target list for each photometry catalog below.
DoAll('14786_N6846-SN2017.gst.fits','targets1')
DoAll('14786_N6946-111ne-26900.gst.fits','targets2')
DoAll('20180119_Vanisher.gst.fits','targets3')
| [
"rubab@uw.edu"
] | rubab@uw.edu |
ce99e765ea48053b039f968a4a801368104e5c6f | 485d62d8adb88925535fc57ec8eb127db5012767 | /venv/bin/wheel | dfde5beee5436d07a85e08a2ff772f82ec815212 | [] | no_license | MariomcgeeArt/Tweet-gen | f9995e1350413f23e1f32675d35a47bdf3cb81f3 | 619c6f7fb184b0458121193923757c866a64acaa | refs/heads/master | 2021-01-03T03:22:28.599007 | 2020-03-09T17:27:11 | 2020-03-09T17:27:11 | 239,900,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | #!/Users/makeschoolloaner/Desktop/devloper/ms/Tweet/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
# setuptools console-script shim: strip the platform script suffix from
# argv[0], then delegate to wheel's CLI entry point and exit with its status.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"mario.mcgee@students.makeschool.com"
] | mario.mcgee@students.makeschool.com | |
871284d24db3cfaf05336d9c7b5bd82940d854ca | 7eecd923b2b34d1888ca97d95af21ef0e4bb6d29 | /source/generator/multiplication.py | 03c2f79153cc04daad2c9b93144f81d0479e48c1 | [] | no_license | Az2Poivron/piwar | b641acd1a274949ec03fd6187ad3d3511c62f7c5 | f5015e9e9649713addbded2bc87f82ed51169a14 | refs/heads/main | 2023-05-03T10:07:42.295623 | 2021-05-30T16:46:59 | 2021-05-30T16:46:59 | 372,017,734 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | from random import randint as rdm
class Multiplication:
    """Random multiplication exercise of the form ``a * b = c``.

    One of the three terms is replaced by '?' in the rendered question
    (``self.str``); the hidden value is kept in ``self.answer``.
    """

    condition = "MULTIPLICATION_ENABLE"

    def __init__(self, rule):
        """Draw both factors from the configured ranges and hide one term."""
        factor_a = rdm(rule["MULTIPLICATION_a_min"], rule["MULTIPLICATION_a_max"])
        factor_b = rdm(rule["MULTIPLICATION_b_min"], rule["MULTIPLICATION_b_max"])
        product = factor_a * factor_b
        terms = [factor_a, factor_b, product]
        # with MULTIPLICATION_equa any of the three terms may be hidden;
        # otherwise the product is always the unknown
        hidden = rdm(0, 2) if rule["MULTIPLICATION_equa"] else 2
        self.answer = terms[hidden]
        terms[hidden] = "?"
        self.str = f"{terms[0]} x {terms[1]} = {terms[2]}"
"«monnomctom@gmail.com»"
] | «monnomctom@gmail.com» |
5d00ab45b31431b87c4bc649952d35c2f2a94f9a | 2b249f2be3a06b647c58ed09f14d1c0cc77b0309 | /cluster/pkg/swarmer.py | 059f3d7ad0f2e3b332823c5df2ff5527b94be16d | [] | permissive | Hrishi5/ACI-EnhancedEndpointTracker | 7dad677146ae4a26e1e2f212ad6e6eead92e3513 | a4de84c5fc00549e6539dbc1d8d927c74a704dcc | refs/heads/2.0 | 2020-04-01T18:18:24.119980 | 2018-12-14T06:17:15 | 2018-12-14T06:17:15 | 148,533,258 | 0 | 0 | MIT | 2018-09-12T19:46:19 | 2018-09-12T19:46:19 | null | UTF-8 | Python | false | false | 22,407 | py |
from .connection import Connection
from .lib import run_command
from .lib import pretty_print
import getpass
import json
import logging
import re
import time
# module level logging
logger = logging.getLogger(__name__)
class Swarmer(object):
    """Build out and initialize the app's docker swarm cluster.

    Validates/initializes swarm membership on the local node (which must be
    node-1, the manager), joins remote workers over ssh, deploys the compose
    stack, and initializes the mongo config/shard replica sets.
    """

    def __init__(self, config, username=None, password=None):
        """Bind to a ClusterConfig instance with optional ssh credentials."""
        # receives instance of ClusterConfig
        self.config = config
        self.username = username
        self.password = password
        self.nodes = {}
        self.node_id = None # local node-id
        self.node_addr = None # local node-addr
        self.node_socket = None # local node-addr+port for registering worker nodes
        self.token = None # registration token
        # reindex config.nodes with string id's to match against string labels
        config_nodes = {}
        for nid in self.config.nodes:
            config_nodes["%s" % nid] = self.config.nodes[nid]
        self.config.nodes = config_nodes

    def get_credentials(self):
        """Interactively collect ssh credentials if not already provided."""
        # prompt user for username/password if not previously provided
        while self.username is None or len(self.username)==0:
            self.username = raw_input("Enter ssh username: ").strip()
        while self.password is None or len(self.password)==0:
            self.password = getpass.getpass("Enter ssh password: ").strip()

    def get_connection(self, hostname):
        """Return a logged-in ssh Connection to *hostname*; raise on failure."""
        # return ssh connection object to provided hostname, raise exception on error
        logger.debug("get connection to %s", hostname)
        self.get_credentials()
        c = Connection(hostname)
        c.username = self.username
        c.password = self.password
        c.protocol = "ssh"
        c.port = 22
        c.prompt = "[#>\$] *$"
        if not c.login(max_attempts=3):
            raise Exception("failed to connect to node %s@%s" % (self.username, hostname))
        return c

    def init_swarm(self):
        """Ensure the local node is the swarm manager (node-1) and join any
        configured workers that are not yet part of the swarm."""
        # determine the swarm status of this node. If in a swarm but not the manager, raise an error
        # If in a swarm AND the manager, then validate status matches config.
        # If in not in a swarm, then assume this non-initialized system
        js = self.get_swarm_info()
        self.node_id = js["NodeID"]
        self.node_addr = js["NodeAddr"]
        managers = js["RemoteManagers"]
        manager_addr = None
        if len(self.node_id) > 0:
            logger.debug("node %s is part of an existing swarm", self.node_addr)
            self.set_node_socket(managers)
            if self.node_socket is None:
                err_msg = "This node is not a docker swarm manager. "
                err_msg+= "Please execute on the node-1"
                raise Exception(err_msg)
        else:
            # need to initialize this node as a swarm master
            logger.info("initializing swarm master")
            if not run_command("docker swarm init"):
                raise Exception("failed to initialize node as swarm master")
            # get new swarm info
            js = self.get_swarm_info()
            self.node_id = js["NodeID"]
            self.node_addr = js["NodeAddr"]
            managers = js["RemoteManagers"]
            self.set_node_socket(managers)
            if self.node_socket is None:
                raise Exception("failed to init swarm manager, no Addr found in RemoteManagers")
        # validated that swarm is initialized and we're executing on a manager node. Need to get
        # token for deploying to new workers
        token = run_command("docker swarm join-token worker -q")
        if token is None:
            raise Exception("failed to get swarm token from manager")
        self.token = token.strip()
        logger.debug("swarm token: %s", self.token)
        # get list of current nodes IDs
        self.get_nodes()
        lnode = self.nodes.get(self.node_id, None)
        if lnode is None:
            raise Exception("unable to find local id %s in docker nodes", self.node_id)
        # check label for current node is '1', if not add it
        node_label = lnode.labels.get("node", None)
        if node_label is None:
            logger.debug("adding label '1' to local node")
            cmd = "docker node update --label-add node=1 %s" % self.node_id
            if run_command(cmd) is None:
                raise Exception("failed to add docker node label node=1 to %s" % self.node_id)
            lnode.labels["node"] = "1"
        elif "%s"%node_label != "1":
            err_msg = "This node(%s) has node-id set to %s. Please run on node-1" % (
                self.node_id, node_label)
            raise Exception(err_msg)
        else:
            logger.debug("node(%s) already assigned with label 1", self.node_id)
        # index nodes by addr and label id, raise error on duplicate
        index_addr = {}
        index_label = {}
        for nid in self.nodes:
            n = self.nodes[nid]
            if n.addr in index_addr:
                raise Exception("duplicate docker node address: %s between %s and %s" % (n.addr,
                    index_addr[n.addr].node_id, nid))
            node_label = n.labels.get("node", None)
            if node_label is None:
                # existing node without a label should not exists, we could try to fix it here but
                # that's a bit out of scope. Will force user to manually fix it for now...
                err_msg = "Node(%s) exists within swarm but does not have a label. " % nid
                err_msg+= "Manually add the appropriate id label via:\n"
                err_msg+= "    docker node update --label-add node=<id> %s" % nid
                raise Exception(err_msg)
            node_label = "%s" % node_label
            if node_label in index_label:
                raise Exception("duplicate docker label node=%s between %s and %s" % (node_label,
                    index_label[node_label].node_id, nid))
            index_addr[n.addr] = n
            index_label[node_label] = n
        logger.debug("index_label: %s", index_label)
        # validate each node in the config or add it if missing
        for node_label in sorted(self.config.nodes):
            # already validate we're on node-id 1, never need to add 1 as worker
            if node_label == "1": continue
            hostname = self.config.nodes[node_label]["hostname"]
            if node_label not in index_label:
                swarm_node_id = self.add_worker(hostname, node_label)
                cmd = "docker node update --label-add node=%s %s" % (node_label, swarm_node_id)
                if run_command(cmd) is None:
                    raise Exception("failed to add docker node label node=%s to %s" % (node_label,
                        swarm_node_id))
        logger.info("docker cluster initialized with %s node(s)", len(self.config.nodes))

    def add_worker(self, hostname, nid):
        """ attempt to connect to remote node and add to docker swarm """
        # prompt user for credentials here if not set...
        logger.info("Adding worker to cluster (id:%s, hostname:%s)", nid, hostname)
        c = self.get_connection(hostname)
        cmd = "docker swarm join --token %s %s" % (self.token, self.node_socket)
        ret = c.cmd(cmd, timeout=60)
        if ret != "prompt":
            raise Exception("failed to add worker(%s) %s: %s" % (nid, hostname, ret))
        if not re.search("This node joined a swarm", c.output):
            raise Exception("failed to add worker(%s) %s: %s" % (nid, hostname, c.output))
        # hopefully node was added, grab the NodeID from the swarm and then make sure it is seen
        # on the master node (over ssh so outputs contain prompt and full command)
        cmd = "docker info --format '{{.Swarm.NodeID}}'"
        ret = c.cmd(cmd)
        if ret != "prompt":
            raise Exception("failed to determine Swarm.NodeID for worker(%s) %s" % (nid, hostname))
        for l in c.output.split("\n"):
            r1 = re.search("^(?P<node_id>[a-zA-Z0-9]{25})$", l.strip())
            if r1 is not None:
                logger.debug("Swarm.NodeID %s for worker(%s) %s", r1.group("node_id"),nid,hostname)
                return r1.group("node_id")
        raise Exception("unable to extract Swarm.NodeID for new worker(%s) %s"% (nid, hostname))

    def set_node_socket(self, managers):
        """ from docker swarm RemoteManagers list, find the socket connection (Addr) for the
            provided node_id. Return None on error
        """
        self.node_socket = None
        if managers is not None:
            for m in managers:
                if "NodeID" in m and "Addr" in m and m["NodeID"] == self.node_id:
                    logger.debug("node %s matches manager %s", self.node_id, m)
                    self.node_socket = m["Addr"]
                    return
        logger.debug("node %s not in RemoteManagers list", self.node_id)

    def get_swarm_info(self):
        """ get and validate swarm info from 'docker info' command
            return dict {
                "NodeID": "",
                "NodeAddr": "",
                "RemoteManagers": "",
            }
        """
        # get/validate swarm info from 'docker info' command. Return
        info = run_command("docker info --format '{{json .}}'")
        if info is None:
            raise Exception("failed to get docker info, is docker installed?")
        js = json.loads(info)
        logger.debug("local node docker info:%s", pretty_print(js))
        if "Swarm" not in js or "NodeID" not in js["Swarm"] or "NodeAddr" not in js["Swarm"] or \
            "RemoteManagers" not in js["Swarm"]:
            version = js.get("ServerVersion", "n/a")
            raise Exception("no Swarm info, unsupported docker version: %s" % version)
        return {
            "NodeID": js["Swarm"]["NodeID"],
            "NodeAddr": js["Swarm"]["NodeAddr"],
            "RemoteManagers": js["Swarm"]["RemoteManagers"],
        }

    def get_nodes(self):
        """ read docker nodes and update self.nodes """
        logger.debug("get docker node info")
        lines = run_command("docker node ls --format '{{json .}}'")
        if lines is None:
            raise Exception("unable to get docker node info")
        for l in lines.split("\n"):
            if len(l) == 0: continue
            try:
                logger.debug("node: %s", l)
                node = DockerNode(**json.loads(l))
                if node.node_id is not None:
                    self.nodes[node.node_id] = node
                    logger.debug("new node: %s", node)
            except ValueError as e:
                logger.debug("failed to decode node: '%s'", l)

    def deploy_service(self):
        """ deploy docker service referencing config file and verify everything is running """
        logger.info("deploying app services, please wait...")
        cmd = "docker stack deploy -c %s %s" % (self.config.compose_file, self.config.app_name)
        if run_command(cmd) is None:
            raise Exception("failed to deploy stack")
        # poll service replica counts until all are up or we run out of retries
        check_count = 8
        check_interval = 15
        all_services_running = True
        while check_count > 0:
            check_count-= 1
            all_services_running = True
            # check that all the deployed services have at least one replica up
            cmd = "docker service ls --format '{{json .}}'"
            out = run_command(cmd)
            if out is None:
                raise Exception("failed to validate services are running")
            for l in out.split("\n"):
                if len(l.strip()) == 0: continue
                try:
                    js = json.loads(l)
                    if re.search("^%s_" % re.escape(self.config.app_name), js["Name"]):
                        replicas = re.search("(?P<c>[0-9]+)/(?P<t>[0-9]+)",js["Replicas"])
                        if replicas is not None:
                            if int(replicas.group("c")) < int(replicas.group("t")):
                                err_msg = "failed to deploy service %s (%s/%s)" % (js["Name"],
                                    replicas.group("c"), replicas.group("t"))
                                # if this is last check iteration, raise an error
                                if check_count <= 0: raise Exception(err_msg)
                                all_services_running = False
                                logger.debug(err_msg)
                            logger.debug("service %s success: %s", js["Name"], js["Replicas"])
                        else:
                            logger.debug("skipping check for service %s", js["Name"])
                except (ValueError,KeyError) as e:
                    logger.warn("failed to parse docker service line: %s", l)
            if not all_services_running:
                logger.debug("one or more services pending, re-check in %s seconds", check_interval)
                time.sleep(check_interval)
            else: break
        logger.info("app services deployed")
        logger.debug("pausing for 15 seconds to give all services time to actually start")
        time.sleep(15)

    def init_db(self):
        """ need to initialize all replication sets for mongo db based on user config
            ssh to intended replica primary (replica 0) and initialize replica
        """
        self.init_db_cfg()
        # pause for 15 seconds to ensure that replica set is ready
        logger.debug("pausing for 15 seconds to ensure replica is up")
        time.sleep(15)
        self.init_db_shards()

    def init_db_cfg(self):
        """ initialize cfg server replica set """
        logger.info("initialize db config replica set")
        # find all 'db_cfg' service along with replica '0' info
        rs = {"configsvr": True, "members":[]}
        db_port = None
        replica_0_node = None
        replica_0_name = None
        for svc_name in self.config.services:
            svc = self.config.services[svc_name]
            if svc.service_type == "db_cfg":
                if "_id" not in rs: rs["_id"] = svc.replica
                if svc.replica_number is None or svc.port_number is None:
                    raise Exception("service has invalid replica or port number: %s" % svc)
                host = self.config.nodes.get("%s" % svc.node, None)
                if host is None:
                    raise Exception("failed to determine host for service: %s" % svc)
                member = {
                    "_id": svc.replica_number,
                    "host": "%s:%s" % (svc_name, svc.port_number)
                }
                # replica 0 is preferred primary (higher election priority)
                if svc.replica_number == 0:
                    replica_0_node = host
                    replica_0_name = svc_name
                    db_port = svc.port_number
                    member["priority"] = 2
                else:
                    member["priority"] = 1
                rs["members"].append(member)
        if replica_0_node is None or replica_0_name is None:
            raise Exception("failed to determine replica 0 db configsrv")
        cmd = 'docker exec -it '
        cmd+= '$(docker ps -qf label=com.docker.swarm.service.name=%s_%s) ' % (
            self.config.app_name, replica_0_name)
        cmd+= 'mongo localhost:%s --eval \'rs.initiate(%s)\'' % (db_port, json.dumps(rs))
        logger.debug("initiate cfg replication set cmd: %s", cmd)
        # cfg server is statically pinned to node-1
        if "%s" % replica_0_node["id"] == "1":
            # hard to parse return json since there's other non-json characters printed so we'll
            # just search for "ok" : 1
            ret = run_command(cmd)
            if ret is None or not re.search("['\"]ok['\"] *: *1 *", ret):
                logger.warn("rs.initiate may not have completed successfully, cmd:%s\nresult:\n%s",
                    cmd, ret)
        else:
            raise Exception("expected cfg server replica 0 to be on node-1, currently on %s" % (
                replica_0_node))

    def init_db_shards(self):
        """ initialize each shard replication set on replica-0 node owner """
        logger.info("initialize db shards")
        # get all service type db_sh and organize into replication sets
        shards = {} # indexed by shared replica-name, contains node-0 (id and hostname) along
                    # with 'rs' which is initiate dict
        for svc_name in self.config.services:
            svc = self.config.services[svc_name]
            if svc.service_type == "db_sh":
                if svc.replica_number is None or svc.port_number is None:
                    raise Exception("service has invalid replica or port number: %s" % svc)
                if svc.replica not in shards:
                    shards[svc.replica] = {
                        "node-0": None,
                        "svc_name": None,
                        "svc_port": None,
                        "rs": {"_id": svc.replica, "members":[]}
                    }
                host = self.config.nodes.get("%s" % svc.node, None)
                if host is None:
                    raise Exception("failed to determine host for service: %s" % svc)
                member = {
                    "_id": svc.replica_number,
                    "host": "%s:%s" % (svc_name, svc.port_number)
                }
                # replica 0 is preferred primary (higher election priority)
                if svc.replica_number == 0:
                    shards[svc.replica]["node-0"] = host
                    shards[svc.replica]["svc_name"] = svc.name
                    shards[svc.replica]["svc_port"] = svc.port_number
                    member["priority"] = 2
                else:
                    member["priority"] = 1
                shards[svc.replica]["rs"]["members"].append(member)
        for shard_name in shards:
            rs = shards[shard_name]["rs"]
            node_0 = shards[shard_name]["node-0"]
            if node_0 is None:
                raise Exception("failed to find replica 0 node for shard %s" % shard_name)
            cmd = 'docker exec -it '
            cmd+= '$(docker ps -qf label=com.docker.swarm.service.name=%s_%s) ' % (
                self.config.app_name, shards[shard_name]["svc_name"])
            cmd+= 'mongo localhost:%s --eval \'rs.initiate(%s)\'' % (shards[shard_name]["svc_port"],
                json.dumps(rs))
            logger.debug("command on %s: %s", node_0["id"], cmd)
            if "%s" % node_0["id"] == "1":
                # command is executed on local host
                ret = run_command(cmd)
                if ret is None or not re.search("['\"]ok['\"] *: *1 *", ret):
                    err_msg="rs.initiate may not have completed successfully for shard %s"%shard_name
                    err_msg+= ", node (id:%s, hostname:%s)" % (node_0["id"], node_0["hostname"])
                    err_msg+= "\ncmd: %s\nresult: %s" % (cmd, ret)
                    logger.warn(err_msg)
            else:
                # replica 0 lives on a remote node; run the initiate over ssh
                c = self.get_connection(node_0["hostname"])
                ret = c.cmd(cmd)
                if ret != "prompt" or not re.search("['\"]ok['\"] *: *1 *", c.output):
                    err_msg="rs.initiate may not have completed successfully for shard %s"%shard_name
                    err_msg+= ", (node id: %s, hostname: %s)" % (node_0["id"], node_0["hostname"])
                    err_msg+= "\ncmd: %s\nresult: %s" % (cmd, "\n".join(c.output.split("\n")[:-1]))
                    logger.warn(err_msg)
        # pause for 15 seconds to ensure that replica set is ready
        logger.debug("pausing for 15 seconds to ensure all replica is up")
        time.sleep(15)
        # add each shard to mongo-router - note, there's an instance of mongos with service name
        # 'db' on all nodes in the cluster so this command is always locally executed
        for shard_name in shards:
            svc_name = shards[shard_name]["svc_name"]
            svc_port = shards[shard_name]["svc_port"]
            cmd = 'docker exec -it '
            cmd+= '$(docker ps -qf label=com.docker.swarm.service.name=%s_db) '%self.config.app_name
            cmd+= 'mongo localhost:%s --eval \'sh.addShard("%s/%s:%s")\'' % (
                self.config.mongos_port, shard_name, svc_name, svc_port)
            ret = run_command(cmd)
            if ret is None or not re.search("['\"]ok['\"] *: *1 *", ret):
                err_msg="sh.addShard may not have completed successfully for shard %s"%shard_name
                err_msg+= "\ncmd: %s\nresult: %s" % (cmd, ret)
                logger.warn(err_msg)
class DockerNode(object):
    """Snapshot of a single docker swarm node.

    Basic fields come from the ``docker node ls`` json entry passed as
    kwargs; when an ID is present the node is further inspected via
    ``docker node inspect`` to fill in address, state, role and labels.
    """

    def __init__(self, **kwargs):
        self.labels = {}
        self.role = None
        self.addr = None
        self.node_id = kwargs.get("ID", None)
        self.hostname = kwargs.get("Hostname", None)
        self.availability = kwargs.get("Availability", None)
        self.status = kwargs.get("Status", None)
        if self.node_id is None:
            return
        inspect = run_command("docker node inspect %s --format '{{json .}}'" % self.node_id)
        if inspect is None:
            return
        try:
            logger.debug("inspect: %s", inspect)
            js = json.loads(inspect)
            if "Status" in js:
                node_status = js["Status"]
                if "Addr" in node_status:
                    self.addr = node_status["Addr"]
                if "State" in node_status:
                    self.status = node_status["State"]
            if "Spec" in js:
                node_spec = js["Spec"]
                if "Availability" in node_spec:
                    self.availability = node_spec["Availability"]
                if "Role" in node_spec:
                    self.role = node_spec["Role"]
                if "Labels" in node_spec:
                    if type(node_spec["Labels"]) is not dict:
                        logger.debug("invalid Labels for %s: %s", self.node_id, node_spec)
                    else:
                        self.labels = node_spec["Labels"]
        except ValueError as e:
            logger.debug("failed to decode inspect(%s): %s", self.node_id, inspect)

    def __repr__(self):
        summary = "id:%s, role:%s, addr:%s, status:%s, avail:%s, labels:%s"
        return summary % (self.node_id, self.role, self.addr, self.status,
                          self.availability, self.labels)
| [
"agossett@cisco.com"
] | agossett@cisco.com |
c5d385b41cade2187400881bf390d7ffe5eb5c55 | bd867af5245366ee0abfd0f659fcb42170fff8ca | /hackerRank/algorithms/DiagonalDifference/diagonal_difference.py | 954cf6bd5cfc0ee3735dcd2733472402344f7d21 | [] | no_license | kruart/coding_challenges | 04736a6b66da813fd973e7a57aa084bbdab31183 | 395ae60ab392e49bb5bc2f0a4eef1dfd232899bb | refs/heads/master | 2021-06-16T08:51:21.815334 | 2019-11-07T08:39:13 | 2019-11-07T08:39:13 | 153,890,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | # https://www.hackerrank.com/challenges/diagonal-difference/problem
def diagonal_difference(arr):
    """Return the absolute difference between the sums of the two diagonals
    of the square matrix ``arr``.

    Parameters:
        arr: square matrix as a list of equal-length rows.

    Returns:
        ``|sum(primary diagonal) - sum(secondary diagonal)|``; 0 for an
        empty matrix.
    """
    # Generator expression (no throwaway list) with len(arr) hoisted out
    # of the loop.
    n = len(arr)
    return abs(sum(arr[i][i] - arr[i][n - i - 1] for i in range(n)))
def main():
    """Demo driver: print the diagonal difference of two sample matrices."""
    samples = (
        # |(1 + 5 + 9) - (3 + 5 + 9)| = |15 - 17| = 2
        [[1, 2, 3],
         [4, 5, 6],
         [9, 8, 9]],
        # |(11 + 5 - 12) - (4 + 5 + 10)| = |4 - 19| = 15
        [[11, 2, 4],
         [4, 5, 6],
         [10, 8, -12]],
    )
    for matrix in samples:
        print(diagonal_difference(matrix))


if __name__ == '__main__':
    main()
| [
"weoz@ukr.net"
] | weoz@ukr.net |
7ab2db44c45f6cfdfecb3f313cf0817d5d141223 | b04eb4cec627b40c9cf970d25206d41367125041 | /learn-flask/flask_vs_wsgi/wsgi_vs_flask/wsgi/why_not_wsgi.py | ca4dee3bbd0d60deddb0c1935622f7eefd462a30 | [] | no_license | zhangkexia/web-learning | 793594da4257d461b3c9cc610b97718c344685fb | 72ee6174623f72a9d9eb1623fdf2620122dd72ef | refs/heads/master | 2023-06-24T20:27:45.981714 | 2021-07-28T05:39:07 | 2021-07-28T05:39:07 | 390,198,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | def application(environ, start_response):
method = environ['REQUEST_METHOD']
path = environ['PATH_INFO']
if method == 'GET' and path == '/':
return handle_home(environ, start_response)
if method == 'POST' and path == '/signin':
return handle_signin(environ, start_response)
if ....
#so many functions to handle different urls of different request types
| [
"yueyue_freedom@163.cm"
] | yueyue_freedom@163.cm |
9a7f3a9af93bc6ffe45fc6bedb3bdd29cc8e812e | 36f685d0bd257fd9addde777a16ce162386b303b | /easyautoml/utils.py | 48aac6fad2481ef00a4b859a6b08bf9d523dbf4b | [
"MIT"
] | permissive | almandsky/easyautoml | 5af61bede76cfde13b8dc6e4dc1e2e078595bf92 | 09a28109a8b6afe9b4791828313e374176af9a80 | refs/heads/master | 2020-03-27T15:24:07.845306 | 2018-09-27T23:34:59 | 2018-09-27T23:34:59 | 146,715,752 | 3 | 3 | MIT | 2018-09-27T23:35:00 | 2018-08-30T07:56:53 | Python | UTF-8 | Python | false | false | 1,772 | py | from easyautoml.ftutils import get_train_data, get_test_data
from easyautoml.tpotutils import tpot_train, tpot_score
# GLOBAL CONFIG -- default TPOT search budget shared by the helpers below.
TPOT_MAX_TIME_MINS = 1  # total optimization budget, in minutes
TPOT_MAX_EVAL_TIME_MINS = 0.04  # per-pipeline evaluation cap, in minutes
TPOT_POPULATION_SIZE = 40  # genetic-programming population per generation
N_JOBS = -1  # -1 => use all available CPU cores
def tpot_with_ft(
        project,
        train_file_name,
        test_file_name,
        submit_file,
        export_file,
        prediction_target,
        prediction_key,
        prediction_type,
        variable_types=None,
        scoring_func=None,
        predictProba=False,
        predictInt=False,
        getdummies=False,
        drop_train_columns=None,
        drop_score_columns=None):
    """Run an end-to-end AutoML pass for one project.

    Loads the training data, fits a TPOT pipeline under the module-level
    time/population budget, then scores the test set and writes both the
    exported pipeline and the submission file.

    Parameters:
        project: project name used by the data/TPOT helpers.
        train_file_name / test_file_name: input CSV file names.
        submit_file: path of the submission file to write.
        export_file: path for the exported TPOT pipeline source.
        prediction_target: name of the target column.
        prediction_key: name of the key/index column.
        prediction_type: TPOT task type (classification/regression).
        variable_types: optional featuretools variable-type overrides;
            defaults to an empty mapping.
        scoring_func: optional custom scoring function for TPOT.
        predictProba / predictInt / getdummies: scoring output options
            forwarded to tpot_score.
        drop_train_columns / drop_score_columns: columns to drop from the
            train/test frames respectively.
    """
    # Avoid the shared-mutable-default pitfall: the previous
    # `variable_types={}` default was created once and reused (and could be
    # mutated) across calls.
    if variable_types is None:
        variable_types = {}
    X, y = get_train_data(
        project=project,
        train_file=train_file_name,
        prediction_key=prediction_key,
        prediction_target=prediction_target,
        variable_types=variable_types,
        drop_columns=drop_train_columns)
    tpot_instance = tpot_train(
        project=project,
        X=X,
        y=y,
        prediction_type=prediction_type,
        export_file=export_file,
        scoring_func=scoring_func,
        max_time_mins=TPOT_MAX_TIME_MINS,
        n_jobs=N_JOBS,
        population_size=TPOT_POPULATION_SIZE)
    X_test, index_column = get_test_data(
        project=project,
        testfile=test_file_name,
        prediction_key=prediction_key,
        prediction_target=prediction_target,
        variable_types=variable_types,
        drop_columns=drop_score_columns)
    tpot_score(
        tpot=tpot_instance,
        project=project,
        X_test=X_test,
        index_column=index_column,
        prediction_target=prediction_target,
        prediction_key=prediction_key,
        submit_file=submit_file,
        predictProba=predictProba,
        predictInt=predictInt,
        getdummies=getdummies)
"sky.chen@salesforce.com"
] | sky.chen@salesforce.com |
ae99363612df9095c5b8194a8ad42254c93b94c0 | 981ead90e27f974a0b567a543df783c0677933bf | /Utilities/util.py | 9e8fce17a4a37344bc9437fde9bd3c6c8f7bbf5b | [] | no_license | TachunLin/pytestAllure | 20360979c3921295cc0890ccbca5cf8116811b02 | 5edf7179b4beee4ea8d2ff10ebff9d0f3733f6df | refs/heads/master | 2020-03-17T06:47:42.604130 | 2018-05-19T13:02:16 | 2018-05-19T13:02:16 | 133,369,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,130 | py |
import time
import traceback
import random, string
import Utilities.custom_logger as cl
import logging
from traceback import print_stack
import datetime
import os
import pytest
import allure
from allure.constants import AttachmentType
from helpers import ensure_dir
# from Utilities.custom_logger import customLogger
# import Utilities.custom_logger as cl
# log = cl.customLogger(logging.INFO)
class Util(object):
    """Test-automation helpers: waits, random test data, text/list
    verification, allure checkpoints and screenshot capture.
    """

    # NOTE(review): raising the root logger to INFO at class-definition time
    # is a global side effect; preserved for backward compatibility.
    logging.getLogger().setLevel(logging.INFO)

    def sleep(self, sec, info=""):
        """
        Put the program to wait for the specified amount of time

        Parameters:
            sec: number of seconds to wait
            info: short description of what is being waited for (logged)
        """
        if info is not None:
            logging.info("Wait :: '" + str(sec) + "' seconds for " + info)
        try:
            time.sleep(sec)
        except InterruptedError:
            traceback.print_stack()

    def getAlphaNumeric(self, length, type='letters'):
        """
        Get random string of characters

        Parameters:
            length: Length of string, number of characters string should have
            type: Type of characters string should have. Default is letters
                Provide lower/upper/digits/mix for different types
        """
        if type == 'lower':
            case = string.ascii_lowercase
        elif type == 'upper':
            case = string.ascii_uppercase
        elif type == 'digits':
            case = string.digits
        elif type == 'mix':
            case = string.ascii_letters + string.digits
        else:
            case = string.ascii_letters
        # ''.join over a generator replaces the old empty-string accumulator.
        return ''.join(random.choice(case) for _ in range(length))

    def getUniqueName(self, charCount=10):
        """
        Get a unique (random lowercase) name of `charCount` characters
        """
        return self.getAlphaNumeric(charCount, 'lower')

    def getUniqueNameList(self, listSize=5, itemLength=None):
        """
        Get a list of unique random names

        Parameters:
            listSize: Number of names. Default is 5 names in a list
            itemLength: List with `listSize` entries; entry i gives the
                length of name i, e.g. [1, 2, 3, 4, 5]
        """
        return [self.getUniqueName(itemLength[i]) for i in range(listSize)]

    def verifyTextContains(self, actualText, expectedText):
        """
        Verify actual text contains expected text string (case-insensitive)

        Parameters:
            actualText: Actual Text
            expectedText: Expected Text
        """
        logging.info("Actual Text From Application Web UI --> :: " + actualText)
        logging.info("Expected Text From Application Web UI --> :: " + expectedText)
        if expectedText.lower() in actualText.lower():
            logging.info("### VERIFICATION CONTAINS !!!")
            return True
        logging.info("### VERIFICATION DOES NOT CONTAINS !!!")
        return False

    def verifyTextMatch(self, actualText, expectedText):
        """
        Verify actual and expected text match exactly (case-insensitive)

        Parameters:
            actualText: Actual Text
            expectedText: Expected Text
        """
        logging.info("Actual Text From Application Web UI --> :: " + actualText)
        logging.info("Expected Text From Application Web UI --> :: " + expectedText)
        if actualText.lower() == expectedText.lower():
            logging.info("### VERIFICATION MATCHED !!!")
            return True
        logging.info("### VERIFICATION DOES NOT MATCHED !!!")
        return False

    def verifyListMatch(self, expectedList, actualList):
        """
        Verify two lists contain the same elements (order-insensitive)

        Parameters:
            expectedList: Expected List
            actualList: Actual List
        """
        return set(expectedList) == set(actualList)

    def verifyListContains(self, expectedList, actualList):
        """
        Verify actual list contains every element of expected list

        Bug fix: the previous implementation returned after inspecting only
        the first element, and returned None for an empty expectedList.
        An empty expectedList is now trivially contained (True).

        Parameters:
            expectedList: Expected List
            actualList: Actual List
        """
        for item in expectedList:
            if item not in actualList:
                return False
        return True

    def verifyCheckPoint(self, driver, actual, expectation, request, log):
        """
        Record an allure step asserting `actual` equals `expectation`;
        on mismatch a screenshot is attached to the report before failing.
        """
        if actual == expectation:
            with pytest.allure.step('Pass~ Actual result: {0} match expectation: {1}'.format(actual, expectation)):
                log.info('Pass~ Actual result: {0} match expectation: {1}'.format(actual, expectation))
        else:
            with pytest.allure.step('Mismatched!! Actual result: {0} <-> expectation: {1}'.format(actual, expectation)):
                # Attach a failure screenshot so the allure report shows the
                # UI state; `with open(...)` fixes the previous file-handle leak.
                shot_path = self.saveAssertScreenShot(driver, request.node.name)
                with open(shot_path, 'rb') as shot:
                    allure.attach('Assertion error screenshot', shot.read(), type=AttachmentType.PNG)
                log.error('Mismatched!! Actual result: {0} <-> expectation: {1}'.format(actual, expectation))
                assert 0, 'Mismatched!! Actual result: {0} <-> expectation: {1}'.format(actual, expectation)

    def _saveScreenShot(self, driver, testcaseName, category):
        """
        Save a browser screenshot under Screenshot/<timestamp>/<category>/
        and return its full path. Shared by the two public save methods,
        which previously duplicated this logic.
        """
        current_day = (datetime.datetime.now().strftime("%Y_%m_%d_%H_%S"))
        ensure_dir("Screenshot")
        # NOTE(review): assumes helpers.ensure_dir returns the created path
        # -- matches the original usage; confirm against helpers module.
        result_dir = ensure_dir(os.path.join("Screenshot", current_day))
        shot_dir = os.path.join(result_dir, category)
        ensure_dir(shot_dir)
        image_path = os.path.join(shot_dir, current_day + '_' + testcaseName + ".png")
        driver.save_screenshot(image_path)
        return image_path

    def saveExeScreenShot(self, driver, testcaseName):
        """Save an execution-time screenshot; returns the image path."""
        return self._saveScreenShot(driver, testcaseName, "Execution")

    def saveAssertScreenShot(self, driver, testcaseName):
        """Save an assertion-failure screenshot; returns the image path."""
        return self._saveScreenShot(driver, testcaseName, "Assertion")
"imdagcool@gmail.com"
] | imdagcool@gmail.com |
4b613f02dfa0d2eb3da212f00e4eab60c32a56c1 | 265946224347cc202e56629c752dfa87808b6bb8 | /node_modules/mongoose/node_modules/bson/node_modules/bson-ext/build/config.gypi | 37d8ad379aebc56c134947389a15f2f782bc2677 | [
"Apache-2.0",
"MIT"
] | permissive | Dongs7/drawguess | e1bba9f719311ea7636bd1ce1d40d8e5cde449e6 | 9643a16eaf35067d64196eac981901e80f11ed1c | refs/heads/master | 2020-09-17T08:15:24.313909 | 2016-08-31T12:32:25 | 2016-08-31T12:32:25 | 67,034,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,067 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"icu_data_file": "icudt54l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt54l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "54",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"uv_library": "static_library",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/Users/vingosj/.node-gyp/0.12.4",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/Users/vingosj/Documents/Web course/final/node_modules/mongoose/node_modules/bson/node_modules/bson-ext/build/Release/bson.node",
"module_name": "bson",
"module_path": "/Users/vingosj/Documents/Web course/final/node_modules/mongoose/node_modules/bson/node_modules/bson-ext/build/Release",
"save_dev": "",
"viewer": "man",
"browser": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"shell": "/bin/bash",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"init_license": "ISC",
"if_present": "",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "Infinity",
"long": "",
"local_address": "",
"git_tag_version": "true",
"cert": "",
"registry": "https://registry.npmjs.org/",
"npat": "",
"fetch_retries": "2",
"versions": "",
"message": "%s",
"key": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"cafile": "",
"heading": "npm",
"proprietary_attribs": "true",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/vingosj/.npmrc",
"init_module": "/Users/vingosj/.npm-init.js",
"user": "",
"node_version": "0.12.4",
"editor": "vi",
"save": "true",
"tag": "latest",
"global": "",
"optional": "true",
"force": "",
"bin_links": "true",
"searchopts": "",
"depth": "Infinity",
"searchsort": "name",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"tag_version_prefix": "v",
"strict_ssl": "true",
"save_prefix": "^",
"ca": "",
"save_exact": "",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"cache_lock_stale": "60000",
"cache_min": "10",
"searchexclude": "",
"cache": "/Users/vingosj/.npm",
"color": "true",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/2.11.1 node/v0.12.4 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "0022",
"init_version": "1.0.0",
"scope": "",
"init_author_name": "",
"git": "git",
"unsafe_perm": "true",
"tmp": "/var/folders/mn/kz9gt1x9033f40dg4sky225w0000gn/T",
"onload_script": "",
"prefix": "/usr/local",
"link": ""
}
}
| [
"vingosheng@gmail.com"
] | vingosheng@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.