blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
25fa020e4125bd003f249e28f46273ee24735c42 | 59db1b21ccf1399b87e6a85f4c9e1122f1358d5f | /config.py | 4a9d832a9a4fa7f59b008e31944a1f17b488cea4 | [
"MIT"
] | permissive | nicholas-oduor/Pitches | bff97bdc470443be1520e0e8ce9145c2739bee89 | c85d02274085b0cff8a631e7328ba76a6f6060d1 | refs/heads/master | 2023-03-19T16:34:20.264716 | 2021-03-02T06:52:53 | 2021-03-02T06:52:53 | 342,582,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | import os
class Config:
    """Base Flask configuration shared by every environment."""
    # Session/signing key, supplied via the environment.
    SECRET_KEY = os.environ.get('SECRET_KEY')
    # NOTE(review): DB credentials are hard-coded here; consider reading the
    # connection string from an environment variable as ProdConfig does.
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringaaccessaccess:Access@localhost/pitches'
    # Turn off the Flask-SQLAlchemy modification-tracking event system.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Destination directory for uploaded photos (presumably Flask-Uploads).
    UPLOADED_PHOTOS_DEST ='app/static/photos'
    # email configurations
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    # SMTP credentials come from the environment, never from source.
    MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
    MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
    # simple mde configurations (Flask-SimpleMDE markdown editor)
    SIMPLEMDE_JS_IIFE = True
    SIMPLEMDE_USE_CDN = True
class ProdConfig(Config):
    """Production settings: the database URL is injected via DATABASE_URL."""
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class TestConfig(Config):
    """Test settings: points at the separate pitches_test database."""
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringaaccess:Access@localhost/pitches_test'
class DevConfig(Config):
    """Development settings: local database and Flask debug mode enabled."""
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringaaccess:Access@localhost/pitches'
    DEBUG = True
# Maps an environment name to its configuration class.
config_options = {
    'development':DevConfig,
    'production':ProdConfig,
    'test':TestConfig
} | [
"oduor5742@gmail.com"
] | oduor5742@gmail.com |
fc4ee268dd12250989e2ef7da583d9b11063e8d7 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano3249.py | 502f170e217ed9f5f27e6f70e1ca274e34def381 | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
# cmsDriver-generated job: reads a Run2018A EGamma MINIAOD file and writes
# NANOAOD with the jetToolbox customisations (see the header comments above).
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# -1: process every event in the input file.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/120000/991CE2DB-8189-7D41-A40D-75A46C5E3FAE.root'),
    secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
    annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
    name = cms.untracked.string('Applications'),
    version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
    compressionAlgorithm = cms.untracked.string('LZMA'),
    compressionLevel = cms.untracked.int32(9),
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('NANOAOD'),
        filterName = cms.untracked.string('')
    ),
    fileName = cms.untracked.string('file:jetToolbox_nano_datatest3249.root'),
    outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
# Conditions: September 2018 data re-reco global tag.
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
# Disable ROOT implicit multithreading, as requested via --customise_commands.
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion | [
"rsk146@scarletmail.rutgers.edu"
] | rsk146@scarletmail.rutgers.edu |
60e2b53b3aadeba4f702b9b196dfc641f99ac80e | 390e4ed75f9c1311541a027f2e4f665a92a6497b | /main.py | e9e0be9acc48b5d5c0bbc46a911991708c06859c | [
"MIT"
] | permissive | AlmirPaulo/songTabs_bot | f2fafd91aac1215e80307d4f73f7b14c992d9840 | 7edb743e5e420d7f1eb03cd88a4716cd9629ae96 | refs/heads/main | 2023-03-11T03:52:19.971403 | 2021-02-27T21:25:57 | 2021-02-27T21:25:57 | 331,465,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,166 | py | # Software: songTabs_bot
# Description: A Telegram bot that give guitar tabs to songs.
# Info Page: https://github.com/AlmirPaulo/songTabs_bot
# Author: Almir Paulo
# Github: https://github.com/AlmirPaulo/
# Website: https://almirpaulo.github.io/
# Email: ap.freelas@gmail.com
from telegram.ext import (CallbackContext,
Updater,
Filters,
CommandHandler,
MessageHandler,
CallbackQueryHandler
)
from telegram import (Update, InlineKeyboardButton, InlineKeyboardMarkup, ParseMode)
import side, os
#Variables
# Telegram bot token, read from the environment once at import time.
token = os.getenv('TOKEN')
def start(update: Update, context: CallbackContext) -> None:
    """Handle the /start command: greet the user and explain basic usage."""
    update.message.reply_text('''
    Welcome! \n Type the name of your favorite artist or song to get your tabs. ''')
def help(update: Update, context: CallbackContext) -> None:
    """Handle the /help command: describe how to search for tabs.

    NOTE(review): this shadows the ``help`` builtin at module level; the
    CommandHandler registration in main() uses this name, so it is kept.
    """
    update.message.reply_text('''Just type the name of your favorite artist or song. If something goes wrong, try in another way (just the first name of the artist, for instance). \n I will give you all the available tab options, you click, get the link and enjoy.''')
def get_tabs(update: Update, context: CallbackContext) -> None:
    """Search tabs matching the user's message and offer them as buttons."""
    query_text = update.message.text
    # One keyboard row per match; callback_data carries the song id that the
    # button() callback later turns into a Songsterr link.
    rows = [
        [InlineKeyboardButton(f"{i['artist']['name']} - {i['title']}", callback_data=i['id'])]
        for i in side.api_req(query_text)
    ]
    update.message.reply_text('Choose your tab: ', reply_markup=InlineKeyboardMarkup(rows))
def button(update: Update, context: CallbackContext) -> None:
    """Handle a tab-choice button press: acknowledge it and post the link."""
    query = update.callback_query
    # Answer the callback so the client stops showing its progress indicator.
    query.answer()
    # query.data is the song id embedded in the button by get_tabs().
    query.edit_message_text(text=f"<a href= 'http://www.songsterr.com/a/wa/song?id={query.data}'>Click here</a> to get your tabs.\n Enjoy!", parse_mode=ParseMode.HTML)
def main():
    """Build the bot, register its handlers and poll until interrupted."""
    # Fail fast with a clear message instead of the opaque AttributeError
    # token.strip() raises when the TOKEN environment variable is unset.
    if not token:
        raise RuntimeError('TOKEN environment variable is not set')
    #Token
    updater = Updater(token.strip())
    #Commands
    # Any free-text message (i.e. not a command) triggers a tab search.
    updater.dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, get_tabs))
    # Presses on the inline keyboard built by get_tabs.
    updater.dispatcher.add_handler(CallbackQueryHandler(button))
    updater.dispatcher.add_handler(CommandHandler('start', start))
    updater.dispatcher.add_handler(CommandHandler('help', help))
    #Server
    # Keep-alive web server so hosting platforms do not idle the bot.
    side.keep_alive()
    #Main Loop
    updater.start_polling()
    updater.idle()
main() | [
"ap.freelas@gmail.com"
] | ap.freelas@gmail.com |
6f03172d6c29f39f9818691edaf721fb8837e220 | 4feaa1623cab4383e539442690ff68859452480d | /venv/lib/python3.6/locale.py | 544823210631e1db36a85335522be0659ce0618f | [] | no_license | brenda151295/pattern_recognition | 0cc49176e7a1493e12e7b18c8da1d1e77e82899c | e90690312e66704655dcd9c787757a3387ee4e79 | refs/heads/master | 2020-08-11T06:54:45.204755 | 2020-02-21T18:21:01 | 2020-02-21T18:21:01 | 214,512,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | /home/ademir/condaVenv/lib/python3.6/locale.py | [
"brenda151295@gmail.com"
] | brenda151295@gmail.com |
862acd6512fcd275ad31407a805f4042dc0f7f1a | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_3055.py | d3ddd7faf2f926afa7c10c6d1e6b93350df9ca44 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,838 | py | import _surface
import chimera
# chimera.runCommand is optional; ignore its absence.
try:
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
# Older/newer Chimera builds expose marker-set creation differently; fall
# back to the volume path dialog when Marker_Set is not importable.
try:
    from VolumePath import Marker_Set, Link
    new_marker_set=Marker_Set
except:
    from VolumePath import volume_path_dialog
    d= volume_path_dialog(True)
    new_marker_set= d.new_marker_set
# Every marker below is placed with the identical five-line pattern, so the
# placements are driven from one data table instead of ~275 repeated lines.
# Each entry: (marker-set name, (x, y, z) position, (r, g, b) color, radius).
MARKER_SPECS = [
    ("Cog2_GFPN", (603.214, 485.129, 451.385), (0.89, 0.1, 0.1), 18.4716),
    ("Cog2_0", (541.749, 495.92, 477.899), (0.89, 0.1, 0.1), 17.1475),
    ("Cog2_1", (467.324, 495.674, 512.203), (0.89, 0.1, 0.1), 17.1475),
    ("Cog2_GFPC", (556.434, 391.186, 540.366), (0.89, 0.1, 0.1), 18.4716),
    ("Cog2_Anch", (283.035, 529.66, 565.734), (0.89, 0.1, 0.1), 18.4716),
    ("Cog3_GFPN", (557.795, 485.317, 460.79), (1, 1, 0), 18.4716),
    ("Cog3_0", (558.563, 484.876, 459.829), (1, 1, 0.2), 17.1475),
    ("Cog3_1", (533.437, 474.115, 453.383), (1, 1, 0.2), 17.1475),
    ("Cog3_2", (515.033, 462.122, 435.931), (1, 1, 0.2), 17.1475),
    ("Cog3_3", (532.546, 467.24, 414.512), (1, 1, 0.2), 17.1475),
    ("Cog3_4", (551.905, 448.762, 405.545), (1, 1, 0.2), 17.1475),
    ("Cog3_5", (542.507, 428.932, 387.838), (1, 1, 0.2), 17.1475),
    ("Cog3_GFPC", (581.824, 500.246, 457.243), (1, 1, 0.4), 18.4716),
    ("Cog3_Anch", (497.997, 364.361, 315.965), (1, 1, 0.4), 18.4716),
    ("Cog4_GFPN", (339.062, 447.64, 409.007), (0, 0, 0.8), 18.4716),
    ("Cog4_0", (339.062, 447.64, 409.007), (0, 0, 0.8), 17.1475),
    ("Cog4_1", (364.487, 458.99, 413.049), (0, 0, 0.8), 17.1475),
    ("Cog4_2", (377.071, 484.078, 416.14), (0, 0, 0.8), 17.1475),
    ("Cog4_3", (404.662, 489.561, 420.931), (0, 0, 0.8), 17.1475),
    ("Cog4_4", (432.096, 490.206, 429.903), (0, 0, 0.8), 17.1475),
    ("Cog4_5", (459.506, 494.339, 438.802), (0, 0, 0.8), 17.1475),
    ("Cog4_6", (487.333, 498.6, 446.999), (0, 0, 0.8), 17.1475),
    ("Cog4_GFPC", (350.813, 308.266, 339.256), (0, 0, 0.8), 18.4716),
    ("Cog4_Anch", (643.588, 676.363, 555.92), (0, 0, 0.8), 18.4716),
    ("Cog5_GFPN", (470.839, 520.017, 471.876), (0.3, 0.3, 0.3), 18.4716),
    ("Cog5_0", (470.839, 520.017, 471.876), (0.3, 0.3, 0.3), 17.1475),
    ("Cog5_1", (483.756, 496.579, 482.551), (0.3, 0.3, 0.3), 17.1475),
    ("Cog5_2", (486.953, 472.926, 498.824), (0.3, 0.3, 0.3), 17.1475),
    ("Cog5_3", (484.173, 470.236, 527.431), (0.3, 0.3, 0.3), 17.1475),
    ("Cog5_GFPC", (605.482, 456.096, 502.002), (0.3, 0.3, 0.3), 18.4716),
    ("Cog5_Anch", (363.791, 482.892, 558.77), (0.3, 0.3, 0.3), 18.4716),
    ("Cog6_GFPN", (557, 476.25, 491.55), (0.21, 0.49, 0.72), 18.4716),
    ("Cog6_0", (557.088, 476.203, 491.618), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_1", (544.415, 454.029, 481.026), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_2", (548.922, 442.682, 455.892), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_3", (542.255, 425.539, 434.671), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_4", (523.034, 433.671, 415.706), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_5", (514.935, 447.987, 392.582), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_6", (536.035, 459.928, 377.787), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_GFPC", (525.707, 535.74, 416.368), (0.21, 0.49, 0.72), 18.4716),
    ("Cog6_Anch", (549.35, 381.476, 341.443), (0.21, 0.49, 0.72), 18.4716),
    ("Cog7_GFPN", (524.859, 551.968, 459.592), (0.7, 0.7, 0.7), 18.4716),
    ("Cog7_0", (523.107, 530.815, 476.035), (0.7, 0.7, 0.7), 17.1475),
    ("Cog7_1", (516.553, 486.022, 511.863), (0.7, 0.7, 0.7), 17.1475),
    ("Cog7_2", (509.81, 442.117, 548.141), (0.7, 0.7, 0.7), 17.1475),
    ("Cog7_GFPC", (587.843, 457.044, 563.839), (0.7, 0.7, 0.7), 18.4716),
    ("Cog7_Anch", (442.375, 371.69, 584.536), (0.7, 0.7, 0.7), 18.4716),
    ("Cog8_0", (574.845, 451.027, 504.557), (1, 0.5, 0), 17.1475),
    ("Cog8_1", (561.907, 464.822, 525.322), (1, 0.5, 0), 17.1475),
    ("Cog8_2", (540.488, 479.621, 535.588), (1, 0.5, 0), 17.1475),
    ("Cog8_3", (519.876, 495.995, 545.797), (1, 0.5, 0), 17.1475),
    ("Cog8_4", (496.536, 511.699, 548.556), (1, 0.5, 0), 17.1475),
    ("Cog8_5", (473.732, 528.445, 546.627), (1, 0.5, 0), 17.1475),
    ("Cog8_GFPC", (533.626, 515.875, 493.685), (1, 0.6, 0.1), 18.4716),
    ("Cog8_Anch", (412.856, 541.337, 599.637), (1, 0.6, 0.1), 18.4716),
]
marker_sets={}
surf_sets={}
for name, position, color, radius in MARKER_SPECS:
    # Create each named marker set on first use, then drop the marker in it.
    if name not in marker_sets:
        marker_sets[name] = new_marker_set(name)
    marker_sets[name].place_marker(position, color, radius)
for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
702760dacc77f2e65aaed171a0998dfd7602a7b9 | 3cf21d46cc8213614f5edfe4ebb09df112e5bf44 | /tools/asset_aggregator/name_check.py | 788a6e9eadc9bae3dc73c59a19e06448f19fd6e7 | [
"BSD-3-Clause"
] | permissive | toro09/rotki | abbf06c63bf0191b8a381bad05534bf8541cf212 | 014e7e11521b81c89b5cd2b4082d197da26684ee | refs/heads/master | 2022-12-01T19:09:08.409018 | 2020-08-11T19:34:54 | 2020-08-11T19:34:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,883 | py | import sys
from typing import Any, Dict
from asset_aggregator.utils import choose_multiple
# For assets we support but no API has names for. We manually input the names then.
# Maps our asset symbol -> human-readable name; consulted by name_check()
# before any external API data is considered.
MANUALLY_CHECKED_NAMES: Dict[str, str] = {
    'ADADOWN': 'Binance leveraged token ADADOWN',
    'ADAUP': 'Binance leveraged token ADAUP',
    'BTCDOWN': 'Binance leveraged token BTCDOWN',
    'BTCUP': 'Binance leveraged token BTCUP',
    'ETHDOWN': 'Binance leveraged token ETHDOWN',
    'ETHUP': 'Binance leveraged token ETHUP',
    'LINKDOWN': 'Binance leveraged token LINKDOWN',
    'LINKUP': 'Binance leveraged token LINKUP',
    'AMIS': 'Amis',
    'AVA-2': 'Avalon',
    'BIDR': 'Binance IDR Stable Coin',
    'BITCAR': 'BitCar',
    'BMT': 'BMChain',
    'BOU': 'Boulle',
    'BTCE': 'EthereumBitcoin',
    'BTE': 'BTEcoin',
    'BTH': 'Bytether',
    'BTR-2': 'Bither',
    'CET-2': 'DICE Money',
    'CFTY': 'Crafty',
    'CNTM': 'Connectome',
    'CTSI': 'Cartesi',
    'CO2': 'Climatecoin',
    'CRGO': 'CargoCoin',
    'DEPO': 'Depository Network',
    'DIP': 'Etherisc',
    'DPP': 'Digital Assets Power Play',
    'EMT': 'EasyMine',
    'ENTRP': 'Hut34 Entropy Token',
    'ETHB': 'EtherBTC',
    'FIH': 'FidelityHouse',
    'FLX': 'BitFlux',
    'FORK-2': 'Gastro Advisor Token',
    'HBD': 'Hive dollar',
    'HIVE': 'Hive',
    'HKG': 'Hacker Gold',
    'ITM': 'Intimate',
    'JOY': 'JOYSO',
    'KUE': 'Kuende',
    'LGR': 'Logarithm',
    'LOON': 'Loon Network',
    'ME': 'All.me',
    'MILC': 'Micro Licensing Coin',
    'MNT': 'Media Network Token',
    'MRP': 'Money Rebel',
    'MRV': 'Macroverse',
    'OAK': 'Acorn Collective',
    'OCC-2': 'Original Crypto Coin',
    'REA': 'Realisto',
    'REDC': 'Red Cab',
    'RIPT': 'RiptideCoin',
    'RNDR': 'Render Token',
    'SKR': 'Skrilla Token',
    'SKYM': 'Skymap',
    'SPICE': 'Spice VC Token',
    'SSH': 'StreamSpace',
    'STP': 'StashPay',
    'TAN': 'Taklimakan',
    'TBT': 'T-Bot',
    'TRXBEAR': ' 3X Short TRX Token',
    'TRXBULL': ' 3X Long TRX Token',
    'URB': 'Urbit Data',
    'USDJ': 'USDJ',
    'UTI': 'Unicorn Technology International',
    'VENUS': 'VenusEnergy',
    'WMK': 'WeMark',
    'WLK': 'Wolk',
    'ZIX': 'Zeex Token',
}
def name_check(
        asset_symbol: str,
        our_asset: Dict[str, Any],
        our_data: Dict[str, Any],
        paprika_data: Dict[str, Any],
        cmc_data: Dict[str, Any],
) -> Dict[str, Any]:
    """Fill in the asset's name from coinpaprika/coinmarketcap data.

    A manually entered name (in our data or in MANUALLY_CHECKED_NAMES)
    always wins; otherwise the two external APIs are compared and the
    operator is prompted whenever they disagree. Returns the (possibly
    updated) our_data mapping.
    """
    if our_asset.get('name', None):
        # A name was already entered manually -- keep it untouched.
        return our_data

    if asset_symbol in MANUALLY_CHECKED_NAMES:
        our_data[asset_symbol]['name'] = MANUALLY_CHECKED_NAMES[asset_symbol]
        return our_data

    paprika_name = paprika_data['name'] if paprika_data else None
    cmc_name = cmc_data['name'] if cmc_data else None

    if not paprika_name and not cmc_name and asset_symbol:
        print(f'No name in any external api for asset {asset_symbol}')
        sys.exit(1)

    if paprika_name == cmc_name:
        # Both external APIs agree -- adopt that name without prompting.
        our_data[asset_symbol]['name'] = paprika_name
        return our_data

    msg = (
        f'For asset {asset_symbol} the possible names are: \n'
        f'(1) Coinpaprika: {paprika_name}\n'
        f'(2) Coinmarketcap: {cmc_name}\n'
        f'Choose a number (1)-(2) to choose which name to use: '
    )
    choice = choose_multiple(msg, (1, 2))
    if choice == 1:
        picked = paprika_name
    elif choice == 2:
        if not cmc_name:
            print("Chose coinmarketcap's name but it's empty. Bailing ...")
            sys.exit(1)
        picked = cmc_name
    our_data[asset_symbol]['name'] = picked
    return our_data
| [
"lefteris@refu.co"
] | lefteris@refu.co |
a48a57e6ab08c5febbe9e00f996361d8d49911ae | 8543967dee7ba159ada7c1a4ab85281113421029 | /venv/Scripts/autopep8-script.py | 03359bb881ecdf5a4bda3fc8bf9a0497303bfd5d | [] | no_license | dcarpent19/dummy | 5929ece6698f03212116ab73f4aae0540b994a56 | 5e72cf65c2aee328bf271f8f9a13184dd3841eb7 | refs/heads/master | 2022-12-08T01:54:39.630886 | 2020-08-24T18:46:22 | 2020-08-24T18:46:22 | 284,261,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | #!c:\Users\dxc004\Projects\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'autopep8==1.5.4','console_scripts','autopep8'
__requires__ = 'autopep8==1.5.4'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py(w)"/".exe" wrapper suffix so the tool sees its
    # canonical program name in argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Run autopep8's console entry point and propagate its exit status.
    sys.exit(
        load_entry_point('autopep8==1.5.4', 'console_scripts', 'autopep8')()
    )
| [
"dcarpent@amfam.com"
] | dcarpent@amfam.com |
12a1f22f173ead5bc87c8c5dd9e34617c3d8c272 | 4ec311ae2a7d809c7eeaaaa67f8eec152a92b542 | /ManufactureDeepLearning/over-fitting/Dropout/about_dropout.py | d544bbef1d22e9330cd6b48bf80da49480b9a677 | [] | no_license | TakanoriHasebe/DeepLearning | 71baf7465f31a7539ad0ea9b313146c891cd8376 | 682006e77b9b90f034b41b65724e52266e7bc193 | refs/heads/master | 2021-01-20T00:20:07.853541 | 2017-03-30T05:10:24 | 2017-03-30T05:10:24 | 78,616,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 15 11:39:16 2017
@author: Takanori
"""
"""
dropoutについてのプログラム
"""
import numpy as np
# Dropoutについてのプログラム
class Dropout:
    """Dropout layer: randomly silences units during training.

    At inference time no mask is drawn; activations are scaled by the
    expected keep probability instead.
    """

    def __init__(self, dropout_ratio=0.5):
        # Fraction of units to drop on each training forward pass.
        self.dropout_ratio = dropout_ratio
        # Boolean keep-mask from the most recent training forward pass.
        self.mask = None

    def forward(self, x, train_flg=True):
        """Apply dropout to ``x`` (mask in training, scaling otherwise)."""
        if not train_flg:
            return x * (1.0 - self.dropout_ratio)
        keep = np.random.rand(*x.shape) > self.dropout_ratio
        self.mask = keep
        return x * keep

    def backward(self, dout):
        """Gradient flows only through units kept in the last forward pass."""
        return dout * self.mask
# Quick demo: one training forward pass followed by the matching backward pass.
x = np.array([1,2,3,4,5])
drop = Dropout()
res = drop.forward(x)
print(res)
res = drop.backward(res)
| [
"jimmyflyingstrat@gmail.com"
] | jimmyflyingstrat@gmail.com |
0b7d6236b66a636e6778572bde8454fb0fa408ca | 242086b8c6a39cbc7af3bd7f2fd9b78a66567024 | /python/PP4E-Examples-1.4/Examples/PP4E/Dstruct/Classics/permcomb.py | f6e244e89732645613fef830391f1e2f9dd60db6 | [] | no_license | chuzui/algorithm | 7537d0aa051ac4cbe9f6a7ca9a3037204803a650 | c3006b24c4896c1242d3ceab43ace995c94f10c8 | refs/heads/master | 2021-01-10T13:05:30.902020 | 2015-09-27T14:39:02 | 2015-09-27T14:39:02 | 8,404,397 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | "permutation-type operations for sequences"
def permute(list):
    "Return every ordering of the sequence, preserving its type."
    if not list:                       # nothing left to arrange
        return [list]                  # sole permutation: the empty sequence
    # Pick each item in turn as the head, then permute whatever remains.
    return [list[i:i+1] + tail
            for i in range(len(list))
            for tail in permute(list[:i] + list[i+1:])]
def subset(list, size):
    "Return all length-``size`` orderings drawn from the sequence."
    if size == 0 or not list:          # order matters here
        return [list[:0]]              # one result: an empty sequence
    # Choose each item for the first slot, then fill the remaining slots.
    return [list[i:i+1] + rest
            for i in range(len(list))
            for rest in subset(list[:i] + list[i+1:], size - 1)]
def combo(list, size):
    "Return all length-``size`` combinations; relative order is kept."
    if size == 0 or not list:          # order doesn't matter
        return [list[:0]]              # one result: an empty sequence
    # Only positions leaving at least size-1 items to the right can start one.
    return [list[i:i+1] + rest
            for i in range(0, (len(list) - size) + 1)
            for rest in combo(list[i+1:], size - 1)]
| [
"zui"
] | zui |
4a2e7fe3a9ce7aac84565ff460a6d8136cc67b11 | 01419d9a41dcf6c7cd02963615f1612fe170ebd9 | /chapter_9/single_layer_chain.py | 8e5948be424c8d30f96a72ddab92176c6b7d8038 | [] | no_license | Graham-CO/ai_ml_python | 0daa42270e34fd6b597d4ce20bbfcc7bbd08fa3f | c75b8e471107919872b16c86758928f51538aebe | refs/heads/master | 2023-07-14T00:26:36.106803 | 2021-08-28T18:36:57 | 2021-08-28T18:36:57 | 377,682,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | import numpy as np
# Passed-in gradient from the next layer
# for the purpose of this example we're going to use
# a vector of 1s
dvalues = np.array([[1., 1., 1.]])
# We have 3 sets of weights - one for each neuron
# we have 4 inputs, thus 4 weights
# recall that we keep weights transposed
weights = np.array([[0.2, 0.8, -0.5,1],
                    [0.5, -0.91, 0.26, -0.5],
                    [-0.26, -0.27, 0.17, 0.87]]).T
# Sum weights related to the given input multiplied by
# the gradient related to the given neuron
# INEFFICIENT CODE (kept for the pedagogical progression below)
# dx0 = sum([weights[0][0]*dvalues[0][0], weights[0][1]*dvalues[0][1],
#            weights[0][2]*dvalues[0][2]])
# dx1 = sum([weights[1][0]*dvalues[0][0], weights[1][1]*dvalues[0][1],
#            weights[1][2]*dvalues[0][2]])
# dx2 = sum([weights[2][0]*dvalues[0][0], weights[2][1]*dvalues[0][1],
#            weights[2][2]*dvalues[0][2]])
# dx3 = sum([weights[3][0]*dvalues[0][0], weights[3][1]*dvalues[0][1],
#            weights[3][2]*dvalues[0][2]])
# MORE EFFICIENT (NOT VECTORIZED)
# dx0 = sum(weights[0]*dvalues[0])
# dx1 = sum(weights[1]*dvalues[0])
# dx2 = sum(weights[2]*dvalues[0])
# dx3 = sum(weights[3]*dvalues[0])
# USING DOT PRODUCT - NOT BATCHED: one matrix product replaces all the
# per-input sums above and yields the same 4-element gradient.
dinputs = np.dot(dvalues[0], weights.T)
# dinputs = np.array([dx0, dx1, dx2, dx3])
print(dinputs) | [
"grwi2594@colorado.edu"
] | grwi2594@colorado.edu |
6c4de933b6029c46a47a278129e86bb8f2ddafcd | 0d1c57fd55660829b83ab0f280dfb687046765ec | /BowlingGame.py | e72ab8bf5d173088cdd2202de3f9e607704f3004 | [] | no_license | remimarenco/bowling_tdd | dea8310d8592096708fc48f3f02f3f6c0d112631 | 2f6f540f090fbda06c0c163ea92a17da1aaafd9b | refs/heads/master | 2021-03-12T19:22:56.491643 | 2014-11-29T09:24:39 | 2014-11-29T09:24:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | class BowlingGame():
"""docstring for BowlingGame"""
_score = 0
_pending_strikes = 0
first_roll = True
score_first_roll = 0
    def roll(self, pins):
        """Record one roll of ``pins`` pins and update the running score.

        NOTE(review): the strike-bonus formula below doubles the following
        frame and looks off versus standard bowling scoring -- confirm it
        against the intended (test-driven) rules before reusing.
        """
        if pins == 10:
            # Strike: bank it; its bonus is applied on the next full frame.
            self._pending_strikes += 1
            self.first_roll = True
        elif self.first_roll is False:
            # Second roll of a frame: the frame can now be scored.
            if self._pending_strikes != 0:
                self._score += (self._pending_strikes * 10) * 2 - 10 + (self.score_first_roll + pins) * 2
                self._pending_strikes = 0
            else:
                self._score += pins + self.score_first_roll
            self.first_roll = True
        else:
            # First roll of a frame: remember it until the frame completes.
            self.score_first_roll = pins
            self.first_roll = False
def score(self):
return self._score | [
"remi.marenco@gmail.com"
] | remi.marenco@gmail.com |
6b09f9aba1293dfc55ba24eb95056373cfd7cfb5 | f4d98cc3fa4959d5ab12362629e64e5db5363345 | /image/read.py | a2d5cedb99f75aa34d8d58bcb0366c2fc2fbe74b | [] | no_license | dongfengjue/python | 675ed34398f56521f3b1612e20c9e8ab3729ba19 | c73511bb81388f9a61847b97d2dd7e6f1f1a6b5b | refs/heads/master | 2021-06-12T20:12:29.697376 | 2019-10-14T09:49:27 | 2019-10-14T09:49:27 | 115,174,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | from PIL import Image
import pytesseract
# brew install tesseract
# 如中文需下载chi_sim.traineddata,将下载好的语言包放到/usr/local/Cellar/tesseract/3.05.01/share/tessdata/路径下。
# https://pan.baidu.com/s/1J0HNoVhX8WexS_5r0k2jDw 密码: ywc3 语言包
picPath = '/Users/chenbing/Documents/file/word.jpg'
Image = Image.open(picPath) # 打开图片
text = pytesseract.image_to_string(Image,lang='chi_sim') #使用简体中文解析图片
print(text) | [
"1009529808@qq.com"
] | 1009529808@qq.com |
90e384af21bd08a3b265c6e8e265ecec84c75f85 | 63619a0a59b5c4f47978519a4ef22484e62e69cc | /project-euler/problem66.py | aec3313fd84809da7d2e1b7a32a131457fa013db | [] | no_license | nidi3/project-euler | dad3998b0ac90714e7c06452bf90c4ebf59531f4 | bf87c6f75f30dc59405a239a8c5e9ef4d9a0dc24 | refs/heads/master | 2021-01-18T14:19:42.407815 | 2015-03-08T22:26:43 | 2015-03-08T22:26:43 | 30,136,649 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | from cont_frac import cont_frac_of_root, eval_cont_frac, coeff_from_periodic
q = set()
for x in xrange(1, 100000): q.add(x * x)
def min_solution(d):
r = cont_frac_of_root(d)
n = 0
while True:
p = eval_cont_frac(n, coeff_from_periodic(r))
if p[0] * p[0] - d * p[1] * p[1] == 1:
return p[0]
n += 1
mx = md = 0
for d in xrange(1, 1001):
if d not in q:
x = min_solution(d)
if x > mx:
mx = x
md = d
print md
| [
"ghuder5@gmx.ch"
] | ghuder5@gmx.ch |
0b082fb7f73882c9ca7854907e24acb96d4303f5 | 4bac91673b47c485a0104e8556cc2098c4cfe238 | /web_dynamic/100-hbnb.py | 6500c6cc895f22ebc0352156f4500ad5518b4e6c | [
"LicenseRef-scancode-public-domain"
] | permissive | huggins9000211/AirBnB_clone_v4 | 08f5cee1e07a8310a03c062fb80c314a7c62ad22 | b77f0e9548eba3b84efc2be9f8f853c578282524 | refs/heads/master | 2022-07-15T09:27:23.562937 | 2020-05-19T02:43:18 | 2020-05-19T02:43:18 | 264,271,959 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,308 | py | #!/usr/bin/python3
""" Starts a Flash Web Application """
from models import storage
import uuid
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.place import Place
from os import environ
from flask import Flask, render_template
app = Flask(__name__)
# app.jinja_env.trim_blocks = True
# app.jinja_env.lstrip_blocks = True
@app.teardown_appcontext
def close_db(error):
""" Remove the current SQLAlchemy Session """
storage.close()
@app.route('/100-hbnb/', strict_slashes=False)
def hbnb():
""" HBNB is alive! """
states = storage.all(State).values()
states = sorted(states, key=lambda k: k.name)
st_ct = []
for state in states:
st_ct.append([state, sorted(state.cities, key=lambda k: k.name)])
amenities = storage.all(Amenity).values()
amenities = sorted(amenities, key=lambda k: k.name)
places = storage.all(Place).values()
places = sorted(places, key=lambda k: k.name)
return render_template('100-hbnb.html',
states=st_ct,
amenities=amenities,
places=places,
cache_id=uuid.uuid4())
if __name__ == "__main__":
""" Main Function """
app.run(host='0.0.0.0', port=5000)
| [
"ezra.nobrega@outlook.com"
] | ezra.nobrega@outlook.com |
39695f540bade7e05ff8fa960c71d068109b1dda | 2dd0bf6e8542b560c2e3567f8793b561cb0678b0 | /code/src/main/python/misconceptions/syntactics/grammar/R.py | 4c300cb4f55146b42613854ab34bcb255fe58cbf | [
"Unlicense"
] | permissive | Eduardo95/COSAL | 021f01cfa86e656c3fe320159c8d25ca5b6f311d | 4eb95d286288aa25a1a90db40cb1998dad048e1b | refs/heads/master | 2023-06-17T08:19:37.925879 | 2021-07-12T16:24:06 | 2021-07-12T16:24:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,354 | py | import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "COSAL"
from lark import Lark
R_GRAMMAR = """
start: value (ASSIGNMENT_OPERATOR value)?
// expr: value ( indexer | value | attribute)+
binary_expr: value BINARY_OPERATOR value
unary_expr: UNARY_OPERATOR value
indexer: value "[" value "]"
attribute: value "$" value
value: unary_expr
| binary_expr
| array
| list
| matrix
| data_frame
| tuple
| slice_range
| QUOTED_STRING
| NUMBER
| BOOL
| NULL
| NAME
| if_else
| func_call
| attribute
| indexer
| values
values: value? ("," value?)+
array: ("c" "(" [value ("," value)*] ")") | ("[" value? ("," value?)* "]")
list: "list" "(" [value ("," value)*] ")"
matrix: "matrix" "(" args ")"
data_frame: "data.frame" "(" args ")"
tuple: "(" [value ("," value)*] ")"
QUOTED_STRING : DOUBLE_QUOTED_STRING | SINGLE_QUOTED_STRING | TILDE_QUOTED_STRING
DOUBLE_QUOTED_STRING : /"[^"]*"/
SINGLE_QUOTED_STRING : /'[^']*'/
TILDE_QUOTED_STRING : /`[^']*`/
NAME: ("_"|LETTER) ("_"|LETTER|DIGIT|".")*
BOOL: "TRUE" | "FALSE"
if_else: "ifelse" "(" value "," value "," value")"
slice_range: value? ":" value?
NULL: "NULL" | "NaN"
ASSIGNMENT_OPERATOR: "="
| "<-"
BINARY_OPERATOR: "+"
| "-"
| "**"
| "*"
| "/"
| "^"
| "%%"
| "%/%"
| ">="
| ">"
| "<="
| "<"
| "=="
| "!="
| "|"
| "&"
UNARY_OPERATOR: "!"
| "-"
func_name: NAME | TILDE_QUOTED_STRING
func_args: value ("," value)*
func_kwarg: NAME "=" value
func_kwargs: func_kwarg ("," func_kwarg)*
args: (func_args | func_kwargs | (func_args "," func_kwargs))
//indexer_args: (value | values | func_name)
func_call: func_name "(" args? ")"
// %import common.CNAME -> NAME
%import common.SIGNED_NUMBER -> NUMBER
%import common.LETTER -> LETTER
%import common.DIGIT -> DIGIT
%import common.WORD
%import common.WS
%import common.NEWLINE -> NEWLINE
%ignore WS
"""
def r_parser():
return Lark(R_GRAMMAR)
def _test():
parser = r_parser()
# print(parser.parse("df.iloc[1:2, df[[2]]]"))
# print(parser.parse("df.set_value(dfaxis=8.05)"))
# print(parser.parse('table(df$Parch, df$Survived)'))
print(parser.parse('mean(df$Fare)'))
def verify():
from utils import cache
misconceptions_path = "/Users/panzer/Raise/ProgramRepair/CodeSeer/code/src/main/python/misconceptions.xlsx"
wb = cache.read_excel(misconceptions_path, read_only=True)
# sheet = wb.get_sheet_by_name('HighSim-HighSyn')
sheet = wb.get_sheet_by_name('LowSim-LowSyn')
parser = r_parser()
seen = set()
for i, row in enumerate(sheet.iter_rows()):
if i == 0:
continue
snippet = row[0].value
if i >= 1 and snippet not in seen:
print(i, snippet)
seen.add(snippet)
parser.parse(snippet)
elif i % 100 == 0:
print("Dont worry I'm running", i)
if __name__ == "__main__":
# verify()
_test() | [
"george.meg91@gmail.com"
] | george.meg91@gmail.com |
eb36e1c51d25f8e46e38f1c101348621540c6ba2 | cb58aa97d748b196e20fdec00e1c50afa0bbdbe0 | /skAffinityPropagation.py | d5a487fd730ae29e10162c1b320e444209be92a0 | [] | no_license | FASLADODO/SKClustering | 06219c18f9b9eca7545293d03d1cd2d650352b0d | 89e629b528250c34c4a146f33fc84690b6e4c4bf | refs/heads/master | 2022-04-10T12:56:09.788650 | 2020-02-18T09:59:23 | 2020-02-18T09:59:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,096 | py | from EvalClus import evaluate
import pandas as pd
import csv
'''
This file runs different configurations of AffinityPropagation
Each parameter different values are defined as paramerter name and “_r” at the end
All results are written to csv file defined in the code , csv_file = "./output/AffinityPropagation_eval_out.csv"
'''
estimator = "AffinityPropagation"
''' nmi is a flag, when it is set to true the model will only evaluate configurations based on ground truth data
'''
nmi = True
if nmi:
csv_file = "./output/BestModel/AffinityPropagation_bestmodel.csv"
else:
csv_file = "./output/AffinityPropagation_out.csv"
count_all=1
damping_r=[0.5,0.6,0.7,0.8,0.9,1]
count_all*=len(damping_r)
convergence_iter_r=[3,10,15,20,30]#, 'random']
count_all*=len(convergence_iter_r)
max_iter_r= [300]
count_all*=len(max_iter_r)
count=0
for damping in damping_r:
for convergence_iter in convergence_iter_r:
for max_iter in max_iter_r:
config = {"damping": damping, "convergence_iter": convergence_iter,
"max_iter": max_iter}
s = evaluate(estimator, config)
flag = s.run_all(verbose=True,nmi=nmi)
count+=1
print("run "+str(count)+" configs out of "+str(count_all))
if flag:
out = s.res
d = {}
# for key in out:
for dataset in out.keys():
d0 = {"dataset": dataset}
d1 = out[dataset]
d0.update(d1)
d0.update(config)
if nmi:
dcols=dcols=["dataset" , "damping" , "convergence_iter" , "max_iter" ,'nmi']
else:
dcols=["dataset" , "damping" , "convergence_iter" , "max_iter" ,'Baker_Hubert_Gamma', 'Ball_Hall', 'Banfeld_Raferty', 'Davies_Bouldin', 'Dunns_index', 'McClain_Rao', 'PBM_index', 'Ratkowsky_Lance', 'Ray_Turi', 'Scott_Symons', 'Wemmert_Gancarski', 'Xie_Beni', 'c_index', 'det_ratio', 'g_plus_index', 'i_index', 'ksq_detw_index', 'log_det_ratio', 'log_ss_ratio', 'modified_hubert_t', 'point_biserial', 'r_squared', 'root_mean_square', 's_dbw', 'silhouette', 'tau_index', 'trace_w', 'trace_wib', 'IIndex', 'SDBW', 'ari', 'ami', 'nmi','v_measure','silhouette_score','calinski_harabasz_score']
with open(csv_file, 'a', newline='') as csvfile:
writer = csv.DictWriter(
csvfile, delimiter='\t', fieldnames=dcols)
#writer.writeheader()
# for data in dict_data:
dwrite={}
for key in dcols:
dwrite[key]=d0[key]
writer.writerow(dwrite)
csvfile.flush()
| [
"abdelrhman.d@aucegypt.edu"
] | abdelrhman.d@aucegypt.edu |
ac5004c1099a74ceeec349847c63c54c17d4bbbb | ec0c7ca71135e9004ab3cdc76b703a2284863a1b | /fabfile/python/__init__.py | def88f12d553fc31866957d66c58f429ccc26b11 | [
"MIT"
] | permissive | stepan-perlov/jrs | 4fcf5921a4301f9d6d748fc40193a678bf143008 | c4db322f433dd5da35d5a13f6cc18483e1df8fc9 | refs/heads/master | 2021-01-17T07:45:22.735113 | 2016-07-07T11:17:02 | 2016-07-07T13:41:56 | 41,475,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | from fabric.api import task
from fabric.api import local, lcd
@task
def pip():
with lcd("python"):
local("rm -rf dist jrs.egg-info")
local("./setup.py sdist")
local("twine upload dist/{}".format(local("ls dist", capture=True).strip()))
| [
"stepanperlov@gmail.com"
] | stepanperlov@gmail.com |
e1fbce2504b9a37a750d423f2a7e28595b4e259b | 4099ae3d8e367c8b47e284245b3aa2bd8621e95e | /nssf_project/urls.py | c9ca15e76ef0d6581605a147b7b6ea02d6638ba3 | [] | no_license | kenedyakn/ns-project | b1f64f6c05e30a66575dfa4a68aae7b729cb80f9 | a904a4eee40f05793b925cb7198514a4a4e3278d | refs/heads/master | 2020-03-20T23:37:38.250807 | 2018-06-19T08:29:41 | 2018-06-19T08:29:41 | 137,855,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | """nssf_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# from django.contrib import admin
from django.urls import path
from django.urls import include, path
urlpatterns = [
# path('admin/', admin.site.urls),
path('ecollections/', include('ecollections.urls')),
]
| [
"kenedyakenaivan@gmail.com"
] | kenedyakenaivan@gmail.com |
2718c3441138bf66c7e26a309ed95597a6632432 | 19375a18719e44eee7c596e72ef8915d3fcbff92 | /day07_spider/06_qq.py | e29058c5d79b1d50650db9c34bab04e364ccb3bd | [] | no_license | J-shan0903/AID1912 | 6c617fa26751c31ff05a63050a320122e3ca044e | 0797f3d8ef0e96b8eb6908dffbec8193c9614973 | refs/heads/master | 2021-03-23T12:21:32.480026 | 2020-05-23T08:36:21 | 2020-05-23T08:36:21 | 247,452,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | from selenium import webdriver
driver = webdriver.Chrome()
driver.get(url='https://mail.qq.com/')
driver.switch_to.frame('login_frame')
driver.find_element_by_id('u').send_keys('369618935@qq.com')
driver.find_element_by_id('p').send_keys('353597jss')
driver.find_elements_by_class_name('btn').click() | [
"369618935@qq.com"
] | 369618935@qq.com |
d11c09fad7adf4123d50761f6e834de4193b6c99 | 8956535434a4b7496f3b311df82892b719dce114 | /source/python/neuropod/backends/python/utils.py | 63092172d06e72647119f299c7e8ae97668ab940 | [
"Apache-2.0"
] | permissive | uber/neuropod | 5b5d4fc95b09f74c7d29f8e3a5246af6c7d0cc90 | fde79d98f975124a21c6221a79377f0b3a034c5f | refs/heads/master | 2023-09-04T04:35:35.765879 | 2022-07-15T19:49:18 | 2022-07-15T19:49:18 | 167,240,425 | 924 | 80 | Apache-2.0 | 2023-06-24T15:15:55 | 2019-01-23T19:22:22 | C++ | UTF-8 | Python | false | false | 2,385 | py | # Copyright (c) 2022 The Neuropod Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from neuropod.loader import load_neuropod
from neuropod.packagers import create_python_neuropod
from testpath.tempdir import TemporaryDirectory
_MODEL_SOURCE = """
def model():
return {}
def get_model(_):
return model
"""
def preinstall_deps(backend_version, requirements):
"""
Preinstall python dependencies into the isolated python environments used to
run Neuropod models.
This can be used to reduce load times for models using large dependencies.
:param backend_version: The version of the python Neuropod backend to install the deps for (e.g. `3.6`)
:param requirements: The deps to preinstall. See the docs for `create_python_neuropod` for details.
"""
with TemporaryDirectory() as tmp_dir:
neuropod_path = os.path.join(tmp_dir, "temp_neuropod")
model_code_dir = os.path.join(tmp_dir, "model_code")
os.makedirs(model_code_dir)
with open(os.path.join(model_code_dir, "model.py"), "w") as f:
f.write(_MODEL_SOURCE)
# Creates and loads a python "model" that just serves to preload depsq
create_python_neuropod(
neuropod_path=neuropod_path,
model_name="temp_preload_model",
data_paths=[],
code_path_spec=[
{
"python_root": model_code_dir,
"dirs_to_package": [""], # Package everything in the python_root
}
],
entrypoint_package="model",
entrypoint="get_model",
input_spec=[],
output_spec=[],
platform_version_semver=backend_version,
requirements=requirements,
)
# Load the model to trigger installing the deps
load_neuropod(neuropod_path)
| [
"viv.panyam@gmail.com"
] | viv.panyam@gmail.com |
81e485b35660f099130b86190a84cb18a680c9ee | 17f0c21cd18937f6b3558bde770c4100a3596a13 | /WeBlog/app/email.py | 036504485a977c9f572c856feb851c5505bb51ec | [
"MIT"
] | permissive | yinzhe1989/web | 5a6843dd10149b613f5ebe9532b123bbd666d46b | 3af7fbd82af8b4d2737672779d5742825b94a6fe | refs/heads/master | 2022-12-17T09:04:06.363085 | 2019-06-13T07:29:26 | 2019-06-13T07:29:26 | 191,696,492 | 0 | 0 | MIT | 2022-12-08T03:02:49 | 2019-06-13T05:29:47 | Python | UTF-8 | Python | false | false | 669 | py | from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['WEBLOG_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
recipients=[to], sender=app.config['WEBLOG_MAIL_SENDER'])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr | [
"shihaojie1219@gmail.com"
] | shihaojie1219@gmail.com |
1807d83edbe77891c745b4e78518146dd97465bb | 77ced9c9a954abe4cdaf8c29f5ce2b087ce9fff1 | /pymicroconnectors/config.py | 081c938059cd0cb55726d38ca9fdda3564ff37ae | [
"MIT"
] | permissive | ddelizia/pymicroconnectors | 4c074bd50224605971ff609b6279abd655e16e44 | e41e3c928d47f069a9b0d342e8033ee7b9e3a0a5 | refs/heads/master | 2021-07-16T06:54:44.884332 | 2018-09-24T19:53:19 | 2018-09-24T19:53:19 | 150,156,933 | 0 | 0 | MIT | 2020-05-20T17:01:03 | 2018-09-24T19:27:08 | Python | UTF-8 | Python | false | false | 1,121 | py | """
Created by ddelizia
Config utility to provide easy access to the app configuration
"""
import logging
import os
from functools import lru_cache
from figgypy import Config as Figgypy
def _configure_logger():
logger_config = get_config().get('logger')
if logger_config is not None:
for key in logger_config:
level = logging.getLevelName(logger_config.get(key))
logging.getLogger(key).setLevel(level)
def _select_file():
file_path = os.environ.get('ENV')
if file_path is None:
file_path = 'config/config.yml'
return file_path
def _get_path(dot_notation):
return dot_notation.split('.')
@lru_cache(maxsize=1)
def get_config() -> dict:
return cfg.values
@lru_cache(maxsize=256)
def get_value(dot_notation: str, the_type=None):
path = _get_path(dot_notation)
current = get_config()
for element in path:
current = current.get(element)
if type is not None:
pass
return current
cfg = Figgypy(config_file=_select_file(),
decrypt_gpg=False,
decrypt_kms=False)
_configure_logger()
| [
"danilo.delizia@thinkwrap.com"
] | danilo.delizia@thinkwrap.com |
e9fefb58e4632df56be027a71c291cf45956e7d8 | 6fa463d51cee7109bddb51723f1b7997ba61bc9f | /User_API/settings.py | 7a07464e962545cb97f4d5a781d6b85c51be7766 | [] | no_license | Priya5686/Intern_Update | 565bd601bfb2580450ac46a6970d1bb324f3a94f | 613c79eeb5d012362c6ebf7b935fec3c0dbf34a1 | refs/heads/master | 2021-01-13T22:15:30.388151 | 2020-02-18T17:01:37 | 2020-02-18T17:01:37 | 242,512,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,192 | py | """
Django settings for User_API project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!@=+1paz3p)iwoh9ow6f(#!_3y+$y5(!kqi&7%%4_pfsp+jh&n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'User_APP',
'bootstrap3',
'rest_framework_swagger',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'User_API.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'User_API.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/app/'
| [
"priyaj5686@gmail.com"
] | priyaj5686@gmail.com |
5c4ac087fbb113c430411670b9f46dfde0b46102 | 41c57af19f3f2267cee27589c92e360172c463ca | /wmpl/Utils/Plotting.py | 62e126c8536304d91d4deeb9d65b12842f42f909 | [
"MIT"
] | permissive | wmpg/WesternMeteorPyLib | 278f6b005902bb94c38ba9cb0ef50290c51e9164 | fe511f264c4354a84b4a1c60f257883473e3855d | refs/heads/master | 2023-09-01T18:18:38.638990 | 2023-08-30T18:13:51 | 2023-08-30T18:13:51 | 178,280,764 | 29 | 13 | MIT | 2023-07-26T17:53:13 | 2019-03-28T20:53:45 | Python | UTF-8 | Python | false | false | 4,063 | py | """ Functions for plotting purposes. """
from __future__ import absolute_import, print_function, division
import os
import numpy as np
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
from wmpl.Config import config
from wmpl.Utils.OSTools import mkdirP
def saveImage(file_path, img, vmin=None, vmax=None, cmap=None, format=None, origin=None):
""" Save numpy array to disk as image. """
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
fig = Figure(figsize=img.shape[::-1], dpi=1, frameon=False)
FigureCanvas(fig)
fig.figimage(img, cmap=cmap, vmin=vmin, vmax=vmax, origin=origin)
fig.savefig(file_path, dpi=1, format=format)
fig.clf()
del fig
def savePlot(plt_handle, file_path, output_dir='.', kwargs=None):
""" Saves the plot to the given file path, with the DPI defined in configuration.
Arguments:
plt_handle: [object] handle to the plot to the saved (usually 'plt' in the main program)
file_path: [string] file name and path to which the plot will be saved
Keyword arguments:
kwargs: [dictionary] Extra keyword arguments to be passed to savefig. None by default.
"""
if kwargs is None:
kwargs = {}
# Make the output directory, if it does not exist
mkdirP(output_dir)
# Save the plot (remove all surrounding white space)
plt_handle.savefig(os.path.join(output_dir, file_path), dpi=config.plots_DPI, bbox_inches='tight',
**kwargs)
class Arrow3D(FancyArrowPatch):
""" Arrow in 3D for plotting in matplotlib.
Arguments:
xs: [list of floats] (origin, destination) pair for the X axis
ys: [list of floats] (origin, destination) pair for the Y axis
zs: [list of floats] (origin, destination) pair for the Z axis
Source:
https://stackoverflow.com/questions/22867620/putting-arrowheads-on-vectors-in-matplotlibs-3d-plot
"""
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
self.do_3d_projection(renderer)
def do_3d_projection(self, renderer=None):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, self.axes.M)
self.set_positions((xs[0], ys[0]),(xs[1], ys[1]))
if renderer is not None:
FancyArrowPatch.draw(self, renderer)
return np.min(zs) if zs.size else np.nan
def set3DEqualAxes(ax):
""" Make axes of 3D plot have equal scale so that spheres appear as spheres, cubes as cubes, etc.
This is one possible solution to Matplotlib's ax.set_aspect('equal') and ax.axis('equal') not working
for 3D.
Source: https://stackoverflow.com/a/31364297
Arguments:
ax: [matplotlib axis] Axis handle of a 3D plot.
"""
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity norm, hence I call half the max range
# the plot radius
plot_radius = 0.5*max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
if __name__ == '__main__':
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Test the arrow function
a = Arrow3D([0.2, 3], [1, -2], [0, 1], mutation_scale=20, lw=3, arrowstyle="-|>", color="r")
ax.add_artist(a)
plt.show() | [
"denis.vida@gmail.com"
] | denis.vida@gmail.com |
283b168ab1f7c8cdb131ec1007e511759db810e2 | 953a1f7de146f1eb085d6d8ebf4296d143b023c2 | /check_pred.py | c98e307e826512144f1b65b7bd833f76e7fe53ac | [] | no_license | tomergill/DL_ass3 | 7d760794f378046359b12f4b9f967cd79f708d1f | df2dea464335ebd36ffbcb155adcd919c7acee66 | refs/heads/master | 2021-05-06T05:43:34.874514 | 2017-12-31T19:23:57 | 2017-12-31T19:23:57 | 115,134,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | import re
def check(good_pattern, words_and_tags):
good = 0.0
wrong = []
for i, (word, tag) in enumerate(words_and_tags):
real_tag = "good" if re.match(good_pattern, word) is not None else "bad"
if real_tag == tag:
good += 1
else:
wrong.append((i, word, tag))
return good / len(words_and_tags), wrong
def main():
num_pattern = '[0-9]+'
good_pattern = num_pattern + 'a' + num_pattern + 'b' + num_pattern + 'c' + num_pattern + 'd' + num_pattern
f = "test1.pred"
words_and_tags = [tuple(line[:-1].split(" \t")) for line in file(f) if line != "\n"]
acc, wrong = check(good_pattern, words_and_tags)
print "Accuracy: {}%\n".format(acc * 100)
print "Wrong:"
print "line tag word"
for i, word, tag in wrong:
print i, tag, word
if __name__ == "__main__":
main()
| [
"coolran103@gmail.com"
] | coolran103@gmail.com |
92534bd9869286a16f3bd69ad7549c0a897bb059 | ba7f977669f8fbfe0ad1b7e3d5d9a778753ef267 | /socialbus-portuguese-classifier/sdb.py | d77d69de9ee66d30ce81dbde567c18cf70f9e4fc | [
"MIT"
] | permissive | LIAAD/socialbus | b3c5f52527fec6365ca93d306f9250a6cc2deffd | 942de3195f40732e8aa985c01f3733f9bc7eb39d | refs/heads/master | 2021-08-14T22:27:04.861181 | 2019-10-21T09:51:48 | 2019-10-21T09:51:48 | 11,526,450 | 4 | 1 | MIT | 2021-08-09T20:49:37 | 2013-07-19T11:25:15 | CSS | UTF-8 | Python | false | false | 1,139 | py | from pymongo import Connection
server = "192.168.102.195"
database = "socialecho_v05"
connection = Connection(server)
db = connection[database]
def get_user_info(user_id):
"Get all information about the user."
return db["twitter"].find_one({"user.id": user_id})["user"]
def get_user_tweets(user_id, limit):
"Get the latest [limit] messages of the user with twitter user id [user_id]."
return list(db["twitter"].find({"user.id": user_id}, limit=limit))
def get_user_tweets_count(user_id):
"Calculate how many messages of the user we have in the database."
total = db["twitter"].find({"user.id": user_id}).count()
print total
return total
# def get_users():
# "Return an iterator that returns all users in the database."
# return ( x['id'] for x in db.users.find({},{"id":True, "_id":False}) )
def get_text_sample(user_id, maximum):
if get_user_tweets_count(user_id) >= maximum:
print "return list() de tweets" ,user_id
return [ x["text"] for x in db["twitter"].find({"user.id": user_id}, {"text": True, "_id": False}).limit(maximum) ]
else:
print "return list() vazio" ,user_id
return list()
| [
"me@arianpasquali.com"
] | me@arianpasquali.com |
cb3d416ffdf505c74b755e6ba646936f2cf5c105 | 83e2041d75416c3ff1f3480c9236ee6ac080b744 | /__init__.py | 5624871ade4ea6c6385ca560312d8a9b21ac50cd | [] | no_license | tilipatov/pyFtext | aec6cc4c47de49ebd4362d93cab092f40e1f3dbd | b2d9bcfab087906007f75094f37d6e06f07de87d | refs/heads/master | 2021-08-31T21:59:36.132841 | 2017-12-23T02:59:03 | 2017-12-23T02:59:03 | 115,164,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | #!/usr/bin/ python
# coding:utf8
| [
"noreply@github.com"
] | tilipatov.noreply@github.com |
38d2bcac5c41dd566830ab131ec56cc759cf07c8 | fe0d9ce17e4c73cba10e3a300822e246ec63a186 | /deepsight/service/common/fasterRCNN/utils/view_bndbox.py | b1e999824b08d1f03930fca8d5763bf5f0d25dc7 | [
"MIT"
] | permissive | vitomuzi/deepsight2.0-ai | b1738f76cdbefc38a3c432e0c09d8b4d35259599 | 1aaee04eeecf5610b7e1ef5b9239e68a4ba5e31b | refs/heads/master | 2021-05-24T17:05:36.923173 | 2020-04-07T02:47:24 | 2020-04-07T02:47:24 | 253,669,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,183 | py | import numpy as np
import os
import PIL
from PIL import ImageDraw
import xml.etree.ElementTree as ET
import general as ut
# view bounding box in test images.
def view_bndboxes_2d(img_path, ant_path, out_path):
img = PIL.Image.open(img_path)
boxes = _load_annt_file(ant_path)
if boxes is None:
img.save(out_path)
return
draw = ImageDraw.Draw(img)
for i in range(boxes.shape[0]):
[xmin,ymin,xmax,ymax] = boxes[i, :]
draw.rectangle([xmin,ymin,xmax,ymax], outline=(0,255,0))
draw.rectangle([xmin+1,ymin+1,xmax-1,ymax-1], outline=(0,255,0))
draw.rectangle([xmin+2,ymin+2,xmax-2,ymax-2], outline=(0,255,0))
img.save(out_path)
return
# load annotation file and return bounding boxes
def _load_annt_file(ant_path):
tree = ET.parse(ant_path)
objs = tree.findall('object')
num_objs = len(objs)
if num_objs <= 0:
return None
boxes = np.empty(shape=[num_objs, 4], dtype=np.uint16)
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
x1 = int(bbox.find('xmin').text)
y1 = int(bbox.find('ymin').text)
x2 = int(bbox.find('xmax').text)
y2 = int(bbox.find('ymax').text)
boxes[ix, :] = [x1, y1, x2, y2]
return boxes
if __name__ == '__main__':
# # show test cases ==================================
# gt_ant_dir = '/home/lc/code/NetKit/data/own_data/test_raw'
# restore_ant_dir = '/home/lc/code/NetKit/data/output/restore/test'
# refine_ant_dir = '/home/lc/code/NetKit/data/output/refined/test'
# img_dir = '/home/lc/code/NetKit/data/own_data/test_raw'
# out_dir = '/home/lc/code/NetKit/data/output/visualize'
# file_itr = ut.get_file_list(img_dir=img_dir, img_suffix='.jpg')
# for file_name in file_itr:
# img_path = img_dir+'/'+file_name+'.jpg'
# gt_ant_path = gt_ant_dir+'/'+file_name+'.xml'
# restore_ant_path = restore_ant_dir+'/'+file_name+'.xml'
# refine_ant_path = refine_ant_dir+'/'+file_name+'.xml'
# gt_out_path = out_dir+'/'+file_name+'-0.jpg'
# restore_out_path = out_dir+'/'+file_name+'-1.jpg'
# refine_out_path = out_dir+'/'+file_name+'-2.jpg'
# print(img_path)
# view_bndboxes_2d(img_path, gt_ant_path, gt_out_path)
# view_bndboxes_2d(img_path, restore_ant_path, restore_out_path)
# view_bndboxes_2d(img_path, refine_ant_path, refine_out_path)
# # show all_neg test cases ==================================
# restore_ant_dir = '/home/lc/code/NetKit/data/output/restore/all_neg_test'
# refine_ant_dir = '/home/lc/code/NetKit/data/output/refined/all_neg_test'
# img_dir = '/home/lc/code/NetKit/data/own_data/all_neg/test_raw'
# out_dir = '/home/lc/code/NetKit/data/output/visualize/all_neg'
# file_itr = ut.get_file_list(img_dir=img_dir, img_suffix='.jpg')
# for file_name in file_itr:
# img_path = img_dir+'/'+file_name+'.jpg'
# restore_ant_path = restore_ant_dir+'/'+file_name+'.xml'
# refine_ant_path = refine_ant_dir+'/'+file_name+'.xml'
# restore_out_path = out_dir+'/'+file_name+'-1.jpg'
# refine_out_path = out_dir+'/'+file_name+'-2.jpg'
# print(img_path)
# view_bndboxes_2d(img_path, restore_ant_path, restore_out_path)
# view_bndboxes_2d(img_path, refine_ant_path, refine_out_path)
gt_ant_dir = '/home/liuchang/code/classification_net_pytorch/data/own_data/all_neg/test'
ant_dir = '/home/liuchang/code/classification_net_pytorch/data/output/all_neg/test'
refine_ant_dir = '/home/liuchang/code/classification_net_pytorch/data/output/cls_output/all_neg/test'
img_dir = '/home/liuchang/code/classification_net_pytorch/data/own_data/all_neg/test'
out_dir = '/home/liuchang/code/classification_net_pytorch/data/own_data/vis/all_neg'
file_itr = ut.get_file_list(img_dir=img_dir, img_suffix='.jpg')
for file_name in file_itr:
img_path = img_dir+'/'+file_name+'.jpg'
gt_ant_path = gt_ant_dir+'/'+file_name+'.xml'
ant_path = ant_dir+'/'+file_name+'.xml'
refine_ant_path = refine_ant_dir+'/'+file_name+'.xml'
gt_out_path = out_dir+'/'+file_name+'-0.jpg'
rcnn_out_path = out_dir+'/'+file_name+'-1.jpg'
refine_out_path = out_dir+'/'+file_name+'-2.jpg'
print(img_path)
# view_bndboxes_2d(img_path, gt_ant_path, gt_out_path)
view_bndboxes_2d(img_path, ant_path, rcnn_out_path)
view_bndboxes_2d(img_path, refine_ant_path, refine_out_path) | [
"lisenlin@deepsight.top"
] | lisenlin@deepsight.top |
111e4e3b3e118be47d757ed5f872a5057ef0e42c | 4a307849ed4dded5ce84b0ceb6d2cf56c2e64b89 | /common/servicechain/firewall/verify.py | b43cb7d94e9c935986e1e607a161918f1394dedd | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | lmadhusudhanan/contrail-test | a6316b41dcb836315d25503f1dee511943d7f976 | bd39ff19da06a20bd79af8c25e3cde07375577cf | refs/heads/master | 2022-05-04T20:01:58.960911 | 2018-06-27T17:56:47 | 2018-06-27T17:56:47 | 138,913,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,312 | py | import os
import re
from common.servicechain.verify import VerifySvcChain
from tcutils.util import get_random_cidr
from tcutils.util import get_random_name
from tcutils.util import retry
SVC_TYPE_PROPS = {
'firewall': {'in-network-nat': 'tiny_nat_fw',
'in-network': 'tiny_in_net',
'transparent': 'tiny_trans_fw',
},
'analyzer': {'transparent': 'analyzer',
'in-network' : 'analyzer',
}
}
class VerifySvcFirewall(VerifySvcChain):
def verify_svc_span(self, in_net=False):
    """Build parallel TCP and UDP service chains between a left and a
    right VN and verify per-protocol policy enforcement.

    Flow:
      1. Create left/right VNs and one VM in each.
      2. Create a TCP chain (tcp 8000->9000) and a UDP chain
         (udp 8001->9001), each behind its own policy.
      3. Verify both traffic streams pass.
      4. Delete the TCP chain; UDP must keep passing while TCP drops.
      5. Re-create the TCP chain and verify both streams pass again.

    :param in_net: when True, the service instances are created in
        in-network mode with explicit left/right VN fixtures.
    """
    vn1_name = get_random_name("left_vn")
    vn1_subnets = ['31.1.1.0/24']
    vm1_name = get_random_name('left_vm')
    vn2_name = get_random_name("right_vn")
    vn2_subnets = ['41.2.2.0/24']
    vm2_name = get_random_name('right_vm')
    if in_net:
        vn1_name = get_random_name("in_left_vn")
        vn1_subnets = ['32.1.1.0/24']
        vm1_name = get_random_name('in_left_vm')
        vn2_name = get_random_name("in_right_vn")
        vn2_subnets = ['42.2.2.0/24']
        vm2_name = get_random_name('in_right_vm')
    vn1_fixture = self.config_vn(vn1_name, vn1_subnets)
    vn2_fixture = self.config_vn(vn2_name, vn2_subnets)
    vm1_fixture = self.config_vm(vm1_name, vn_fix=vn1_fixture)
    vm2_fixture = self.config_vm(vm2_name, vn_fix=vn2_fixture)
    assert vm1_fixture.verify_on_setup()
    assert vm2_fixture.verify_on_setup()
    vm1_fixture.wait_till_vm_is_up()
    vm2_fixture.wait_till_vm_is_up()

    max_inst = 3
    # --- TCP service chain ---
    st_name = get_random_name("tcp_svc_template")
    si_prefix = "tcp_bridge_"
    policy_name = get_random_name("allow_tcp")
    if in_net:
        st_name = get_random_name("in_tcp_svc_template")
        si_prefix = "in_tcp_bridge_"
        policy_name = get_random_name("in_allow_tcp")
        tcp_st_fixture, tcp_si_fixture = self.config_st_si(
            st_name, si_prefix, max_inst=max_inst,
            left_vn_fixture=vn1_fixture, right_vn_fixture=vn2_fixture)
    else:
        tcp_st_fixture, tcp_si_fixture = self.config_st_si(
            st_name, si_prefix, max_inst=max_inst)
    action_list = [tcp_si_fixture.fq_name_str]
    # Allow only tcp 8000 -> 9000 through the TCP service chain.
    rule = [{'direction': '<>',
             'protocol': 'tcp',
             'source_network': vn1_name,
             'src_ports': [8000, 8000],
             'dest_network': vn2_name,
             'dst_ports': [9000, 9000],
             'simple_action': None,
             'action_list': {'apply_service': action_list}
             }]
    tcp_policy_fixture = self.config_policy(policy_name, rule)
    self.verify_si(tcp_si_fixture)

    # --- UDP service chain ---
    st_name = get_random_name("udp_svc_template")
    si_prefix = "udp_bridge_"
    policy_name = get_random_name("allow_udp")
    if in_net:
        st_name = get_random_name("in_udp_svc_template")
        si_prefix = "in_udp_bridge_"
        policy_name = get_random_name("in_allow_udp")
        udp_st_fixture, udp_si_fixture = self.config_st_si(
            st_name, si_prefix, max_inst=max_inst,
            left_vn_fixture=vn1_fixture, right_vn_fixture=vn2_fixture)
    else:
        udp_st_fixture, udp_si_fixture = self.config_st_si(
            st_name, si_prefix, max_inst=max_inst)
    action_list = [udp_si_fixture.fq_name_str]
    # Allow only udp 8001 -> 9001 through the UDP service chain.
    rule = [{'direction': '<>',
             'protocol': 'udp',
             'source_network': vn1_name,
             'src_ports': [8001, 8001],
             'dest_network': vn2_name,
             'dst_ports': [9001, 9001],
             'simple_action': None,
             'action_list': {'apply_service': action_list}
             }]
    udp_policy_fixture = self.config_policy(policy_name, rule)
    vn1_udp_policy_fix = self.attach_policy_to_vn(
        [tcp_policy_fixture, udp_policy_fixture], vn1_fixture)
    vn2_udp_policy_fix = self.attach_policy_to_vn(
        [tcp_policy_fixture, udp_policy_fixture], vn2_fixture)
    result, msg = self.validate_vn(vn1_name)
    assert result, msg
    result, msg = self.validate_vn(vn2_name)
    assert result, msg
    # BUGFIX: was 'udp_si_fixtures' -- an undefined name (NameError).
    assert self.verify_si(udp_si_fixture)

    # Install traffic package in the VMs and verify both chains pass.
    vm1_fixture.install_pkg("Traffic")
    vm2_fixture.install_pkg("Traffic")
    sport = 8001
    dport = 9001
    sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
                                     'udp', sport=sport, dport=dport)
    errmsg = "UDP traffic with src port %s and dst port %s failed" % (
        sport, dport)
    assert sent and recv == sent, errmsg
    sport = 8000
    dport = 9000
    sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
                                     'tcp', sport=sport, dport=dport)
    errmsg = "TCP traffic with src port %s and dst port %s failed" % (
        sport, dport)
    assert sent and recv == sent, errmsg

    # Remove the TCP chain: UDP must keep working, TCP must now drop.
    # BUGFIX: was 'tcp_si_fixtures' -- an undefined name (NameError).
    self.delete_si_st(tcp_si_fixture, tcp_st_fixture)
    sport = 8001
    dport = 9001
    sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
                                     'udp', sport=sport, dport=dport)
    errmsg = "UDP traffic with src port %s and dst port %s failed" % (
        sport, dport)
    assert sent and recv == sent, errmsg
    sport = 8000
    dport = 9000
    sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
                                     'tcp', sport=sport, dport=dport)
    errmsg = "TCP traffic with src port %s and dst port %s passed; Expected to fail" % (
        sport, dport)
    assert sent and recv == 0, errmsg

    # Re-create the TCP chain and verify both streams pass again.
    st_name = get_random_name("tcp_svc_template")
    si_prefix = "tcp_bridge_"
    policy_name = get_random_name("allow_tcp")
    if in_net:
        st_name = get_random_name("in_tcp_svc_template")
        si_prefix = "in_tcp_bridge_"
        policy_name = get_random_name("in_allow_tcp")
        tcp_st_fixture, tcp_si_fixture = self.config_st_si(
            st_name, si_prefix, max_inst=max_inst,
            left_vn_fixture=vn1_fixture, right_vn_fixture=vn2_fixture)
    else:
        tcp_st_fixture, tcp_si_fixture = self.config_st_si(
            st_name, si_prefix, max_inst=max_inst)
    action_list = [tcp_si_fixture.fq_name_str]
    result, msg = self.validate_vn(vn1_name)
    assert result, msg
    result, msg = self.validate_vn(vn2_name)
    assert result, msg
    # BUGFIX: was 'tcp_si_fixtures' -- an undefined name (NameError).
    self.verify_si(tcp_si_fixture)
    sport = 8001
    dport = 9001
    sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
                                     'udp', sport=sport, dport=dport)
    errmsg = "UDP traffic with src port %s and dst port %s failed" % (
        sport, dport)
    assert sent and recv == sent, errmsg
    sport = 8000
    dport = 9000
    sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
                                     'tcp', sport=sport, dport=dport)
    errmsg = "TCP traffic with src port %s and dst port %s failed" % (
        sport, dport)
    assert sent and recv == sent, errmsg
def verify_multi_inline_svc(self, *args, **kwargs):
    """Configure a multi-SI inline service chain and verify it end to end.

    Verifies every ST/SI pair in the chain, validates both VNs, checks
    the service action on the first SI and, for icmp/any protocols,
    pings the right VM from the left VM.  Returns the config dict
    produced by config_multi_inline_svc().
    """
    chain = self.config_multi_inline_svc(*args, **kwargs)
    proto = kwargs.get('proto', 'any')
    left_vn_fq_name = chain.get('left_vn_fixture').vn_fq_name
    right_vn_fq_name = chain.get('right_vn_fixture').vn_fq_name
    left_vm_fixture = chain.get('left_vm_fixture')
    right_vm_fixture = chain.get('right_vm_fixture')
    st_fixtures = chain.get('st_fixtures')
    si_fixtures = chain.get('si_fixtures')
    # Verify every template/instance pair in the chain.
    for idx, st_fixture in enumerate(st_fixtures):
        assert st_fixture.verify_on_setup(), 'ST Verification failed'
        assert si_fixtures[idx].verify_on_setup(), 'SI Verification failed'
    result, msg = self.validate_vn(left_vn_fq_name)
    assert result, msg
    result, msg = self.validate_vn(right_vn_fq_name, right_vn=True)
    assert result, msg
    result, msg = self.validate_svc_action(
        left_vn_fq_name, si_fixtures[0], right_vm_fixture, src='left')
    assert result, msg
    if proto in ('any', 'icmp'):
        # Ping from left VM to right VM
        errmsg = "Ping to Right VM %s from Left VM failed" % right_vm_fixture.vm_ip
        assert left_vm_fixture.ping_with_certainty(
            right_vm_fixture.vm_ip, count='3'), errmsg
    else:
        self.logger.info('Will skip Ping test')
    return chain
# end verify_multi_inline_svc
def verify_policy_delete_add(self, si_test_dict):
    """Detach and delete the chain policy, confirm traffic breaks, then
    re-create and re-attach it and confirm traffic recovers.

    :param si_test_dict: fixture dict built by the SI setup helpers.
    :returns: True on success (asserts on any failure).
    """
    policy_fixture = si_test_dict['policy_fixture']
    left_vm_fixture = si_test_dict['left_vm_fixture']
    right_vm_fixture = si_test_dict['right_vm_fixture']
    right_ip = right_vm_fixture.vm_ip

    # Detach the policy from both VNs and delete it.
    self.detach_policy(si_test_dict['left_vn_policy_fix'])
    self.detach_policy(si_test_dict['right_vn_policy_fix'])
    self.unconfig_policy(policy_fixture)

    # Without the policy the chain is broken: ping must fail.
    errmsg = "Ping to right VM ip %s from left VM passed; expected to fail" % right_ip
    assert left_vm_fixture.ping_with_certainty(
        right_ip, expectation=False), errmsg

    # Re-create the same policy and attach it back to both VNs.
    policy_fixture = self.config_policy(policy_fixture.policy_name,
                                        policy_fixture.rules_list)
    left_vn_policy_fix = self.attach_policy_to_vn(
        policy_fixture, si_test_dict['left_vn_fixture'])
    right_vn_policy_fix = self.attach_policy_to_vn(
        policy_fixture, si_test_dict['right_vn_fixture'])
    assert self.verify_si(si_test_dict['si_fixture'])

    # Wait for the stale flow entry to age out before re-testing.
    self.sleep(40)
    errmsg = "Ping to right VM ip %s from left VM failed" % right_ip
    assert left_vm_fixture.ping_with_certainty(right_ip), errmsg
    return True
# end verify_policy_delete_add
def verify_protocol_port_change(self, si_test_dict, mode='transparent'):
    """Replace the allow-all chain policy with a tcp-8000->9001-only rule
    and verify UDP traffic is then dropped while TCP still flows.

    :param si_test_dict: fixture dict built by the SI setup helpers.
    :param mode: service-mode label; not referenced in the body
        (NOTE(review): possibly kept for caller compatibility -- confirm).
    """
    left_vn_policy_fix = si_test_dict['left_vn_policy_fix']
    right_vn_policy_fix = si_test_dict['right_vn_policy_fix']
    policy_fixture = si_test_dict['policy_fixture']
    left_vm_fixture = si_test_dict['left_vm_fixture']
    right_vm_fixture = si_test_dict['right_vm_fixture']
    left_vn_fixture = si_test_dict['left_vn_fixture']
    right_vn_fixture = si_test_dict['right_vn_fixture']
    si_fixture = si_test_dict['si_fixture']
    # Install traffic package in VM
    left_vm_fixture.install_pkg("Traffic")
    right_vm_fixture.install_pkg("Traffic")
    # Baseline: with the current policy both UDP and TCP streams pass.
    sport = 8000
    dport = 9000
    sent, recv = self.verify_traffic(left_vm_fixture, right_vm_fixture,
                                     'udp', sport=sport, dport=dport)
    errmsg = "UDP traffic with src port %s and dst port %s failed" % (
        sport, dport)
    assert sent and recv == sent, errmsg
    sport = 8000
    dport = 9001
    sent, recv = self.verify_traffic(left_vm_fixture, right_vm_fixture,
                                     'tcp', sport=sport, dport=dport)
    errmsg = "TCP traffic with src port %s and dst port %s failed" % (
        sport, dport)
    assert sent and recv == sent, errmsg
    # Delete policy
    self.detach_policy(left_vn_policy_fix)
    self.detach_policy(right_vn_policy_fix)
    self.unconfig_policy(policy_fixture)
    # Update rule with specific port/protocol
    #action_list = {'apply_service': self.action_list}
    action_list = policy_fixture.rules_list[0]['action_list']
    new_rule = {'direction': '<>',
                'protocol': 'tcp',
                'source_network': si_test_dict['left_vn_fixture'].vn_fq_name,
                'src_ports': [8000, 8000],
                'dest_network': si_test_dict['right_vn_fixture'].vn_fq_name,
                'dst_ports': [9001, 9001],
                'simple_action': None,
                'action_list': action_list
                }
    rules = [new_rule]
    # Create new policy with rule to allow traffci from new VN's
    policy_fixture = self.config_policy(policy_fixture.policy_name, rules)
    left_vn_policy_fix = self.attach_policy_to_vn(
        policy_fixture, left_vn_fixture)
    right_vn_policy_fix = self.attach_policy_to_vn(
        policy_fixture, right_vn_fixture)
    assert self.verify_si(si_fixture)
    # UDP no longer matches the tcp-only rule, so it must be dropped.
    self.logger.debug("Send udp traffic; with policy rule %s", new_rule)
    sport = 8000
    dport = 9000
    sent, recv = self.verify_traffic(left_vm_fixture, right_vm_fixture,
                                     'udp', sport=sport, dport=dport)
    errmsg = "UDP traffic with src port %s and dst port %s passed; Expected to fail" % (
        sport, dport)
    assert sent and recv == 0, errmsg
    sport = 8000
    dport = 9001
    self.logger.debug("Send tcp traffic; with policy rule %s", new_rule)
    sent, recv = self.verify_traffic(left_vm_fixture, right_vm_fixture,
                                     'tcp', sport=sport, dport=dport)
    errmsg = "TCP traffic with src port %s and dst port %s failed" % (
        sport, dport)
    assert sent and recv == sent, errmsg
# verify_protocol_port_change
def verify_add_new_vns(self, si_test_dict):
    """Add a second left/right VN pair behind the same service chain and
    verify policy scoping:

      * with an allow-any rule between the new VNs, traffic flows only
        between VN pairs the policy explicitly connects;
      * with a udp-only rule, ICMP between the new VNs is dropped while
        UDP 8000->9000 passes;
      * after deleting the new VNs again, the original chain still works.
    """
    left_vn_policy_fix = si_test_dict['left_vn_policy_fix']
    right_vn_policy_fix = si_test_dict['right_vn_policy_fix']
    policy_fixture = si_test_dict['policy_fixture']
    left_vm_fixture = si_test_dict['left_vm_fixture']
    right_vm_fixture = si_test_dict['right_vm_fixture']
    si_fixture = si_test_dict['si_fixture']
    left_vn_fixture = si_test_dict['left_vn_fixture']
    right_vn_fixture = si_test_dict['right_vn_fixture']
    # Delete policy
    self.detach_policy(left_vn_policy_fix)
    self.detach_policy(right_vn_policy_fix)
    self.unconfig_policy(policy_fixture)
    # Create one more left and right VN's
    new_left_vn = "new_left_bridge_vn"
    new_left_vn_net = [get_random_cidr(af=self.inputs.get_af())]
    new_right_vn = "new_right_bridge_vn"
    new_right_vn_net = [get_random_cidr(af=self.inputs.get_af())]
    new_left_vn_fix = self.config_vn(new_left_vn, new_left_vn_net)
    new_right_vn_fix = self.config_vn(new_right_vn, new_right_vn_net)
    # Launch VMs in new left and right VN's
    new_left_vm = 'new_left_bridge_vm'
    new_right_vm = 'new_right_bridge_vm'
    new_left_vm_fix = self.config_vm(new_left_vm, vn_fix=new_left_vn_fix)
    new_right_vm_fix = self.config_vm(new_right_vm, vn_fix=new_right_vn_fix)
    # Wait for VM's to come up
    new_left_vm_fix.wait_till_vm_is_up()
    new_right_vm_fix.wait_till_vm_is_up()
    # Add rule to policy to allow traffic from new left_vn to right_vn
    # through SI
    action_list = policy_fixture.input_rules_list[0]['action_list']
    new_rule = {'direction': '<>',
                'protocol': 'any',
                'source_network': new_left_vn,
                'src_ports': [0, 65535],
                'dest_network': new_right_vn,
                'dst_ports': [0, 65535],
                'simple_action': action_list.get('simple_action', None),
                'action_list': action_list,
                }
    rules = policy_fixture.input_rules_list
    rules.append(new_rule)
    # Create new policy with rule to allow traffic from new VN's
    policy_fixture = self.config_policy(policy_fixture.policy_name, rules)
    left_vn_policy_fix = self.attach_policy_to_vn(
        policy_fixture, left_vn_fixture)
    right_vn_policy_fix = self.attach_policy_to_vn(
        policy_fixture, right_vn_fixture)
    # attach policy to new VN's
    new_policy_left_vn_fix = self.attach_policy_to_vn(
        policy_fixture, new_left_vn_fix)
    new_policy_right_vn_fix = self.attach_policy_to_vn(
        policy_fixture, new_right_vn_fix)
    self.verify_si(si_fixture)
    # Ping from left VM to right VM
    self.sleep(5)
    self.logger.info("Verfiy ICMP traffic between new VN's.")
    errmsg = "Ping to right VM ip %s from left VM failed" % new_right_vm_fix.vm_ip
    assert new_left_vm_fix.ping_with_certainty(
        new_right_vm_fix.vm_ip), errmsg
    self.logger.info(
        "Verfiy ICMP traffic between new left VN and existing right VN.")
    # Cross-pair traffic has no matching policy rule, so it must drop.
    errmsg = "Ping to right VM ip %s from left VM passed; \
        Expected tp Fail" % right_vm_fixture.vm_ip
    assert new_left_vm_fix.ping_with_certainty(right_vm_fixture.vm_ip,
                                               expectation=False), errmsg
    self.logger.info(
        "Verfiy ICMP traffic between existing VN's with allow all.")
    errmsg = "Ping to right VM ip %s from left VM failed" % right_vm_fixture.vm_ip
    assert left_vm_fixture.ping_with_certainty(
        right_vm_fixture.vm_ip), errmsg
    self.logger.info(
        "Verfiy ICMP traffic between existing left VN and new right VN.")
    errmsg = "Ping to right VM ip %s from left VM passed; \
        Expected to Fail" % new_right_vm_fix.vm_ip
    assert left_vm_fixture.ping_with_certainty(new_right_vm_fix.vm_ip,
                                               expectation=False), errmsg
    # Ping between left VN's
    self.logger.info(
        "Verfiy ICMP traffic between new left VN and existing left VN.")
    errmsg = "Ping to left VM ip %s from another left VM in different VN \
        passed; Expected to fail" % left_vm_fixture.vm_ip
    assert new_left_vm_fix.ping_with_certainty(left_vm_fixture.vm_ip,
                                               expectation=False), errmsg
    self.logger.info(
        "Verfiy ICMP traffic between new right VN and existing right VN.")
    errmsg = "Ping to right VM ip %s from another right VM in different VN \
        passed; Expected to fail" % right_vm_fixture.vm_ip
    assert new_right_vm_fix.ping_with_certainty(right_vm_fixture.vm_ip,
                                                expectation=False), errmsg
    # Delete policy
    self.detach_policy(left_vn_policy_fix)
    self.detach_policy(right_vn_policy_fix)
    self.detach_policy(new_policy_left_vn_fix)
    self.detach_policy(new_policy_right_vn_fix)
    self.unconfig_policy(policy_fixture)
    # Add rule to policy to allow only tcp traffic from new left_vn to right_vn
    # through SI
    rules.remove(new_rule)
    udp_rule = {'direction': '<>',
                'protocol': 'udp',
                'source_network': new_left_vn,
                'src_ports': [8000, 8000],
                'dest_network': new_right_vn,
                'dst_ports': [9000, 9000],
                'simple_action': action_list.get('simple_action', None),
                'action_list': {'apply_service': action_list['apply_service']}
                }
    rules.append(udp_rule)
    # Create new policy with rule to allow traffci from new VN's
    policy_fixture = self.config_policy(policy_fixture.policy_name, rules)
    left_vn_policy_fix = self.attach_policy_to_vn(
        policy_fixture, left_vn_fixture)
    right_vn_policy_fix = self.attach_policy_to_vn(
        policy_fixture, right_vn_fixture)
    # attach policy to new VN's
    new_policy_left_vn_fix = self.attach_policy_to_vn(
        policy_fixture, new_left_vn_fix)
    new_policy_right_vn_fix = self.attach_policy_to_vn(
        policy_fixture, new_right_vn_fix)
    self.verify_si(si_fixture)
    # Ping from left VM to right VM with udp rule
    self.logger.info(
        "Verify ICMP traffic with allow udp only rule from new left VN to new right VN")
    errmsg = "Ping to right VM ip %s from left VM passed; Expected to fail" % new_right_vm_fix.vm_ip
    assert new_left_vm_fix.ping_with_certainty(new_right_vm_fix.vm_ip,
                                               expectation=False), errmsg
    # Install traffic package in VM
    left_vm_fixture.install_pkg("Traffic")
    right_vm_fixture.install_pkg("Traffic")
    new_left_vm_fix.install_pkg("Traffic")
    new_right_vm_fix.install_pkg("Traffic")
    self.logger.info(
        "Verify UDP traffic with allow udp only rule from new left VN to new right VN")
    sport = 8000
    dport = 9000
    sent, recv = self.verify_traffic(new_left_vm_fix, new_right_vm_fix,
                                     'udp', sport=sport, dport=dport)
    errmsg = "UDP traffic with src port %s and dst port %s failed" % (
        sport, dport)
    assert sent and recv == sent, errmsg
    self.logger.info("Verfiy ICMP traffic with allow all.")
    errmsg = "Ping to right VM ip %s from left VM failed" % right_vm_fixture.vm_ip
    assert left_vm_fixture.ping_with_certainty(
        right_vm_fixture.vm_ip), errmsg
    self.logger.info("Verify UDP traffic with allow all")
    sport = 8001
    dport = 9001
    sent, recv = self.verify_traffic(left_vm_fixture, right_vm_fixture,
                                     'udp', sport=sport, dport=dport)
    errmsg = "UDP traffic with src port %s and dst port %s failed" % (
        sport, dport)
    assert sent and recv == sent, errmsg
    # Delete policy
    self.delete_vm(new_left_vm_fix)
    self.delete_vm(new_right_vm_fix)
    self.detach_policy(new_policy_left_vn_fix)
    self.detach_policy(new_policy_right_vn_fix)
    self.delete_vn(new_left_vn_fix)
    self.delete_vn(new_right_vn_fix)
    self.verify_si(si_fixture)
    self.logger.info(
        "Icmp traffic with allow all after deleting the new left and right VN.")
    errmsg = "Ping to right VM ip %s from left VM failed" % right_vm_fixture.vm_ip
    assert left_vm_fixture.ping_with_certainty(
        right_vm_fixture.vm_ip), errmsg
# end verify_add_new_vns
def verify_add_new_vms(self, si_test_dict):
    """Launch one extra VM in each existing left/right VN and verify the
    chain carries all VM pairs; then restrict the policy to
    udp 8000->9000 only and verify UDP passes while ICMP is dropped for
    every VM pair.
    """
    left_vn_policy_fix = si_test_dict['left_vn_policy_fix']
    right_vn_policy_fix = si_test_dict['right_vn_policy_fix']
    policy_fixture = si_test_dict['policy_fixture']
    left_vm_fixture = si_test_dict['left_vm_fixture']
    right_vm_fixture = si_test_dict['right_vm_fixture']
    si_fixture = si_test_dict['si_fixture']
    left_vn_fixture = si_test_dict['left_vn_fixture']
    right_vn_fixture = si_test_dict['right_vn_fixture']
    # Launch VMs in new left and right VN's
    new_left_vm = 'new_left_bridge_vm'
    new_right_vm = 'new_right_bridge_vm'
    new_left_vm_fix = self.config_vm(new_left_vm, vn_fix=left_vn_fixture)
    new_right_vm_fix = self.config_vm(new_right_vm, vn_fix=right_vn_fixture)
    # Wait for VM's to come up
    assert new_left_vm_fix.wait_till_vm_is_up()
    assert new_right_vm_fix.wait_till_vm_is_up()
    # Ping from left VM to right VM
    # All four left/right VM combinations must pass with the current policy.
    errmsg = "Ping to right VM ip %s from left VM failed" % new_right_vm_fix.vm_ip
    assert new_left_vm_fix.ping_with_certainty(
        new_right_vm_fix.vm_ip), errmsg
    errmsg = "Ping to right VM ip %s from left VM failed" % right_vm_fixture.vm_ip
    assert new_left_vm_fix.ping_with_certainty(
        right_vm_fixture.vm_ip), errmsg
    errmsg = "Ping to right VM ip %s from left VM failed" % right_vm_fixture.vm_ip
    assert left_vm_fixture.ping_with_certainty(
        right_vm_fixture.vm_ip), errmsg
    errmsg = "Ping to right VM ip %s from left VM failed" % new_right_vm_fix.vm_ip
    assert left_vm_fixture.ping_with_certainty(
        new_right_vm_fix.vm_ip), errmsg
    # Install traffic package in VM
    left_vm_fixture.install_pkg("Traffic")
    right_vm_fixture.install_pkg("Traffic")
    self.logger.debug("Send udp traffic; with policy rule allow all")
    sport = 8000
    dport = 9000
    sent, recv = self.verify_traffic(left_vm_fixture, right_vm_fixture,
                                     'udp', sport=sport, dport=dport)
    errmsg = "UDP traffic with src port %s and dst port %s failed" % (
        sport, dport)
    assert sent and recv == sent, errmsg
    # Delete policy
    self.detach_policy(left_vn_policy_fix)
    self.detach_policy(right_vn_policy_fix)
    self.unconfig_policy(policy_fixture)
    # Add rule to policy to allow traffic from new left_vn to right_vn
    # through SI
    action_list = policy_fixture.rules_list[0]['action_list']
    # udp-only rule: note there is no 'simple_action' key here.
    new_rule = {'direction': '<>',
                'protocol': 'udp',
                'source_network': left_vn_fixture.vn_name,
                'src_ports': [8000, 8000],
                'dest_network': right_vn_fixture.vn_name,
                'dst_ports': [9000, 9000],
                'action_list': action_list
                }
    rules = [new_rule]
    # Create new policy with rule to allow traffci from new VN's
    policy_fixture = self.config_policy(policy_fixture.policy_name, rules)
    left_vn_policy_fix = self.attach_policy_to_vn(
        policy_fixture, left_vn_fixture)
    right_vn_policy_fix = self.attach_policy_to_vn(
        policy_fixture, right_vn_fixture)
    self.verify_si(si_fixture)
    # Install traffic package in VM
    new_left_vm_fix.install_pkg("Traffic")
    new_right_vm_fix.install_pkg("Traffic")
    self.logger.debug("Send udp traffic; with policy rule %s", new_rule)
    sport = 8000
    dport = 9000
    sent, recv = self.verify_traffic(left_vm_fixture, right_vm_fixture,
                                     'udp', sport=sport, dport=dport)
    errmsg = "UDP traffic with src port %s and dst port %s failed" % (
        sport, dport)
    assert sent and recv == sent, errmsg
    sent, recv = self.verify_traffic(left_vm_fixture, new_right_vm_fix,
                                     'udp', sport=sport, dport=dport)
    errmsg = "UDP traffic with src port %s and dst port %s failed" % (
        sport, dport)
    assert sent and recv == sent, errmsg
    sent, recv = self.verify_traffic(new_left_vm_fix, new_right_vm_fix,
                                     'udp', sport=sport, dport=dport)
    errmsg = "UDP traffic with src port %s and dst port %s failed" % (
        sport, dport)
    assert sent and recv == sent, errmsg
    sent, recv = self.verify_traffic(new_left_vm_fix, right_vm_fixture,
                                     'udp', sport=sport, dport=dport)
    errmsg = "UDP traffic with src port %s and dst port %s failed" % (
        sport, dport)
    assert sent and recv == sent, errmsg
    # Ping from left VM to right VM
    # ICMP no longer matches the udp-only rule, so every pair must drop.
    errmsg = "Ping to right VM ip %s from left VM failed; Expected to fail" % new_right_vm_fix.vm_ip
    assert new_left_vm_fix.ping_with_certainty(
        new_right_vm_fix.vm_ip, expectation=False), errmsg
    errmsg = "Ping to right VM ip %s from left VM failed; Expected to fail" % right_vm_fixture.vm_ip
    assert new_left_vm_fix.ping_with_certainty(
        right_vm_fixture.vm_ip, expectation=False), errmsg
    errmsg = "Ping to right VM ip %s from left VM failed; Expected to fail" % right_vm_fixture.vm_ip
    assert left_vm_fixture.ping_with_certainty(
        right_vm_fixture.vm_ip, expectation=False), errmsg
    errmsg = "Ping to right VM ip %s from left VM failed; Expected to fail" % new_right_vm_fix.vm_ip
    assert left_vm_fixture.ping_with_certainty(
        new_right_vm_fix.vm_ip, expectation=False), errmsg
# end verify_add_new_vms
def verify_firewall_with_mirroring(
        self, max_inst=1,
        firewall_svc_mode='in-network', mirror_svc_mode='transparent'):
    """Validate a firewall service chain combined with traffic mirroring:
    builds the firewall SI and an analyzer SI, applies a policy that both
    chains and mirrors left<->right traffic, then checks the mirrored
    ICMP packet count on the analyzer.
    """
    #TODO
    # max_inst cannot be more than one in this method since
    # analyzer packet count verification logic needs to be updated when
    # in case of more than one mirror SVM
    max_inst = 1
    vn1_name = get_random_name('left_vn')
    vn2_name = get_random_name('right_vn')
    vn1_subnets = [get_random_cidr(af=self.inputs.get_af())]
    vn2_subnets = [get_random_cidr(af=self.inputs.get_af())]
    vm1_name = get_random_name("in_network_vm1")
    vm2_name = get_random_name("in_network_vm2")
    action_list = []
    firewall_st_name = get_random_name("svc_firewall_template_1")
    firewall_si_prefix = get_random_name("svc_firewall_instance")
    mirror_st_name = get_random_name("svc_mirror_template_1")
    mirror_si_prefix = get_random_name("svc_mirror_instance")
    policy_name = get_random_name("policy_in_network")
    mgmt_vn_fixture = self.config_vn(get_random_name('mgmt'),
                                     [get_random_cidr(af=self.inputs.get_af())])
    vn1_fixture = self.config_vn(vn1_name, vn1_subnets)
    vn2_fixture = self.config_vn(vn2_name, vn2_subnets)
    vns = [mgmt_vn_fixture, vn1_fixture, vn2_fixture]

    def firewall_svc_create(vn_list):
        # Build ST + service VMs + SI for the firewall; vn_list is
        # [mgmt, left, right].
        st_fixture = self.config_st(firewall_st_name,
                                    service_type='firewall',
                                    service_mode=firewall_svc_mode,
                                    mgmt=getattr(mgmt_vn_fixture, 'vn_fq_name', None),
                                    left=vn_list[1].vn_fq_name,
                                    right=vn_list[2].vn_fq_name)
        svm_fixtures = self.create_service_vms(vn_list,
                                               service_mode=st_fixture.service_mode,
                                               service_type=st_fixture.service_type,
                                               max_inst=max_inst)
        firewall_si_fixture = self.config_si(firewall_si_prefix,
                                             st_fixture,
                                             max_inst=max_inst,
                                             mgmt_vn_fq_name=getattr(mgmt_vn_fixture, 'vn_fq_name', None),
                                             left_vn_fq_name=vn_list[1].vn_fq_name,
                                             right_vn_fq_name=vn_list[2].vn_fq_name,
                                             svm_fixtures=svm_fixtures)
        assert firewall_si_fixture.verify_on_setup()
        return firewall_si_fixture

    # Transparent mode needs throw-away left/right VNs for the SI itself.
    if firewall_svc_mode == 'transparent':
        dummy_vn1 = self.config_vn('dummy_vn1', [get_random_cidr(af=self.inputs.get_af())])
        dummy_vn2 = self.config_vn('dummy_vn2', [get_random_cidr(af=self.inputs.get_af())])
        dummy_vn_list = [mgmt_vn_fixture, dummy_vn1, dummy_vn2]
        firewall_si_fixture = firewall_svc_create(dummy_vn_list)
    else:
        firewall_si_fixture = firewall_svc_create(vns)
    action_list = [firewall_si_fixture.fq_name_str]
    # Analyzer (mirror) service instance attached to the left VN.
    mirror_st_fixture = self.config_st(mirror_st_name,
                                       service_type='analyzer',
                                       service_mode=mirror_svc_mode,
                                       left=vn1_fixture.vn_fq_name)
    mirror_svm_fixtures = self.create_service_vms([vn1_fixture],
                                                  service_mode=mirror_st_fixture.service_mode,
                                                  service_type=mirror_st_fixture.service_type,
                                                  max_inst=max_inst)
    mirror_si_fixture = self.config_si(mirror_si_prefix,
                                       mirror_st_fixture,
                                       max_inst=max_inst,
                                       left_vn_fq_name=vn1_fixture.vn_fq_name,
                                       svm_fixtures=mirror_svm_fixtures)
    assert mirror_si_fixture.verify_on_setup()
    action_list += [mirror_si_fixture.fq_name_str]
    # One rule: pass through the firewall SI and mirror to the analyzer.
    rules = [
        {
            'direction': '<>',
            'protocol': 'any',
            'source_network': vn1_name,
            'src_ports': [0, 65535],
            'dest_network': vn2_name,
            'dst_ports': [0, 65535],
            'simple_action': 'pass',
            'action_list': {'simple_action': 'pass',
                            'mirror_to': {'analyzer_name': action_list[1]},
                            'apply_service': action_list[:1]}
        },
    ]
    policy_fixture = self.config_policy(policy_name, rules)
    vn1_policy_fix = self.attach_policy_to_vn(
        policy_fixture, vn1_fixture)
    vn2_policy_fix = self.attach_policy_to_vn(
        policy_fixture, vn2_fixture)
    vm1_fixture = self.config_vm(vm1_name, vn_fix=vn1_fixture)
    vm2_fixture = self.config_vm(vm2_name, vn_fix=vn2_fixture)
    vm1_fixture.wait_till_vm_is_up()
    vm2_fixture.wait_till_vm_is_up()
    result, msg = self.validate_vn(vn1_fixture.vn_fq_name)
    assert result, msg
    result, msg = self.validate_vn(vn2_fixture.vn_fq_name)
    assert result, msg
    assert self.verify_si(firewall_si_fixture)
    assert self.verify_si(mirror_si_fixture)
    svms = self.get_svms_in_si(firewall_si_fixture)
    svm_node_ip = svms[0].vm_node_ip
    # Ping from left VM to right VM
    errmsg = "Ping to right VM ip %s from left VM failed" % vm2_fixture.vm_ip
    assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip), errmsg
    # Verify ICMP mirror
    sessions = self.tcpdump_on_all_analyzer(mirror_si_fixture)
    errmsg = "Ping to right VM ip %s from left VM failed" % vm2_fixture.vm_ip
    assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip), errmsg
    for svm_name, (session, pcap) in sessions.items():
        # Expected mirrored-packet count depends on whether the VMs are
        # colocated and on the firewall service mode (some placements
        # mirror each packet twice) -- NOTE(review): derived from the
        # branches below, confirm against the analyzer verification docs.
        if vm1_fixture.vm_node_ip == vm2_fixture.vm_node_ip:
            if firewall_svc_mode == 'transparent':
                count = 20
            else:
                count = 10
        if vm1_fixture.vm_node_ip != vm2_fixture.vm_node_ip:
            if firewall_svc_mode == 'in-network' and vm1_fixture.vm_node_ip == svm_node_ip:
                count = 10
            else:
                count = 20
        self.verify_icmp_mirror(svm_name, session, pcap, count)
# end verify_firewall_with_mirroring
def verify_ecmp_hash(self, vn_fixture=None, left_vm_fixture=None, right_vm_fixture=None, ecmp_hash='default'):
    """Assert that the configured ECMP hash fields are programmed at the agent."""
    ok, err = self.verify_ecmp_hash_at_agent(
        vn_fixture=vn_fixture,
        left_vm_fixture=left_vm_fixture,
        right_vm_fixture=right_vm_fixture,
        ecmp_hash=ecmp_hash)
    assert ok, err
# end verify_ecmp_hash
@retry(delay=5, tries=10)
def verify_ecmp_hash_at_agent(self, vn_fixture=None, left_vm_fixture=None, right_vm_fixture=None, ecmp_hash='default'):
    """Verify the ECMP hashing fields programmed at the agent for the
    right VM's /32 route in the left VN's VRF.

    :param ecmp_hash: 'default' for the full 5-tuple, or a dict of
        {config_field_name: bool}.
    :returns: (result, msg) tuple; result is True when the agent's
        hashing fields match the configured set.
    """
    # Default ECMP hash uses the full 5-tuple.
    if ecmp_hash == 'default':
        ecmp_hash = {"source_ip": True, "destination_ip": True,
                     "source_port": True, "destination_port": True,
                     "ip_protocol": True}
    # Map configured field names to the names the agent displays
    # (source_ip -> l3-source-address, etc.).
    # BUGFIX: use .get() truthiness instead of key membership, so a
    # field explicitly configured as False is not expected at the agent.
    field_map = (('source_ip', 'l3-source-address'),
                 ('destination_ip', 'l3-destination-address'),
                 ('source_port', 'l4-source-port'),
                 ('destination_port', 'l4-destination-port'),
                 ('ip_protocol', 'l4-protocol'))
    ecmp_hash_config = [agent_name for cfg_name, agent_name in field_map
                        if ecmp_hash.get(cfg_name)]
    # Locate the left VN's VRF on the compute hosting the left VM.
    (domain, project, vn) = vn_fixture.vn_fq_name.split(':')
    inspect_h = self.agent_inspect[left_vm_fixture.vm_node_ip]
    agent_vrf_objs = inspect_h.get_vna_vrf_objs(domain, project, vn)
    agent_vrf_obj = left_vm_fixture.get_matching_vrf(
        agent_vrf_objs['vrf_list'], vn_fixture.vrf_name)
    vn_vrf_id = agent_vrf_obj['ucindex']
    # Get the ECMP hashing fields of the active path to the right VM.
    ecmp_hashing_fields = inspect_h.get_vna_active_route(
        vrf_id=vn_vrf_id, ip=right_vm_fixture.vm_ip,
        prefix='32')['path_list'][0]['ecmp_hashing_fields']
    # Drop empty tokens left by separators in the agent output.
    ecmp_hash_at_agent = [field for field in ecmp_hashing_fields.split(',')
                          if field]
    # Compare the configured set with what the agent programmed.
    if set(ecmp_hash_at_agent) == set(ecmp_hash_config):
        result = True
        msg = 'ECMP Hash is configured properly at Agent: {%s}' % ecmp_hashing_fields
    else:
        result = False
        msg = 'ECMP Hash is incorrect at Agent. Configured ECMP Hash is: %s, ECMP Hash present at Agent is:%s' % (
            ecmp_hash_config, ecmp_hash_at_agent)
    self.logger.info(msg)
    return result, msg
# end verify_ecmp_hash_at_agent
| [
"lmadhusudhan@juniper.net"
] | lmadhusudhan@juniper.net |
b519ea50dec2f5d1b9ea40a4211cf6fc172aafde | 30aa7375dd22c230fd7f92fe0d0098f1015d910c | /tellers/migrations/0002_customer.py | 9fc41832cc1609e9bc611108991d581b68976f5e | [] | no_license | malep2007/bank_app | 4e413f058f44706eab6b42218c36fc609c5542f9 | f7192359e4daecbcce18b4f33cb096d28e446c0f | refs/heads/master | 2021-08-08T14:21:34.735130 | 2017-11-08T13:57:32 | 2017-11-08T13:57:32 | 109,974,307 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-11-08 12:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Customer model for the tellers app."""

    dependencies = [
        ('tellers', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=30)),
                ('last_name', models.CharField(max_length=30)),
                ('address', models.CharField(max_length=50)),
                # BUGFIX: Django field names must be valid Python
                # identifiers; the originals 'phone number' and
                # 'account number' (with spaces) break the migration.
                ('phone_number', models.CharField(max_length=10)),
                ('account_number', models.CharField(max_length=50)),
            ],
        ),
    ]
| [
"malep2007@gmail.com"
] | malep2007@gmail.com |
b87d32e346233f7326e9bb0779f343b06a259219 | 7af87d9750223eb6a04ba0c5cb7e894a1e5bec12 | /example/movies/types.py | 292ef8353764b5352c90d712957035ef063a6e28 | [] | no_license | BonifacioJZ/react-django-graphql | 3fb9cf1376b755102c1453a51e6119981bdc8f54 | d6a91ec4ff2faed85096ec4f16518ff877f7e1ca | refs/heads/master | 2020-07-25T04:26:00.739408 | 2019-09-13T10:47:19 | 2019-09-13T10:47:19 | 208,164,226 | 1 | 0 | null | 2019-12-05T00:22:12 | 2019-09-12T23:39:21 | Python | UTF-8 | Python | false | false | 1,300 | py | #En este archivo van todos los esquemas para GraphQL
import graphene
from graphene_django.types import DjangoObjectType, ObjectType
from .models import Actor, Movie
#Creando un type GraphQl para el modelo actor
class ActorType(DjangoObjectType):
    """GraphQL object type auto-derived from the Actor Django model."""
    class Meta:
        model = Actor
#Creando a GraphQl para el modelo Movie
class MovieType(DjangoObjectType):
    """GraphQL object type auto-derived from the Movie Django model."""
    class Meta:
        model = Movie
#Crearcion de querys types
class Query(ObjectType):
    """Root GraphQL query exposing single and bulk Actor/Movie lookups."""
    # Select * from Actor where id = actor.id
    actor = graphene.Field(ActorType,id = graphene.Int())
    # Select * from Movie where id = movie.id
    movie = graphene.Field(MovieType,id = graphene.Int())
    # Select * from Actor
    actors = graphene.List(ActorType)
    # Select * from Movie
    movies = graphene.List(MovieType)
    # These are the resolvers for the queries above.
    def resolve_actor(self,info, **kwargs):
        # Returns the Actor with the given id, or None when no id was passed.
        # NOTE(review): Actor.objects.get raises DoesNotExist for unknown ids.
        id = kwargs.get('id')
        if id is not None:
            return Actor.objects.get(pk=id)
        return None
    def resolve_movie(self,info,**kwargs):
        # Same contract as resolve_actor, for Movie.
        id = kwargs.get('id')
        if id is not None:
            return Movie.objects.get(pk=id)
        return None
    def resolve_actors(self,info, **kwargs):
        # All actors, unfiltered.
        return Actor.objects.all()
    def resolve_movies(self,info,**kwargs):
        # All movies, unfiltered.
        return Movie.objects.all()
"revanjz@gmail.com"
] | revanjz@gmail.com |
0ecd301b4accbd446c44c0bb520f9f5d1f605c38 | 70acf2f67472bab2392a9e288477f2bdfb57c234 | /covidscraper.py | d2c1a3088d982d25de82229345b00cb790628eed | [] | no_license | jesp9/CIT-SP21-Senior-Design-Project | 3312c925bbf39339a63aaf4cd62a61aa1d9180a6 | 89bfe6a9851c6334042c9d1e47b92c8188ad5449 | refs/heads/main | 2023-05-01T08:56:31.306898 | 2021-05-20T18:32:31 | 2021-05-20T18:32:31 | 342,712,946 | 2 | 1 | null | 2021-04-24T08:36:54 | 2021-02-26T22:06:25 | HTML | UTF-8 | Python | false | false | 1,123 | py | import requests
import datetime
from bs4 import BeautifulSoup
def scrapeGlobalCase ():
    """Scrape US COVID-19 headline figures from Google News' COVID map page.

    Returns a dict with the scrape timestamp and four numbers (total cases,
    deaths, total doses, fully vaccinated) as strings with thousands
    separators removed.  On any failure the exception is printed and the
    function implicitly returns None -- callers must handle that.
    """
    try:
        url = "https://news.google.com/covid19/map?hl=en-US&mid=%2Fm%2F09c7w0&state=7&gl=US&ceid=US%3Aen"
        r = requests.get(url)
        soup = BeautifulSoup(r.text, 'html.parser')
        # The 'UvMayb' divs hold the headline figures; the index-to-meaning
        # mapping below is layout-dependent and will break if Google changes
        # the page markup.
        data = soup.find_all("div",class_ = "UvMayb")
        totalCases = (data[0].text.strip().replace(',', ''))
        deaths = (data[1].text.strip().replace(',', ''))
        totalDoses = (data[2].text.strip().replace(',', ''))
        fullyVaccinated = (data[3].text.strip().replace(',', ''))
        TimeNow = datetime.datetime.now()
        return {
            'date': str(TimeNow),
            'TotalCases': totalCases,
            'Deaths': deaths,
            'TotalDoses': totalDoses,
            'FullyVaccinated': fullyVaccinated,
        }
    except Exception as e: print(e)  # broad catch: prints and returns None
# NOTE(review): this performs a live network scrape at import time; consider
# guarding it with `if __name__ == '__main__':`.
testResult = scrapeGlobalCase()
#print("Date:", testResult['date'], "TotalCases:", testResult['TotalCases'], "Total Deaths:", testResult['Deaths'], "Total Doses:" , testResult['TotalDoses'], "Fully Vaccinated:", testResult['FullyVaccinated'])
| [
"rafael.evangelista.53@my.csun.edu"
] | rafael.evangelista.53@my.csun.edu |
e4166169a6ab9bc75fed65d851305632f8c8250e | 41f5d5f602e3e743b2fd8a07712340aec4511089 | /main.py | 90aa444df92de279570ed4c59f4146bcfff46bc4 | [] | no_license | matv3ys/flask-sqlalchemy2-git | 64ae919330303c81fc3a358eb1dac8e7fd084788 | c7f113635df4538ee404934becfe50122538557b | refs/heads/master | 2021-05-20T00:53:14.821873 | 2020-04-01T20:29:50 | 2020-04-01T20:29:50 | 252,115,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,790 | py | from flask import Flask, render_template, redirect
from flask import request, make_response, abort
import datetime
from data import db_session
from data.users import User
from data.jobs import Jobs
from data.departments import Department
from data.news import News
from data.categories import CategoryJob
# импорт нужных форм
from forms import RegisterForm, LoginForm, NewsForm, JobForm, DepForm
from flask_login import LoginManager, login_user, logout_user, login_required, current_user
import sqlalchemy
# настройки приложения
app = Flask(__name__)
login_manager = LoginManager()
login_manager.init_app(app)
app.config['SECRET_KEY'] = 'yandexlyceum_secret_key'
app.config['PERMANENT_SESSION_LIFETIME'] = datetime.timedelta(days=365)
def main():
    """Initialise the SQLite database and start the development server."""
    db_session.global_init("db/mars_db.sqlite")
    app.run()
# начальная страница (список работ)
@app.route("/")
def index():
    """Render the landing page listing every job in the log."""
    db = db_session.create_session()
    all_jobs = db.query(Jobs).all()
    return render_template("works_log.html", title="Mars Colonization", jobs=all_jobs)
# добавление работы
@app.route("/addjob", methods=['GET', 'POST'])
@login_required
def add_job():
    """Create a new job from the submitted form (GET just renders the form)."""
    form = JobForm()
    if form.validate_on_submit():
        session = db_session.create_session()
        job = Jobs()
        job.job = form.title.data
        job.team_leader = form.leader_id.data
        job.work_size = form.work_size.data
        job.collaborators = form.collaborators.data
        job.is_finished = form.is_finished.data
        job.creator = current_user.id
        # Attach the single category chosen in the form.
        category_id = form.category.data
        category = session.query(CategoryJob).filter(CategoryJob.id == category_id).first()
        job.categories.append(category)
        session.commit()
        # current_user lives in another session; appending to its collection
        # can raise for a detached instance, which is deliberately ignored.
        try:
            current_user.jobs.append(job)
        except sqlalchemy.orm.exc.DetachedInstanceError:
            pass
        except sqlalchemy.exc.InvalidRequestError:
            pass
        session.merge(current_user)
        session.commit()
        return redirect('/')
    return render_template('add_job.html', title='Adding a job',
                           form=form)
# редактирование работы
@app.route('/addjob/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_job(id):
    """Edit an existing job.

    GET pre-fills the form from the stored job; POST writes the form back.
    The admin (user id 1) may edit any job, other users only jobs they
    created.  404 if the job is missing or not owned by the user.
    """
    form = JobForm()
    if request.method == "GET":
        session = db_session.create_session()
        if current_user.id == 1:
            job = session.query(Jobs).filter(Jobs.id == id).first()
        else:
            job = session.query(Jobs).filter(Jobs.id == id,
                                             Jobs.creator == current_user.id).first()
        if job:
            form.title.data = job.job
            form.leader_id.data = job.team_leader
            form.work_size.data = job.work_size
            form.collaborators.data = job.collaborators
            form.is_finished.data = job.is_finished
            # A job is assumed to carry exactly one category here.
            form.category.data = job.categories[0].id
        else:
            abort(404)
    if form.validate_on_submit():
        session = db_session.create_session()
        if current_user.id == 1:
            job = session.query(Jobs).filter(Jobs.id == id).first()
        else:
            job = session.query(Jobs).filter(Jobs.id == id,
                                             Jobs.creator == current_user.id).first()
        if job:
            job.job = form.title.data
            job.team_leader = form.leader_id.data
            job.work_size = form.work_size.data
            job.collaborators = form.collaborators.data
            job.is_finished = form.is_finished.data
            category_id = form.category.data
            category = session.query(CategoryJob).filter(CategoryJob.id == category_id).first()
            # Replace the existing (single) category with the chosen one.
            job.categories[0] = category
            session.commit()
            # As in add_job: tolerate a detached current_user instance.
            try:
                current_user.jobs.append(job)
            except sqlalchemy.orm.exc.DetachedInstanceError:
                pass
            except sqlalchemy.exc.InvalidRequestError:
                pass
            session.commit()
            return redirect('/')
        else:
            abort(404)
    return render_template('add_job.html', title='Job edit', form=form)
# удаление работы
@app.route('/job_delete/<int:id>', methods=['GET', 'POST'])
@login_required
def job_delete(id):
    """Delete a job; the admin (id 1) may delete any job, others only their own."""
    db = db_session.create_session()
    criteria = [Jobs.id == id]
    if current_user.id != 1:
        criteria.append(Jobs.creator == current_user.id)
    job = db.query(Jobs).filter(*criteria).first()
    if not job:
        abort(404)
    db.delete(job)
    db.commit()
    return redirect('/')
# блог с новостями
@app.route("/blog")
def blog():
    """Show all public news plus the current user's private posts."""
    db = db_session.create_session()
    criterion = News.is_private != True
    if current_user.is_authenticated:
        # Authenticated users additionally see their own private posts.
        criterion = (News.user == current_user) | criterion
    news = db.query(News).filter(criterion)
    return render_template("blog.html", title="Blog", news=news)
# добавление новостей
@app.route('/news', methods=['GET', 'POST'])
@login_required
def add_news():
    """Create a news post owned by the current user."""
    form = NewsForm()
    if not form.validate_on_submit():
        return render_template('add_news.html', title='News add', form=form)
    db = db_session.create_session()
    post = News()
    post.title = form.title.data
    post.content = form.content.data
    post.is_private = form.is_private.data
    current_user.news.append(post)
    db.merge(current_user)
    db.commit()
    return redirect('/blog')
# редактирование новостей
@app.route('/news/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_news(id):
    """Edit one of the current user's news posts.

    GET pre-fills the form; POST stores the changes.  404 when the post does
    not exist or belongs to another user.
    """
    form = NewsForm()
    if request.method == "GET":
        session = db_session.create_session()
        news = session.query(News).filter(News.id == id,
                                          News.user == current_user).first()
        if news:
            form.title.data = news.title
            form.content.data = news.content
            form.is_private.data = news.is_private
        else:
            abort(404)
    if form.validate_on_submit():
        session = db_session.create_session()
        news = session.query(News).filter(News.id == id,
                                          News.user == current_user).first()
        if news:
            news.title = form.title.data
            news.content = form.content.data
            news.is_private = form.is_private.data
            session.commit()
            return redirect('/blog')
        else:
            abort(404)
    return render_template('add_news.html', title='News edit', form=form)
# удаление новостей
@app.route('/news_delete/<int:id>', methods=['GET', 'POST'])
@login_required
def news_delete(id):
    """Delete one of the current user's news posts, or 404."""
    db = db_session.create_session()
    post = db.query(News).filter(News.id == id,
                                 News.user == current_user).first()
    if not post:
        abort(404)
    db.delete(post)
    db.commit()
    return redirect('/blog')
# список департаментов
@app.route("/departments")
def departments():
    """List every department."""
    db = db_session.create_session()
    all_deps = db.query(Department).all()
    return render_template("departments.html", title="List of Departments", deps=all_deps)
# добавление департамента
@app.route('/add_dep', methods=['GET', 'POST'])
@login_required
def add_dep():
    """Create a new department and attach it to its chief's departments."""
    form = DepForm()
    if form.validate_on_submit():
        session = db_session.create_session()
        dep = Department()
        dep.title = form.title.data
        dep.chief = form.chief_id.data
        dep.members = form.members.data
        dep.email = form.email.data
        dep.creator = current_user.id
        # NOTE(review): if chief_id does not match a user, `chief` is None and
        # the next line raises AttributeError -- confirm the form validates it.
        chief = session.query(User).filter(User.id == form.chief_id.data).first()
        chief.deps.append(dep)
        session.merge(current_user)
        session.commit()
        return redirect('/departments')
    return render_template('add_dep.html', title='Add a Department',
                           form=form)
# редактирование департамента
@app.route('/add_dep/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_dep(id):
    """Edit a department.

    GET pre-fills the form; POST stores the changes.  The admin (user id 1)
    may edit any department, other users only those they created.  404 when
    the department is missing or not owned by the user.
    """
    form = DepForm()
    if request.method == "GET":
        session = db_session.create_session()
        if current_user.id == 1:
            dep = session.query(Department).filter(Department.id == id).first()
        else:
            dep = session.query(Department).filter(Department.id == id,
                                                   Department.creator == current_user.id).first()
        if dep:
            form.title.data = dep.title
            form.chief_id.data = dep.chief
            form.members.data = dep.members
            form.email.data = dep.email
        else:
            abort(404)
    if form.validate_on_submit():
        session = db_session.create_session()
        if current_user.id == 1:
            dep = session.query(Department).filter(Department.id == id).first()
        else:
            dep = session.query(Department).filter(Department.id == id,
                                                   Department.creator == current_user.id).first()
        if dep:
            dep.title = form.title.data
            dep.chief = form.chief_id.data
            dep.members = form.members.data
            dep.email = form.email.data
            session.commit()
            return redirect('/departments')
        else:
            abort(404)
    return render_template('add_dep.html', title='Department edit', form=form)
# удаление департамента
@app.route('/dep_delete/<int:id>', methods=['GET', 'POST'])
@login_required
def dep_delete(id):
    """Delete a department; the admin (id 1) may delete any, others their own."""
    db = db_session.create_session()
    criteria = [Department.id == id]
    if current_user.id != 1:
        criteria.append(Department.creator == current_user.id)
    dep = db.query(Department).filter(*criteria).first()
    if not dep:
        abort(404)
    db.delete(dep)
    db.commit()
    return redirect('/departments')
# вход на сайт
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user by e-mail and password via Flask-Login."""
    form = LoginForm()
    if not form.validate_on_submit():
        return render_template('login.html', title='Sign in', form=form)
    db = db_session.create_session()
    user = db.query(User).filter(User.email == form.email.data).first()
    if user is not None and user.check_password(form.password.data):
        login_user(user, remember=form.remember_me.data)
        return redirect("/")
    # Message shown to the user is kept verbatim ("wrong login or password").
    return render_template('login.html',
                           message="Неправильный логин или пароль",
                           form=form)
# выход пользователя
@app.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the home page."""
    logout_user()
    return redirect("/")
# загрузка текущего пользователя
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load the User for the session's stored id."""
    session = db_session.create_session()
    return session.query(User).get(user_id)
# регистрация пользователя
@app.route('/register', methods=['GET', 'POST'])
def reqister():  # NOTE(review): name is a typo for "register"; kept because
    # templates may resolve the endpoint via url_for('reqister').
    """Register a new user account.

    Rejects mismatched passwords and duplicate e-mail addresses (rendered
    messages are in Russian), then stores the user with a hashed password
    and redirects to the login page.
    """
    form = RegisterForm()
    if form.validate_on_submit():
        if form.password.data != form.password_again.data:
            return render_template('register.html', title='Регистрация',
                                   form=form,
                                   message="Пароли не совпадают")
        session = db_session.create_session()
        if session.query(User).filter(User.email == form.email.data).first():
            return render_template('register.html', title='Регистрация',
                                   form=form,
                                   message="Такой пользователь уже есть")
        user = User(
            surname=form.surname.data,
            name=form.name.data,
            age=form.age.data,
            position=form.position.data,
            speciality=form.speciality.data,
            address=form.address.data,
            email=form.email.data,
        )
        user.set_password(form.password.data)
        session.add(user)
        session.commit()
        return redirect('/login')
    return render_template('register.html', title='Sign up', form=form)
if __name__ == '__main__':
    # Start the development server only when run directly, not on import.
    main()
| [
"matv3ys1337@gmail.com"
] | matv3ys1337@gmail.com |
513d6023431a7391dd8d55abbb5fa2999bac7467 | 0be02fc1ba339cfc895e79ac4f51b9e5c685ac79 | /newPrg.py | b1a15c0c87bd4917703a57b983fd1782fb259d4c | [] | no_license | ezzatisawesome/python-practice | 53afc04d7871d6f11e721b41d0bce72d64c9497b | 1255ac842998f3aac21a8273d2a71ab6d0fd2671 | refs/heads/master | 2023-04-09T21:01:53.932381 | 2016-06-18T19:04:46 | 2016-06-18T19:04:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | print("Hello Mr. Man")
input()
| [
"weirdo weirdness"
] | weirdo weirdness |
4222c6f5434e9fda336a3eba3df8cd8a6e4b1bd2 | d8d66a501f0b9b85dc3b070dfa66dc9207a4d56b | /job51/job51/citydata.py | 3a4e15e423ceb96704f1f89f51609bd211f5fe6b | [] | no_license | LinQiH/pachong | 98580f84a6897dae5f67419e1dfa46c5363d4918 | 2c15ea946aa35c23f86a3502ef09fa06c7a2a4a7 | refs/heads/master | 2021-08-06T10:34:02.498747 | 2017-11-05T08:33:22 | 2017-11-05T08:47:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | from lxml import etree
def citydata():
with open('city.html','r') as f:
html = etree.HTML(f.read())
data = html.xpath('//em[@data-value]/@data-value')
return data | [
"linqi1024@sina.com"
] | linqi1024@sina.com |
7bbf18b0a122b3cabd88909f27cdac6c4038c0b6 | 8682916e6a7f00c834e8db22379240809d8d433c | /migrations/versions/a55716df6973_.py | 65ef1d87b7fcea7c409857a1d21368e17070401c | [] | no_license | pepereco/investalent | da3b87070674d8b632a16450d57b4558187ab747 | 5ab7cb6f9b1d638542405de57cc47502b5567896 | refs/heads/master | 2023-01-02T16:51:47.992154 | 2020-10-30T15:31:40 | 2020-10-30T15:31:40 | 290,481,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | """empty message
Revision ID: a55716df6973
Revises: bea3268003d6
Create Date: 2020-08-04 23:19:36.311630
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a55716df6973'
down_revision = 'bea3268003d6'
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
| [
"peperecolons0@gmail.com"
] | peperecolons0@gmail.com |
341738770f90b5c7c5648b654fb6badb2ff52930 | 8ae89fbbc850a7e3a5594107d35dda3e324c7076 | /exercise/break.py | 19313a424de98ae9f05374220a2f08588347f535 | [] | no_license | talaatmagdyx/Udacity-CS101 | 54a3f2556791997a474d69997d878e86b7f3e823 | 4e423f15af4a500b0177913bc4e57c0b7683b282 | refs/heads/master | 2022-01-02T11:09:13.571794 | 2017-07-06T13:00:31 | 2017-07-06T13:00:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py |
i=1
while True :
if i>10:
break
print(i)
i=i+1
| [
"talaatmagdy75@gmail.com"
] | talaatmagdy75@gmail.com |
0b3ce647889db5ce8bc43acdb3f0730ff2349fb3 | 70811da75f0f38719703e02c9f99e2ce09e21d2a | /LSTM_FCN/distal_phalanx_tw_model.py | cd18a4f46cd55d512c9381b2a58a3c9a060069cf | [] | no_license | HongminWu/time_series_anomaly_detection_classification_clustering | 9d5e555c9bf37ee72770e127588f61f15139bd4e | 548b3799389ec7a96fc56c51360a6de89e0502a1 | refs/heads/master | 2020-03-11T04:44:00.113684 | 2018-08-16T06:38:57 | 2018-08-16T06:38:57 | 129,783,614 | 15 | 6 | null | null | null | null | UTF-8 | Python | false | false | 2,711 | py | from keras.models import Model
from keras.layers import Input, PReLU, Dense, Dropout, LSTM, Bidirectional, multiply, concatenate
from keras.layers import Conv1D, BatchNormalization, GlobalAveragePooling1D, Permute, Activation
from utils.constants import MAX_SEQUENCE_LENGTH_LIST, NB_CLASSES_LIST
from utils.keras_utils import train_model, evaluate_model, set_trainable, visualize_context_vector, visualize_cam
from utils.layer_utils import AttentionLSTM
DATASET_INDEX = 11
MAX_SEQUENCE_LENGTH = MAX_SEQUENCE_LENGTH_LIST[DATASET_INDEX]
NB_CLASS = NB_CLASSES_LIST[DATASET_INDEX]
TRAINABLE = True
def generate_model():
    """Build the LSTM-FCN model: an LSTM branch and a 3-block FCN branch
    over the same input, concatenated into a softmax classifier.

    Input shape is (1, MAX_SEQUENCE_LENGTH); output has NB_CLASS units.
    """
    ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))
    # LSTM branch with heavy dropout.
    x = LSTM(64)(ip)
    x = Dropout(0.8)(x)
    # FCN branch: permute to (time, channel) order, then three Conv-BN-ReLU
    # blocks followed by global average pooling.
    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = GlobalAveragePooling1D()(y)
    # Fuse both branches and classify.
    x = concatenate([x, y])
    out = Dense(NB_CLASS, activation='softmax')(x)
    model = Model(ip, out)
    model.summary()
    # model.load_weights("weights/phalanx_tw_weights - 7769.h5")
    return model
def generate_model_2():
    """Variant of generate_model that replaces the LSTM branch with an
    attention-augmented AttentionLSTM; the FCN branch is identical."""
    ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))
    x = AttentionLSTM(64)(ip)
    x = Dropout(0.8)(x)
    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = GlobalAveragePooling1D()(y)
    x = concatenate([x, y])
    out = Dense(NB_CLASS, activation='softmax')(x)
    model = Model(ip, out)
    model.summary()
    # add load model code here to fine-tune
    return model
if __name__ == "__main__":
    # Build the attention variant and evaluate it; training and the
    # visualisation helpers are left commented out.
    model = generate_model_2()
    #train_model(model, DATASET_INDEX, dataset_prefix='phalanx_tw', epochs=2000, batch_size=128)
    evaluate_model(model, DATASET_INDEX, dataset_prefix='phalanx_tw', batch_size=128)
    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='phalanx_tw', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)
    # visualize_cam(model, DATASET_INDEX, dataset_prefix='phalanx_tw', class_id=0)
| [
"hongminwu0120@gmail.com"
] | hongminwu0120@gmail.com |
962cfc65a1cca1f1c7efcaec822fad9a5784631d | 834134cce099d80d8564164696cdffcf0112aa9a | /scraper.py | 1125ecdfa37d5536e42f93716e09f4ca594a0cf9 | [] | no_license | mpulgaron17/EarlyProjects | e531c51daa060499645e43d79e82318b1b987de9 | 38b7ff489bc6498dcb631b9e8070a6a4584b9078 | refs/heads/main | 2023-06-06T18:57:33.822515 | 2021-07-15T03:35:21 | 2021-07-15T03:35:21 | 386,149,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | import urllib.request
from bs4 import BeautifulSoup
class Scraper:
    """Fetch a web page and print the href of every anchor tag on it."""

    def __init__(self, site):
        # URL of the page to scrape.
        self.site = site

    def scrape(self):
        """Download self.site and print each link preceded by a blank line."""
        response = urllib.request.urlopen(self.site)
        markup = response.read()
        soup = BeautifulSoup(markup, "html.parser")
        for anchor in soup.find_all("a"):
            print("\n" + anchor.get("href"))
# Run the demo scrape only when executed as a script; previously importing
# this module triggered a live network request as a side effect.
if __name__ == "__main__":
    scrape = Scraper('https://news.google.com')
    scrape.scrape()
"noreply@github.com"
] | mpulgaron17.noreply@github.com |
ef01adb41fcf1f474f98c2f88f09443ee34ec339 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/servicebus/azure-servicebus/tests/async_tests/mgmt_tests/test_mgmt_namespaces_async.py | 77e82602f3a50ce880403bd482c0dcba7293d2b3 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 1,489 | py | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import pytest
from azure.servicebus.aio.management import ServiceBusAdministrationClient
from devtools_testutils import AzureMgmtTestCase, CachedResourceGroupPreparer
from servicebus_preparer import CachedServiceBusNamespacePreparer
class ServiceBusManagementClientNamespaceAsyncTests(AzureMgmtTestCase):
    """Async management-plane tests for Service Bus namespace properties."""
    @CachedResourceGroupPreparer(name_prefix='servicebustest')
    @CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
    async def test_async_mgmt_namespace_get_properties(self, servicebus_namespace_connection_string,
                                                    servicebus_namespace, servicebus_namespace_key_name,
                                                    servicebus_namespace_primary_key):
        # Fetch the namespace properties through the administration client and
        # sanity-check the messaging SKU provisioned by the preparer.
        mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
        properties = await mgmt_service.get_namespace_properties()
        assert properties
        assert properties.messaging_sku == 'Standard'
        # assert properties.name == servicebus_namespace.name
        # This is disabled pending investigation of why it isn't getting scrubbed despite expected scrubber use.
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
eb792de05309618dbd6f13ad548b23cddd4fcacd | 54731f795964175953b56cb0aa0299aba7cb603e | /myProject/app.py | 2d98ff7b463505f046bdfb279d81cb85a9066dd2 | [] | no_license | cha-n/homework | 8cf25575107b39e80b06672d6eda1b74bc246569 | 4f7387a302b2c12c7ac1c725d38a2bc8d52465c1 | refs/heads/master | 2023-01-29T12:31:25.011072 | 2020-12-14T12:43:29 | 2020-12-14T12:43:29 | 306,026,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,714 | py | from flask import Flask, render_template, jsonify, request
from pymongo import MongoClient
app = Flask(__name__)
client = MongoClient('localhost', 27017)
db = client.dbsparta
# method 명시되어 있지 않으면 GET, POST 모두 가능
@app.route('/')
def home():
    """Render the main page (book review list)."""
    return render_template('index.html')
@app.route('/write', methods=['GET'])
def write_form():
    """Render the review-writing form."""
    return render_template('write.html')
@app.route('/write', methods=['POST'])
def write_review():
    """Store a new book review document in the 'booked' collection."""
    title = request.form['title']
    author = request.form['author']
    publisher = request.form['publisher']
    review = request.form['review']
    thumbnail = request.form['thumbnail']
    print(title, author, publisher, review, thumbnail)
    doc = {
        'title': title,
        'author': author,
        'publisher': publisher,
        'review': review,
        'thumbnail': thumbnail
    }
    db.booked.insert_one(doc)
    print(doc)
    # Message is Korean for "the review was written!".
    return jsonify({'result': 'success', 'msg': '리뷰가 작성되었습니다!'})
@app.route('/review', methods=['GET'])
def read_all_reviews():
    """Return every review, optionally filtered by the 'title' query arg."""
    title = request.args.get('title', 'all')
    print(title)
    # 'all' (the default) means no filtering.
    query = {} if title == 'all' else {"title": title}
    review_list = list(db.booked.find(query, {'_id': False}))
    print(review_list)
    return jsonify({'result': 'success', 'reviews': review_list})
@app.route('/viewReview', methods=['GET'])
def read_review():
    """Render the review detail page for the title given as a query param."""
    title = request.args.get('title')
    return render_template('readReview.html', title=title)
# index.html에서 책 클릭할 때 책 정보 가져옴
@app.route('/viewReview', methods=['POST'])
def get_title():
    """Return the stored review documents matching the posted book title."""
    title = request.form['title']
    print(title)
    review = list(db.booked.find({'title': title}, {'_id': False}))
    print(review)
    return jsonify({'result': 'success', 'reviews': review})
@app.route('/search_popup')
def search_popup():
    """Render the book-search popup window."""
    return render_template('search_popup.html')
# 리뷰 삭제
@app.route('/deleteReview', methods=['POST'])
def delete_review():
    """Delete the review whose title is posted in the form data.

    Removes at most one matching document from the 'booked' collection and
    returns a JSON success envelope.
    """
    print("deleteReview")
    title = request.form['title']
    print(title)
    # The delete_one result was previously bound to an unused variable.
    db.booked.delete_one({"title": title})
    # BUG FIX: the success message used to read '리뷰가 작성되었습니다!'
    # ("the review was written"); report the deletion instead.
    return jsonify({'result': 'success', 'msg': '리뷰가 삭제되었습니다!'})
@app.route('/test', methods=['GET'])
def test_get():
    """Echo endpoint used to verify that GET requests reach the server."""
    received_title = request.args.get('title_give')
    print(received_title)
    return jsonify({'result': 'success', 'msg': '이 요청은 GET!'})
@app.route('/test', methods=['POST'])
def test_post():
    """Echo endpoint used to verify that POST requests reach the server."""
    title_receive = request.form['title_give']
    print(title_receive)
    return jsonify({'result': 'success', 'msg': '이 요청은 POST!'})
if __name__ == '__main__':
    # Listen on all interfaces with the debug reloader enabled (dev only).
    app.run('0.0.0.0', port=5000, debug=True)
| [
"chan01115@naver.com"
] | chan01115@naver.com |
8d06eb9fd29257e1d48b88ff15d60e2b89203bb8 | 539c37c420df5ad243b26c63f48b7d3a478a4227 | /Incubator/urls.py | e5450e2839a15065850500444984202ad89e9ff4 | [] | no_license | ManasAgarwal02/Incubator | a91bafc8c8d50df1fe4e46f8a0dc433382af6280 | 5af76857b24a58292a446b81b458d6d73691b650 | refs/heads/master | 2023-03-15T16:33:40.354127 | 2021-02-28T07:51:59 | 2021-02-28T07:51:59 | 315,052,730 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | from django.urls import path, include
from . import views
# URL routes for the Incubator app views.
urlpatterns = [
    path('', views.home, name="home"),  # landing page
    path('about/', views.about, name="about"),  # static about page
    path('analyze/', views.analyze, name="analyze"),  # analysis view
    path('network/', views.network_request, name="net_req")  # network request view
]
| [
"manas.99ag@gmail.com"
] | manas.99ag@gmail.com |
376eedeb09e809b68e21efaed738bb75f8bc1003 | 188251f29b721f93e336193dfc630d2d3f7722c6 | /api/images/views.py | 1b47f89b2de1c063b952b4bfbb35886c1496235c | [
"MIT"
] | permissive | erik-sn/mako | eeb9a1e55ce4303bbbeb1a2d78aa1bf54a4c7de2 | f64644dacb132b0063805ccafc3f485bdb62595b | refs/heads/master | 2022-12-15T19:59:48.640267 | 2018-11-23T01:06:12 | 2018-11-23T01:06:12 | 137,537,123 | 0 | 2 | MIT | 2022-12-08T20:55:49 | 2018-06-15T22:32:36 | TypeScript | UTF-8 | Python | false | false | 7,433 | py | import logging
from django.core.files.uploadedfile import InMemoryUploadedFile
from django import forms
from django.db.utils import IntegrityError
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from rest_framework.decorators import list_route
from rest_framework import viewsets
from rest_framework.decorators import detail_route
from rest_framework.serializers import ModelSerializer
from rest_framework import status
from images.utils import unzip_and_save_files, assert_compressed_file, create_image_group
from images.serializers import (
ImageSerializer,
ImageGroupDetailSerializer,
ImageGroupListSerializer,
ImageGroupPostSerializer,
MergeSerializer,
UploadEventSerializer,
UploadEventListSerializer,
SearchListSerializer,
SearchSerializer,
)
from images.models import (
Image,
ImageGroup,
UploadEvent,
)
from images.search import Search
logger = logging.getLogger('django')
class UploadFileForm(forms.Form):
    """Single-field form used to validate an uploaded archive file."""
    file = forms.FileField()
class ImageContainerView:
    """Mixin adding a download-images detail action for models that hold
    images.  Subclasses set ``model_class`` to a model exposing
    ``build_download_response()``."""
    model_class = None

    @detail_route(methods=['get'])
    def download_images(self, request, pk: int) -> HttpResponse:
        """Return the container's images as a downloadable response, or 404."""
        image_container = get_object_or_404(self.model_class, pk=pk)
        return image_container.build_download_response()
class ImageViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API over Image objects, addressed by slug, with a ``toggle``
    action that flips (or explicitly sets) an image's ``included`` flag."""
    queryset = Image.objects.all()
    serializer_class = ImageSerializer
    lookup_field = 'slug'

    @staticmethod
    def _include_image(image):
        # Mark the image as included and persist the change.
        image.included = True
        image.save()
        return image

    @staticmethod
    def _exclude_image(image):
        # Mark the image as excluded and persist the change.
        image.included = False
        image.save()
        return image

    @detail_route(methods=['put'])
    def toggle(self, request, slug=None):
        """Flip or set an image's ``included`` flag and return the image.

        With a truthy ``value`` query param the flag is set opposite to the
        given boolean (value=true -> exclude); without it the current flag is
        inverted.  404 when no image matches.
        NOTE(review): ``slug__contains`` can match several images and would
        then raise MultipleObjectsReturned -- confirm slugs are unique enough.
        """
        value = request.GET.get('value', None)
        try:
            image = self.queryset.get(slug__contains=slug)
        except Image.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        if value:
            bool_value = value.lower() == 'true'
            if bool_value:
                image = self._exclude_image(image)
            else:
                image = self._include_image(image)
        else:
            if image.included:
                image = self._exclude_image(image)
            else:
                image = self._include_image(image)
        serializer = self.serializer_class(image)
        return Response(serializer.data, status=status.HTTP_200_OK)
class ImageGroupViewSet(viewsets.ModelViewSet, ImageContainerView):
    """CRUD API over ImageGroup objects; direct creation is disabled because
    groups are built through the merge endpoints instead."""
    model_class = ImageGroup
    queryset = model_class.objects.all()

    def get_serializer_class(self) -> ModelSerializer:
        # Full detail serializer for reads, slimmer serializer for writes.
        if self.request.method in ['GET']:
            return ImageGroupDetailSerializer
        return ImageGroupPostSerializer

    def list(self, request, **kwargs) -> Response:
        """List only the image groups owned by the requesting user."""
        # TODO see if we can make this filter generic
        image_groups = self.queryset.filter(owner=request.user)
        serializer = ImageGroupListSerializer(image_groups, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def create(self, request, *args, **kwargs):
        # Creation is intentionally blocked; use the merge endpoints.
        return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
class UploadEventViewSet(viewsets.ModelViewSet, ImageContainerView):
    """Handle uploads of single, multiple or compressed images, scoped to the
    requesting user, plus merging upload events into an image group."""
    serializer_class = UploadEventSerializer
    model_class = UploadEvent
    queryset = model_class.objects.all()

    def get_serializer_class(self):
        # Slim serializer for list views, full serializer otherwise.
        if self.action == 'list':
            return UploadEventListSerializer
        return UploadEventSerializer

    def get_queryset(self):
        # Users only ever see their own upload events.
        return self.queryset.filter(owner=self.request.user)

    @list_route(methods=['post'])
    def merge(self, request) -> Response:
        """Merge the posted upload-event ids into a new image group.

        400 when an id is unknown or the group name already exists.
        NOTE(review): the 'google searches' wording in the error message
        looks copy-pasted from SearchViewset.merge -- confirm intended.
        """
        serializer = MergeSerializer(data=request.data)
        # raise_exception=True means invalid data raises ValidationError here.
        if serializer.is_valid(raise_exception=True):
            upload_event_ids = serializer.data['items']
            try:
                upload_events = [UploadEvent.objects.get(id=id) for id in upload_event_ids]
                image_group = create_image_group(upload_events, serializer, request.user)
            except UploadEvent.DoesNotExist:
                return Response({
                    'generic': ['Could not find one of the selected google searches - contact an administrator']
                }, status=status.HTTP_400_BAD_REQUEST)
            except IntegrityError:
                return Response({
                    'name': ['Image group with this name already exists']
                }, status=status.HTTP_400_BAD_REQUEST)
            return Response(ImageGroupDetailSerializer(image_group).data, status=status.HTTP_201_CREATED)

    def create(self, request, *args, **kwargs):
        """Accept a compressed archive upload, unpack its images and record
        an UploadEvent.  400 for invalid archives or archives with no
        usable images."""
        form: UploadFileForm = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            file: InMemoryUploadedFile = request.FILES['file']
            if not assert_compressed_file(file):
                return Response('Invalid file type', status=status.HTTP_400_BAD_REQUEST)
            saved_images, new_image_count = unzip_and_save_files(file)
            # case where the user uploaded either a blank directory or a directory
            # that contains no images, or contains invalid image types as defined in the
            # image config admin
            if len(saved_images) == 0:
                return Response({
                    'generic': ['Uploaded archive has no valid images']
                }, status=status.HTTP_400_BAD_REQUEST)
            upload_event = UploadEvent.objects.create(owner=request.user, file_name=file.name)
            upload_event.images.set(saved_images)
            upload_event.save()
            serializer = UploadEventSerializer(upload_event)
            return Response(serializer.data, status=201)
            # # case where duplicate file was uploaded
            # return Response({}, status=201)
        return Response(form.errors, status=400)
class SearchViewset(viewsets.ModelViewSet, ImageContainerView):
    """CRUD API over a user's Search objects, plus merging searches into an
    image group (mirrors UploadEventViewSet.merge)."""
    model_class = Search
    queryset = model_class.objects.all()

    def get_serializer_class(self):
        # Slim serializer for list views, full serializer otherwise.
        if self.action == 'list':
            return SearchListSerializer
        return SearchSerializer

    def get_queryset(self):
        # Users only ever see their own searches.
        return self.queryset.filter(user=self.request.user)

    @list_route(methods=['post'])
    def merge(self, request) -> Response:
        """Merge the posted search ids into a new image group.

        400 when an id is unknown or the group name already exists.
        """
        serializer = MergeSerializer(data=request.data)
        # raise_exception=True means invalid data raises ValidationError here.
        if serializer.is_valid(raise_exception=True):
            search_ids = serializer.data['items']
            try:
                searches = [Search.objects.get(id=id) for id in search_ids]
                image_group = create_image_group(searches, serializer, request.user)
            except Search.DoesNotExist:
                return Response({
                    'generic': ['Could not find one of the selected google searches - contact an administrator']
                }, status=status.HTTP_400_BAD_REQUEST)
            except IntegrityError:
                return Response({
                    'name': ['Image group with this name already exists']
                }, status=status.HTTP_400_BAD_REQUEST)
            return Response(ImageGroupDetailSerializer(image_group).data, status=201)
"nieh.erik@gmail.com"
] | nieh.erik@gmail.com |
4799dfae66b08654ba541db4e36bfdab1b6ecd9b | 6382e12a32c3b62ec059ca45c1fee6941e51e260 | /Part_5__Control_Flow/Chap_14__Iterables_Iterators_and_Generators/ex_14_12__aritprog_gen.py | b705450eca5df149e40a62b8325732285db256f8 | [] | no_license | CavalcanteLucas/python-fluent | e352a79e1da87ae4ee320a09196e119235a904a8 | a4e22ab88235c5045eca52745b5e1558586dc166 | refs/heads/master | 2023-07-11T20:13:35.793456 | 2023-06-19T13:37:54 | 2023-06-19T13:37:54 | 224,661,365 | 1 | 0 | null | 2023-02-11T01:30:09 | 2019-11-28T13:39:03 | Jupyter Notebook | UTF-8 | Python | false | false | 233 | py | def aritprog_gen(begin, step, end=None):
result = type(begin + step)(begin)
forever = end is None
index = 0
while forever or result < end:
yield result
index += 1
result = begin + step * index
| [
"lucascpcavalcante@gmail.com"
] | lucascpcavalcante@gmail.com |
fbb863a05f6df5149149d97b5578a426b774993a | cf1a7f07221766edfece71d63cb93886aa30c476 | /tests/test_quotient_metric.py | a05a1572eb0f28022501b6525cd8f36016edb650 | [
"MIT"
] | permissive | tristancabel/geomstats | 950c3f5d9e42e2bbfa4a4abd1b0e17b9b8c8b22f | eeba7b7a652d45fc0053e35219c03627f2e8406f | refs/heads/master | 2023-04-22T05:40:09.584435 | 2021-03-08T04:53:22 | 2021-03-08T04:53:22 | 345,702,762 | 0 | 0 | MIT | 2021-04-30T07:38:41 | 2021-03-08T15:29:37 | null | UTF-8 | Python | false | false | 6,431 | py | """Unit tests for the quotient space."""
import geomstats.backend as gs
import geomstats.tests
from geomstats.geometry.fiber_bundle import FiberBundle
from geomstats.geometry.general_linear import GeneralLinear
from geomstats.geometry.matrices import MatricesMetric
from geomstats.geometry.quotient_metric import QuotientMetric
from geomstats.geometry.spd_matrices import SPDMatrices, \
SPDMetricBuresWasserstein
from geomstats.geometry.special_orthogonal import SpecialOrthogonal
class TestQuotientMetric(geomstats.tests.TestCase):
def setUp(self):
gs.random.seed(0)
n = 3
self.base = SPDMatrices(n)
self.base_metric = SPDMetricBuresWasserstein(n)
self.group = SpecialOrthogonal(n)
self.bundle = FiberBundle(
GeneralLinear(n), base=self.base, group=self.group)
self.quotient_metric = QuotientMetric(
self.bundle, ambient_metric=MatricesMetric(n, n))
def submersion(point):
return GeneralLinear.mul(point, GeneralLinear.transpose(point))
def tangent_submersion(tangent_vec, base_point):
product = GeneralLinear.mul(
base_point, GeneralLinear.transpose(tangent_vec))
return 2 * GeneralLinear.to_symmetric(product)
def horizontal_lift(tangent_vec, point, base_point=None):
if base_point is None:
base_point = submersion(point)
sylvester = gs.linalg.solve_sylvester(
base_point, base_point, tangent_vec)
return GeneralLinear.mul(sylvester, point)
self.bundle.submersion = submersion
self.bundle.tangent_submersion = tangent_submersion
self.bundle.horizontal_lift = horizontal_lift
self.bundle.lift = gs.linalg.cholesky
def test_belongs(self):
point = self.base.random_uniform()
result = self.bundle.belongs(point)
self.assertTrue(result)
def test_submersion(self):
mat = self.bundle.total_space.random_uniform()
point = self.bundle.submersion(mat)
result = self.bundle.belongs(point)
self.assertTrue(result)
def test_lift_and_submersion(self):
point = self.base.random_uniform()
mat = self.bundle.lift(point)
result = self.bundle.submersion(mat)
self.assertAllClose(result, point)
def test_tangent_submersion(self):
mat = self.bundle.total_space.random_uniform()
point = self.bundle.submersion(mat)
vec = self.bundle.total_space.random_uniform()
tangent_vec = self.bundle.tangent_submersion(vec, point)
result = self.base.is_tangent(tangent_vec, point)
self.assertTrue(result)
def test_horizontal_projection(self):
mat = self.bundle.total_space.random_uniform()
vec = self.bundle.total_space.random_uniform()
horizontal_vec = self.bundle.horizontal_projection(vec, mat)
product = GeneralLinear.mul(horizontal_vec, GeneralLinear.inverse(mat))
is_horizontal = GeneralLinear.is_symmetric(product)
self.assertTrue(is_horizontal)
def test_vertical_projection(self):
mat = self.bundle.total_space.random_uniform()
vec = self.bundle.total_space.random_uniform()
vertical_vec = self.bundle.vertical_projection(vec, mat)
result = self.bundle.tangent_submersion(vertical_vec, mat)
expected = gs.zeros_like(result)
self.assertAllClose(result, expected, atol=1e-5)
def test_horizontal_lift_and_tangent_submersion(self):
mat = self.bundle.total_space.random_uniform()
tangent_vec = GeneralLinear.to_symmetric(
self.bundle.total_space.random_uniform())
horizontal = self.bundle.horizontal_lift(tangent_vec, mat)
result = self.bundle.tangent_submersion(horizontal, mat)
self.assertAllClose(result, tangent_vec)
def test_is_horizontal(self):
mat = self.bundle.total_space.random_uniform()
tangent_vec = GeneralLinear.to_symmetric(
self.bundle.total_space.random_uniform())
horizontal = self.bundle.horizontal_lift(tangent_vec, mat)
result = self.bundle.is_horizontal(horizontal, mat)
self.assertTrue(result)
def test_is_vertical(self):
mat = self.bundle.total_space.random_uniform()
tangent_vec = self.bundle.total_space.random_uniform()
vertical = self.bundle.vertical_projection(tangent_vec, mat)
result = self.bundle.is_vertical(vertical, mat)
self.assertTrue(result)
def test_align(self):
point = self.bundle.total_space.random_uniform(2)
aligned = self.bundle.align(
point[0], point[1], tol=1e-10)
result = self.bundle.is_horizontal(
point[1] - aligned, point[1], atol=1e-5)
self.assertTrue(result)
def test_inner_product(self):
mat = self.bundle.total_space.random_uniform()
point = self.bundle.submersion(mat)
tangent_vecs = GeneralLinear.to_symmetric(
self.bundle.total_space.random_uniform(2)) / 10
result = self.quotient_metric.inner_product(
tangent_vecs[0], tangent_vecs[1], point=mat)
expected = self.base_metric.inner_product(
tangent_vecs[0], tangent_vecs[1], point)
self.assertAllClose(result, expected)
def test_exp(self):
mat = self.bundle.total_space.random_uniform()
point = self.bundle.submersion(mat)
tangent_vec = GeneralLinear.to_symmetric(
self.bundle.total_space.random_uniform()) / 5
result = self.quotient_metric.exp(tangent_vec, point)
expected = self.base_metric.exp(tangent_vec, point)
self.assertAllClose(result, expected)
def test_log(self):
mats = self.bundle.total_space.random_uniform(2)
points = self.bundle.submersion(mats)
result = self.quotient_metric.log(points[1], points[0], tol=1e-10)
expected = self.base_metric.log(points[1], points[0])
self.assertAllClose(result, expected, atol=3e-4)
def test_squared_dist(self):
mats = self.bundle.total_space.random_uniform(2)
points = self.bundle.submersion(mats)
result = self.quotient_metric.squared_dist(
points[1], points[0], tol=1e-10)
expected = self.base_metric.squared_dist(points[1], points[0])
self.assertAllClose(result, expected, atol=1e-5)
| [
"nicolas.guigui@inria.fr"
] | nicolas.guigui@inria.fr |
fbcce6a43ad58373cd35ab45d604f4c91582da33 | e7b7505c084e2c2608cbda472bc193d4a0153248 | /LeetcodeNew/python/LC_935.py | 58a9a16ae88d9c6f9538e65cc80e22da6dfcaf47 | [] | no_license | Taoge123/OptimizedLeetcode | 8e5c1cd07904dfce1248bc3e3f960d2f48057a5d | 3e50f6a936b98ad75c47d7c1719e69163c648235 | refs/heads/master | 2023-02-27T21:13:40.450089 | 2023-02-07T04:11:09 | 2023-02-07T04:11:09 | 170,044,224 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,630 | py |
"""
https://www.youtube.com/watch?v=HTnIFivp0aw
这是一道简单但是比较有趣的题目。DP的方法还是比较容易想到的。令dp[k]表示当前拨号数字为k的方案数,显然它取决于在按k之前的那个数字的拨号方案数之和。
举个例子,第i次拨号时的dp[4]就等于第i-1次拨号时的dp[0]+dp[3]+dp[9],这是因为在盘面上骑士只能从0,3,9这三个位置跳跃到4.
"""
class SolutionTD:
def knightDialer(self, n):
table = {1: [6, 8], 2: [7, 9], 3: [4, 8], 4: [0, 3, 9], 5: [], 6: [0, 1, 7], 7: [2, 6], 8: [1, 3], 9: [2, 4],
0: [4, 6]}
self.mod = 10 ** 9 + 7
res = 0
memo = {}
for i in range(10):
res += self.dfs(n - 1, i, table, memo)
res %= self.mod
return res
def dfs(self, n, node, table, memo):
if (n, node) in memo:
return memo[(n, node)]
if n == 0:
return 1
res = 0
for nei in table[node]:
res += self.dfs(n - 1, nei, table, memo)
res %= self.mod
memo[(n, node)] = res
return res
class Solution:
def knightDialer(self, N):
table = {1: [6, 8], 2: [7, 9], 3: [4, 8], 4: [0, 3, 9], 5: [], 6: [0, 1, 7], 7: [2, 6], 8: [1, 3], 9: [2, 4],
0: [4, 6]}
mod = 10 ** 9 + 7
dp = [1] * 10
for _ in range(N - 1):
newDP = [0] * 10
for i in range(10):
for j in table[i]:
newDP[j] += dp[i]
dp = newDP
return sum(dp) % (mod)
| [
"taocheng984@gmail.com"
] | taocheng984@gmail.com |
9896f2f98fe117cec6e33099ec5018b9842f1ac9 | f38b9a6cd3f25160073d41ed6acf13b653a2ff74 | /rocs_teleop/scripts/rocs_teleop_keyboard.py | d8125b997ee2714f6ecd92dcf958a234d233927b | [] | no_license | uobirlab/dora-control | e9cab88fdbc7c9da66cea7a40f916e4420d61821 | 8f15c9a6033e2ce3506874c15ecac577e54d05aa | refs/heads/master | 2021-01-13T02:32:41.475079 | 2013-10-25T11:47:05 | 2013-10-25T11:47:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,043 | py | #!/usr/bin/env python
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import roslib; roslib.load_manifest('rocs_teleop')
import rospy
from geometry_msgs.msg import Twist
import sys, select, termios, tty
msg = """
Control Your TurtleBot!
---------------------------
Moving around:
u i o
j k l
m , .
q/z : increase/decrease max speeds by 10%
w/x : increase/decrease only linear speed by 10%
e/c : increase/decrease only angular speed by 10%
anything else : stop
CTRL-C to quit
"""
moveBindings = {
'i':(1,0),
'o':(1,-1),
'j':(0,1),
'l':(0,-1),
'u':(1,1),
',':(-1,0),
'.':(-1,1),
'm':(-1,-1),
}
speedBindings={
'q':(1.1,1.1),
'z':(.9,.9),
'w':(1.1,1),
'x':(.9,1),
'e':(1,1.1),
'c':(1,.9),
}
def getKey():
tty.setraw(sys.stdin.fileno())
select.select([sys.stdin], [], [], 0)
key = sys.stdin.read(1)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
speed = .3
turn = 1
def vels(speed,turn):
return "currently:\tspeed %s\tturn %s " % (speed,turn)
if __name__=="__main__":
settings = termios.tcgetattr(sys.stdin)
pub = rospy.Publisher('cmd_vel', Twist)
rospy.init_node('turtlebot_teleop')
x = 0
th = 0
status = 0
try:
print msg
print vels(speed,turn)
while(1):
key = getKey()
if key in moveBindings.keys():
x = moveBindings[key][0]
th = moveBindings[key][1]
elif key in speedBindings.keys():
speed = speed * speedBindings[key][0]
turn = turn * speedBindings[key][1]
print vels(speed,turn)
if (status == 14):
print msg
status = (status + 1) % 15
else:
x = 0
th = 0
if (key == '\x03'):
break
twist = Twist()
twist.linear.x = x*speed; twist.linear.y = 0; twist.linear.z = 0
twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = th*turn
pub.publish(twist)
except:
print e
finally:
twist = Twist()
twist.linear.x = 0; twist.linear.y = 0; twist.linear.z = 0
twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = 0
pub.publish(twist)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
| [
"a.pronobis@gmail.com"
] | a.pronobis@gmail.com |
95104df4640b4babf14d129503b2955198323497 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/11104121.py | e842e5077a8ce26042b14a549459d60c120ea087 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/11104121.py generated: Wed, 25 Jan 2017 15:25:16
#
# Event Type: 11104121
#
# ASCII decay Descriptor: [B0 -> pi+ pi- (KS0 -> pi+ pi-)]cc
#
from Configurables import Generation
Generation().EventType = 11104121
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bd_KSpi+pi-=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 511,-511 ]
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
1ade39bff263007813db93d12d91966da695744a | e3eee8900296e91601a2f6fea027c7956433e072 | /chap10/dirList.py | c2a90cfdb43d8848cbea15e53bebe83fc3d469b8 | [] | no_license | chc1129/introducing-python3 | 70ff14bbf24f7030a8cc20dba7db753e64b46865 | 43a6de586862380ac221669f11f1fbbac9105bb5 | refs/heads/master | 2020-04-12T21:53:32.031918 | 2019-09-23T12:55:59 | 2019-09-23T12:55:59 | 162,775,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | import os
print(os.listdir('.'))
print(os.listdir('..'))
| [
"chc1129@gmail.com"
] | chc1129@gmail.com |
dd75a4912a4cbb4431c9f3a9493f1d8671208472 | 412ddb81b217ced05f77381a625a6ee26a3b2ea7 | /lib/Stats.py | 11ef31512bc7bf0d2ac2616db64d5d2797753a9e | [] | no_license | AndreasHeger/adda | d26fcb7ba3f32ced351d34b8dac7f802e63219c5 | ddae18476747ef51cc8a2d924b723d5ae81a2da7 | refs/heads/master | 2016-09-10T22:58:51.797341 | 2014-06-26T19:44:39 | 2014-06-26T19:44:39 | 16,487,084 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,894 | py | ################################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#################################################################################
'''
Stats.py - utility functions for statistical computations
=========================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Code
----
'''
import types
import math
import numpy
import scipy
import scipy.stats
import scipy.interpolate
import collections
from rpy2.robjects import r as R
import rpy2.robjects as ro
import rpy2.robjects.numpy2ri
def getSignificance( pvalue, thresholds=[0.05, 0.01, 0.001] ):
    """Return a star cartoon ('', '*', '**', ...) for the significance of *pvalue*.

    One star is awarded for each leading entry of *thresholds* that
    *pvalue* does not exceed; scanning stops at the first threshold the
    p-value lies above.
    """
    nstars = 0
    while nstars < len(thresholds) and pvalue <= thresholds[nstars]:
        nstars += 1
    return "*" * nstars
class Result(object):
    '''container allowing both attribute and dictionary access to its values.

    All values live in the internal dictionary ``_data``.  Reads/writes of
    attribute names that do not start with an underscore are redirected to
    that dictionary, so ``result.mean`` and ``result["mean"]`` are
    interchangeable.  Unknown attribute lookups finally fall back to the
    dictionary's own methods (``get``, ``items``, ...).

    A real ``__slots__`` declaration is deliberately not used: subclasses
    store private attributes (for example ``_format``) directly on the
    instance via ``object.__setattr__``, which ``__slots__`` would forbid.
    (A previous, non-functional ``slots = ("_data")`` class attribute was
    removed - it was a plain string attribute, not ``__slots__``.)
    '''

    def __init__(self):
        # bypass our own __setattr__ so that _data ends up on the instance
        object.__setattr__(self, "_data", dict())

    def fromR(self, take, r_result):
        '''import values from an rpy2 *r_result* object.

        *take* is a sequence of tuples ``(field, r_field)`` mapping an
        output field to the corresponding name in *r_result*; if
        ``r_field`` is false-y, the field name itself is looked up.
        Returns ``self`` to allow chaining.
        '''
        for x, y in take:
            if y:
                self._data[x] = r_result.rx(y)[0][0]
            else:
                self._data[x] = r_result.rx(x)[0][0]
        return self

    def __getattr__(self, key):
        # called only when normal attribute lookup fails: route public
        # names to the data dictionary, then fall back to dict methods
        if not key.startswith("_"):
            try:
                return object.__getattribute__(self, "_data")[key]
            except KeyError:
                pass
        return getattr(self._data, key)

    def keys(self):
        return self._data.keys()

    def values(self):
        return self._data.values()

    def __len__(self):
        return self._data.__len__()

    def __str__(self):
        return str(self._data)

    def __contains__(self, key):
        return key in self._data

    def __getitem__(self, key):
        return self._data[key]

    def __delitem__(self, key):
        del self._data[key]

    def __setitem__(self, key, value):
        self._data[key] = value

    def __setattr__(self, key, value):
        # public attributes are stored in the data dictionary, private
        # (underscore-prefixed) ones on the instance itself
        if not key.startswith("_"):
            self._data[key] = value
        else:
            object.__setattr__(self, key, value)
#################################################################
#################################################################
#################################################################
## Perform log likelihood test
class LogLikelihoodTest:
    """plain result container filled in by :func:`doLogLikelihoodTest`."""

    def __init__(self):
        pass


def doLogLikelihoodTest(complex_ll, complex_np,
                        simple_ll, simple_np,
                        significance_threshold=0.05):
    """perform log-likelihood ratio test between two nested models.

    *complex_ll*/*simple_ll* are the log-likelihoods and
    *complex_np*/*simple_np* the number of free parameters of the two
    models.  The statistic ``2 * (complex_ll - simple_ll)`` is compared
    against a chi-squared distribution with ``complex_np - simple_np``
    degrees of freedom.

    Returns a :class:`LogLikelihoodTest` with the statistic, p-value and
    a pass/fail flag at *significance_threshold*.

    Raises ValueError if the complex model has no extra parameters, and
    AssertionError if the complex model fits worse than the simple one.
    """

    assert complex_ll >= simple_ll, \
        "log likelihood of complex model smaller than for simple model: %f > %f" % \
        (complex_ll, simple_ll)

    chi = 2 * (complex_ll - simple_ll)
    df = complex_np - simple_np

    if df <= 0:
        raise ValueError("difference of degrees of freedom not larger than 0")

    # chi2.sf is the chi-squared survival function; scipy.stats.chisqprob
    # was a deprecated alias for it and has been removed from recent scipy.
    p = scipy.stats.chi2.sf(chi, df)

    l = LogLikelihoodTest()

    l.mComplexLogLikelihood = complex_ll
    l.mSimpleLogLikelihood = simple_ll
    l.mComplexNumParameters = complex_np
    l.mSimpleNumParameters = simple_np
    l.mSignificanceThreshold = significance_threshold
    l.mProbability = p
    l.mChiSquaredValue = chi
    l.mDegreesFreedom = df
    l.mPassed = bool(p < significance_threshold)

    return l
#################################################################
#################################################################
#################################################################
class BinomialTest:
    # Result container intended for doBinomialTest.  Currently unused,
    # since the test itself is not implemented.

    def __init__(self):
        pass


def doBinomialTest( p, sample_size, observed, significance_threshold = 0.05):
    """perform a binomial test.

    Given are p: the probability of the NULL hypothesis, the sample_size
    and the number of observed counts.

    .. note:: not implemented - this is a stub and always returns None.
    """
    pass
#################################################################
#################################################################
#################################################################
class ChiSquaredTest:
    """plain attribute container for chi-squared test results."""

    def __init__(self):
        pass
def doChiSquaredTest( matrix, significance_threshold = 0.05 ):
    '''perform chi-squared test on a matrix.

    The observed/expected values are in rows, the categories are in
    columns, for example::

        set        protein_coding  intronic  intergenic
        observed   92              90        194
        expected   91              10        15

    If there are only two categories (one degree of freedom) the Yates
    correction is applied.

    The test throws an exception if

    1. one or more expected categories are less than 1 (it does not
       matter what the observed values are)
    2. more than one-fifth of expected categories are less than 5

    Returns a :class:`ChiSquaredTest` result object.
    '''
    nrows, ncols = matrix.shape
    if nrows != 2:
        raise NotImplementedError(
            "chi-square currently only implemented for 2xn tables.")

    # validity checks on the second row (documented as expected counts)
    n = 0
    for x in range(ncols):
        if matrix[1][x] < 1:
            raise ValueError("matrix contains expected counts < 1")
        if matrix[1][x] < 5:
            n += 1

    if 100.0 * n / ncols > 20.0:
        raise ValueError("more than 20% of expected categories are less than 5")

    # Expected counts below are derived from the marginals of the full
    # table.  NOTE(review): this treats the matrix as a contingency table,
    # which is at odds with the docstring's observed/expected layout -
    # confirm with callers which interpretation is intended.
    row_sums = [sum(matrix[x, :]) for x in range(nrows)]
    col_sums = [sum(matrix[:, x]) for x in range(ncols)]
    sample_size = float(sum(row_sums))

    chi = 0.0
    df = (nrows - 1) * (ncols - 1)

    # Yates correction applies for a 2x2 table only (df==1).
    # NOTE(review): the textbook Yates correction subtracts 0.5 from
    # |observed - expected|; this code subtracts 0.5 * 0.5 = 0.25.  Kept
    # unchanged to preserve existing results - confirm before altering.
    if df == 1:
        correction = 0.5 * 0.5
    else:
        correction = 0

    for x in range(nrows):
        for y in range(ncols):
            expected = row_sums[x] * col_sums[y] / sample_size
            # compute difference and apply Yates correction
            d = abs(matrix[x, y] - expected) - correction
            chi += (d * d) / expected

    result = ChiSquaredTest()

    # chi2.sf is the chi-squared survival function; scipy.stats.chisqprob
    # was a deprecated alias for it and has been removed from recent scipy.
    result.mProbability = scipy.stats.chi2.sf(chi, df)
    result.mDegreesFreedom = df
    result.mChiSquaredValue = chi
    result.mPassed = result.mProbability < significance_threshold
    result.mSignificance = getSignificance(result.mProbability)
    result.mSampleSize = sample_size
    result.mPhi = math.sqrt(result.mChiSquaredValue / result.mSampleSize)
    return result
def doPearsonChiSquaredTest( p, sample_size, observed, significance_threshold = 0.05):
    """perform a pearson chi squared test.

    Given are p: the probability of the NULL hypothesis, the sample_size
    and the number of observed counts.

    For large sample sizes, this test is a continuous approximation to
    the binomial test.

    Returns a :class:`ChiSquaredTest` result with one degree of freedom.
    """
    e = float(p) * sample_size
    d = float(observed) - e
    chi = d * d / e
    df = 1

    result = ChiSquaredTest()
    # chi2.sf is the chi-squared survival function; scipy.stats.chisqprob
    # was a deprecated alias for it and has been removed from recent scipy.
    result.mProbability = scipy.stats.chi2.sf(chi, df)
    result.mDegreesFreedom = df
    result.mChiSquaredValue = chi
    result.mPassed = result.mProbability < significance_threshold
    result.mSignificance = getSignificance(result.mProbability)
    result.mSampleSize = sample_size
    result.mPhi = math.sqrt(result.mChiSquaredValue / result.mSampleSize)
    result.mObserved = observed
    result.mExpected = e
    return result
#################################################################
#################################################################
#################################################################
## Convenience functions and objects for statistical analysis
class DistributionalParameters:
    """a collection of distributional parameters. Available properties
    are:

    mMean, mMedian, mMin, mMax, mSampleStd, mSum, mCounts

    This method is deprecated - use :class:`Summary` instead.

    .. note:: this class relies on Python 2 only constructs
       (``types.IntType``, ``raise E, msg``, builtin ``reduce``, integer
       ``/`` division) and on the ``scipy.mean/median/std`` aliases that
       have been removed from recent scipy releases.
    """

    def __init__(self, values = None, format = "%6.4f", mode="float"):
        # initialize all statistics to 0; they are overwritten by
        # updateProperties() if data is supplied
        self.mMean, self.mMedian, self.mMin, self.mMax, self.mSampleStd, self.mSum, self.mCounts, self.mQ1, self.mQ3 = \
            (0, 0, 0, 0, 0, 0, 0, 0, 0)
        if values != None and len(values) > 0: self.updateProperties( values )
        self.mFormat = format
        self.mMode = mode
        self.mNErrors = 0

    def updateProperties( self, values):
        """update properties.

        If values is an vector of strings, each entry will be converted
        to float. Entries that can not be converted are ignored (their
        number is recorded in mNErrors).

        Raises ValueError if no values remain after filtering None.
        """
        # drop missing values
        values = [x for x in values if x != None ]

        if len(values) == 0:
            raise ValueError( "no data for statistics" )

        ## convert
        self.mNErrors = 0
        if type(values[0]) not in (types.IntType, types.FloatType):
            n = []
            for x in values:
                try:
                    n.append( float(x) )
                except ValueError:
                    self.mNErrors += 1
        else:
            n = values

        if len(n) == 0:
            raise ValueError( "no data for statistics" )

        ## use a non-sort algorithm later.
        # NOTE: sorts in place - when the input is already numeric this
        # modifies the caller's list.
        n.sort()
        # quartiles by index (Python 2 integer division)
        self.mQ1 = n[len(n) / 4]
        self.mQ3 = n[len(n) * 3 / 4]

        self.mCounts = len(n)
        self.mMin = min(n)
        self.mMax = max(n)
        # NOTE(review): scipy.mean/median/std are removed aliases of the
        # numpy functions in recent scipy - confirm the pinned scipy version.
        self.mMean = scipy.mean( n )
        self.mMedian = scipy.median( n )
        self.mSampleStd = scipy.std( n )
        self.mSum = reduce( lambda x, y: x+y, n )

    def getZScore( self, value ):
        """return zscore for value (0 if the standard deviation is 0)."""
        if self.mSampleStd > 0:
            return (value - self.mMean) / self.mSampleStd
        else:
            return 0

    def setFormat( self, format ):
        """set number format."""
        self.mFormat = format

    def getHeaders( self ):
        """returns header of column separated values."""
        return ("nval", "min", "max", "mean", "median", "stddev", "sum", "q1", "q3")

    def getHeader( self ):
        """returns header of column separated values."""
        return "\t".join( self.getHeaders())

    def items(self):
        # (header, value) pairs in output order
        return [ (x, self.__getitem__(x)) for x in self.getHeaders() ]

    def __getitem__( self, key ):
        # map the public header names onto the internal attributes
        if key == "nval": return self.mCounts
        if key == "min": return self.mMin
        if key == "max": return self.mMax
        if key == "mean": return self.mMean
        if key == "median": return self.mMedian
        if key == "stddev": return self.mSampleStd
        if key == "sum": return self.mSum
        if key == "q1": return self.mQ1
        if key == "q3": return self.mQ3
        raise KeyError, key

    def __str__( self ):
        """return string representation of data."""
        # integer mode prints whole numbers except for the median, which
        # can fall between two values
        if self.mMode == "int":
            format_vals = "%i"
            format_median = "%.1f"
        else:
            format_vals = self.mFormat
            format_median = self.mFormat

        return "\t".join( ( "%i" % self.mCounts,
                            format_vals % self.mMin,
                            format_vals % self.mMax,
                            self.mFormat % self.mMean,
                            format_median % self.mMedian,
                            self.mFormat % self.mSampleStd,
                            format_vals % self.mSum,
                            format_vals % self.mQ1,
                            format_vals % self.mQ3,
                            ) )
class Summary( Result ):
    """a collection of distributional parameters. Available properties
    are:

    counts, min, max, mean, median, samplestd, sum, q1, q3

    ``None`` entries are removed before computing the summary.  If the
    values are not numeric they are converted to float; entries failing
    the conversion are counted in ``_nerrors`` and skipped.
    """

    # order of fields determines the column order of the string output
    fields = ("nval", "min", "max", "mean", "median", "stddev", "sum", "q1", "q3")

    def __init__(self, values = None,
                 format = "%6.4f", mode="float",
                 allow_empty = True ):
        """compute summary statistics for *values*.

        :param values: sequence of numbers or of strings convertible to float
        :param format: format string for floating point output
        :param mode: "int" or "float" - affects output formatting only
        :param allow_empty: if False, raise ValueError when no usable
            data is present; otherwise return an all-zero summary
        """
        Result.__init__(self)
        self._format = format
        self._mode = mode

        # note that this determines the order of the fields at output
        self.counts, self.min, self.max, self.mean, self.median, \
            self.samplestd, self.sum, self.q1, self.q3 = \
            (0, 0, 0, 0, 0, 0, 0, 0, 0)

        if values is None:
            return

        # drop missing values
        values = [x for x in values if x is not None]
        if len(values) == 0:
            if allow_empty:
                return
            raise ValueError("no data for statistics")

        # convert to float where necessary, counting conversion errors
        self._nerrors = 0
        if type(values[0]) not in (int, float):
            n = []
            for x in values:
                try:
                    n.append(float(x))
                except ValueError:
                    self._nerrors += 1
        else:
            n = values

        if len(n) == 0:
            # every entry failed the float conversion; previously this
            # crashed below in min() with a confusing message
            if allow_empty:
                return
            raise ValueError("no data for statistics")

        # NOTE: sorts in place - when the input is already numeric this
        # modifies the caller's list (kept for backward compatibility)
        n.sort()
        self.q1 = n[len(n) // 4]
        self.q3 = n[(len(n) * 3) // 4]

        self.counts = len(n)
        self.min = min(n)
        self.max = max(n)
        # scipy.mean/median/std were aliases of the numpy functions and
        # have been removed from recent scipy - use numpy directly
        self.mean = numpy.mean(n)
        self.median = numpy.median(n)
        self.samplestd = numpy.std(n)
        self.sum = sum(n)

    def getHeaders(self):
        """returns header of column separated values."""
        return self.fields

    def getHeader(self):
        """returns header of column separated values."""
        return "\t".join(self.getHeaders())

    def __str__(self):
        """return string representation of data."""
        # integer mode prints whole numbers except for the median, which
        # can fall between two values
        if self._mode == "int":
            format_vals = "%i"
            format_median = "%.1f"
        else:
            format_vals = self._format
            format_median = self._format

        return "\t".join((
            "%i" % self.counts,
            format_vals % self.min,
            format_vals % self.max,
            self._format % self.mean,
            format_median % self.median,
            self._format % self.samplestd,
            format_vals % self.sum,
            format_vals % self.q1,
            format_vals % self.q3,
        ))
def adjustPValues( pvalues, method ):
    '''adjust P-Values for multiple testing using
    the p.adjust() method in R.

    Possible values of method are:
    c("holm", "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr", "none")

    Requires a running embedded R session (rpy2).  The adjusted values
    are returned as an rpy2 vector object, not a Python list.
    '''
    # delegate directly to R's stats::p.adjust
    return R.p_adjust( pvalues, method )
def smoothPValues( pvalues,
                   vlambda=numpy.arange(0,0.95,0.05),
                   smooth_df = 3,
                   smooth_log_pi0 = False):
    """estimate pi0 (the proportion of true null hypotheses) from
    *pvalues* by spline-smoothing the pi0(lambda) estimates in R and
    evaluating the smoother at max(vlambda).

    Requires a running embedded R session (rpy2).  Raises ValueError for
    p-values or lambda values out of range.

    .. note:: contains debug ``print`` statements (Python 2 syntax).
    """
    if min(pvalues) < 0 or max(pvalues) > 1:
        raise ValueError( "p-values out of range" )

    if len(vlambda) > 1 and len(vlambda) < 4:
        raise ValueError(" If length of vlambda greater than 1, you need at least 4 values." )

    if len(vlambda) > 1 and (min(vlambda) < 0 or max(vlambda) >= 1):
        raise ValueError( "vlambda must be within [0, 1).")

    m = len(pvalues)

    # raw pi0 estimate for each lambda: fraction of p-values above
    # lambda, rescaled by 1/(1-lambda)
    # NOTE(review): numpy.float has been removed from numpy >= 1.24;
    # confirm the pinned numpy version or switch to numpy.float64.
    pi0 = numpy.zeros( len(vlambda), numpy.float )

    for i in range( len(vlambda) ):
        pi0[i] = numpy.mean( [x >= vlambda[i] for x in pvalues ]) / (1.0 -vlambda[i] )

    R.assign( "pi0", pi0)
    R.assign( "vlambda", vlambda)
    print "pi0=", pi0
    if smooth_log_pi0:
        # NOTE(review): math.log() on a multi-element numpy array raises
        # TypeError - this branch looks untested; confirm before use.
        pi0 = math.log(pi0)

    R.assign( "smooth_df", smooth_df)

    # fit a smoothing spline to pi0(lambda) and evaluate at max(vlambda)
    spi0 = R("""spi0 <- smooth.spline(vlambda,pi0, df = smooth_df)""")
    pi0 = R("""pi0 <- predict( spi0, x = max(vlambda) )$y""")
    print spi0
    if smooth_log_pi0:
        pi0 = math.exp(pi0)

    return pi0
def getPi0( pvalues,
            vlambda=numpy.arange(0,0.95,0.05),
            pi0_method="smoother",
            smooth_df = 3,
            smooth_log_pi0 = False):
    '''estimate pi0, the proportion of true null hypotheses, from a list
    of p-values (used within nubiscan).

    *pi0_method* is either "smoother" (spline-smooth the pi0(lambda)
    estimates and evaluate at max(vlambda)) or "bootstrap" (Storey's
    bootstrap selection).  The numerical work is delegated to an embedded
    R session (rpy2).

    Raises ValueError for p-values or lambda values out of range, for an
    unknown *pi0_method*, or when the resulting estimate is <= 0.
    '''
    if min(pvalues) < 0 or max(pvalues) > 1:
        raise ValueError( "p-values out of range" )

    if len(vlambda) > 1 and len(vlambda) < 4:
        raise ValueError(" If length of vlambda greater than 1, you need at least 4 values." )

    if len(vlambda) > 1 and (min(vlambda) < 0 or max(vlambda) >= 1):
        raise ValueError( "vlambda must be within [0, 1).")

    m = len(pvalues)

    # these next few branches are the various ways to estimate pi0
    if len(vlambda)==1:
        # single lambda: direct estimate, no smoothing
        vlambda = vlambda[0]
        if vlambda < 0 or vlambda >=1 :
            raise ValueError( "vlambda must be within [0, 1).")
        pi0 = numpy.mean( [ x >= vlambda for x in pvalues ] ) / (1.0 - vlambda)
        pi0 = min(pi0, 1.0)
        R.assign( "pi0", pi0)
    else:
        # raw pi0 estimate for each lambda
        # NOTE(review): numpy.float has been removed from numpy >= 1.24;
        # confirm the pinned numpy version or switch to numpy.float64.
        pi0 = numpy.zeros( len(vlambda), numpy.float )
        for i in range( len(vlambda) ):
            pi0[i] = numpy.mean( [x >= vlambda[i] for x in pvalues ]) / (1.0 -vlambda[i] )

        R.assign( "pi0", pi0)
        R.assign( "vlambda", vlambda)

        if pi0_method=="smoother":
            if smooth_log_pi0:
                # NOTE(review): math.log() on a multi-element numpy array
                # raises TypeError - this branch looks untested.
                pi0 = math.log(pi0)
            R.assign( "smooth_df", smooth_df)
            # fit a smoothing spline and evaluate it at max(vlambda)
            spi0 = R("""spi0 <- smooth.spline(vlambda,pi0, df = smooth_df)""")
            pi0 = R("""pi0 <- predict( spi0, x = max(vlambda) )$y""")
            if smooth_log_pi0:
                pi0 = math.exp(pi0)

        elif pi0_method=="bootstrap":
            # Storey's bootstrap: pick the lambda minimizing the MSE of
            # the bootstrapped pi0 estimates against the minimum estimate
            minpi0 = min(pi0)
            mse = numpy.zeros( len(vlambda), numpy.float )
            pi0_boot = numpy.zeros( len(vlambda), numpy.float )
            R.assign( "pvalues", pvalues)
            pi0 = R("""
            m <- length(pvalues)
            minpi0 <- min(pi0)
            mse <- rep(0,length(vlambda))
            pi0_boot <- rep(0,length(vlambda))
            for(i in 1:100)
            {
                pvalues_boot <- sample(pvalues,size=m,replace=TRUE)
                for(i in 1:length(vlambda))
                {
                    pi0_boot[i] <- mean(pvalues_boot>vlambda[i])/(1-vlambda[i])
                }
                mse <- mse + (pi0_boot-minpi0)^2
            }
            pi0 <- min(pi0[mse==min(mse)])""")
        else:
            raise ValueError( "'pi0_method' must be one of 'smoother' or 'bootstrap'.")

    # clamp to the admissible range before the sanity check
    pi0 = min(pi0,1.0)

    if pi0 <= 0:
        raise ValueError( "The estimated pi0 <= 0. Check that you have valid p-values or use another vlambda method." )

    return pi0
class FDRResult:
    """result of an FDR computation (see :func:`doFDR`).

    Attributes filled in by doFDR: mPValues, mQValues, mPassed, mPi0,
    mLambda.
    """

    def __init__(self):
        pass

    def plot(self, hardcopy = None):
        """produce the diagnostic q-value plot via R's 'qvalue' package.

        If *hardcopy* is given the plot is written to that file as png,
        otherwise it goes to the active R graphics device.  Requires a
        running embedded R session with the qvalue package installed.
        """
        if hardcopy:
            R.png(hardcopy, width=1024, height=768, type="cairo")

        R.require('qvalue')

        # build a qvalue object ("qobj") from the stored attributes so
        # that qvalue's own plotting routine can be reused
        R.assign( "pval", self.mPValues )
        R.assign( "pi0", self.mPi0 )
        R.assign( "qval", self.mQValues )
        R.assign( "lambda", self.mLambda )
        R("""qobj <-list( pi0=pi0, qvalues=qval, pvalues=pval, lambda=lambda)""")
        R(""" class(qobj) <- "qvalue" """)

        R("""qplot(qobj)""")

        if hardcopy:
            R.dev_off()
def doFDR(pvalues,
          vlambda=None,
          pi0_method="smoother",
          fdr_level=None,
          robust=False,
          smooth_df = 3,
          smooth_log_pi0 = False,
          plot = False ):
    """modeled after code taken from http://genomics.princeton.edu/storeylab/qvalue/linux.html.

    I did not like the error handling so I translated most to python.

    Compute FDR after method by Storey et al. (2002).

    :param pvalues: list of raw p-values
    :param vlambda: lambda grid for the pi0 estimate
        (default numpy.arange(0, 0.95, 0.05))
    :param pi0_method: "smoother" or "bootstrap"
    :param fdr_level: if set, store per-test pass/fail flags at this
        q-value cutoff in the result
    :param robust: use the small-sample robust q-value estimate
    :param plot: show the smoother fit (requires matplotlib)

    Requires a running embedded R session (rpy2).  Returns a
    :class:`FDRResult` with q-values, pi0 and the pass flags.
    """

    # set to default of qvalue method
    if vlambda == None: vlambda = numpy.arange(0,0.95,0.05)

    if min(pvalues) < 0 or max(pvalues) > 1:
        raise ValueError( "p-values out of range" )

    if type(vlambda) == float:
        vlambda = (vlambda, )

    if len(vlambda) > 1 and len(vlambda) < 4:
        raise ValueError(" If length of vlambda greater than 1, you need at least 4 values." )

    if len(vlambda) > 1 and (min(vlambda) < 0 or max(vlambda) >= 1):
        raise ValueError( "vlambda must be within [0, 1).")

    m = len(pvalues)

    # these next few branches are the various ways to estimate pi0
    if len(vlambda)==1:
        # single lambda: direct estimate, no smoothing
        vlambda = vlambda[0]
        if vlambda < 0 or vlambda >=1 :
            raise ValueError( "vlambda must be within [0, 1).")
        pi0 = numpy.mean( [ x >= vlambda for x in pvalues ] ) / (1.0 - vlambda)
        pi0 = min(pi0, 1.0)
        R.assign( "pi0", pi0)
    else:
        # raw pi0 estimate for each lambda
        # NOTE(review): numpy.float has been removed from numpy >= 1.24;
        # confirm the pinned numpy version or switch to numpy.float64.
        pi0 = numpy.zeros( len(vlambda), numpy.float )
        for i in range( len(vlambda) ):
            pi0[i] = numpy.mean( [x >= vlambda[i] for x in pvalues ]) / (1.0 -vlambda[i] )

        R.assign( "pi0", pi0)
        R.assign( "vlambda", vlambda)

        if pi0_method=="smoother":
            if smooth_log_pi0:
                # NOTE(review): math.log() on a multi-element numpy array
                # raises TypeError - this branch looks untested.
                pi0 = math.log(pi0)

            R.assign( "smooth_df", smooth_df)

            # fit a smoothing spline to pi0(lambda)
            spi0 = R("""spi0 <- smooth.spline(vlambda,pi0, df = smooth_df)""")
            if plot:
                import matplotlib.pyplot as plt
                plt.figure()
                plt.plot( vlambda, pi0 )
                x2 = numpy.arange( 0, 1, 0.001 )
                R.assign( "x2", x2)
                y2 = R("""y2 <- predict( spi0, x = x2 )$y""")
                plt.plot( x2, y2 )
                plt.show()

            # evaluate the smoother at the largest lambda
            pi0 = R("""pi0 <- predict( spi0, x = max(vlambda) )$y""")[0]

            if smooth_log_pi0:
                pi0 = math.exp(pi0)

        elif pi0_method=="bootstrap":
            # Storey's bootstrap: pick the lambda minimizing the MSE of
            # the bootstrapped pi0 estimates against the minimum estimate
            minpi0 = min(pi0)

            mse = numpy.zeros( len(vlambda), numpy.float )
            pi0_boot = numpy.zeros( len(vlambda), numpy.float )

            R.assign( "pvalues", pvalues)
            pi0 = R("""
            m <- length(pvalues)
            minpi0 <- min(pi0)
            mse <- rep(0,length(vlambda))
            pi0_boot <- rep(0,length(vlambda))
            for(i in 1:100)
            {
                pvalues_boot <- sample(pvalues,size=m,replace=TRUE)
                for(i in 1:length(vlambda))
                {
                    pi0_boot[i] <- mean(pvalues_boot>vlambda[i])/(1-vlambda[i])
                }
                mse <- mse + (pi0_boot-minpi0)^2
            }
            pi0 <- min(pi0[mse==min(mse)])""")[0]
        else:
            raise ValueError( "'pi0_method' must be one of 'smoother' or 'bootstrap'.")

        pi0 = min(pi0,1.0)

    R.assign( "pi0", pi0 )

    if pi0 <= 0:
        raise ValueError( "The estimated pi0 (%f) <= 0. Check that you have valid p-values or use another vlambda method." % pi0)

    if fdr_level != None and (fdr_level <= 0 or fdr_level > 1):
        raise ValueError( "'fdr_level' must be within (0, 1].")

    # The estimated q-values calculated here
    #u = numpy.argsort( p )

    # change by Alan
    # ranking function which returns number of observations less than or equal
    ro.globalenv['pvalues'] = ro.FloatVector( pvalues )
    R.assign( "robust", robust )
    # compute q-values in R: rank each p-value (ties counted), scale by
    # pi0 * m / rank, then enforce monotonicity from the largest p-value
    # downwards
    qvalues = R("""u <- order(pvalues)
    qvalues.rank <- function(x)
    {
        idx <- sort.list(x)
        fc <- factor(x)
        nl <- length(levels(fc))
        bin <- as.integer(fc)
        tbl <- tabulate(bin)
        cs <- cumsum(tbl)
        tbl <- rep(cs, tbl)
        tbl[idx] <- tbl
        return(tbl)
    }
    v <- qvalues.rank(pvalues)
    m <- length(pvalues)

    qvalues <- pi0 * m * pvalues / v
    if(robust)
    {
        qvalues <- pi0*m*pvalues/(v*(1-(1-pvalues)^m))
    }
    qvalues[u[m]] <- min(qvalues[u[m]],1)
    rqvalues <- qvalues
    for(i in (m-1):1)
    {
        qvalues[u[i]] <- min(qvalues[u[i]],qvalues[u[i+1]],1)
    }
    qvalues
    """)

    result = FDRResult()
    result.mQValues = qvalues

    if fdr_level != None:
        result.mPassed = [ x <= fdr_level for x in result.mQValues ]
    else:
        result.mPassed = [ False for x in result.mQValues ]

    result.mPValues = pvalues
    result.mPi0 = pi0
    result.mLambda = vlambda

    return result
def doFDRPython(pvalues,
                vlambda=None,
                pi0_method="smoother",
                fdr_level=None,
                robust=False,
                smooth_df = 3,
                smooth_log_pi0 = False,
                pi0 = None,
                plot = False ):
    """Compute q-values from *pvalues* using the FDR method of Storey et al. (2002).

    Modeled after code taken from
    http://genomics.princeton.edu/storeylab/qvalue/linux.html, with the
    error handling translated to python.

    :param pvalues: sequence of p-values, each within [0, 1].
    :param vlambda: lambda value(s) used to estimate pi0; defaults to
        ``numpy.arange(0, 0.95, 0.05)``. Either a single float in [0, 1)
        or a sequence of at least 4 such values.
    :param pi0_method: ``"smoother"`` (spline-smooth pi0 over lambda) or
        ``"bootstrap"`` (Storey's bootstrap estimator).
    :param fdr_level: if given (within (0, 1]), flag q-values below it.
    :param robust: apply the small-m robust correction to the q-values.
    :param smooth_df: spline degree used by the smoother.
    :param smooth_log_pi0: smooth log(pi0) instead of pi0.
    :param pi0: skip estimation and use this pi0 directly.
    :param plot: plot the pi0 smoother fit (imports matplotlib lazily).
    :return: an ``FDRResult`` carrying mQValues, mPValues, mPi0, mLambda
        and mPassed.
    :raises ValueError: for out-of-range inputs or a non-positive pi0.
    """
    if min(pvalues) < 0 or max(pvalues) > 1:
        raise ValueError( "p-values out of range" )

    # set to default of qvalue method
    if vlambda is None:
        vlambda = numpy.arange(0, 0.95, 0.05)

    m = len(pvalues)
    # numpy.float was removed from modern numpy; float64 is the same dtype
    pvalues = numpy.array( pvalues, dtype = numpy.float64 )

    if pi0 is None:
        # promote a single lambda to a one-element sequence
        if isinstance(vlambda, float):
            vlambda = (vlambda,)

        if len(vlambda) > 1 and len(vlambda) < 4:
            raise ValueError(" if length of vlambda greater than 1, you need at least 4 values." )

        if len(vlambda) > 1 and (min(vlambda) < 0 or max(vlambda) >= 1):
            raise ValueError( "vlambda must be within [0, 1).")

        # estimate pi0, the proportion of true null hypotheses
        if len(vlambda) == 1:
            vlambda = vlambda[0]
            if vlambda < 0 or vlambda >= 1:
                raise ValueError( "vlambda must be within [0, 1).")
            pi0 = numpy.mean( [ x >= vlambda for x in pvalues ] ) / (1.0 - vlambda)
            pi0 = min(pi0, 1.0)
        else:
            # pi0 estimated at each lambda, then extrapolated to max(vlambda)
            pi0 = numpy.zeros( len(vlambda), numpy.float64 )
            for i in range( len(vlambda) ):
                pi0[i] = numpy.mean( [x >= vlambda[i] for x in pvalues ]) / (1.0 - vlambda[i] )

            if pi0_method == "smoother":
                if smooth_log_pi0:
                    # numpy.log, not math.log: pi0 is an array at this point
                    pi0 = numpy.log(pi0)

                tck = scipy.interpolate.splrep( vlambda,
                                                pi0,
                                                k = smooth_df,
                                                s = 10000 )

                if plot:
                    import matplotlib.pyplot as plt
                    plt.figure()
                    plt.plot( vlambda, pi0 )
                    x2 = numpy.arange( 0, 1, 0.001 )
                    y2 = scipy.interpolate.splev( x2, tck )
                    plt.plot( x2, y2 )
                    plt.show()

                # evaluate the smoothed pi0 at the largest lambda
                pi0 = scipy.interpolate.splev( max(vlambda), tck )
                if smooth_log_pi0:
                    pi0 = numpy.exp(pi0)

            elif pi0_method == "bootstrap":
                # Storey's bootstrap: choose the lambda minimising the MSE
                # against the most conservative (smallest) pi0 estimate
                minpi0 = min(pi0)
                mse = numpy.zeros( len(vlambda), numpy.float64 )
                pi0_boot = numpy.zeros( len(vlambda), numpy.float64 )
                for i in range(100):
                    # resample pvalues with replacement
                    # (randint(0, m) matches the deprecated
                    # random_integers(0, m - 1) inclusive range)
                    idx_boot = numpy.random.randint( 0, m, m )
                    pvalues_boot = pvalues[idx_boot]
                    for x in range( len(vlambda) ):
                        # proportion of bootstrap pvalues larger than lambda[x]
                        pi0_boot[x] = numpy.mean( pvalues_boot > vlambda[x] ) / (1.0 - vlambda[x])
                    mse += (pi0_boot - minpi0) ** 2
                pi0 = min( pi0[mse == min(mse)] )
            else:
                raise ValueError( "'pi0_method' must be one of 'smoother' or 'bootstrap'.")

    # pi0 is a proportion: clip at 1 (also applied to user-supplied pi0)
    pi0 = min(pi0, 1.0)

    if pi0 <= 0:
        raise ValueError( "The estimated pi0 <= 0. Check that you have valid p-values or use another vlambda method." )

    if fdr_level is not None and (fdr_level <= 0 or fdr_level > 1):
        raise ValueError( "'fdr_level' must be within (0, 1].")

    # compute qvalues
    idx = numpy.argsort( pvalues )
    # monotonically decreasing bins, so that bins[i-1] > x >= bins[i]
    bins = numpy.unique( pvalues )[::-1]

    # v[i] = number of observations less than or equal to pvalue[i];
    # tied p-values all receive the rank of the last tied observation
    val2bin = len(bins) - numpy.digitize( pvalues, bins )
    v = numpy.zeros( m, dtype = numpy.int64 )
    lastbin = None
    for x in range( m - 1, -1, -1 ):
        bin = val2bin[idx[x]]
        if bin != lastbin:
            c = x
        v[idx[x]] = c + 1
        lastbin = bin

    qvalues = pvalues * pi0 * m / v
    if robust:
        # small-sample robust correction of Storey et al.
        qvalues /= ( 1.0 - ( 1.0 - pvalues) ** m )

    # bound qvalues by 1 and make them monotonic in p-value order
    qvalues[idx[m - 1]] = min(qvalues[idx[m - 1]], 1.0)
    for i in range(m - 2, -1, -1):
        qvalues[idx[i]] = min(min(qvalues[idx[i]], qvalues[idx[i + 1]]), 1.0)

    result = FDRResult()
    result.mQValues = qvalues
    if fdr_level is not None:
        result.mPassed = [ x <= fdr_level for x in result.mQValues ]
    else:
        result.mPassed = [ False for x in result.mQValues ]
    result.mPValues = pvalues
    result.mPi0 = pi0
    result.mLambda = vlambda
    result.xvalues = qvalues
    return result
#################################################################
#################################################################
#################################################################
class CorrelationTest:
    '''Result container for a correlation test (the coefficient is r, not r squared).'''

    def __init__(self,
                 r_result = None,
                 s_result = None,
                 method = None):
        """Populate the result from either an R ``cor.test`` result
        (*r_result*), a scipy ``(coefficient, pvalue)`` pair (*s_result*),
        or neither (all fields default to neutral "na" values).
        """
        self.mPValue = None
        self.mMethod = None
        if r_result:
            # R cor.test result: estimate, p-value, degrees of freedom, metadata
            self.mCoefficient = r_result['estimate']['cor']
            self.mPValue = float(r_result['p.value'])
            self.mNObservations = r_result['parameter']['df']
            self.mMethod = r_result['method']
            self.mAlternative = r_result['alternative']
        elif s_result:
            # scipy result: a plain (coefficient, pvalue) tuple.
            # NOTE(review): the observation count is not available here;
            # doCorrelationTest overwrites mNObservations afterwards.
            self.mCoefficient = s_result[0]
            self.mPValue = s_result[1]
            self.mNObservations = 0
            self.mAlternative = "two-sided"
        else:
            # no input: neutral placeholder values
            self.mCoefficient = 0
            self.mPValue = 1
            self.mSignificance = "na"
            self.mNObservations = 0
            self.mAlternative = "na"
            self.mMethod = "na"
        if method: self.mMethod = method
        # derive the significance code from the p-value (external helper)
        if self.mPValue != None:
            self.mSignificance = getSignificance( self.mPValue )

    def __str__(self):
        """Tab-separated rendering; columns match getHeaders()."""
        return "\t".join( (
            "%6.4f" % self.mCoefficient,
            "%e" % self.mPValue,
            self.mSignificance,
            "%i" % self.mNObservations,
            self.mMethod,
            self.mAlternative ) )

    @classmethod
    def getHeaders(cls):
        """Return the column headers corresponding to __str__."""
        return ("coeff", "pvalue", "significance", "observations", "method", "alternative" )
def filterMasked( xvals, yvals, missing = ("na", "Nan", None, ""), dtype = numpy.float64 ):
    """Convert *xvals* and *yvals* to numpy arrays, skipping pairs with
    one or more missing values.

    Fixed: the original masked each vector independently, which could
    return arrays of different lengths when the missing entries were in
    different positions; a pair is now dropped when EITHER value is a
    missing-value marker, as the contract states.

    :param xvals: first data vector.
    :param yvals: second data vector (same length as *xvals*).
    :param missing: values treated as missing markers.
    :param dtype: dtype of the returned arrays.
    :return: tuple of two equally long numpy arrays.
    """
    # combined pair mask: True when either member of the pair is missing
    pair_mask = [ x in missing or y in missing for x, y in zip(xvals, yvals) ]
    return (numpy.array( [x for x, masked in zip(xvals, pair_mask) if not masked], dtype = dtype ),
            numpy.array( [y for y, masked in zip(yvals, pair_mask) if not masked], dtype = dtype ))
def doCorrelationTest( xvals, yvals ):
    """Compute the Pearson correlation between *xvals* and *yvals*.

    Pairs containing missing-value markers are removed via filterMasked
    before the coefficient is computed.

    Raises a value-error if there are not enough observations or the two
    vectors differ in length.
    """
    if len(xvals) <= 1 or len(yvals) <= 1:
        raise ValueError( "can not compute correlation with no data" )
    if len(xvals) != len(yvals):
        raise ValueError( "data vectors have unequal length" )

    xarr, yarr = filterMasked( xvals, yvals )
    pearson = scipy.stats.pearsonr( xarr, yarr )
    result = CorrelationTest( s_result = pearson,
                              method = "pearson" )
    result.mNObservations = len(xarr)
    return result
def getPooledVariance( data ):
    """Return the pooled variance of several samples.

    *data* is a list of ``(sample_size, variance)`` tuples. Each sample
    contributes ``(n - 1) * variance`` and the total is divided by the
    pooled degrees of freedom ``sum(n) - len(data)``.
    """
    total_n = sum( n for n, _ in data )
    weighted_ss = sum( (n - 1) * s for n, s in data )
    assert total_n > len(data), "sample size smaller than samples combined"
    return weighted_ss / float(total_n - len(data))
###################################################################
###################################################################
###################################################################
## compute ROC curves from sorted values
###################################################################
def computeROC( values ):
    '''return a roc curve for *values*. Values
    is a sorted list of (value, bool) pairs.

    A point is emitted when a value is first encountered; subsequent
    tied scores update the counts but do not add points, and consecutive
    points with an unchanged FPR are collapsed.

    Deprecated - use getPerformance instead

    returns a list of (FPR,TPR) tuples.

    :raises ValueError: if there are no positives among *values*.
    '''
    roc = []

    npositives = len( [x for x in values if x[1] ] )
    if npositives == 0:
        raise ValueError( "no positives among values" )

    ntotal = len(values)

    last_value, last_fpr = None, None
    tp, fp = 0, 0
    # everything starts classified as negative
    tn, fn = ntotal - npositives, npositives

    for value, is_positive in values:
        if is_positive:
            tp += 1
            fn -= 1
        else:
            fp += 1
            tn -= 1

        if last_value != value:
            try:
                tpr = float(tp) / (tp + fn)
            except ZeroDivisionError:
                tpr = 0
            try:
                fpr = float(fp) / (fp + tn)
            except ZeroDivisionError:
                fpr = 0

            if last_fpr != fpr:
                roc.append( (fpr,tpr) )
                last_fpr = fpr
            # fixed: was 'last_values = value' (typo), so last_value was
            # never updated and tied scores each produced their own point
            last_value = value

    return roc
class TTest:
    """Empty result container for a t-test; attributes are attached dynamically."""
    def __init__(self): pass
class WelchTTest:
    """Result container for doWelchsTTest; fields (mPValue, mDegreesFreedom,
    mZ, ...) are attached dynamically by that function."""
    def __init__(self): pass
PairedTTest = collections.namedtuple( "PairedTTest", "statistic pvalue" )


def doPairedTTest( vals1, vals2 ):
    '''Perform a paired t-test.

    vals1 and vals2 need to contain the same number of elements.

    Returns a PairedTTest namedtuple with the test statistic and the
    two-sided p-value.
    '''
    statistic, pvalue = scipy.stats.ttest_rel( vals1, vals2 )
    return PairedTTest( statistic = statistic, pvalue = pvalue )
def doWelchsTTest(n1, mean1, std1,
                  n2, mean2, std2,
                  alpha = 0.05 ):
    '''Welch's approximate t-test for the difference of two means of
    heteroscedasctic populations.

    This functions does a two-tailed test.

    see PMID: 12016052

    :Parameters:
       n1 : int
          number of variates in sample 1
       n2 : int
          number of variates in sample 2
       mean1 : float
          mean of sample 1
       mean2 : float
          mean of sample 2
       std1 : float
          standard deviation of sample 1
       std2 : float
          standard deviation of sample 2
       alpha : float
          tail probability used for the confidence bounds

    returns a WelchTTest

    NOTE(review): assumes n1 >= 2 and n2 >= 2, otherwise the variance
    conversion divides by zero.
    '''
    if std1 == 0 and std2 == 0:
        raise ValueError( 'standard deviations are 0.')

    # convert standard deviation to sample variance
    svar1 = std1**2 * n1 / float(n1-1)
    svar2 = std2**2 * n2 / float(n2-1)

    # compute df and test statistic
    # (Welch-Satterthwaite approximation of the degrees of freedom)
    df = ((svar1/n1 + svar2/n2)**2) / ( ((svar1/n1)**2)/(n1-1) + ((svar2/n2)**2)/(n2-1))

    denom = numpy.sqrt(svar1/n1+svar2/n2)
    z = abs(mean1 - mean2) / denom

    # do the test: two-tailed p-value from the t distribution
    pvalue = 2 * scipy.stats.t.sf(z,df)

    result = WelchTTest()
    result.mPValue = pvalue
    result.mDegreesFreedom = df
    result.mZ = z
    result.mMean1 = mean1
    result.mMean2 = mean2
    result.mSampleVariance1 = svar1
    result.mSampleVariance2 = svar2
    result.mDifference = mean1 - mean2
    # NOTE(review): the bounds use alpha and 1-alpha directly, i.e. they
    # span a (1 - 2*alpha) interval; confirm whether alpha/2 was intended
    # for a conventional (1 - alpha) confidence interval.
    result.mZLower = scipy.stats.t.ppf( alpha, df )
    result.mZUpper = scipy.stats.t.ppf( 1.0-alpha, df )
    result.mDifferenceLower = result.mZLower * denom
    result.mDifferenceUpper = result.mZUpper * denom
    return result
###################################################################
###################################################################
###################################################################
##
###################################################################
def getAreaUnderCurve( xvalues, yvalues ):
    '''compute area under curve from a set of discrete x,y coordinates
    using trapezoids.

    This is only as accurate as the density of points.

    :raises AssertionError: if the vectors differ in length or the x
        values are not strictly increasing.
    '''
    assert len(xvalues) == len(yvalues)

    last_x, last_y = xvalues[0], yvalues[0]
    auc = 0
    # walk the remaining points paired with their predecessor; zipping
    # slices works on both python 2 and 3 (the original indexed the zip
    # result, which fails on python 3 where zip returns an iterator)
    for x, y in zip( xvalues[1:], yvalues[1:] ):
        dx = x - last_x
        assert not dx <= 0, "x not increasing: %f >= %f" % (last_x, x)
        dy = abs(last_y - y)
        my = min(last_y, y)
        # rectangle plus triangle; 2.0 forces true division so integer
        # coordinates are not floor-divided on python 2
        auc += dx * my + dx * dy / 2.0
        last_x, last_y = x, y

    return auc
###################################################################
###################################################################
###################################################################
##
###################################################################
def getSensitivityRecall( values ):
    '''return sensitivity/selectivity.

    Values is a sorted list of (value, bool) pairs. For each newly seen
    value, the cumulative precision (positives/predicted) and the
    fraction of predictions made (predicted/total) are emitted; a final
    point records the totals over all values.

    Deprecated - use getPerformance instead
    '''
    npositives = 0.0
    npredicted = 0.0
    last_value = None
    result = []
    total = float(len(values))

    for value, is_positive in values:
        npredicted += 1.0
        if is_positive > 0:
            npositives += 1.0
        if value != last_value:
            result.append( (value, npositives / npredicted, npredicted / total ) )
            last_value = value

    # fixed: was 'if l:' — a final value of 0 (falsy) silently dropped
    # the closing summary point; test explicitly against None instead
    if last_value is not None:
        result.append( (last_value, npositives / npredicted, npredicted / total ) )

    return result
###################################################################
###################################################################
###################################################################
##
###################################################################
ROCResult = collections.namedtuple( "ROCResult",
                                    "value pred tp fp tn fn tpr fpr tnr fnr rtpr rfnr" )


def getPerformance( values,
                    skip_redundant = True,
                    false_negatives = False,
                    bin_by_value = True,
                    monotonous = False,
                    multiple = False,
                    increasing = True,
                    total_positives = None,
                    total_false_negatives = None,
                    ):
    '''compute performance estimates for a list of ``(score, flag)``
    tuples in *values*.

    Values is a sorted list of (value, bool) pairs.

    If the option *false-negative* is set, the input is +/- or 1/0 for a
    true positive or false negative, respectively.

    TP: true positives
    FP: false positives
    TPR: true positive rate = true_positives / predicted
    P: predicted
    FPR: false positive rate = false positives / predicted
    value: value

    NOTE(review): *multiple* and *increasing* are accepted but never used
    in this implementation; *false_negatives* is shadowed by a loop-local
    assignment below. Confirm whether they can be removed.
    '''
    # first pass: accumulate cumulative counts over the sorted values,
    # emitting one (true_positives, predicted, value) bin per distinct score
    true_positives = 0
    predicted = 0

    last_value = None
    binned_values = []
    for value, flag in values:
        if not bin_by_value:
            if last_value != value:
                binned_values.append( (true_positives, predicted, value) )
        else:
            if last_value != None and last_value != value:
                binned_values.append( (true_positives, predicted, last_value) )
        predicted += 1
        if flag: true_positives += 1
        last_value = value
    binned_values.append( (true_positives, predicted, last_value) )
    # NOTE(review): last_value == value here, so this second append
    # duplicates the final bin (and raises NameError on empty input);
    # skip_redundant filters the duplicate downstream — confirm intent.
    binned_values.append( (true_positives, predicted, value) )

    if true_positives == 0:
        raise ValueError("# no true positives!")

    # choose the reference count of positives for the rate denominators
    if total_positives == None:
        if total_false_negatives:
            positives = float(predicted)
        else:
            positives = float(true_positives)
    else:
        positives = float(total_positives)

    last_positives = None
    last_tpr = None

    result = []
    # second pass: convert each cumulative bin into rate estimates
    for true_positives, predicted, value in binned_values:
        if (predicted == 0):
            predicted = 1

        if total_false_negatives:
            # flags distinguish true positives from false negatives only
            false_negatives = predicted - true_positives
            false_positives = 0
            true_negatives = 0
        else:
            true_negatives = 0
            false_negatives = positives - true_positives
            false_positives = predicted - true_positives

        tpr = float(true_positives) / predicted
        # NOTE(review): denominator is (tp + fn), i.e. the positives —
        # this is FP / P rather than the textbook FPR = FP / N; confirm.
        fpr = float(false_positives) / (true_positives + false_negatives )
        fnr = float(false_negatives) / positives
        tnr = 0

        # relative rates (per prediction).
        # NOTE(review): these are stored in the namedtuple slots named
        # 'rtpr' and 'rfnr', so the 'rtpr' field actually carries the
        # relative FALSE positive rate — confirm the field naming.
        rfpr = float(false_positives) / predicted
        rfnr = float(false_negatives) / predicted

        if monotonous and last_tpr and last_tpr < tpr:
            # enforce a non-increasing tpr sequence when requested
            continue
        if skip_redundant and true_positives == last_positives:
            # drop bins that add no new true positives
            continue
        if (predicted > 0):
            result.append( ROCResult._make(
                (value,
                 predicted,
                 true_positives,
                 false_positives,
                 true_negatives,
                 false_negatives,
                 tpr, fpr, tnr, fnr,
                 rfpr, rfnr ) ) )

        last_positives = true_positives
        last_tpr = tpr

    return result
###################################################################
###################################################################
###################################################################
##
###################################################################
def doMannWhitneyUTest( xvals, yvals ):
    '''apply the Mann-Whitney U test to test for the difference of medians.'''
    # unpaired two-sample Wilcoxon rank-sum test via the R interface
    r_result = R.wilcox_test( xvals, yvals, paired = False )
    # map the R result fields onto a generic Result object:
    # 'p.value' -> pvalue; 'alternative' and 'method' keep their names
    result = Result().fromR(
        ( ("pvalue", 'p.value'),
          ('alternative', None),
          ('method', None ) ),
        r_result )
    return result
| [
"andreas.heger@gmail.com"
] | andreas.heger@gmail.com |
f058d7fdba152cf6a99585bac8ef1dbb8de8d21c | 70e2f5dadd8bd57ba0952a0fd86b86b54b44e438 | /preprocessing/preprocess.py | b037838beeec7ef3f7089d15394f81bfd0b73889 | [
"MIT"
] | permissive | egirgin/cmpe493-term-project | 76b8a3d4b01b01387f3fb0defffe57ef329ed345 | 8af20fe33bf3b18d1b8bd66159da7559fe3387a3 | refs/heads/main | 2023-03-01T23:14:29.472601 | 2021-02-15T06:00:40 | 2021-02-15T06:00:40 | 319,282,348 | 0 | 0 | MIT | 2021-02-15T05:55:58 | 2020-12-07T10:24:28 | Python | UTF-8 | Python | false | false | 5,452 | py | import os
import string
import json
import requests
from tqdm import tqdm
import os.path
from os import path
import pickle
import argparse
import pandas as pd
import numpy as np
import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import *
from nltk.stem import WordNetLemmatizer
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-l',
'--lemmatization',
action='store_true')
args = arg_parser.parse_args()
data_path = "./data/text/raw_data.csv"
stop_words = stopwords.words('english')
punctuations = list(string.punctuation)
data = pd.read_csv(data_path,encoding="utf-8")
def preprocess(sentence):
    """
    Processes a string:

    1-) Replace ' with whitespace
    2-) Replace punctuation with whitespace
    3-) Tokenize
    4-) Stopword removal
    5-) Dismiss token if it contains a digit or is a single character
    6-) Lowercase
    7-) Apply Stemming (Porter)

    :sentence: a string which is the concatenated version of either
        'title and abstract' or 'query, question, and narrative';
        non-string input (e.g. NaN from pandas) is treated as empty text
    :return: a list of tokens with the procedures above applied
    """
    # idiomatic type check (was: type(sentence) != type("str"))
    if not isinstance(sentence, str):
        sentence = ""
    else:
        sentence = sentence.replace("'", " ")

    for ch in punctuations:
        sentence = sentence.replace(ch, " ")

    word_tokens = word_tokenize(sentence)
    # NOTE(review): the stopword test runs on the original casing, so a
    # capitalised stopword ("The") survives removal — confirm if intended.
    filtered_sentence = [w.lower() for w in word_tokens
                         if w not in stop_words
                         and not any(ch.isdigit() for ch in w)
                         and len(w) > 1]
    return [stemmer.stem(word) for word in filtered_sentence]
def preprocess_lemma(sentence):
    """
    Processes a string:

    1-) Replace ' with whitespace
    2-) Replace punctuation with whitespace
    3-) Tokenize
    4-) Stopword removal
    5-) Dismiss token if it contains a digit or is a single character
    6-) Lowercase
    7-) Apply Lemmatization (WordNet)

    :sentence: a string which is the concatenated version of either
        'title and abstract' or 'query, question, and narrative';
        non-string input (e.g. NaN from pandas) is treated as empty text
    :return: a list of tokens with the procedures above applied
    """
    # idiomatic type check (was: type(sentence) != type("str"))
    if not isinstance(sentence, str):
        sentence = ""
    else:
        sentence = sentence.replace("'", " ")

    for ch in punctuations:
        sentence = sentence.replace(ch, " ")

    word_tokens = word_tokenize(sentence)
    # NOTE(review): same case-sensitive stopword match as preprocess()
    filtered_sentence = [w.lower() for w in word_tokens
                         if w not in stop_words
                         and not any(ch.isdigit() for ch in w)
                         and len(w) > 1]
    return [lemmatizer.lemmatize(word) for word in filtered_sentence]
def process_df(data, lemma = False):
    """
    Concat title and abstract of every document, then 'preprocess'.

    :data: a dataframe with documents in rows; id, title, and abstract in columns
    :lemma: use lemmatization instead of stemming
    :return: two lists; the first is the list of document ids, the second
        is a 2d list of tokens (rows: docs, columns: tokens)
    """
    if lemma:
        print("Lemmatization is being used.")

    # pick the token pipeline once instead of branching per row
    tokenize = preprocess_lemma if lemma else preprocess

    doc_ids = []
    doc_tokens = []
    for _, row in tqdm(data.iterrows(), total=len(data)):
        doc_tokens.append(tokenize(row["title"]) + tokenize(row["abstract"]))
        doc_ids.append(row["cord_uid"])
    return doc_ids, doc_tokens
def process_query(data):
    """
    Concat query, question, and narrative then 'preprocess'.

    Fixed: the original also accumulated an index list that was never
    returned or used; the dead code has been removed.

    :data: a dataframe with queries in rows; query, question, and
        narrative in columns
    :return: 2d list of tokens (rows: queries, columns: tokens)
    """
    query_tokens = []
    for _, row in data.iterrows():
        combined = row["query"] + " " + row["question"] + " " + row["narrative"]
        query_tokens.append(preprocess(combined))
    return query_tokens
def main():
    """Preprocess the corpus and the train/test queries, pickling each result."""
    print("Creating Corpus...")
    # tokenize every document (title + abstract) in the raw corpus
    lst_index, lst_words = process_df(data, args.lemmatization)
    with open("./preprocessing/processed_data.pickle", "wb") as processedData:
        pickle.dump((lst_index, lst_words), processedData)

    queries = pd.read_json("./data/queries/queries.json")

    #######################################################################
    # Odd number of queries are training set
    learning_queries = queries.loc[queries.loc[:,"number"] % 2 != 0,:]
    processed_train_queries = []
    for i in range(learning_queries.shape[0]):
        # process one query row at a time (as a single-row DataFrame)
        query = process_query(pd.DataFrame(learning_queries.iloc[i,:]).T)
        processed_train_queries.append(query)
    with open("./preprocessing/training_queries.pickle", "wb") as processedData:
        pickle.dump(processed_train_queries, processedData)

    #######################################################################
    # even numbered queries form the test set
    testing_queries = queries.loc[queries.loc[:,"number"] % 2 == 0,:]
    processed_test_queries = []
    for i in range(testing_queries.shape[0]):
        query = process_query(pd.DataFrame(testing_queries.iloc[i,:]).T)
        processed_test_queries.append(query)
    with open("./preprocessing/testing_queries.pickle", "wb") as processedData:
        pickle.dump(processed_test_queries, processedData)


if __name__=="__main__":
    main()
| [
"emregirgin171@gmail.com"
] | emregirgin171@gmail.com |
062c9768ec6208be50fcac993a5ed4c2c65b3752 | 4b661b5cf68cce2e187b159fadd31daecbccaf12 | /devel/lib/python2.7/dist-packages/image_geometry/__init__.py | 4e913b70721e0a6fbe199bde8f54c400424a9a2b | [] | no_license | david-crumley/zed-ros-testing | 3ebf67b15e54ec16d40786788b11f75df38fe1fa | 756610c937f194af69b94f9059f835af6e3b7c65 | refs/heads/master | 2022-07-17T01:48:00.873099 | 2020-05-17T20:02:49 | 2020-05-17T20:02:49 | 264,744,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | # -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from pkgutil import extend_path
from sys import path as sys_path
__extended_path = '/home/nvidia/zed-ros/src/zed-ros-testing/vision_opencv/image_geometry/src'.split(';')
for p in reversed(__extended_path):
sys_path.insert(0, p)
del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
for p in __extended_path:
src_init_file = os_path.join(p, __name__ + '.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
else:
src_init_file = os_path.join(p, __name__, '__init__.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
del src_init_file
del p
del os_path
del __extended_path
for __execfile in __execfiles:
with open(__execfile, 'r') as __fh:
exec(__fh.read())
del __fh
del __execfile
del __execfiles
| [
"david_crumley@knights.ucf.edu"
] | david_crumley@knights.ucf.edu |
4f40417e1b3d5e7727b23349015224819e159c34 | d0efa2026b7ed22ff4f9aa76c27ae2474c30f26d | /test/test_payment_method_payment_schedules_request.py | 452cc8ec29e8473df8ec6a5a8e0ae80b14d7d5f7 | [] | no_license | begum-akbay/Python | 2075650e0ddbf1c51823ebd749742646bf221603 | fe8b47e29aae609b7510af2d21e53b8a575857d8 | refs/heads/master | 2023-03-28T00:11:00.997194 | 2021-03-25T16:38:17 | 2021-03-25T16:38:17 | 351,499,957 | 0 | 0 | null | 2021-03-25T16:38:17 | 2021-03-25T16:15:16 | Python | UTF-8 | Python | false | false | 1,270 | py | # coding: utf-8
"""
Payment Gateway API Specification.
The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501
The version of the OpenAPI document: 21.1.0.20210122.001
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.payment_method_payment_schedules_request import PaymentMethodPaymentSchedulesRequest # noqa: E501
from openapi_client.rest import ApiException
class TestPaymentMethodPaymentSchedulesRequest(unittest.TestCase):
    """PaymentMethodPaymentSchedulesRequest unit test stubs (generated skeleton)."""

    def setUp(self):
        # no fixtures required by the stub
        pass

    def tearDown(self):
        # nothing to clean up
        pass

    def testPaymentMethodPaymentSchedulesRequest(self):
        """Test PaymentMethodPaymentSchedulesRequest"""
        # FIXME: construct object with mandatory attributes with example values
        # model = openapi_client.models.payment_method_payment_schedules_request.PaymentMethodPaymentSchedulesRequest()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| [
"emargules@bluepay.com"
] | emargules@bluepay.com |
b4c6ebfb759f409bfa7ea91fb4c83aa0a5b7ec10 | dd5fc012431687233abbdfee9afcf7a2feefe45b | /LeetCode_024/024_Swap Nodes in Pairs_1.py | fd02bb7b9b99c61fae6b4c05437cec61901a6ab8 | [] | no_license | daodaoawaker/LeetCode | 42a95b49f56c50e06ffbc03ea2fe9b11c5b18e16 | 0a0b76d19460e92f6763337deb0517109169cf98 | refs/heads/master | 2023-04-26T09:57:56.807223 | 2021-05-15T15:55:05 | 2021-05-15T15:55:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | #!usr/bin/env python
#_*_ coding:utf-8 _*_
'''
class ListNode():
def __init__(self, x):
self.val = x
self.next = None
'''
class Solution():
    def swapPairs(self, head):
        """Swap every two adjacent nodes of a linked list and return the new head.

        NOTE(review): relies on a ListNode class supplied by the judge
        environment (see the commented-out definition above).
        """
        # dummy node ahead of the list so swapping the head needs no special case
        firstHead = ListNode(0)
        firstHead.next = head
        pre, cur = (firstHead, head)
        while(cur and cur.next):
            # predecessor now points at the second node of the pair
            pre.next = cur.next
            # first node skips past the pair
            cur.next = pre.next.next
            # second node links back to the first: the pair is swapped
            pre.next.next = cur
            # advance to the next pair (cur is now the later of the two)
            pre, cur = cur, cur.next
        return firstHead.next
| [
"471229015@qq.com"
] | 471229015@qq.com |
4509cb6755312965760e5aee3e06d33dde821f8a | 8dc40fcf3de568f8d1ba8f4e2fc4a53258bf45a6 | /duration.py | f79b0640d8e5db43d3d84f6c553631b87ea093aa | [] | no_license | hugoatease/lastfmerge | 981db1915bb19b7fb981be3c5e2a4ab6b6bd4813 | dbdad3171f5d6ce81ea57f4540d9c88118e02744 | refs/heads/master | 2020-04-06T04:56:44.332382 | 2012-02-24T20:50:27 | 2012-02-24T20:50:27 | 3,347,771 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,338 | py | import config, common
import json
def url(mbid = None, artist = None, name = None):
    """Build a Last.fm web-service request URL.

    With *mbid*: an album.getinfo request for that release.
    With *artist* and *name*: a track.getinfo request for that track.
    Returns None when neither form of lookup is possible.
    """
    prefix = 'http://ws.audioscrobbler.com/2.0/?method='
    if mbid is not None:
        return (prefix + 'album.getinfo&format=json&api_key='
                + config.lastfm['Key'] + '&mbid=' + mbid)
    if artist is not None and name is not None:
        return (prefix + 'track.getinfo&format=json&api_key='
                + config.lastfm['Key'] + '&artist=' + artist + '&track=' + name)
def parser(data, mbid = True):
    """Extract artist/name/duration records from a Last.fm JSON response.

    With mbid=True, *data* is an album.getinfo response: returns a list of
    {'Artist', 'Name', 'Duration'} dicts, one per album track.
    With mbid=False, *data* is a track.getinfo response: returns a single
    such dict. Returns None when an expected key is absent.
    """
    if mbid:
        try:
            album = data['album']
            album_artist = album['artist']
            return [
                {'Artist': album_artist,
                 'Name': entry['name'],
                 'Duration': int(entry['duration'])}
                for entry in album['tracks']['track']
            ]
        except KeyError:
            return None
    try:
        info = data['track']
        return {'Artist': info['artist']['name'],
                'Name': info['name'],
                'Duration': int(info['duration'])}
    except KeyError:
        return None
# interactive entry point: load the user's scrobble dump from disk (python 2)
username = raw_input('Last.fm username: ')
f = open(username + '.json', 'r')
data = json.loads(f.read())
f.close()
print 'Total scrobbles: ', str(len(data))

# collect the distinct MusicBrainz album ids referenced by the scrobbles
print 'Fetching unique MBIDs...',
mbids = list()
for track in data:
    mbid = track['Mbid']
    if mbids.count(mbid) == 0:
        mbids.append(mbid)
total = len(mbids)
print str(total)

print 'Getting durations, it might take a while...'
i = 0
while i <= (total - 1):
    print str(i+1) + ' / ' + str(total)
    try:
        # fetch the album info for this MBID and parse the track durations
        results = parser( common.jsonfetch( url(mbids[i]) ), mbid = True )
    except:
        # NOTE(review): bare except also hides programming errors; any
        # failure simply skips this MBID
        results = None
    if results != None:
        # copy each duration onto every scrobble matching artist + name
        for track in data:
            for td in results:
                if track['Artist'] == td['Artist'] and track['Name'] == td['Name']:
                    track['Duration'] = td['Duration']
                    print track['Artist'], track['Name'], track['Duration']
    i = i + 1

# mark scrobbles for which no duration could be found
missing = 0
for track in data:
    if track.has_key('Duration') == False:
        track['Duration'] = None
        missing = missing + 1
print "Missing track's durations: " + str(missing)

# write the augmented dump back in place
f = open(username+'.json', 'w')
f.write(json.dumps(data))
f.close()
"hugo@caille.tk"
] | hugo@caille.tk |
ee0b6d5c25cb2588f5ec6b542d364e388572b055 | 8e7aca30c00f569573a7121bb90e551c9166210c | /tk1.py | 0b124f2362ebd9b43ef308dd01eff7d228511858 | [] | no_license | Ntsikelelo-L/functions | c7deaefbbceac3cc7e09b0480ec7da0482af2a7b | d1aeacc9f123dbc1d124ae7e1d6bbfb1fc986fc9 | refs/heads/master | 2022-12-23T21:43:03.859502 | 2020-10-08T07:22:52 | 2020-10-08T07:22:52 | 302,262,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | #sudo apt-get install python3-tk
sudo a
| [
"landeuts3@gmail.com"
] | landeuts3@gmail.com |
77f189259e457de48cd00bed8652a12d95f0a37d | 2de81ff580f7f3f6be21295b073319e51e78c187 | /ver3/Web/login/models.py | 4d0e5461178d175c87509bbdbcdaa7e656a7f7ce | [] | no_license | HungSoma/hello | 2f3c8ac7b3acebec41f0d9636c33b3b0ac8d1e9a | a1d0f04af9cc3d219ec959ba4c5665530219b08a | refs/heads/master | 2021-10-07T23:06:47.056813 | 2021-10-01T10:09:03 | 2021-10-01T10:09:03 | 96,621,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import User
# Create your models here.
class UserProfile(models.Model):
    """Profile data attached one-to-one to a Django auth User.

    Instances are created automatically by the post_save handler
    registered below.
    """
    user = models.OneToOneField(User, related_name="user")
    # value/label pairs for the gender field
    Gender_choices = (
        ('M', 'Male'),
        ('F', 'Female'),
    )
    first_name = models.CharField(null = True, blank = True, default = '', max_length = 20)
    last_name = models.CharField(null=True, blank=True, default='', max_length=20)
    gender = models.CharField(blank = True, default = '', max_length = 2, choices = Gender_choices)
    birthday = models.DateField(null=True, blank=True)
    bio = models.TextField(default='', blank=True, max_length = 1000)
    #phone = models.CharField(null = True, blank = True, default = '', max_length = 12)
    country = models.CharField(max_length = 100,default='', blank = True)
    city = models.CharField(max_length = 50 ,default='', blank = True)
    organization = models.CharField(max_length = 100,default='', blank = True)
def create_profile(sender, **kwargs):
    """post_save signal handler: create a UserProfile for every new User."""
    user = kwargs["instance"]
    # only on initial creation, not on subsequent saves
    if kwargs["created"]:
        user_profile = UserProfile.objects.create(user = user)
        user_profile.save()
post_save.connect(create_profile, sender =User) | [
"hung.keima@gmail.com"
] | hung.keima@gmail.com |
dbeacd1bc8650becfac55bd687f13832bebe48a7 | 089f5c32679e62047f92a1cba3b92d64937fb73d | /News_Scraping/Demo/Demo/spiders/topeka/items.py | def7c3244c0e068cac1649524c024d291b7b4503 | [] | no_license | narendra1711/Machine_learning | 25fc60a2aa81d610433029b5a0971717aa98b062 | 2029a565e5d5e817a164e5e8941bcaa5f3e4f3d3 | refs/heads/master | 2022-10-27T21:38:16.674151 | 2018-07-13T16:05:09 | 2018-07-13T16:05:09 | 129,414,266 | 0 | 1 | null | 2022-10-11T00:07:30 | 2018-04-13T14:34:56 | Python | UTF-8 | Python | false | false | 337 | py | # Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class TopekaItem(Item):
    """Scrapy item holding one scraped news article."""
    # field declarations; semantics follow the field names
    # (see http://doc.scrapy.org/en/latest/topics/items.html)
    initial_url = Field()
    url =Field()
    publish_date = Field()
    scanned_date = Field()
    headline_name = Field()
    body = Field()
    date_found = Field()
| [
"33336096+narendra1711@users.noreply.github.com"
] | 33336096+narendra1711@users.noreply.github.com |
1721ee747927e7f8df3cac98ab5795c94416b761 | 7ef29c21c939aa7462befa7e5a8979a3a862a19d | /mininet/topologies/topo.py | 419f7cd0797b6a63d6021e8e010f44eac525f2b4 | [] | no_license | geddings/TARN | 743476889a42372e15970477e5a64c60b2260a96 | 6fe1301c1df4aa9b38c0c83e6d4d6109e4e98bac | refs/heads/master | 2022-04-29T04:15:17.270695 | 2018-02-16T15:08:07 | 2018-02-16T15:08:07 | 96,136,521 | 1 | 3 | null | 2022-04-22T07:23:04 | 2017-07-03T17:49:55 | Java | UTF-8 | Python | false | false | 1,874 | py | # import os
#
# import sys
# from mininext.cli import CLI
#
# sys.path.insert(0, os.path.abspath('..'))
# from mininext.topo import Topo as BaseTopo
# from nodes import Floodlight
# from net import MiniNExT
# import mininet.log as log
#
#
# class Topo(BaseTopo):
# """Extended topology to support BGP and session maintenance topologies
# for the EAGER project at Clemson University"""
#
# # TODO Consider adding bopts (BGP options) or something similar if useful
# def __init__(self, **opts):
# BaseTopo.__init__(self, **opts)
#
# def addController(self, name, **opts):
# return self.addNode(name, isController=True, **opts)
#
# def isController(self, n):
# '''Returns true if node is a controller.'''
# info = self.node_info[n]
# return info and info.get('isController', False)
#
# def controllers(self, sort=True):
# '''Return controllers.'''
# return [n for n in self.nodes(sort) if self.isController(n)]
#
# def hosts(self, sort=True):
# '''Return hosts.'''
# return [n for n in self.nodes(sort) if not self.isSwitch(n) and not self.isController(n)]
#
# # Add a group consisting of a controller, a switch, and a variable number of hosts
# def addIPRewriteGroup(self, name, controller=Floodlight, hosts=1, **opts):
# self.addController(name + '-c', controller=controller)
# self.addSwitch()
#
# # Add an autonomous system consisting of variable IP rewrite groups and a BGP router
# def addAutonomousSystem(self, name, **opts):
# pass
#
#
# if __name__ == '__main__':
# log.setLogLevel('info')
# mytopo = Topo()
# mytopo.addController('c0', controller=Floodlight)
# mytopo.addController('c1', controller=Floodlight)
# net = MiniNExT(topo=mytopo, build=True)
# net.start()
# CLI(net)
# net.stop()
| [
"cbarrin@g.clemson.edu"
] | cbarrin@g.clemson.edu |
8147523bcb0f515c279cdd116378042b0911fd7c | 56e469a1bfd29004fa258a54668dfbbc4459663d | /python3-nltk-tutorial/src/lesson2.py | eea468d14140f4c269abb2552dfb9c86ded6c8b6 | [] | no_license | wind86/learning | bfce4a6795b58b27d0148b878299cacfe96aa26f | 4449ba0eed0a8f803a2bb9fbd663faf43148f03a | refs/heads/master | 2020-04-05T23:28:40.082439 | 2017-11-04T11:36:40 | 2017-11-04T11:36:40 | 83,236,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | '''
Created on Apr 09, 2017
Stop words with NLTK
https://www.youtube.com/watch?v=w36-U-ccajM&index=2&list=PLQVvvaa0QuDf2JswnfiGkliBInZnIC4HL
@author: ubuntu
'''
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# Sample sentence to demonstrate stop-word filtering.
example_sent = "This is a sample sentence, showing off the stop words filtration."
# A set gives O(1) membership tests for the English stop-word list.
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(example_sent)
# Keep only tokens that are not stop words.  (The original built this list
# twice -- once via a comprehension whose result was immediately discarded,
# and again with an explicit loop; a single pass yields the same result.)
filtered_sentence = [w for w in word_tokens if w not in stop_words]
print(word_tokens)
print(filtered_sentence) | [
"wind86@meta.ua"
] | wind86@meta.ua |
b14af7cffc6ef7e61fd07f241da400470e0d2847 | 672fef1cd92f24cc13dbb651f60d7b1081468bed | /catkin_ws/build/kit_agv_teleop/catkin_generated/pkg.installspace.context.pc.py | b82d54ca750a09c83c061b72924977e025a63ceb | [] | no_license | Forrest-Z/DevelopAgv | 49eca36e0a4a714fb232100b6216f4801409aa56 | e7d0ac39f3964557d7f67f074ddba73e5c6f0d3a | refs/heads/master | 2022-12-14T12:41:30.309513 | 2020-09-07T14:21:16 | 2020-09-07T14:21:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context for the `kit_agv_teleop` package.
# NOTE: produced from catkin/cmake/template/pkg.context.pc.in -- do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "kit_agv_teleop"
PROJECT_SPACE_DIR = "/home/nhamtung/TungNV/DevelopAgv/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"nhamtung125@gmail.com"
] | nhamtung125@gmail.com |
cbcd1ca478b37e9ed80a3369621ea0e28027db98 | eab3f48a302bd07c7052f2e53625284264d0541d | /projectapp/views.py | a8501ea6330fe9f4328a9ed4bb770af7434d214b | [] | no_license | zeyytas/githubAPI-project | 08cadaddc8fc54cadf68179b21451690dec8e402 | 1bd1f0b3a9382b7e14a074b68629ab1235ca8e5c | refs/heads/master | 2020-09-12T20:07:40.814367 | 2019-11-19T10:25:36 | 2019-11-19T10:25:36 | 222,537,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,499 | py |
from django.shortcuts import render
import requests
from projectapp.models import Repositories, Language
def search(request):
    """Render the search page with repositories grouped by language.

    The template context maps '<language>_repositories' -> queryset of
    repositories configured for that language, plus the language list itself.
    """
    # flat=True yields the bare values instead of 1-tuples, replacing the
    # original map(lambda x: x[0], ...) unwrapping.
    languages = list(Language.objects.all().values_list('repo_language', flat=True))
    # One queryset per language, in the same order as `languages`.
    query = [Repositories.objects.filter(repository_language__repo_language=language)
             for language in languages]
    return render(request, 'search.html',
                  {'repositories': dict(zip([language + '_repositories' for language in languages], query)),
                   'languages': languages})
def __get_final_data(repositories):
    """Collect unassigned GitHub issues for each repository address.

    For every repo in `repositories`, and for each of its configured
    languages, pages through the GitHub issue search API (100 per page) and
    appends the matching issue dicts to final_data['items'], preceded by the
    repo address they belong to.
    """
    final_data = {'items': []}
    for repo in repositories:
        for repository in Repositories.objects.filter(repo_address=repo):
            # Build the `label:"..."` search qualifiers, skipping empty labels.
            labels_url = '+'.join(['label:"{}"'.format(x) for x in repository.repository_label.all() if x])
            if labels_url:
                labels_url = '+' + labels_url
            for language in repository.repository_language.all():
                index = 1
                while True:
                    url = 'http://api.github.com/search/issues?q=language:{language}+no:assignee+repo:{repo}' \
                          '{labels_url}&page={index}&per_page=100'.format(language=language, labels_url=labels_url,
                                                                         repo=repo, index=index)
                    try:
                        res = requests.get(url)
                        res = res.json()
                        if res['items']:
                            final_data['items'].append(repository.repo_address)
                            final_data['items'].extend(res['items'])
                        # NOTE(review): this pagination guard looks suspect --
                        # `index != total_count % 100` compares a page number with
                        # a remainder of the result count; verify against the
                        # GitHub search API paging rules before relying on it.
                        if res['total_count'] > 100 and index != (res['total_count'] % 100):
                            index += 1
                        else:
                            break
                    except AttributeError:
                        # NOTE(review): swallowing the error without breaking can
                        # loop forever if the same failure repeats -- confirm intent.
                        pass
    return final_data
def result(request):
    """Show issue search results for explicit repos or for a whole language.

    With ?repo=... query parameters, searches those repo addresses; otherwise
    falls back to every repository configured for the language given in ?l=....
    """
    if request.GET.getlist('repo'):
        final_data = __get_final_data(request.GET.getlist('repo'))
    else:
        lang = request.GET['l']
        # NOTE(review): __get_final_data filters by repo_address, but this
        # passes model instances rather than address strings -- verify.
        repositories = Repositories.objects.filter(repository_language__repo_language=lang)
        final_data = __get_final_data(repositories)
    if final_data['items']:
        return render(request, 'result.html', {'items': final_data['items'], 'message': 'You have some results'})
    return render(request, 'result.html', {'message': 'There is no match'})
| [
"zeyneptas@Zeynep-MacBook-Pro.local"
] | zeyneptas@Zeynep-MacBook-Pro.local |
ed0a7a587fa699bb3e21e4116d874fda8a2c2d5c | 3337e9150a743e0df2898528dd1e4dfac9730b25 | /artemis/fileman/persistent_print.py | 13b30ccc07235563122878b4675f41b117e62124 | [] | no_license | ml-lab/artemis | f3353cb462b06d64e1007010db94667b4703c90e | b4f5f627f1798aff90b845d70fd582142a9f76c8 | refs/heads/master | 2021-01-22T06:49:41.346341 | 2017-09-01T15:31:13 | 2017-09-01T15:31:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,171 | py | import sys
from artemis.fileman.local_dir import get_artemis_data_path
from artemis.general.display import CaptureStdOut
__author__ = 'peter'
"""
Save Print statements:
Useful in ipython notebooks where you lose output when printing to the browser.
On advice from:
http://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python
** Note this is no longer being used. Possibly delete
"""
_ORIGINAL_STDOUT = sys.stdout
_ORIGINAL_STDERR = sys.stderr
def capture_print(log_file_path = 'logs/dump/%T-log.txt', print_to_console=True):
    """
    Redirect stdout and stderr into a CaptureStdOut logger that tees to a file.

    :param log_file_path: Path of file to print to. If path does not start with a "/", it will
        be relative to the data directory. You can use placeholders such as %T, %R, ... in the
        path name (see format filename)
    :param print_to_console: Also continue printing to console.
    :return: The absolute path to the log file.
    """
    local_log_file_path = get_artemis_data_path(log_file_path)
    logger = CaptureStdOut(log_file_path=local_log_file_path, print_to_console=print_to_console)
    # Enter the context manager manually; stop_capturing_print() is the
    # matching teardown that restores the original streams.
    logger.__enter__()
    sys.stdout = logger
    sys.stderr = logger
    return local_log_file_path
def stop_capturing_print():
    """Undo capture_print() by restoring the original stdout/stderr streams."""
    sys.stdout, sys.stderr = _ORIGINAL_STDOUT, _ORIGINAL_STDERR
def new_log_file(log_file_path = 'dump/%T-log', print_to_console = False):
    """
    Start print capture with notebook-friendly defaults: do not echo to the
    console, just log to a (time-stamped) file that can be linked to later.

    :param log_file_path: Path to the log file - %T is replaced with time
    :param print_to_console: True to continue printing to console
    :return: The absolute path to the log file.
    """
    path = capture_print(log_file_path=log_file_path, print_to_console=print_to_console)
    return path
def read_print():
    """Read back the contents of the current (captured) stdout stream."""
    stream = sys.stdout
    return stream.read()
def reprint():
    """Re-print everything captured so far to the real (original) stdout."""
    assert isinstance(sys.stdout, CaptureStdOut), "Can't call reprint unless you've turned on capture_print"
    # Need to avoid exponentially growing prints...
    current_stdout = sys.stdout
    sys.stdout = _ORIGINAL_STDOUT
    try:
        # Parenthesized so this parses under both Python 2 and Python 3 (the
        # original bare `print read_print()` statement is a SyntaxError on Py3).
        print(read_print())
    finally:
        # Restore the capturing stream even if the print above raises.
        sys.stdout = current_stdout
| [
"peter.ed.oconnor@gmail.com"
] | peter.ed.oconnor@gmail.com |
fc8f44aeaeeedfd091f2438d3dc903bf2f377586 | 6c6fd5c91a92fa9cb82e2682ff61f4fb2013d82e | /test_YY.py | 9c4bcd4d38749386f2b12a02c05304935b13dac9 | [] | no_license | carstimon/pymaniclust | c2c472b9b18c604babb4528cd989721c4528cd1b | c055757c483b23edf5f0f8a64ed116d7115799bb | refs/heads/master | 2021-01-23T13:29:09.529636 | 2017-09-06T22:47:48 | 2017-09-06T22:47:48 | 102,664,575 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,205 | py | import pymanopt as mo
from pymanopt.manifolds import Stiefel
from pymanopt.solvers import SteepestDescent
from sklearn.cluster import KMeans
import numpy as np
import numpy.linalg as la
import numpy.random as nrand
import scipy.linalg as sla
import scipy.sparse.linalg as spla
import autograd.numpy as anp
import Y_mani
from Cluster_Pblm import Cluster_Pblm
import itertools
import collections
from random_creators import *
DATAFOLDER = "data/"
###
### Functions for generating random problems
###
def stoch_ball_pblm(nballs, npts, dist_cent):
    """
    Creates a basic test problem.
    k = nballs.
    There are k balls in \real^k with equal spacing. From each ball we draw an equal number of points (npts).
    The balls have radius one, and lie on the simplex with edge length dist_cent.
    """
    # Equal-sized balls are exactly the special case of the uneven generator,
    # so delegate instead of duplicating the center/point construction.
    return stoch_ball_pblm_uneven([npts] * nballs, dist_cent)
def stoch_ball_pblm_uneven(npts_list, dist_cent):
    """
    Similar to stoch_ball_pblm,
    but is able to draw different numbers of points from each ball.
    npts_list is the list of number of points, the number of balls is the length of this list.
    dist_cent is the distance between the centers.
    """
    nballs = len(npts_list)
    # Scaling the identity by dist_cent/sqrt(2) puts every pair of centers
    # exactly dist_cent apart (vertices of a regular simplex).
    cs = np.eye(nballs)*dist_cent/np.sqrt(2) #Centers on simplex with distance dist_cent from eachother.
    P = stoch_ball([1]*nballs, cs, npts_list)
    return Cluster_Pblm(P,nballs)
###
### Functions for generating solutions and saving.
###
def gen_local_mins(pblm, a, b, N, eps = None):
    """
    Runs the minimization for the Cluster_Pblm pblm N times, for a random point and coefficients (a,b).
    Does it N times and returns the minimizers.
    If eps is not None, prunes the minimizers with tolerance eps.
    Returns the triplets (Y_minimizing, tr(Y), neg(Y))
    """
    Ys = []
    for j in range(N):
        Y,_ = pblm.run_minimization(a, b)
        Ys.append((Y,pblm.tr(Y), pblm.neg(Y)))
    if eps is not None:
        # prune_mins mutates Ys in place (sorts by tr, drops near-duplicates).
        prune_mins(Ys, eps)
    return Ys
def gen_many_mins(pblm, As, Bs, N, suffix=None, eps=.01):
    """
    Generates local minimizers for the Cluster Problem pblm.
    For each (a,b) pair runs it N times.
    If suffix is not none, the data is saved to file.
    Also saves the problems P and k values.
    Also prints the current (a,b) pair under consideration, and how many minimizers were generated.
    """
    A_list, B_list, trs, negs = [],[],[],[]
    # `data` aliases the four lists above, so the appends in the loop below
    # are visible when saving at the end.
    data = [A_list, B_list, trs, negs, pblm.P, pblm.k]
    datanames = ["As","Bs","trs","negs", "P", "k"]
    for (a,b) in zip(As, Bs):
        print("(a,b): " + str((a,b)))
        Ys = gen_local_mins(pblm, a, b, N, eps=eps)
        print("minimizers generated: " + str(len(Ys)))
        for Y, tr, neg in Ys:
            A_list.append(a)
            B_list.append(b)
            trs.append(tr)
            negs.append(neg)
    if suffix is not None:
        for datum, name in zip(data, datanames):
            np.save(DATAFOLDER + name + "_" + suffix, datum)
    return A_list, B_list, trs, negs
def gen_path(pblm, As, Bs, suffix):
    """
    Generates a path from the homotopy method for the Cluster_Pblm pblm,
    with (a,b) pairs given by As and Bs
    Saves to files given by suffix.
    Also saves the problem's P and k values.
    """
    Ys = pblm.do_path(As, Bs, save=True)[1]
    datanames = ["As_path", "Bs_path", "trs_path", "negs_path", "P", "k"]
    trs = [pblm.tr(Y) for Y in Ys]
    negs = [pblm.neg(Y) for Y in Ys]
    # Prepend the homotopy's starting coefficients (a, b) = (1, 0).
    B_path = np.hstack([0,Bs])
    A_path = np.hstack([1,As])
    data = [A_path, B_path, trs, negs, pblm.P, pblm.k]
    for datum, name in zip(data, datanames):
        np.save(DATAFOLDER + name + "_" + suffix, datum)
def gen_lloyd_comp(pblm_generator, As, Bs, N, suffix=None, num_lloyd_runs = 5):
    """Compare the homotopy path's objective value against several Lloyd runs.

    Returns (homotopy_value, lloyd_values).
    NOTE(review): the N and suffix parameters are currently unused here.
    """
    pblm = pblm_generator()
    lloyd_values = [pblm.tr(pblm.run_lloyd()) for j in range(num_lloyd_runs)]
    Y_from_path = pblm.M.round_clustering(pblm.do_path(As, Bs)[0])
    homotopy_value = pblm.tr(Y_from_path)
    return homotopy_value, lloyd_values
###
### Functions for loading data generated to files
###
def load_mins(suffix):
    """
    Load the minimizers previously saved by gen_many_mins under `suffix`.
    Yields, in order: As, Bs, trs, negs -- the (a, b) coefficients and the
    tr/neg values for each saved minimizer.
    """
    names = ("As", "Bs", "trs", "negs")
    return (np.load(DATAFOLDER + name + "_" + suffix + ".npy") for name in names)
def load_path(suffix):
    """
    Load the homotopy path previously saved by gen_path under `suffix`.
    Yields, in order: As, Bs, trs, negs -- the (a, b) coefficients and the
    tr/neg values for each point on the path.
    """
    names = ("As_path", "Bs_path", "trs_path", "negs_path")
    return (np.load(DATAFOLDER + name + "_" + suffix + ".npy") for name in names)
def load_pblm(suffix):
    """
    Rebuild a Cluster_Pblm from the P matrix and k value saved under `suffix`.
    """
    P, k = (np.load(DATAFOLDER + name + "_" + suffix + ".npy") for name in ("P", "k"))
    return Cluster_Pblm(P, k)
###
### Functions for comparing matrices.
###
def compare_permuting(A,B):
    """
    Finds the permutation of B's columns so that ||A-B||_{\infty} is minimized.
    Returns the value of ||A-B||_{\infty}, as well as B permuted in the best way.

    Note: brute force over all column permutations -- O(k!) in the column count.
    """
    best_diff = np.inf
    best_permed = None
    ncols = np.shape(A)[1]
    for perm in itertools.permutations(range(ncols)):
        # Same column reordering as col_permute: index by the inverse
        # permutation (argsort of perm).
        Bpermed = B[:, np.argsort(list(perm))]
        # Sup-norm of the difference is just the largest absolute entry;
        # equivalent to the original nested la.norm(..., inf) calls.
        diff = np.max(np.abs(A - Bpermed))
        if diff < best_diff:
            best_diff = diff
            best_permed = Bpermed
    return best_diff, best_permed
def col_permute(A, perm):
    """Return A with column i moved to position perm[i] (inverse-permutation indexing)."""
    return A[:, np.argsort(list(perm))]
def prune_mins(lst, eps):
    """
    Given a list of tuples
    (matrix, tr_value, neg_value)
    sorts the list (in place) by tr_value
    and then prunes matrices which are less than eps away in the sup norm, up to column permutation.
    """
    lst.sort(key = lambda x: x[1]) #Sort by cost_trace.
    i = 0
    # Walk the sorted list; for each survivor, pop every immediate successor
    # that matches it within eps (up to a column permutation), then advance.
    while i < len(lst)-1:
        while (i < len(lst)-1
               and compare_permuting(lst[i][0], lst[i+1][0])[0] < eps):
            lst.pop(i+1)
        i+=1
def comps(lst):
    """
    The idea of this was to just make a list of differences when permuted the best way.
    It is not tested or used by anything else currently, so it should be checked before use.
    """
    # Sorts in place, then records for each adjacent pair the best-permutation
    # distance and the permuted matrix itself.
    lst.sort(key = lambda x: x[1]) #Sort by cost_trace.
    diffs = []
    permeds = [lst[0][0]]
    for i in range(len(lst)-1):
        diff, Bpermed = compare_permuting(lst[i][0], lst[i+1][0])
        diffs.append(diff)
        permeds.append(Bpermed)
    return diffs, permeds
class Pblm_Test:
    """
    Hold some information about a problem
    and has the ability to save it.
    """
    def __init__(self, pblm):
        self.pblm = pblm
        #a path is a list of pairs (ts, Y).
        self.paths = []
        #Dict taking t's and giving a list of local mins we've found at that t-level.
        self.local_mins_found = []
    def ab_map(self, t):
        # Map a homotopy parameter t in [0, 1] to the (a, b) coefficient pair.
        return (1-t), self.pblm.Dsize*t
    def run_path(self, tstep):
        # Follow the homotopy from t=0 to t=1 (sqrt-spaced steps) and record it.
        ts = np.arange(tstep, 1, tstep)**(.5)
        ts_full = np.hstack([0, ts, 1])
        As, Bs = self.ab_map(np.arange(tstep, 1, tstep))
        _, Ys = self.pblm.do_path(As, Bs, smart_start=True, save=True)
        self.paths.append((ts_full, Ys))
    def initiate_splitter(self):
        # Seed the bisection search with the two endpoint minimizers (t=0, t=1).
        Y0,_ = self.pblm.run_minimization(1, 0)
        LM0 = Local_Min(0, Y0, self.pblm)
        Y1,_ = self.pblm.run_minimization(0, 1, Y0 = Y0)
        LM1 = Local_Min(1, Y1, self.pblm)
        LM0.add_conn(LM1)
        self.local_mins_found.append(LM0)
        self.local_mins_found.append(LM1)
    def add_LM(self, LM):
        # Deduplicate: return the already-known equivalent minimum if present.
        if LM in self.local_mins_found:
            i = self.local_mins_found.index(LM)
            return self.local_mins_found[i]
        else:
            self.local_mins_found.append(LM)
            return LM
    def split_min(self,LM):
        # Bisect between LM and its nearest connection: minimize at the
        # midpoint, then re-minimize at the far end to detect new minima.
        LM_next = LM.closest_conn()
        t_next = LM_next.t
        a_next, b_next = self.ab_map(t_next)
        t_new = (LM.t + t_next)/2
        a_new, b_new = self.ab_map(t_new)
        Y_new,_ = self.pblm.run_minimization(a_new, b_new, Y0 = LM.Y)
        LM_new = Local_Min(t_new, Y_new, self.pblm)
        LM_new = self.add_LM(LM_new)
        LM.add_conn(LM_new)
        Y_new2, _ = self.pblm.run_minimization(a_next, b_next, Y0 = Y_new)
        LM_new2 = Local_Min(t_next, Y_new2, self.pblm)
        if LM_new2 != LM_next:
            print("New LM")
            LM_new2 = self.add_LM(LM_new2)
            LM_new.add_conn(LM_new2)
    def print_LMS(self):
        # Print an indexed listing of every local minimum found so far.
        num = len(self.local_mins_found)
        for (j, LM) in zip(range(num), self.local_mins_found):
            print(str(j) + ": " + str(LM))
    def plt_LMS(self, fig):
        # Scatter-plot (t, tr) for every local minimum found so far.
        ts = [LM.t for LM in self.local_mins_found]
        trs = [LM.tr for LM in self.local_mins_found]
        fig.clear()
        ax = fig.add_subplot(111)
        ax.scatter(ts, trs)
class Local_Min:
    """
    Holds local minimizers
    """
    def __init__(self, t, Y, pblm):
        self.eps = .1  # tolerance used by __eq__ when comparing minimizers
        self.tr = pblm.tr(Y)
        self.neg = pblm.neg(Y)
        self.Y = Y
        self.t = t
        #List of Local_Mins reached from this one.
        self.conns = []
    def add_conn(self, other):
        self.conns.append(other)
    def closest_conn(self):
        # Connection with the smallest homotopy parameter t.
        return min(self.conns, key= lambda LM: LM.t)
    def hop_dist(self):
        # Sup-norm distance (up to column permutation) to the closest connection.
        return compare_permuting(self.Y, self.closest_conn().Y)[0]
    def __eq__(self, other):
        # Equal when at the same t and the minimizers match within eps,
        # up to a column permutation.
        return (self.t == other.t
                and compare_permuting(self.Y, other.Y)[0] < self.eps)
    def __str__(self):
        return "Local min at " + str(self.t) + " with tr " + str(self.tr)
    __repr__ = __str__
| [
"carstimon@gmail.com"
] | carstimon@gmail.com |
23bd2d54fbfbf65b24fbb8aa996758ebdc57fc2a | 0447ad0db48e5fe23105e2704fdc6cebe343b2ce | /authapp/views.py | 0cfc95dbda9db1ca541bf29594ec914563a6a157 | [] | no_license | Telwen/vk_authapp | 8dd33ce2b00430a0563677d37d8657dc9ce9459e | 2eaf8cb9c63e9b1c1ec69b11948801c2cd990e40 | refs/heads/master | 2020-03-23T04:02:22.179254 | 2018-07-15T22:17:26 | 2018-07-15T22:17:26 | 141,062,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | from django.shortcuts import render
# Create your views here.
def index(request):
    """Render the authapp landing page."""
    template_name = 'authapp/index.html'
    return render(request, template_name)
def login(request):
    # TODO: not implemented yet -- currently a stub that returns None
    # (Django will raise if this view is ever routed to).
    pass
| [
"noreply@github.com"
] | Telwen.noreply@github.com |
1ee17c4b8cf0971fdfc7594364d884769edb8db1 | 8805fadcd91cbd2242cc6af21dd0d5cabaf42625 | /tests/test_common.py | a3640ab72f728c72b8a8f5c43e5cff12c92a00c7 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | weiplanet/refactor | c54605d0e980e199c90215c5cb8238617c622274 | 1178e9faee21227b5f73f8815724cf9bae048200 | refs/heads/master | 2023-07-03T00:49:12.905498 | 2021-08-14T20:22:54 | 2021-08-14T20:22:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,489 | py | import ast
import textwrap
import pytest
from refactor.common import (
Singleton,
apply_condition,
find_closest,
find_indent,
has_positions,
is_contextful,
is_truthy,
negate,
pascal_to_snake,
position_for,
)
def test_negate():
    # Negating a bare name expression should wrap it in `not`.
    module = ast.parse("foo")
    module.body[0].value = negate(module.body[0].value)
    assert ast.unparse(module) == "not foo"
# apply_condition wraps the expression in `not` only when the condition is False.
@pytest.mark.parametrize(
    "condition, expected_source", [(True, "foo"), (False, "not foo")]
)
def test_apply_condition(condition, expected_source):
    source = ast.parse("foo")
    source.body[0].value = apply_condition(condition, source.body[0].value)
    assert ast.unparse(source) == expected_source
# "Positive" comparison operators map to True, their negations to False,
# and ordering comparisons (no truthiness notion) to None.
@pytest.mark.parametrize(
    "operator, expected",
    [
        (ast.Eq(), True),
        (ast.NotEq(), False),
        (ast.In(), True),
        (ast.NotIn(), False),
        (ast.Is(), True),
        (ast.IsNot(), False),
        (ast.Lt(), None),
        (ast.Gt(), None),
        (ast.GtE(), None),
        (ast.LtE(), None),
    ],
)
def test_is_truthy(operator, expected):
    assert is_truthy(operator) is expected
# Nodes that open a new symbol context (module, functions, class, lambda)
# are contextful; plain expression/statement nodes are not.
@pytest.mark.parametrize(
    "node, expected",
    [
        (ast.Module(), True),
        (ast.FunctionDef(), True),
        (ast.AsyncFunctionDef(), True),
        (ast.ClassDef(), True),
        (ast.Lambda(), True),
        (ast.BinOp(), False),
        (ast.Constant(), False),
        (ast.If(), False),
    ],
)
def test_is_contextful(node, expected):
    assert is_contextful(node) is expected
# PascalCase -> snake_case conversion, including degenerate/identity cases.
@pytest.mark.parametrize(
    "original, expected",
    [
        (str(), str()),
        ("rule", "rule"),
        ("Rule", "rule"),
        ("SomeRule", "some_rule"),
        ("LiteralToConstantRule", "literal_to_constant_rule"),
    ],
)
def test_pascal_to_snake(original, expected):
    assert pascal_to_snake(original) == expected
# find_indent splits a line into (leading whitespace, remainder).
@pytest.mark.parametrize(
    "original, indent, prefix",
    [
        (str(), str(), str()),
        ("  ", "  ", str()),
        ("x", "", "x"),
        (" x", " ", "x"),
        ("  x", "  ", "x"),
        ("   x", "   ", "x"),
        ("   ", "   ", ""),
        ("x  ", "", "x  "),
        (" x ", " ", "x "),
    ],
)
def test_find_indent(original, indent, prefix):
    assert find_indent(original) == (indent, prefix)
def test_find_closest():
    # find_closest should return the candidate node positioned nearest to
    # the target; here the `3` sits right next to the trailing `2`.
    source = textwrap.dedent(
        """\
    def func():
        if a > 5:
            return 5 + 3 + 2
        elif b > 10:
            return 1 + 3 + 5 + 7
    """
    )
    tree = ast.parse(source)
    # right_node is the final `2` of `5 + 3 + 2`.
    right_node = tree.body[0].body[0].body[0].value.right
    target_nodes = [
        node
        for node in ast.walk(tree)
        if has_positions(node)
        if node is not right_node
    ]
    closest_node = find_closest(right_node, *target_nodes)
    assert ast.unparse(closest_node) == "3"
def test_get_positions():
    source = textwrap.dedent(
        """\
    def func():
        if a > 5:
            return 5 + 3 + 25
        elif b > 10:
            return 1 + 3 + 5 + 7
    """
    )
    tree = ast.parse(source)
    right_node = tree.body[0].body[0].body[0].value.right
    # (lineno, col_offset, end_lineno, end_col_offset) of the `25` literal.
    assert position_for(right_node) == (3, 23, 3, 25)
def test_singleton():
    # Singleton subclasses must return the same instance for equal
    # constructor arguments and distinct instances otherwise.
    from dataclasses import dataclass

    @dataclass
    class Point(Singleton):
        x: int
        y: int
        z: int

    first = Point(1, 2, 3)
    duplicate = Point(1, 2, 3)
    different = Point(0, 1, 2)
    assert first is duplicate
    assert first is not different
    assert duplicate is not different
| [
"batuhan@python.org"
] | batuhan@python.org |
3e5fbe86c5d9342c5ec97bf0d05e8f07a6a02bd9 | cf37f7632c4e93dd3061353d8af422002868725a | /vagrant/puppy/puppies.py | 43ad9c0cb06cb68ebaeebcc2a95edc6dfe04b837 | [] | no_license | tomca32/fullstack-nanodegree-vm | eef6b8bb9f1d089160e333521c1d2ca25a1d4135 | 7aeea854b4ae2763725b2db5e3400b014d8863ed | refs/heads/master | 2020-04-08T21:29:34.708980 | 2016-01-12T23:13:27 | 2016-01-12T23:13:27 | 41,940,649 | 0 | 0 | null | 2015-09-04T22:35:32 | 2015-09-04T22:35:30 | null | UTF-8 | Python | false | false | 996 | py | import sys
from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class Shelter(Base):
    """An animal shelter; Puppy rows reference it via shelter_id."""
    __tablename__ = 'shelters'
    id = Column(Integer, primary_key = True)
    name = Column(String(80), nullable = False)
    address = Column(String(250))
    city = Column(String(250))
    state = Column(String(80))
    zipCode = Column(String(5))
    website = Column(String(150))
class Puppy(Base):
    """A puppy, optionally housed in a Shelter (see shelter_id)."""
    __tablename__ = 'puppies'
    id = Column(Integer, primary_key = True)
    name = Column(String(80), nullable = False)
    dateOfBirth = Column(Date)
    breed = Column(String(80))
    gender = Column(String(1), nullable = False)  # single-character code -- presumably 'M'/'F'; confirm
    weight = Column(Float)
    picture = Column(String(250))
    shelter_id = Column(Integer, ForeignKey('shelters.id'))
    shelter = relationship(Shelter)
engine = create_engine('sqlite:///puppyshelter.db')
Base.metadata.create_all(engine) | [
"tomca32@gmail.com"
] | tomca32@gmail.com |
cd1a4eb1bfcb7c51d5622395150e22b21b9c8eb4 | 785a8702e8be6baa0d34ec812c7b92b281fcc78b | /data.py | 3baa51ca7fea185cc1df80678c2d541b3e02f460 | [] | no_license | kotov-nu/tours | 12e7155a4236bb957167d772f4316f2d50926c4e | 3144a1dcbbe628f2514206b6845e6aa2efbd3941 | refs/heads/master | 2021-07-07T14:14:05.668248 | 2020-01-08T15:34:45 | 2020-01-08T15:34:45 | 230,787,845 | 0 | 1 | null | 2021-03-20T02:31:08 | 2019-12-29T18:25:19 | Python | UTF-8 | Python | false | false | 15,183 | py | title = "Stepik Travel"
# Landing-page subtitle and hero description.
subtitle = "Для тех, кого отвлекают дома"
description = "Лучшие направления, где никто не будет вам мешать сидеть на берегу и изучать программирование, дизайн, разработку игр и управление продуктами"
# Departure city codes -> human-readable labels (match the tours' "departure" keys).
departures = {"msk":"Из Москвы","spb":"Из Петербурга","nsk":"Из Новосибирска","ekb":"Из Екатеринбурга","kazan":"Из Казани"}
tours = {
1: {
"title": "Marina Lake Hotel & Spa",
"description": "Отель выглядит уютно. Он был построен из красного соснового дерева и украшен синими камнями. Высокие округлые окна добавляют общий стиль дома и были добавлены в дом в довольно симметричном образце.",
"departure": "nsk",
"picture": "https://images.unsplash.com/photo-1551882547-ff40c63fe5fa?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=800&q=60",
"price": 62000,
"stars": "4",
"country": "Куба",
"nights": 6,
"date": "2 марта",
},
2: {
"title": "Baroque Hotel",
"description": "Здание отеля имеет форму короткой буквы U. Два расширения связаны стеклянными нависающими панелями. Второй этаж такого же размера, как и первый, который был построен точно над полом под ним. Этот этаж имеет совершенно другой стиль, чем этаж ниже.",
"departure": "ekb",
"picture": "https://images.unsplash.com/photo-1445019980597-93fa8acb246c?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=800&q=60",
"price": 85000,
"stars": "5",
"country": "Вьетнам",
"nights": 8,
"date": "12 января",
},
3: {
"title": "Voyager Resort",
"description": "Снаружи отель выглядит красиво и традиционно. Он был построен с белыми камнями и имеет еловые деревянные украшения. Высокие, большие окна добавляют к общему стилю дома и были добавлены в дом в основном симметричным способом.",
"departure": "nsk",
"picture": "https://images.unsplash.com/photo-1569660072562-48a035e65c30?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=800&q=60",
"price": 63000,
"stars": "3",
"country": "Пакистан",
"nights": 11,
"date": "7 февраля",
},
4: {
"title": "Orbit Hotel",
"description": "Каждый домик оборудован средней кухней и одной небольшой ванной комнатой, в нем также есть уютная гостиная, две спальни, скромная столовая и большой подвал. Небольшие треугольные окна добавляют к общему стилю дома и были добавлены в дом в основном симметричным способом.",
"departure": "msk",
"picture": "https://images.unsplash.com/photo-1520250497591-112f2f40a3f4?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=800&q=60",
"price": 62000,
"stars": "4",
"country": "Индия",
"nights": 9,
"date": "22 января",
},
5: {
"title": "Atlantis Cabin Hotel",
"description": "Этот дом среднего размера имеет футуристический вид и находится в среднем состоянии. Интерьер выполнен в насыщенных тонах. Двор небольшой и выглядит очень формально. Кроме того, странные огни были замечены движущимися в доме ночью.",
"departure": "msk",
"picture": "https://images.unsplash.com/photo-1566073771259-6a8506099945?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=800&q=60",
"price": 68000,
"stars": "4",
"country": "Доминикана",
"nights": 8,
"date": "18 января",
},
6: {
"title": "Light Renaissance Hotel",
"description": "Этот крошечный дом выглядит довольно современно и находится в ужасном состоянии. Интерьер выполнен в цветах, которые напоминают вам о тропическом лесу. Двор небольшой и заросший дикими растениями. Кроме того, это было однажды показано в телесериале, демонстрирующем необычно украшенные дома.",
"departure": "spb",
"picture": "https://images.unsplash.com/photo-1571896349842-33c89424de2d?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=800&q=60",
"price": 53000,
"stars": "3",
"country": "Пакистан",
"nights": 13,
"date": "15 февраля",
},
7: {
"title": "King's Majesty Hotel",
"description": "Этот дом средних размеров выглядит немного старомодно и находится в среднем состоянии. Интерьер выполнен в цветах, которые напоминают о весеннем цветнике. Двор среднего размера и напоминает луг. Кроме того, он был построен над остатками дома, который был разрушен в результате пожара.",
"departure": "ekb",
"picture": "https://images.unsplash.com/photo-1468824357306-a439d58ccb1c?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=800&q=60",
"price": 72000,
"stars": "5",
"country": "Мексика",
"nights": 9,
"date": "22 января",
},
8: {
"title": "Crown Hotel",
"description": "Этот огромный дом почти выглядит инопланетянином и находится в среднем состоянии. Интерьер выполнен в цветах, напоминающих апельсиновое дерево. Двор среднего размера и напоминает луг. Кроме того, это место печально известного убийства.",
"departure": "kazan",
"picture": "https://images.unsplash.com/photo-1549109786-eb80da56e693?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=800&q=60",
"price": 44000,
"stars": "4",
"country": "Тайланд",
"nights": 7,
"date": "3 февраля",
},
9: {
"title": "Seascape Resort",
"description": "Этот большой дом имеет сказочный вид и находится в отличном состоянии. Интерьер выполнен в ярких цветах. Двор маленький и аккуратно подстрижен. На заднем дворе есть большой участок недавно созданной земли, а дом имеет большой решетчатый забор через него. На заднем дворе живут различные животные. Многие владельцы приложили согласованные усилия для поддержания этой собственности.",
"departure": "nsk",
"picture": "https://images.unsplash.com/photo-1570214476695-19bd467e6f7a?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=800&q=60",
"price": 39000,
"stars": "3",
"country": "Индия",
"nights": 10,
"date": "1 февраля",
},
10: {
"title": "Rose Sanctum Hotel",
"description": "Снаружи этот дом выглядит старым, но чудесным. Он был построен из желтого соснового дерева и украшен белым кирпичом. Короткие, широкие окна пропускают много света и были добавлены в дом очень симметричным способом.",
"departure": "msk",
"picture": "https://images.unsplash.com/photo-1560200353-ce0a76b1d438?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=800&q=60",
"price": 52000,
"stars": "4",
"country": "Куба",
"nights": 10,
"date": "30 января",
},
11: {
"title": "Viridian Obelisk Hotel & Spa",
"description": "В доме очень хороший двор с большими камнями, похожими на озеро. В задней части дома окна просторные, с большими окнами, они светлее, чтобы улучшить впечатление. Снаружи есть пять маленьких деревьев. Двор в очень хорошем состоянии и очень живописный. Есть пруд для развлечения",
"departure": "spb",
"picture": "https://images.unsplash.com/photo-1477120128765-a0528148fed2?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=800&q=60",
"price": 68000,
"stars": "5",
"country": "Индия",
"nights": 9,
"date": "1 марта",
},
12: {
"title": "Saffron Tundra Hotel & Spa",
"description": "Дом оборудован огромной кухней и одной современной ванной комнатой, а также имеет огромную гостиную, две спальни, небольшую столовую, гостиную и скромную кладовую. Дом чистый, хорошо построенный и в хорошем состоянии, но, к сожалению, кровати сгорели в мае этого года и, к сожалению, все еще нуждаются в ремонте. Возможно, понадобится целая команда, чтобы заменить старую медную топку.",
"departure": "kazan",
"picture": "https://images.unsplash.com/photo-1440151050977-247552660a3b?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=800&q=60",
"price": 72000,
"stars": "4",
"country": "Мексика",
"nights": 12,
"date": "17 февраля",
},
13: {
"title": "Traveller Resort",
"description": "Снаружи этот дом выглядит очень элегантно. Он был построен из коричневого кирпича и имеет коричневые кирпичные украшения. Высокие, большие окна добавляют к общему стилю дома и были добавлены к дому в довольно асимметричном образце. Крыша высокая и наклонена в одну сторону и покрыта коричневой черепицей. Один большой дымоход высовывает центр крыши. На крыше нет окон. Сам дом окружен великолепным садом с виноградными лозами, пагодой, прудом и множеством разных цветов.",
"departure": "ekb",
"picture": "https://images.unsplash.com/photo-1553653924-39b70295f8da?ixlib=rb-1.2.1&auto=format&fit=crop&w=800&q=60",
"price": 49000,
"stars": "3",
"country": "Куба",
"nights": 8,
"date": "26 января",
},
14: {
"title": "History Hotel & Spa",
"description": "Крыша высокая, треугольная, многослойная, покрыта пшеничной соломой. Две большие трубы находятся по обе стороны от дома. Многие меньшие окна пропускают много света в комнаты под крышей.Сам дом окружен асфальтированной землей, с местом для еды и отдыха на открытом воздухе и различными горшечными растениями.",
"departure": "kazan",
"picture": "https://images.unsplash.com/photo-1509600110300-21b9d5fedeb7?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=800&q=60",
"price": 91000,
"stars": "5",
"country": "Вьетнам",
"nights": 9,
"date": "3 февраля",
},
15: {
"title": "Riverside Lagoon Hotel & Spa",
"description": "Здание имеет форму круга. Дом частично окружен деревянными нависающими панелями с двух сторон. Второй этаж меньше первого, что позволило создать несколько балконов по бокам дома. Этот этаж следует тому же стилю, что и этаж ниже.",
"departure": "spb",
"picture": "https://images.unsplash.com/photo-1568084680786-a84f91d1153c?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=800&q=60",
"price": 82000,
"stars": "4",
"country": "Доминикана",
"nights": 8,
"date": "5 февраля",
},
16: {
"title": "History Hotel & Spa",
"description": "Крыша высокая, треугольная, многослойная, покрыта пшеничной соломой. Две большие трубы находятся по обе стороны от дома. Многие меньшие окна пропускают много света в комнаты под крышей.Сам дом окружен асфальтированной землей, с местом для еды и отдыха на открытом воздухе и различными горшечными растениями.",
"departure": "spb",
"picture": "https://images.unsplash.com/photo-1564056095795-4d63b6463dbf?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=800&q=60",
"price": 74000,
"stars": "5",
"country": "Вьетнам",
"nights": 12,
"date": "24 января",
}
} | [
"80386.sl@gmail.com"
] | 80386.sl@gmail.com |
30bc24f4f2b7cd631dbf2c677a8e0f0d88c6e9f6 | ccda4776cb55607536eef473e31b1999df4e639c | /culinaryblog/posts/views.py | c2c9b31e8b90856b5d2abd1a088cd5325a8a7e74 | [] | no_license | EwaGrela/culinaryblog | 8168b9edc37a223db3d96a25241fe6ed1433c4ff | e772c7f5e15a9bc71222efad4700924186cb2f5b | refs/heads/master | 2021-04-27T00:17:53.779039 | 2018-03-08T13:41:50 | 2018-03-08T13:41:50 | 123,787,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,463 | py | from django.shortcuts import render, redirect
from django.views.generic import (TemplateView, ListView, CreateView, DetailView, UpdateView, DeleteView)
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.views import generic
from django.http import Http404
from braces.views import SelectRelatedMixin
from . import models
# from posts.models import Post
from . import forms
# Create your views here.
class PostListView(SelectRelatedMixin, ListView):
    """List every post published so far, with the author row pre-fetched."""
    model = models.Post
    select_related = ['user']

    def get_queryset(self):
        # Bug fixes vs. the original:
        #  - the override was misspelled ``get_query_set`` so Django never
        #    called it;
        #  - ``timezone`` was referenced without ever being imported
        #    (NameError the first time the broken override ran);
        #  - ``order_by(self.created_at)`` read a non-existent view
        #    attribute instead of passing the field name as a string.
        from django.utils import timezone
        return models.Post.objects.filter(
            created_at__lte=timezone.now()
        ).order_by('created_at')
class PostDetail(DetailView):
    """Render a single post; ``select_related`` pre-fetches the author row."""
    model = models.Post
    select_related = ["user"]
# class NewPost(LoginRequiredMixin, SelectRelatedMixin, CreateView):
class NewPost(LoginRequiredMixin, CreateView):
    """Create a post owned by the logged-in user, then go back to the list."""

    model = models.Post
    fields = ('title', 'content')
    success_url = reverse_lazy('posts:post_list')

    def form_valid(self, form):
        # Attach the author before the instance is first persisted.
        new_post = form.save(commit=False)
        new_post.user = self.request.user
        new_post.save()
        self.object = new_post
        return super().form_valid(form)
# class DeletePost(LoginRequiredMixin, SelectRelatedMixin, DeleteView):
# select_related = ["user"]
# model = models.Post
# success_url = reverse_lazy("posts:post_list")
# def get_queryset(self):
# queryset = super().get_queryset()
# return queryset.filter(user_id = self.request.user.id)
# def delete(self):
# messages.success(self.request, "Post was deleted!")
# return super().delete()
@login_required
def upvote(request, pk):
    """Add one point to post ``pk`` (POST only) and return to the post list.

    A GET request falls through to the redirect without changing anything.
    NOTE(review): the read-modify-write below is not atomic; concurrent
    votes can be lost (consider ``F('points') + 1``).
    """
    if request.method == "POST":
        post = models.Post.objects.get(pk=pk)
        post.points +=1
        post.save()
    return redirect('posts:post_list')
@login_required
def delete(request, pk):
    """Delete post ``pk`` and return to the post list.

    NOTE(review): runs on any HTTP method (even GET) and never checks that
    ``request.user`` owns the post - confirm this is intended.
    """
    post = models.Post.objects.get(pk=pk)
    post.delete()
    return redirect('posts:post_list')
@login_required
def downvote(request, pk):
    """Subtract one point from post ``pk`` (POST only) and redirect.

    Mirror image of ``upvote``; the same non-atomic-update caveat applies.
    """
    if request.method == "POST":
        post = models.Post.objects.get(pk=pk)
        post.points -=1
        post.save()
    return redirect('posts:post_list')
| [
"kwiaciarnia_maciejka@wp.pl"
] | kwiaciarnia_maciejka@wp.pl |
b1291a2686c426c466bc9b5706b49b5bf3099ec5 | 64657f30d5f77e2fa646de71f8396943cb97a98f | /sampletracking/wsgi.py | d159fcd2108938e74f0635ea20d002dc2b0a3e22 | [] | no_license | zerongtonywang/sampletracking | d5be52fe630ea8461734f25e0e5e1f7bda3d53c7 | a3f96aaa5d1bbfe27d8b4972495363d27ba10acd | refs/heads/master | 2021-05-30T17:17:03.244179 | 2016-03-05T07:44:30 | 2016-03-05T07:44:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for sampletracking project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sampletracking.settings")
application = get_wsgi_application()
| [
"ztony_wang@hotmail.com"
] | ztony_wang@hotmail.com |
class Solution:
    def climbStairs(self, n: int) -> int:
        """Count the distinct ways to climb ``n`` steps in hops of 1 or 2.

        The counts follow the Fibonacci recurrence, so only the last two
        values are kept instead of a full DP table.
        """
        if n == 1:
            return 1
        if n == 2:
            return 2
        two_back, one_back = 1, 2
        for _ in range(n - 2):
            two_back, one_back = one_back, two_back + one_back
        return one_back
"windmzx@github.com"
] | windmzx@github.com |
287986c00609f089101ca3c4da78af3306ee86ae | f77c13d738d1ce80b6017cea41796458f2648bfb | /restfulAPITestUtility.py | 751b1a3aab8d85c188c5af3ce2c7c399677677b9 | [] | no_license | KenshinQ/RestfulAPITestUtility | e579737c0fb5c83af9134ac681efa978442a9da5 | adbf30fe512dfab9f71244f9e49b45c608d4b369 | refs/heads/master | 2020-06-03T04:57:39.757169 | 2014-05-12T05:43:56 | 2014-05-12T05:43:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,032 | py | __author__ = 'apple'
import tornado.httpclient
import tornado.ioloop
import json
#remote_url = raw_input("please input request url:")
#request_method = raw_input("set the request method:")
#request_parameters = dict
base_url = "http://192.168.3.144/"
tornado.httpclient.AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
proxy_config = {"proxy_host":"192.168.3.21", "proxy_port": 8888}
#has_parameters = raw_input("do you have requset parameters?y/n:")
token = ""
http_client = tornado.httpclient.AsyncHTTPClient()
command_map = {"authorize":True,"GET":True,"POST":True,"PUT":True,"DELETE":True}
#authorize = raw_input("whether need authorized(y/n)?:")
def handle_requset(resp):
    """Async callback: pretty-print the JSON response body, stop the IOLoop.

    NOTE(review): the 'requset' typo is kept because main() registers the
    callback by this spelling.  ``json.loads`` followed by ``.encode``
    assumes the body is a JSON *string* literal; a JSON object would raise
    AttributeError on the encode call - confirm the API contract.
    """
    if resp.error:
        print resp.error
    else:
        response_data = json.loads(resp.body)
        response_data = response_data.encode("utf-8")
        print "the response body:"
        print "##################################"
        print json.dumps(response_data,indent=2)
        print "##################################"
    tornado.ioloop.IOLoop.instance().stop()
def handle_authorize(authorize_response):
    """Async callback for the login request: cache the access token.

    Stores the token in the module-level ``token`` global, prints the
    outcome either way, then stops the IOLoop so make_authorize() returns.
    """
    global token
    if authorize_response.error:
        print authorize_response.error
    else:
        response_data = json.loads(authorize_response.body)
        token = response_data.get("accesstoken",None);
        if token:
            print "Authorize success with token %s" %token
        else:
            print "Authorize fail,please retry!"
    tornado.ioloop.IOLoop.instance().stop()
def make_authorize():
    """Prompt for credentials and POST them to <base_url>user/login.

    Blocks on the IOLoop until handle_authorize() stops it; on success the
    access token ends up in the module-level ``token``.
    """
    global http_client
    username = raw_input("username:")
    password = raw_input("password:")
    post_data = {"mobile": username, "password": password}
    authorize_url = base_url+"user/login"
    body = json.dumps(post_data)
    request = tornado.httpclient.HTTPRequest(authorize_url, method="POST", body=body)
    #request.proxy_host = proxy_config["proxy_host"]
    #request.proxy_port = proxy_config["proxy_port"]
    http_client.fetch(request, handle_authorize)
    tornado.ioloop.IOLoop.instance().start()
def main():
    """Interactive REPL: read a method, URL and body, then fire the request.

    Recognised commands come from ``command_map``; 'authorize' runs the
    login flow and caches the token in the module-level ``token``.
    NOTE(review): tornado rejects GET/DELETE requests that carry a body,
    so those commands may fail with an empty JSON body of '{}'.
    """
    global http_client
    print "START TEST"
    print "---------------------------------"
    while True:
        print "+++++++++++++++++++++++++++++++"
        command = raw_input("please input request method:")
        command_isVaild = command_map.get(command,False)
        if not command_isVaild:
            continue
        if command == "authorize":
            make_authorize()
        else:
            is_set_token = raw_input("is it with token(y/n)?:")
            request_url = raw_input("please input the request url:")
            body_string = raw_input("please input the body:")
            # Body is entered as comma-separated key=value pairs.
            body_compenents = body_string.rsplit(',')
            post_dictionary = {}
            if len(body_string)>2:
                for compenent in body_compenents:
                    key_values = compenent.split('=')
                    post_dictionary[key_values[0]]=key_values[1]
            completion_url = base_url+request_url
            body = json.dumps(post_dictionary)
            request = tornado.httpclient.HTTPRequest(completion_url,method=command,body=body)
            if is_set_token.lower()=='y':
                request.headers["accesstoken"]=token
            http_client.fetch(request,handle_requset)
            tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| [
"apple@appledeiMac.local"
] | apple@appledeiMac.local |
class CloneClass:
    """Groups code clones that were all detected against one base class."""

    def __init__(self, base_class):
        # The representative the clones were matched against.
        self.base_class = base_class
        # Clones discovered so far; filled via append().
        self.clones = []

    def append(self, clone):
        """Record one more clone of the base class."""
        self.clones.append(clone)
| [
"zorinarseny@yandex.ru"
] | zorinarseny@yandex.ru |
90bb085da65f84288dce7b20345cef23b48bc827 | e4b3c95379b99e8e818e65685bd1cbcb80f7f08c | /data/news/fetch_articles.py | 50ab71414975c3904e310f55057e0b2e39682029 | [] | no_license | mohammedFurqan/Exchange-Rate-Forecast-through-News-Articles | 150c93c3331b0f74b4284b5ff489eb87a5fcf4e8 | 72a89df915b75536ee41c9683157ae8b847c129a | refs/heads/master | 2022-05-19T05:23:59.279793 | 2020-04-22T07:02:55 | 2020-04-22T07:02:55 | 248,609,648 | 1 | 2 | null | 2020-04-22T07:02:56 | 2020-03-19T21:34:58 | Python | UTF-8 | Python | false | false | 3,431 | py | # Author: Mohammed Furqan Rahamath
# Last updated: 21 March 2020
#
# Purpose: Fetch the articles content for corresponding meta-data
import multiprocessing as mp
import requests
from bs4 import BeautifulSoup
import pandas as pd
import csv
import time
from tqdm import tqdm
import os
import os.path
# Total range of years
start_year = 1981
end_year = 2020
# Customise which year, quarter and row to start from and which year to end at
start_from_year = 2018
start_from_quarter = 1
start_from_row = 1
stop_at_year = 2020
def worker(row, q):
    """Download one article, store its text in ``row`` and queue the CSV row.

    ``row`` is a metadata dict holding at least the keys referenced below.
    The flat list of column values is both put on ``q`` (for the listener)
    and returned (for the pool result collection).  Two CSS selectors are
    tried - presumably the newer and older page markup of the source site;
    TODO confirm against the fetched pages.
    """
    row_text = []
    page = requests.get(row["url"]).text
    soup = BeautifulSoup(page, 'html.parser')
    article_body = soup.select('section[name=articleBody] p')
    if len(article_body) == 0:
        # Fall back to the alternative body markup.
        article_body = soup.select('p.story-body-text')
    content = ""
    for paragraph in article_body:
        text_content = (paragraph.get_text()).strip()
        if not text_content:
            continue
        content += (' ' + text_content.replace('\n', ''))
    row['article'] = content
    # Newlines are stripped so each article stays on one CSV row.
    row_text = [row['_id'],row['url'],row['word_count'],row['section'],row['date'],row['type'],row['headline'].replace('\n', ''),row['abstract'].replace('\n', ''),row['article'].replace('\n', '')]
    q.put(row_text)
    return row_text
def listener(q):
    """Consume queue entries until the 'kill' sentinel is received."""
    while True:
        message = q.get()
        if message == 'kill':
            break
# Iterate over calendar years, appending the scraped article rows to one
# CSV file per year ('articles/articles_<year>.csv'), quarter by quarter.
for year in range(end_year - start_year):
    # Stop at the mentioned year
    if start_year+year == stop_at_year:
        break
    # Skip fetching previously covered data
    if (start_year+year < start_from_year):
        continue
    out_file_name = 'articles/articles_{}.csv'.format(start_year+year)
    columns = ['_id','url','word_count','section','date','type','headline','abstract','article']
    with open(out_file_name, 'a+') as out_file:
        writer = csv.writer(out_file)
        # If file is created new or empty, write column names first
        if os.path.isfile(out_file_name):
            if (os.stat(out_file_name).st_size == 0):
                writer.writerow(columns)
        else:
            writer.writerow(columns)
        for quarter in range(4):
            # Skip Quarters as per the customisations
            if (start_year+year == start_from_year) and (quarter < start_from_quarter-1):
                continue
            with open('meta_data/articles_metadata_{}_{}.csv'.format(start_year+year, quarter+1)) as meta_file:
                data = csv.DictReader(meta_file)
                num_articles = 2000  # NOTE(review): unused - dead variable?
                manager = mp.Manager()
                q = manager.Queue()
                pool = mp.Pool(10)
                #put listener to work first
                watcher = pool.apply_async(listener, (q,))
                #fire off workers
                jobs = []
                for row in data:
                    job = pool.apply_async(worker, (row, q))
                    jobs.append(job)
                # collect results from the workers through the pool result queue
                for job in jobs:
                    row_val = job.get()
                    if not row_val:
                        continue
                    writer.writerow(row_val)
                #now we are done, kill the listener
                q.put('kill')
                pool.close()
                pool.join()
| [
"mohammed@moonraft.com"
] | mohammed@moonraft.com |
c367f874817b32c6f63cee71858c33cc30dede45 | 5d0fe4a9e026234fe15e6c4380355061bb4dac64 | /tests/functional/pages/profile/individual_enter_your_personal_details.py | 53c55f143ecca632274757bbfec1c6127897fa4a | [
"MIT"
] | permissive | uktrade/directory-tests | 37e243862da8ac594cf1ea06ade714db5e1aba03 | 39ec6c26203580238e65566a472cbd80916e6726 | refs/heads/master | 2022-08-09T16:58:56.248982 | 2022-08-01T12:25:10 | 2022-08-01T12:25:10 | 71,367,747 | 4 | 3 | MIT | 2022-08-01T12:26:09 | 2016-10-19T14:48:57 | Python | UTF-8 | Python | false | false | 1,572 | py | # -*- coding: utf-8 -*-
"""Profile - Enter your personal details"""
from requests import Response, Session
from directory_tests_shared import PageType, Service, URLs
from tests.functional.utils.context_utils import Actor
from tests.functional.utils.request import (
Method,
check_response,
check_url,
make_request,
)
SERVICE = Service.PROFILE
NAME = "Enter your individual details"
TYPE = PageType.FORM
URL = URLs.PROFILE_ENROL_INDIVIDUAL_ENTER_YOUR_PERSONAL_DETAILS.absolute
EXPECTED_STRINGS = [
"Enter your personal details",
"First name",
"Last name",
"Job title",
"Phone number (optional)",
]
def go_to(session: Session) -> Response:
    """GET the 'Enter your personal details' enrolment form page."""
    return make_request(Method.GET, URL, session=session)
def should_be_here(response: Response):
    """Assert ``response`` is this page: expected URL, 200, expected copy."""
    check_url(response, URL)
    check_response(response, 200, body_contains=EXPECTED_STRINGS)
def submit(actor: Actor):
    """POST the personal-details step of individual enrolment for ``actor``."""
    form_fields = {
        "csrfmiddlewaretoken": actor.csrfmiddlewaretoken,
        "individual_user_enrolment_view-current_step": "personal-details",
        "personal-details-given_name": actor.alias,
        "personal-details-family_name": "AUTOMATED TESTS",
        "personal-details-job_title": "DIT AUTOMATED TESTS",
        "personal-details-phone_number": "0987654321",
        "personal-details-terms_agreed": "on",
    }
    return make_request(
        Method.POST,
        URL,
        session=actor.session,
        headers={"Referer": URL},
        files=form_fields,
        no_filename_in_multipart_form_data=True,
    )
| [
"kowalczykjanusz@gmail.com"
] | kowalczykjanusz@gmail.com |
398f7c107b2af8f1aac79c33a0579535b47aa915 | 64723d47ce3651e4bbb85344a284371e61d31ffa | /ProgramFlow/sectionchallenge.py | 973cf72679b18664f2aa426eedbbd21d9fc0afa2 | [
"MIT"
] | permissive | BrandonP321/Python-masterclass | 659674cede81a3d0b4681481cbba8f3dfdd3d996 | fac81fe4f8acfa37076820405d96132f9f23b311 | refs/heads/master | 2022-12-12T13:59:37.828859 | 2020-09-05T02:29:40 | 2020-09-05T02:29:40 | 266,238,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | options = ["Learn Python", "Learn Java", "Go Swimming", "Have Dinner", "Go to Bed"]
# Menu text shown to the user; numbering must stay in sync with ``options``.
menu = "1. Learn Python\n" \
       "2. Learn Java\n" \
       "3. Go Swimming\n" \
       "4. Have Dinner\n" \
       "5. Go to Bed\n" \
       "0. Exit"
print("Please choose an option from the list below:")
print(menu)
# Prompt repeatedly until the user enters 0 to quit.
# NOTE(review): int(input(...)) raises ValueError on non-numeric input.
while True:
    choice = int(input("Make your choice: "))
    if choice == 0:
        print("See ya!")
        break
    elif 1 <= choice <= 5:
        print(f"You chose to {options[choice - 1]}")
        print("Choose again")
    else:
        print("You have to choose from the list:")
        print(menu)
| [
"brandon.phillips976@gmail.com"
] | brandon.phillips976@gmail.com |
6334affbab5fed3ac2c4e47e4f118063cd80e199 | dd234e43eb2e3e17421c0284e11fdebf6a6e657c | /profile/tflow/_max.py | 0b2fb75b11be48deecdeead02e06768d243f5477 | [] | no_license | vlamprinidis/nn-estimation | 4133532a2063e80e4320b4d206ce0ded2ce0fe03 | 832cdc250c6be595a240fc8670636b45710ffe56 | refs/heads/master | 2023-01-10T09:47:21.699453 | 2020-11-17T15:53:40 | 2020-11-17T15:53:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | import tensorflow as tf
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras.layers import Dense, Flatten
import argparse
from tf_data import give, ds_size
import lib_tflow
from lib_tflow import distribute
import sys
sys.path.append('/home/ubuntu/profile')
import lib
parser = argparse.ArgumentParser()
parser = lib.arg_all(parser)
parser = lib.arg_pool(parser)
args = parser.parse_args()
DIM = args.dim
RESULT = '__max{}d.tflow'.format(DIM)
NAME = 'MAX{}D'.format(DIM)
max_pool = layers.MaxPool1D if DIM==1 else layers.MaxPool2D
class Max:
    """Builds the tiny Keras model used to profile a single MaxPool layer."""
    def create(self):
        # MaxPool (1-D or 2-D per --dim) -> Flatten -> 10-unit Dense head;
        # layer hyper-parameters come from the module-level ``args``.
        model = Sequential()
        model.add(
            max_pool(pool_size = args.pool, strides=args.stride, name = NAME)
        )
        model.add( Flatten(name='FLATTEN') )
        model.add( Dense(units = 10, name='FINAL_DENSE') )
        model.compile(loss = lib_tflow.loss, optimizer = lib_tflow.opt, metrics=['accuracy'])
        self.model = model
Model = Max()
if args.nodes > 1:
distribute(strategy, Model, args.nodes)
else:
Model.create()
dataset = give(DIM, args.numf, args.channels)
dataset = dataset.batch(args.batch)
if args.nodes > 1:
dataset = strategy.experimental_distribute_dataset(dataset)
steps = ds_size//args.batch//args.nodes
the_typs = ['MaxPool']
time = lib_tflow.profile(the_typs, None, Model.model, dataset, steps, args.epochs)
import numpy as np
data = np.array([[
args.epochs, ds_size, # dataset size
args.numf,
args.channels,
args.batch,
args.nodes,
args.pool,
args.stride,
time
]])
with open('max{}d.tflow'.format(DIM),'a') as file:
np.savetxt(file, data, delimiter=",", fmt="%s") | [
"vlassis.ld@gmail.com"
] | vlassis.ld@gmail.com |
8c9b50a1fb059ec85baff0eef6b35238e58a497e | b4cce226536a96f543798d87e79e3fda5cb3af8a | /calculatorCheckTable.py | c550fd0a5d0a8f5e06f97c7a1067f1116b55d804 | [] | no_license | phanben110/Smart-Lab | 3fb10e637dac83a35396dcbfffd02de3562ae8fa | 1a6cdc27e06575be3f2d326ac754e3b2eee329b9 | refs/heads/master | 2023-02-04T00:35:24.876257 | 2020-12-13T06:31:05 | 2020-12-13T06:31:05 | 279,117,104 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,980 | py | import define as de
import math as ma
def distanceFromCtoAB(TABLE, C):
    """Return the perpendicular distance from point C to the line AB.

    ``TABLE[0]`` and ``TABLE[1]`` are the (x, y) coordinates of A and B;
    ``C`` is an (x, y) point.  Writing the line through A and B as
    a*x + b*y + c = 0 with a = ya - yb, b = xb - xa, the distance is
    |a*xc + b*yc + c| / sqrt(a**2 + b**2).

    NOTE: raises ZeroDivisionError when A == B (no unique line).
    """
    xa, ya = TABLE[0]
    xb, yb = TABLE[1]
    xc, yc = C
    a = ya - yb
    b = xb - xa
    c = -xa * (ya - yb) - ya * (xb - xa)
    # Bug fix: normalise by the length of the line's normal (a, b), not by
    # sqrt(xc*xc + yc*yc) as the original did - that was the distance of C
    # from the ORIGIN, which yields a wrong result for every line.
    return abs(a * xc + b * yc + c) / ma.sqrt(a * a + b * b)
def solvePointH(TABLE, C):
    """Return H, the foot of the perpendicular dropped from C onto line AB.

    Solves the 2x2 linear system formed by the line AB and the line
    through C perpendicular to AB.  Zero denominators are nudged by
    0.001 to dodge division-by-zero (an approximation, not exact math).
    """
    (xa, ya) = TABLE[0]
    (xb, yb) = TABLE[1]
    xc, yc = C
    # Line AB written as A1*x + B1*y + C1 = 0.
    A1 = ya - yb
    if A1 == 0:
        A1 = A1 + 0.001
    B1 = xb - xa
    C1 = -xa * (ya - yb) - ya * (xb - xa)
    # Perpendicular through C written as A2*x + B2*y + C2 = 0.
    A2 = xb - xa
    B2 = yb - ya
    C2 = -xc * (xb - xa) - yc * (yb - ya)
    mau = (B2 - B1 * A2 / A1)
    if (B2 - B1 * A2 / A1) == 0:
        mau = 0.001
    yh = (A2 * C1 / A1 - C2) / mau
    xh = (-C1 - yh * B1) / A1
    return (xh, yh)
def distanceAH(TABLE, H):
    """Return the Euclidean distance between vertex A (TABLE[0]) and H."""
    ax, ay = TABLE[0]
    hx, hy = H
    dx = hx - ax
    dy = hy - ay
    return ma.sqrt(dx * dx + dy * dy)
def distanceBH(TABLE, H):
    """Return the Euclidean distance between vertex B (TABLE[1]) and H."""
    bx, by = TABLE[1]
    hx, hy = H
    dx = hx - bx
    dy = hy - by
    return ma.sqrt(dx * dx + dy * dy)
def distanceAB(TABLE):
    """Return the Euclidean distance between vertices A and B of TABLE."""
    ax, ay = TABLE[0]
    bx, by = TABLE[1]
    dx = ax - bx
    dy = ay - by
    return ma.sqrt(dx * dx + dy * dy)
def checkCondition(AH, BH, AB, HC, Value):
    """Return True when H lies strictly between A and B and C is close enough.

    ``Value`` is accepted but never used.  Debug output is printed on every
    call, and printed a second time when the condition holds - this mirrors
    the original behaviour exactly.
    """
    for item in ("Ben", AH, BH, AB, HC):
        print(item)
    if AH < AB and BH < AB and HC <= 45:
        for item in ("Ben", AH, BH, AB, HC):
            print(item)
        return True
    return False
solvePointH(de.table1, (3, 4))
distanceAH(de.table1,solvePointH( de.table1, solvePointH( de.table1,(3,4 ))))
distanceBH(de.table1,solvePointH( de.table1, solvePointH( de.table1,(3,4 ))))
distanceAB(de.table1)
distanceFromCtoAB(de.table1, (3,4))
| [
"noreply@github.com"
] | phanben110.noreply@github.com |
2340b6eb8def046972b6d1941ebf2b84f327599d | f4316751a2bc5d7de2874d4726286257d3beebe7 | /model.py | 35de2fad41fb3b2060ce525003204693b12af0ed | [
"MIT"
] | permissive | poom-cyber/SpeakVrification | 7f58bc475ee749fd2b961eba96db81e87f310788 | 4a6c582aabcbd52841ca3b24ad9e7f7ff3bdbf1a | refs/heads/master | 2023-09-05T18:33:04.862619 | 2021-11-11T11:07:59 | 2021-11-11T11:07:59 | 426,970,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,421 | py | import tensorflow as tf
import numpy as np
import os
import time
from utils import random_batch, normalize, similarity, loss_cal, optim
from configuration import get_config
from tensorflow.contrib import rnn
config = get_config()
def train(path):
    """Build the GE2E speaker-verification graph and train it (TF1 style).

    ``path`` is the output directory: checkpoints are written under
    <path>/Check_Point and TensorBoard summaries under <path>/logs.
    All hyper-parameters come from the module-level ``config``.
    """
    tf.reset_default_graph()    # reset graph

    # draw graph
    batch = tf.placeholder(shape= [None, config.N*config.M, 40], dtype=tf.float32)  # input batch (time x batch x n_mel)
    lr = tf.placeholder(dtype= tf.float32)  # learning rate
    global_step = tf.Variable(0, name='global_step', trainable=False)
    w = tf.get_variable("w", initializer= np.array([10], dtype=np.float32))
    b = tf.get_variable("b", initializer= np.array([-5], dtype=np.float32))

    # embedding lstm (3-layer default)
    with tf.variable_scope("lstm"):
        lstm_cells = [tf.contrib.rnn.LSTMCell(num_units=config.hidden, num_proj=config.proj) for i in range(config.num_layer)]
        lstm = tf.contrib.rnn.MultiRNNCell(lstm_cells)    # define lstm op and variables
        outputs, _ = tf.nn.dynamic_rnn(cell=lstm, inputs=batch, dtype=tf.float32, time_major=True)   # for TI-VS must use dynamic rnn
        embedded = outputs[-1]                            # the last ouput is the embedded d-vector
    embedded = normalize(embedded)                        # normalize
    print("embedded size: ", embedded.shape)

    # loss
    sim_matrix = similarity(embedded, w, b)
    print("similarity matrix size: ", sim_matrix.shape)
    loss = loss_cal(sim_matrix, type=config.loss)

    # optimizer operation
    trainable_vars= tf.trainable_variables()                # get variable list
    optimizer= optim(lr)                                    # get optimizer (type is determined by configuration)
    grads, vars= zip(*optimizer.compute_gradients(loss))    # compute gradients of variables with respect to loss
    grads_clip, _ = tf.clip_by_global_norm(grads, 3.0)      # l2 norm clipping by 3
    # NOTE(review): assumes w and b are the first two trainables (they are
    # created before the LSTM scope above) - confirm if variable creation
    # order ever changes.
    grads_rescale= [0.01*grad for grad in grads_clip[:2]] + grads_clip[2:] # smaller gradient scale for w, b
    train_op= optimizer.apply_gradients(zip(grads_rescale, vars), global_step= global_step)   # gradient update operation

    # check variables memory
    variable_count = np.sum(np.array([np.prod(np.array(v.get_shape().as_list())) for v in trainable_vars]))
    print("total variables :", variable_count)

    # record loss
    loss_summary = tf.summary.scalar("loss", loss)
    merged = tf.summary.merge_all()
    saver = tf.train.Saver()

    # training session
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        os.makedirs(os.path.join(path, "Check_Point"), exist_ok=True)  # make folder to save model
        os.makedirs(os.path.join(path, "logs"), exist_ok=True)          # make folder to save log
        writer = tf.summary.FileWriter(os.path.join(path, "logs"), sess.graph)
        epoch = 0  # NOTE(review): unused - dead variable?
        lr_factor = 1   # lr decay factor ( 1/2 per 10000 iteration)
        loss_acc = 0    # accumulated loss ( for running average of loss)

        for iter in range(config.iteration):
            # run forward and backward propagation and update parameters
            _, loss_cur, summary = sess.run([train_op, loss, merged],
                                  feed_dict={batch: random_batch(), lr: config.lr*lr_factor})

            loss_acc += loss_cur    # accumulated loss for each 100 iteration

            if iter % 10 == 0:
                writer.add_summary(summary, iter)   # write at tensorboard

            if (iter+1) % 100 == 0:
                print("(iter : %d) loss: %.4f" % ((iter+1),loss_acc/100))
                loss_acc = 0    # reset accumulated loss

            if (iter+1) % 10000 == 0:
                lr_factor /= 2      # lr decay
                print("learning rate is decayed! current lr : ", config.lr*lr_factor)

            if (iter+1) % 10000 == 0:
                saver.save(sess, os.path.join(path, "./Check_Point/model.ckpt"), global_step=iter//10000)
                print("model is saved!")
# Test Session
def test(path):
    """Restore a trained checkpoint and report EER on one evaluation batch.

    ``path`` is the model directory containing Check_Point/.  Builds the
    same embedding LSTM as train(), splits the batch into enrollment and
    verification halves, then sweeps thresholds 0.50..0.99 to find the
    equal-error-rate point.  (TF1 graph-mode code.)
    """
    tf.reset_default_graph()

    # draw graph
    enroll = tf.placeholder(shape=[None, config.N*config.M, 40], dtype=tf.float32) # enrollment batch (time x batch x n_mel)
    verif = tf.placeholder(shape=[None, config.N*config.M, 40], dtype=tf.float32)  # verification batch (time x batch x n_mel)
    batch = tf.concat([enroll, verif], axis=1)

    # embedding lstm (3-layer default)
    with tf.variable_scope("lstm"):
        lstm_cells = [tf.contrib.rnn.LSTMCell(num_units=config.hidden, num_proj=config.proj) for i in range(config.num_layer)]
        lstm = tf.contrib.rnn.MultiRNNCell(lstm_cells)    # make lstm op and variables
        outputs, _ = tf.nn.dynamic_rnn(cell=lstm, inputs=batch, dtype=tf.float32, time_major=True)   # for TI-VS must use dynamic rnn
        embedded = outputs[-1]                            # the last ouput is the embedded d-vector
    embedded = normalize(embedded)                        # normalize
    print("embedded size: ", embedded.shape)

    # enrollment embedded vectors (speaker model)
    enroll_embed = normalize(tf.reduce_mean(tf.reshape(embedded[:config.N*config.M, :], shape= [config.N, config.M, -1]), axis=1))
    # verification embedded vectors
    verif_embed = embedded[config.N*config.M:, :]

    similarity_matrix = similarity(embedded=verif_embed, w=1., b=0., center=enroll_embed)

    saver = tf.train.Saver(var_list=tf.global_variables())
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        # load model
        print("model path :", path)
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir=os.path.join(path, "Check_Point"))
        ckpt_list = ckpt.all_model_checkpoint_paths
        loaded = 0
        for model in ckpt_list:
            if config.model_num == int(model.split('-')[-1]):    # find ckpt file which matches configuration model number
                print("ckpt file is loaded !", model)
                loaded = 1
                saver.restore(sess, model)  # restore variables from selected ckpt file
                break

        if loaded == 0:
            raise AssertionError("ckpt file does not exist! Check config.model_num or config.model_path.")

        print("test file path : ", config.test_path)

        # return similarity matrix after enrollment and verification
        time1 = time.time() # for check inference time
        if config.tdsv:
            S = sess.run(similarity_matrix, feed_dict={enroll:random_batch(shuffle=False, noise_filenum=1),
                                                       verif:random_batch(shuffle=False, noise_filenum=2)})
        else:
            S = sess.run(similarity_matrix, feed_dict={enroll:random_batch(shuffle=False),
                                                       verif:random_batch(shuffle=False, utter_start=config.M)})
        S = S.reshape([config.N, config.M, -1])
        time2 = time.time()

        np.set_printoptions(precision=2)
        print("inference time for %d utterences : %0.2fs"%(2*config.M*config.N, time2-time1))
        print(S)    # print similarity matrix

        # calculating EER
        diff = 1; EER=0; EER_thres = 0; EER_FAR=0; EER_FRR=0

        # through thresholds calculate false acceptance ratio (FAR) and false reject ratio (FRR)
        for thres in [0.01*i+0.5 for i in range(50)]:
            S_thres = S>thres

            # False acceptance ratio = false acceptance / mismatched population (enroll speaker != verification speaker)
            FAR = sum([np.sum(S_thres[i])-np.sum(S_thres[i,:,i]) for i in range(config.N)])/(config.N-1)/config.M/config.N

            # False reject ratio = false reject / matched population (enroll speaker = verification speaker)
            FRR = sum([config.M-np.sum(S_thres[i][:,i]) for i in range(config.N)])/config.M/config.N

            # Save threshold when FAR = FRR (=EER)
            if diff> abs(FAR-FRR):
                diff = abs(FAR-FRR)
                EER = (FAR+FRR)/2
                EER_thres = thres
                EER_FAR = FAR
                EER_FRR = FRR

        print("\nEER : %0.2f (thres:%0.2f, FAR:%0.2f, FRR:%0.2f)"%(EER,EER_thres,EER_FAR,EER_FRR))
| [
"noreply@github.com"
] | poom-cyber.noreply@github.com |
6e3f7646454551de97bff7229a6e4a0d163b2856 | ca231a325e8f4c18d50d89ffa7eec993d4cc68c3 | /codility/minimal_interger_not_ocurrs.py | 4f9b4ac785566637a02e89df334015135a5bb335 | [] | no_license | HugoPorto/PythonCodes | 8e1597999ccd34ffa86df5ae7e91111d77dc7a22 | 539ad6891cbd49a2c011349f843ab710aad2993a | refs/heads/master | 2022-02-13T05:48:24.633750 | 2017-09-12T15:44:06 | 2017-09-12T15:44:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | # -*- coding:utf-8 -*-
def solution(A):
    ''' Return the minimal positive integer (>= 1) not occurring in A.

    Pigeonhole principle: among the first len(A) + 1 positive integers,
    at least one must be missing from the len(A) inputs, so only those
    candidates need to be tracked.
    '''
    # occurrence[i] records whether the integer i + 1 appears in A.
    # Values outside 1..len(A)+1 are irrelevant and ignored.
    occurrence = [False] * (len(A) + 1)
    for item in A:
        if 1 <= item <= len(A) + 1:
            occurrence[item - 1] = True
    # Bug fix: the original used Python-2-only ``xrange`` here, which is a
    # NameError on Python 3; ``range`` behaves identically.
    for index in range(len(A) + 1):
        if occurrence[index] == False:
            return index + 1
    raise Exception("Should never be here.")
return -1
assert solution([-1]) == 1
assert solution([1, 3, 6, 4, 1, 2]) == 5
assert solution([1]) == 2
assert solution([-1, 0, 1, 3]) == 2
assert solution([-1, 0, 1, 2]) == 3 | [
"gpzim98@gmail.com"
] | gpzim98@gmail.com |
4bb1dcbfb6124c53a97f603aad7a0a35ec8a77ef | 11c3a8812210b73f489ae2268104eb3c8176a707 | /dictionary.py | 48e4d29739869c435bc0857c40e6aff22aa40f07 | [] | no_license | Dragues/love_bot | 297a3832c4f157d0fa9560b76a30e783889ddf5d | 958b1284e9e19d53bd6ca0cd9fd53ecd2626ceaa | refs/heads/master | 2021-01-20T06:05:38.731402 | 2017-08-26T11:59:36 | 2017-08-26T11:59:36 | 101,482,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | already_exist = 'Ты уже любишь кого-то'
# Canned Russian replies for the Telegram "love bot".
help_msg = 'Ничего полезного я не сделаю'
# Key under which per-chat settings are stored.
config = 'chat_config'
# Message pools: one is picked at the start / middle / end of a search.
search_start = ['Поиск любви активирован!', 'От меня не спрячешься!']
search_midlle = ['Да у Вас тут любовь', 'Проанализируем вас']
search_end = ['Ого, вы токо гляньте!', 'Так, кто же у нас любит больше всех?', 'ВОТ ЭТО НЕОЖИДАННОСТЬ', 'Чур я первый с ним/ней гуляю!']
# Registration flow replies.
user_reg = ['Ты в деле!', 'У нас пополнение', ]
need_sign_up = 'КокококКоКо, пиши /love_reg и регайся.'
one_user = 'Один чтоль играть будешь?'
# Daily-winner announcements (a name is appended after the dash).
game_win = 'Сегодня больше любит оппонента - '
last_winner = 'Любящий дня - '
| [
"chist-34dml@mail.ru"
] | chist-34dml@mail.ru |
12ccbb6a49dc123cca42202409efb9bb333f2c8c | a135e6aebb4b525d090272c107d9986ed50ec919 | /grip/__init__.py | 263bab0ee2649d40658a1dc3023c1a3e0b27c6d5 | [
"MIT"
] | permissive | wemersondev/grip | 2a6740d32e045cfa6639936d6640555ea81d3b53 | 8a9d7caf2f8a7cf07d8b31e030600404b4c498c7 | refs/heads/master | 2021-01-24T03:26:40.071776 | 2018-02-25T19:58:13 | 2018-02-25T19:58:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,663 | py | """\
Grip
----
Render local readme files before sending off to GitHub.
:copyright: (c) 2014-2016 by Joe Esposito.
:license: MIT, see LICENSE for more details.
"""
__version__ = '4.4.0'
import sys
# Patch for Flask 11.0+ on Python 3 (pypy3)
if not hasattr(sys, 'exc_clear'):
sys.exc_clear = lambda: None
from .api import (
clear_cache, create_app, export, render_content, render_page, serve)
from .app import Grip
from .assets import GitHubAssetManager, ReadmeAssetManager
from .command import main
from .constants import (
DEFAULT_API_URL, DEFAULT_FILENAMES, DEFAULT_FILENAME, DEFAULT_GRIPHOME,
DEFAULT_GRIPURL, STYLE_ASSET_URLS_INLINE_FORMAT, STYLE_ASSET_URLS_RE,
STYLE_ASSET_URLS_SUB_FORMAT, STYLE_URLS_RE, STYLE_URLS_SOURCE,
SUPPORTED_EXTENSIONS, SUPPORTED_TITLES)
from .exceptions import AlreadyRunningError, ReadmeNotFoundError
from .readers import ReadmeReader, DirectoryReader, StdinReader, TextReader
from .renderers import ReadmeRenderer, GitHubRenderer, OfflineRenderer
__all__ = [
'__version__',
'DEFAULT_API_URL', 'DEFAULT_FILENAMES', 'DEFAULT_FILENAME',
'DEFAULT_GRIPHOME', 'DEFAULT_GRIPURL', 'STYLE_ASSET_URLS_INLINE_FORMAT',
'STYLE_ASSET_URLS_RE', 'STYLE_ASSET_URLS_SUB_FORMAT', 'STYLE_URLS_RE',
'STYLE_URLS_SOURCE', 'SUPPORTED_EXTENSIONS', 'SUPPORTED_TITLES',
'AlreadyRunningError', 'DirectoryReader', 'GitHubAssetManager',
'GitHubRenderer', 'Grip', 'OfflineRenderer', 'ReadmeNotFoundError',
'ReadmeAssetManager', 'ReadmeReader', 'ReadmeRenderer', 'StdinReader',
'TextReader',
'clear_cache', 'create_app', 'export', 'main', 'render_content',
'render_page', 'serve',
]
| [
"joe@joeyespo.com"
] | joe@joeyespo.com |
35fcdd02ed0a020911e0359cb8dad25a77a25018 | ba0bdc9fe595e0e77a78c534e8a40136a1a3bf3c | /users/forms.py | 0a415f83bbcd83d8f0053d431c911b9935f31b7d | [] | no_license | santoshdhulgand/BlogProject | 1031bfebbc8b02943a1d6175c78ae084233cc93f | ead8a7342e62be3391e7bb310fb53697c7cb1c0b | refs/heads/master | 2023-05-27T12:13:14.289634 | 2021-06-14T11:12:34 | 2021-06-14T11:12:34 | 375,331,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.forms import fields
from django.contrib.auth.models import User
from users.models import Profile
class UserRegistrationForm(UserCreationForm):
    """Sign-up form: adds an email field to Django's username/password pair."""
    class Meta:
        model = User
        fields = ['username' , 'email' , 'password1' , 'password2']
class UserUpdateForm(forms.ModelForm):
    """Lets a logged-in user edit the username and email on their account."""
    class Meta:
        model = User
        fields = ['username' , 'email']
class ProfileUpdateForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['image'] | [
"santoshdhulgand81195@gmail.com"
] | santoshdhulgand81195@gmail.com |
994f92f1e53aa2fed33734fdbcd961e6dd651f3b | f137fb92afb7265b6d9fcace33ba72ea8e38a837 | /starter_code/backend/src/auth/auth.py | ebb19ac7279d1819924d8e38b14f5a138f210038 | [] | no_license | naimishparikh/coffee-shop | b355c9df71ff414f8683fcdbd54acbabeecc255f | 8b69e54bb89d75a66681ae7d213d195c0a6fa7b2 | refs/heads/master | 2023-07-31T09:06:36.751453 | 2021-09-18T12:00:26 | 2021-09-18T12:00:26 | 406,225,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,499 | py | import json
from flask import request, _request_ctx_stack, abort
from functools import wraps
from jose import jwt
from urllib.request import urlopen
AUTH0_DOMAIN = 'dev-9a27t4db.us.auth0.com'
ALGORITHMS = ['RS256']
API_AUDIENCE = 'drinks'
## AuthError Exception
'''
AuthError Exception
A standardized way to communicate auth failure modes
'''
class AuthError(Exception):
def __init__(self, error, status_code):
self.error = error
self.status_code = status_code
## Auth Header
'''
@TODO implement get_token_auth_header() method
it should attempt to get the header from the request
it should raise an AuthError if no header is present
it should attempt to split bearer and the token
it should raise an AuthError if the header is malformed
return the token part of the header
'''
def get_token_auth_header():
auth = request.headers.get('Authorization', None)
print("auth header", type(auth))
print("get header", type(request.headers))
if not auth:
raise AuthError({
'code': 'authorization_header_missing',
'description': 'Authorization header is expected.'
}, 401)
parts = auth.split()
if parts[0].lower() != 'bearer':
raise AuthError({
'code': 'invalid_header',
'description': 'Authorization header must start with "Bearer".'
}, 401)
elif len(parts) == 1:
raise AuthError({
'code': 'invalid_header',
'description': 'Token not found.'
}, 401)
elif len(parts) > 2:
raise AuthError({
'code': 'invalid_header',
'description': 'Authorization header must be bearer token.'
}, 401)
token = parts[1]
return token
'''
@TODO implement check_permissions(permission, payload) method
@INPUTS
permission: string permission (i.e. 'post:drink')
payload: decoded jwt payload
it should raise an AuthError if permissions are not included in the payload
!!NOTE check your RBAC settings in Auth0
it should raise an AuthError if the requested permission string is not in the payload permissions array
return true otherwise
'''
def check_permissions(permission, payload):
if 'permissions' not in payload:
abort(400)
if permission not in payload['permissions']:
abort(403)
return True
'''
@TODO implement verify_decode_jwt(token) method
@INPUTS
token: a json web token (string)
it should be an Auth0 token with key id (kid)
it should verify the token using Auth0 /.well-known/jwks.json
it should decode the payload from the token
it should validate the claims
return the decoded payload
!!NOTE urlopen has a common certificate error described here: https://stackoverflow.com/questions/50236117/scraping-ssl-certificate-verify-failed-error-for-http-en-wikipedia-org
'''
def verify_decode_jwt(token):
jsonurl = urlopen(f'https://{AUTH0_DOMAIN}/.well-known/jwks.json')
jwks = json.loads(jsonurl.read())
unverified_header = jwt.get_unverified_header(token)
rsa_key = {}
if 'kid' not in unverified_header:
raise AuthError({
'code': 'invalid_header',
'description': 'Authorization malformed.'
}, 401)
for key in jwks['keys']:
if key['kid'] == unverified_header['kid']:
rsa_key = {
'kty': key['kty'],
'kid': key['kid'],
'use': key['use'],
'n': key['n'],
'e': key['e']
}
if rsa_key:
try:
payload = jwt.decode(
token,
rsa_key,
algorithms=ALGORITHMS,
audience=API_AUDIENCE,
issuer='https://' + AUTH0_DOMAIN + '/'
)
return payload
except jwt.ExpiredSignatureError:
raise AuthError({
'code': 'token_expired',
'description': 'Token expired.'
}, 401)
except jwt.JWTClaimsError:
raise AuthError({
'code': 'invalid_claims',
'description': 'Incorrect claims. Please, check the audience and issuer.'
}, 401)
except Exception:
raise AuthError({
'code': 'invalid_header',
'description': 'Unable to parse authentication token.'
}, 400)
raise AuthError({
'code': 'invalid_header',
'description': 'Unable to find the appropriate key.'
}, 400)
'''
@TODO implement @requires_auth(pelrmission) decorator method
@INPUTS
permission: string permission (i.e. 'post:drink')
it should use the get_token_auth_header method to get the token
it should use the verify_decode_jwt method to decode the jwt
it should use the check_permissions method validate claims and check the requested permission
return the decorator which passes the decoded payload to the decorated method
'''
def requires_auth(permission=''):
def requires_auth_decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
token = get_token_auth_header()
payload = verify_decode_jwt(token)
print(payload)
print(permission)
check_permissions(permission, payload)
return f(token, *args, **kwargs)
return wrapper
return requires_auth_decorator
| [
"naimish.parikh@gmail.com"
] | naimish.parikh@gmail.com |
e6923ea0892f3397caa1a86538ff202290001c3e | d085cc74e7598bdf9744c2adbbee5c463ae7d54f | /bgt/bgt/wsgi.py | 4fb2a95733c9835f0f12e93cd1a8a04b11f5583a | [] | no_license | connor-richards/britgolfdad.com | 4e71b51faa0abf48956086f184e80e9393112884 | 4aa5a2319b8dd957175461cc4523d666b06daa50 | refs/heads/main | 2023-06-19T12:49:58.935316 | 2021-07-13T19:49:09 | 2021-07-13T19:49:09 | 385,718,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | """
WSGI config for bgt project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bgt.settings')
application = get_wsgi_application()
| [
"connor.richards899@gmail.com"
] | connor.richards899@gmail.com |
18df10d8b1c09bf6663d3185bce769d2c532a8f7 | 8c6816435093cb8e9e45593d3ffdd67028a011b6 | /tests/test_is_palindrome_permutation.py | 8afe1e3ee3486b7078ef4211c354a84d7504048b | [] | no_license | Keeady/daily-coding-challenge | 6ee74a5fe639a1f5b4753dd4848d0696bef15c28 | 31eebbf4c1d0eb88a00f71bd5741adf5e07d0e94 | refs/heads/master | 2020-03-27T07:58:05.713290 | 2019-03-08T15:03:05 | 2019-03-08T15:03:05 | 146,210,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | from String import is_palindrome_permutation
def test_is_palindrome_permutation():
str = 'Tact Coa'
assert True == is_palindrome_permutation.is_palindrome_permutation(str)
str = 'Tact oCoa'
assert True == is_palindrome_permutation.is_palindrome_permutation(str)
str = 'Tact Ca'
assert True == is_palindrome_permutation.is_palindrome_permutation(str)\
str = 'Duck Duck Go'
assert False == is_palindrome_permutation.is_palindrome_permutation(str)
str = 'tactcoapapa'
assert True == is_palindrome_permutation.is_palindrome_permutation(str) | [
"cbevavy@datto.com"
] | cbevavy@datto.com |
1e6091a1a4a76c441f86675983dbc6b2e6409305 | 61f8250b3c0567a460bc7cfd8720e910170bcc4f | /naiveBayseModel/Calculator/calc/generateInputDataBase.py | 2402ede0a33446bb364439e69a9e4e5d2588126b | [] | no_license | Lucas-Armand/murphy-machine-learning-chapter-3-number-game | 85adad4cf88d994efc22939c83ee2185987a02a7 | a6da9e160097c1ab1d31a6b96be36e01fc191b17 | refs/heads/master | 2022-01-23T20:21:16.583021 | 2019-07-31T05:18:27 | 2019-07-31T05:18:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,838 | py | import math
import numpy as np
# Criando inputs:
powers_of_2 = [ index**2 for index in range(1,101) if index**2<100 and index**2>0]
powers_of_3 = [ index**3 for index in range(1,101) if index**3<100 and index**3>0]
powers_of_4 = [ index**4 for index in range(1,101) if index**4<100 and index**4>0]
powers_of_5 = [ index**5 for index in range(1,101) if index**5<100 and index**5>0]
powers_of_6 = [ index**6 for index in range(1,101) if index**6<100 and index**6>0]
powers_of_7 = [ index**7 for index in range(1,101) if index**7<100 and index**7>0]
powers_of_8 = [ index**8 for index in range(1,101) if index**8<100 and index**8>0]
powers_of_9 = [ index**9 for index in range(1,101) if index**9<100 and index**9>0]
powers_of_10 = [ index**10 for index in range(1,101) if index**10<100 and index**10>0]
even = [ 2*index for index in range(1,101) if 2*index<100 and 2*index>0]
odd = [ 2*index-1 for index in range(1,101) if 2*index-1<100 and 2*index-1>0]
factorial = [ math.factorial(index) for index in range(1,101) if math.factorial(index)<100 and math.factorial(index)>0]
ends_in_0 = [ (index)*10 + 0 for index in range(1,101) if (index)*10 + 0<100 and (index)*10 + 0>0]
ends_in_1 = [ (index-1)*10 + 1 for index in range(1,101) if (index-1)*10 + 1<100 and (index-1)*10 + 1>0]
ends_in_2 = [ (index-1)*10 + 2 for index in range(1,101) if (index-1)*10 + 2<100 and (index-1)*10 + 2>0]
ends_in_3 = [ (index-1)*10 + 3 for index in range(1,101) if (index-1)*10 + 3<100 and (index-1)*10 + 3>0]
ends_in_4 = [ (index-1)*10 + 4 for index in range(1,101) if (index-1)*10 + 4<100 and (index-1)*10 + 4>0]
ends_in_5 = [ (index-1)*10 + 5 for index in range(1,101) if (index-1)*10 + 5<100 and (index-1)*10 + 5>0]
ends_in_6 = [ (index-1)*10 + 6 for index in range(1,101) if (index-1)*10 + 6<100 and (index-1)*10 + 6>0]
ends_in_7 = [ (index-1)*10 + 7 for index in range(1,101) if (index-1)*10 + 7<100 and (index-1)*10 + 7>0]
ends_in_8 = [ (index-1)*10 + 8 for index in range(1,101) if (index-1)*10 + 8<100 and (index-1)*10 + 8>0]
ends_in_9 = [ (index-1)*10 + 9 for index in range(1,101) if (index-1)*10 + 9<100 and (index-1)*10 + 9>0]
multiples_of_3 = [ index*3 for index in range(1,101) if index*3<100 and index*3>0]
multiples_of_4 = [ index*4 for index in range(1,101) if index*4<100 and index*4>0]
multiples_of_5 = [ index*5 for index in range(1,101) if index*5<100 and index*5>0]
multiples_of_6 = [ index*6 for index in range(1,101) if index*6<100 and index*6>0]
multiples_of_7 = [ index*7 for index in range(1,101) if index*7<100 and index*7>0]
multiples_of_8 = [ index*8 for index in range(1,101) if index*8<100 and index*8>0]
multiples_of_9 = [ index*9 for index in range(1,101) if index*9<100 and index*9>0]
powers_of_2 = [ 2**index for index in range(1,101) if 2**index<100 and 2**index>0]
powers_of_3 = [ 3**index for index in range(1,101) if 3**index<100 and 3**index>0]
powers_of_4 = [ 4**index for index in range(1,101) if 4**index<100 and 4**index>0]
pi = [3,1,4,1,5,9,2,6,5,2,5,8,9,7,9,3,2,3,8,4,6,2,6]
fibonati = [1,1,2,3,5,8,13,21,34,55,89]
primes = [1,2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
# transformando sets para o formato table:
powers_of_2 =[powers_of_2.count(i)/len(powers_of_2) for i in range(101)]
powers_of_3 =[powers_of_3.count(i)/len(powers_of_3) for i in range(101)]
powers_of_4 =[powers_of_4.count(i)/len(powers_of_4) for i in range(101)]
powers_of_5 =[powers_of_5.count(i)/len(powers_of_5) for i in range(101)]
powers_of_6 =[powers_of_6.count(i)/len(powers_of_6) for i in range(101)]
powers_of_7 =[powers_of_7.count(i)/len(powers_of_7) for i in range(101)]
powers_of_8 =[powers_of_8.count(i)/len(powers_of_8) for i in range(101)]
powers_of_9 =[powers_of_9.count(i)/len(powers_of_9) for i in range(101)]
powers_of_10 =[powers_of_10.count(i)/len(powers_of_10) for i in range(101)]
even =[even.count(i)/len(even) for i in range(101)]
odd =[odd.count(i)/len(odd) for i in range(101)]
factorial =[factorial.count(i)/len(factorial) for i in range(101)]
ends_in_0 =[ends_in_0.count(i)/len(ends_in_0) for i in range(101)]
ends_in_1 =[ends_in_1.count(i)/len(ends_in_1) for i in range(101)]
ends_in_2 =[ends_in_2.count(i)/len(ends_in_2) for i in range(101)]
ends_in_3 =[ends_in_3.count(i)/len(ends_in_3) for i in range(101)]
ends_in_4 =[ends_in_4.count(i)/len(ends_in_4) for i in range(101)]
ends_in_5 =[ends_in_5.count(i)/len(ends_in_5) for i in range(101)]
ends_in_6 =[ends_in_6.count(i)/len(ends_in_6) for i in range(101)]
ends_in_7 =[ends_in_7.count(i)/len(ends_in_7) for i in range(101)]
ends_in_8 =[ends_in_8.count(i)/len(ends_in_8) for i in range(101)]
ends_in_9 =[ends_in_9.count(i)/len(ends_in_9) for i in range(101)]
multiples_of_3 =[multiples_of_3.count(i)/len(multiples_of_3) for i in range(101)]
multiples_of_4 =[multiples_of_4.count(i)/len(multiples_of_4) for i in range(101)]
multiples_of_5 =[multiples_of_5.count(i)/len(multiples_of_5) for i in range(101)]
multiples_of_6 =[multiples_of_6.count(i)/len(multiples_of_6) for i in range(101)]
multiples_of_7 =[multiples_of_7.count(i)/len(multiples_of_7) for i in range(101)]
multiples_of_8 =[multiples_of_8.count(i)/len(multiples_of_8) for i in range(101)]
multiples_of_9 =[multiples_of_9.count(i)/len(multiples_of_9) for i in range(101)]
pi =[pi.count(i)/len(pi) for i in range(101)]
fibonati =[fibonati.count(i)/len(fibonati) for i in range(101)]
primes =[primes.count(i)/len(primes) for i in range(101)]
# criando objetos
h = Hypoteses(name = "set of powers of 2 integers",prior = 0.1, table = str(powers_of_2))
h.save()
h = Hypoteses(name = "set of powers of 3 integers",prior = 0.01, table = str(powers_of_3))
h.save()
h = Hypoteses(name = "set of powers of 4 integers",prior = 0.01, table = str(powers_of_4))
h.save()
h = Hypoteses(name = "set of powers of 5 integers",prior = 0.01, table = str(powers_of_5))
h.save()
h = Hypoteses(name = "set of powers of 6 integers",prior = 0.01, table = str(powers_of_6))
h.save()
h = Hypoteses(name = "set of powers of 7 integers",prior = 0.01, table = str(powers_of_7))
h.save()
h = Hypoteses(name = "set of powers of 8 integers",prior = 0.01, table = str(powers_of_8))
h.save()
h = Hypoteses(name = "set of powers of 9 integers",prior = 0.01, table = str(powers_of_9))
h.save()
h = Hypoteses(name = "set of powers of 10 integers",prior = 0.01, table = str(powers_of_10))
h.save()
h = Hypoteses(name = "set of even integers",prior = 0.5, table = str(even))
h.save()
h = Hypoteses(name = "set of odd integers",prior = 0.5, table = str(odd))
h.save()
h = Hypoteses(name = "set of factorial integers",prior = 0.1, table = str(factorial))
h.save()
h = Hypoteses(name = "set of ends in 0 integers",prior = 0.1, table = str(ends_in_0))
h.save()
h = Hypoteses(name = "set of ends in 1 integers",prior = 0.1, table = str(ends_in_1))
h.save()
h = Hypoteses(name = "set of ends in 2 integers",prior = 0.1, table = str(ends_in_2))
h.save()
h = Hypoteses(name = "set of ends in 3 integers",prior = 0.1, table = str(ends_in_3))
h.save()
h = Hypoteses(name = "set of ends in 4 integers",prior = 0.1, table = str(ends_in_4))
h.save()
h = Hypoteses(name = "set of ends in 5 integers",prior = 0.1, table = str(ends_in_5))
h.save()
h = Hypoteses(name = "set of ends in 6 integers",prior = 0.1, table = str(ends_in_6))
h.save()
h = Hypoteses(name = "set of ends in 7 integers",prior = 0.1, table = str(ends_in_7))
h.save()
h = Hypoteses(name = "set of ends in 8 integers",prior = 0.1, table = str(ends_in_8))
h.save()
h = Hypoteses(name = "set of ends in 9 integers",prior = 0.1, table = str(ends_in_9))
h.save()
h = Hypoteses(name = "set of multiples of 3 integers",prior = 0.1, table = str(multiples_of_3))
h.save()
h = Hypoteses(name = "set of multiples of 4 integers",prior = 0.1, table = str(multiples_of_4))
h.save()
h = Hypoteses(name = "set of multiples of 5 integers",prior = 0.1, table = str(multiples_of_5))
h.save()
h = Hypoteses(name = "set of multiples of 6 integers",prior = 0.1, table = str(multiples_of_6))
h.save()
h = Hypoteses(name = "set of multiples of 7 integers",prior = 0.1, table = str(multiples_of_7))
h.save()
h = Hypoteses(name = "set of multiples of 8 integers",prior = 0.1, table = str(multiples_of_8))
h.save()
h = Hypoteses(name = "set of multiples of 9 integers",prior = 0.1, table = str(multiples_of_9))
h.save()
h = Hypoteses(name = "set of algorithms pi integers",prior = 0.1, table = str(pi))
h.save()
h = Hypoteses(name = "set of fibonati",prior = 0.1, table = str(fibonati))
h.save()
h = Hypoteses(name = "set of primes",prior = 0.1, table = str(primes))
h.save()
| [
"noreply@github.com"
] | Lucas-Armand.noreply@github.com |
0d8d94a7e0d4531e4f23418512049d17661a3fa8 | 746b445f70568d9560392d32f74db76efb5a5a19 | /botcommands/pi.py | 4b7fed8c377c1928220f8ccea49e16251d2d23f6 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | JokerQyou/bot | 6cbfb5ccea1f38c398c7311e4a7ecc04ef7ee188 | 1d0140c7b7fa7921b37da306f772d1b19654b46d | refs/heads/master | 2020-12-24T06:35:43.738445 | 2015-08-21T14:05:28 | 2015-08-21T14:05:28 | 39,199,031 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | # coding: utf-8
from config import pi_command
@pi_command
def pi(msg=None, debug=False):
pass
| [
"Joker.Qyou@gmail.com"
] | Joker.Qyou@gmail.com |
f940567509d57c12c3c53e8a941436c2f4034e03 | ef5bafe7e740da575e8207684dff492d6b04b6e8 | /python/broadlinkweb/broadlinkweb/__init__.py | 6461fc3a05977c86c39da3862a15979ef7616b30 | [] | no_license | janstenpickle/broadlink-web | 402f591f58b0384aa578d6cefa184e6450779eab | 6bd7c0534069aef4bbcb160cc2cf2d8b664857c9 | refs/heads/master | 2021-01-20T22:11:39.102614 | 2018-01-22T20:18:17 | 2018-01-22T20:53:19 | 101,804,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,575 | py | import os
import broadlink
import yaml
from flask import Flask, g
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
app.config.from_object('broadlinkweb.default_settings')
app.config.from_envvar('BROADLINKWEB_SETTINGS')
if not app.debug:
import logging
from logging.handlers import TimedRotatingFileHandler
# https://docs.python.org/3.6/library/logging.handlers.html#timedrotatingfilehandler
file_handler = TimedRotatingFileHandler(os.path.join(app.config['LOG_DIR'], 'broadlinkweb.log'), 'midnight')
file_handler.setLevel(logging.WARNING)
file_handler.setFormatter(logging.Formatter('<%(asctime)s> <%(levelname)s> %(message)s'))
app.logger.addHandler(file_handler)
def configure_device():
host = (app.config['DEVICE_HOST'], 80)
mac = bytearray.fromhex(app.config['DEVICE_MAC'].replace(':', ''))
device = broadlink.rm(host=host, mac=mac)
device.auth()
return device
def get_device():
if not hasattr(g, 'device'):
g.device = configure_device()
return g.device
def get_data_dir():
return app.config['DATA_DIR']
def get_commands_dir():
return os.path.join(get_data_dir(), 'commands')
def get_macros_dir():
return os.path.join(get_data_dir(), 'macros')
def get_config_file(name):
return os.path.join(get_data_dir(), name + '.yaml')
def get_config(name):
config = open(get_config_file(name), 'r')
return yaml.load(config)
def write_config(name, config):
with open(get_config_file(name), 'w') as config_file:
yaml.dump(data, config)
import broadlinkweb.views
| [
"jansen.chris@gmail.com"
] | jansen.chris@gmail.com |
da72ad4262e7f932b872b51eb8316e48381f5d2d | 528bd6c511b07472b1c6cc00a9ff91f6faa9dbcc | /libvirt/depl2/nginx/apps/lotylda_mng/modules/users_mod.py | 573c249949f941e5ce5ee15944f54c36a7cb4b9d | [] | no_license | tbots/docs | 15448fdfdcb10b50a958de262e2c5f9396d9bb76 | 8443141926cf4bd3949d94eae20f70963802ab5b | refs/heads/master | 2020-08-17T03:16:14.686338 | 2019-10-12T16:34:18 | 2019-10-12T16:34:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,511 | py | # -*- coding: utf-8 -*-
from gluon import current
import mng
import copy
def getUserItems(project_id, user_id):
'''Return dic of user's dasbhboards, reports, measures by user_id'''
items_dic = {'dasbhboards':[],
'reports':[],
'measures':[]
}
items_dic_empty = copy.deepcopy(items_dic)
# get users dashboards
dashs_q = "SELECT dashboard_id, dashboard_name FROM userspace.dashboards WHERE created_user = '%s'"%user_id
dashs = mng.executeRemoteSQL(project_id, dashs_q , True, True, False)
if len(dashs):
for d in dashs:
items_dic['dasbhboards'].append((d['dashboard_id'],d['dashboard_name']))
# get users reports
reps_q = "SELECT report_id, report_name FROM userspace.reports WHERE created_user = '%s'"%user_id
reps = mng.executeRemoteSQL(project_id, reps_q , True, True, False)
if len(reps):
for r in reps:
items_dic['reports'].append((r['report_id'],r['report_name']))
# get users measures
meas_q = "SELECT measure_id, measure_name FROM engine.measures WHERE created_user = '%s'"%user_id
meas = mng.executeRemoteSQL(project_id, meas_q , True, True, False)
if len(dashs):
for m in meas:
items_dic['measures'].append((m['measure_id'],m['measure_name']))
if items_dic == items_dic_empty:
return {}
else:
return items_dic
| [
"oleg.sergiyuk@gmail.com"
] | oleg.sergiyuk@gmail.com |
d6974c5cab337554aae9ea6e6488efef275a738b | 075f4add816176981ae5744ee806e5fb3d99952d | /wisdompets/adoptions/templatetags/adoptionsfilters.py | 2826c39c696098e93cda5e6dfc271cec4049729f | [] | no_license | myanryers/lil-django | 90d6f885e33d0d0e3ab23f53517e541f5c473c67 | 0cf9dcddecb3316c41a08742791b205763ed812a | refs/heads/main | 2023-06-26T16:47:36.066076 | 2021-07-28T16:30:36 | 2021-07-28T16:30:36 | 386,709,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | from django import template
register = template.Library()
@register.filter
def get_attr(obj, attr):
"""Gets `attr` on the given class object `obj`."""
return getattr(obj, attr, None)
@register.filter
def split(value, delim=" "):
"""Split the `value` string on `delim`."""
return value.split(delim)
| [
"myersry@us.ibm.com"
] | myersry@us.ibm.com |
bf220ec9e1b3b975f52a72cf90820138d48076d9 | 6e046530f2b1af0427eb62ea5dfbaa22d5237d13 | /gammaDIME.py | 5f729d7f9fd14c83201d4aa9df356454336a8101 | [] | no_license | tonellolab/capacity-approaching-autoencoders | 6280ed3e2e1cd4f84849560b5cacb62c6ef81b0e | 8cbee1c3ef397e43c31e92d1c4daff6223a950a8 | refs/heads/master | 2023-07-14T16:17:03.702755 | 2021-08-16T15:06:40 | 2021-08-16T15:06:40 | 358,267,025 | 4 | 3 | null | 2021-08-16T15:06:40 | 2021-04-15T13:21:37 | Python | UTF-8 | Python | false | false | 256 | py | from keras import backend as K
# gamma-DIME loss
def gamma_dime_loss(args):
# define the parameter gamma
gamma = 1
t_xy = args[0]
t_xy_bar = args[1]
loss = -(gamma*K.mean(K.log(t_xy)) - K.mean(K.pow(t_xy_bar, gamma))+1)
return loss | [
"noreply@github.com"
] | tonellolab.noreply@github.com |
5d298ff21e6afdadc21bc2fbe07ae4710da9daab | 38c56fc953fe0357c37583adccb4e0f4a2527889 | /tutorial/snippets/serializers.py | 2cf614b0e3a3ad79b43a3922455a17e34f9bb093 | [] | no_license | GritTsai/Quanto | 8ff8c6c8e37eb5f63a153ec81cd8e28a1477b843 | 37c5f2769d1da406d9a901faa4e364027fdf9d69 | refs/heads/master | 2021-01-23T02:00:24.335310 | 2018-02-05T16:11:01 | 2018-02-05T16:11:01 | 85,957,943 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from rest_framework import serializers
from snippets.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
from django.contrib.auth.models import User
class SnippetSerializer(serializers.ModelSerializer):
class Meta:
model = Snippet
fields = ('id', 'title', 'code', 'linenos', 'language', 'style')
owner = serializers.ReadOnlyField(source='owner.username')
class UserSerializer(serializers.ModelSerializer):
snippets = serializers.PrimaryKeyRelatedField(many=True, queryset=Snippet.objects.all())
class Meta:
model = User
fields = ('id', 'username', 'snippets') | [
"role0523@gmail.com"
] | role0523@gmail.com |
34169bde45dda89682996308a8ccec44842e2387 | 3d6faaf3df00343d43a595ec30ecae9d7f4e7d79 | /lib/assertions.py | 19357c98cf43c355ed20596d2161249df9cf0e36 | [] | no_license | softester-git/LearnQA_PythonAPI | 5b87847f2d8083206b2df5699ce7863eb07c64c8 | 9797d724f6f352a1a33253accc71347c476f7690 | refs/heads/master | 2023-08-11T07:30:12.123582 | 2021-10-09T04:37:26 | 2021-10-09T04:37:26 | 406,547,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,288 | py | from requests import Response
import json
class Assertions:
@staticmethod
def assert_json_value_by_name(response: Response, name, expected_value, error_message):
try:
response_as_dict = response.json()
except json.JSONDecodeError:
assert False, f"Response is not in JSON format. Response text is '{response.text}'"
assert name in response_as_dict, f"Response JSON does not have key {name}"
assert response_as_dict[name] == expected_value, error_message
@staticmethod
def assert_json_has_key(response: Response, name):
try:
response_as_dict = response.json()
except json.JSONDecodeError:
assert False, f"Response is not in JSON format. Response text is '{response.text}'"
assert name in response_as_dict, f"Response JSON does not have key {name}"
@staticmethod
def assert_json_has_keys(response: Response, names: list):
try:
response_as_dict = response.json()
except json.JSONDecodeError:
assert False, f"Response is not in JSON format. Response text is '{response.text}'"
for name in names:
assert name in response_as_dict, f"Response JSON does not have key {name}"
@staticmethod
def assert_json_has_no_key(response: Response, name):
try:
response_as_dict = response.json()
except json.JSONDecodeError:
assert False, f"Response is not in JSON format. Response text is '{response.text}'"
assert name not in response_as_dict, f"Response JSON shouldn`t not have key {name}. But it`s present"
@staticmethod
def assert_json_has_no_keys(response: Response, names: list):
try:
response_as_dict = response.json()
except json.JSONDecodeError:
assert False, f"Response is not in JSON format. Response text is '{response.text}'"
for name in names:
assert name not in response_as_dict, f"Response JSON does not have key {name}"
@staticmethod
def assert_code_status(response: Response, expected_status_code):
assert response.status_code == expected_status_code, \
f"Unexpected status code. Expected {expected_status_code}. Actual {response.status_code}" | [
"softester@yandex.ru"
] | softester@yandex.ru |
924ae65f20072b085f7fdd454b10903b4ecfb6c2 | 73b7eb7d35080dc651c5a175dc93c9a963dd72d7 | /funciones_lambda/lambda_uno.py | f4e162a7b228673d1bf8b8bc56e8e5fa1a91aa0f | [] | no_license | jegiraldp/python | 90e0eea855f7fe30e11e91eb1fd735c33f1eeb48 | 440348356c86a957bd65c9f6ad632c557c2dbc3e | refs/heads/master | 2023-09-03T17:54:35.423108 | 2023-08-13T23:32:08 | 2023-08-13T23:32:08 | 159,452,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | def cuadrados(num):
return num ** 2
print(cuadrados(3))
cuadrado= lambda x: x**2
print(cuadrado(2))
lambdaUno = lambda x: True if(x**2) >=10 else False
print(lambdaUno(5)) | [
"jegiraldp@gmail.com"
] | jegiraldp@gmail.com |
448999205226670ad837088b271340afbbc8ca80 | dc03f8304a56ec41b57086bf1412e36224048b5c | /run_visualizations_novelty.py | c379dfe427edd4612823b2ef0181d0c346c3b507 | [] | no_license | raulsenaferreira/PRDC_2021_Evaluation_module | 715001948ff72a5e9443004429919a83f78074f8 | 896125b7bfccb5ecf5482a1216bcc657764590ab | refs/heads/main | 2023-08-27T11:57:00.132854 | 2021-10-18T19:24:17 | 2021-10-18T19:24:17 | 301,345,580 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,273 | py | import os
import argparse
from config import plot_pos_neg_comparison
from config import eval_sm_performance
from config import eval_sm_impact_on_the_system
from tensorflow.keras.datasets import cifar10, mnist
from src import plot_functions as pf
from src import util
import neptune_config as npte
import numpy as np
# Entry point: generates the evaluation plots and LaTeX-style tables used to
# compare novelty/OOD-detection safety monitors (ALOOC, OOB, ODIN) from
# experiment readouts logged in Neptune.
# Usage: python <script> <config_id> <path_for_saving_plots>
if __name__ == "__main__":
	parser = argparse.ArgumentParser()
	parser.add_argument("config_id", type=int, help="ID for a set of pre-defined visualizations")
	parser.add_argument("path_for_saving_plots", help="Root path for saving visualizations")
	args = parser.parse_args()
	exp_type = 'novelty_detection'
	# Safety-monitor (SM) methods compared throughout the plots/tables.
	names = ["ALOOC", "OOB", "ODIN"]
	# Number of classes for each (ID, OOD) dataset pair; one row per pair,
	# in the order given by the inline comments.
	classes_ID_OOD = [
	[43, 62], # gtsrb + btsc
	[43, 10], # gtsrb + cifar10
	[10, 43] # cifar10 + gtsrb
	]
	# Test-set sizes for reference (ID + OOD instances combined):
	# gtsrb+btsc = 26600 images for test (4005 ood instances)
	# gtsrb+cifar10 = 72600 images for test (60000 ood instances)
	# cifar10+gtsrb = 61800 images for test (51800 ood instances)
	# plotting statistics: critical-difference diagram over the benchmark results
	if args.config_id == 0:
		num_datasets = 6 # benchmark datasets
		# avg ranks:
		# ALOOC = 3 + 3 + 3
		# OOB = 1 + 2 + 2
		# ODIN = 1 + 1 + 2
		# Hard-coded average ranks (the per-dataset ranks listed above averaged
		# over 3 datasets), one entry per monitor in `names` order.
		avranks = [3, 1.66, 1.33]
		pf.plot_critical_difference(names, avranks, num_datasets)
	# experiments ODIN
	if args.config_id == 1:
		# Test-set sizes per dataset pair (see comments above).
		# NOTE(review): `instances` is never used in this branch — confirm it can be removed.
		instances = [26600, 72600, 61800]
		# Neptune experiment IDs for the ODIN runs, one per dataset pair.
		# NOTE(review): per these inline comments the order here (cifar10+gtsrb
		# first) does NOT match the order of `classes_ID_OOD` above (gtsrb+btsc
		# first), yet the loop below pairs them by index `i` — verify the pairing.
		arr_exp = [
		'NOV-10', # cifar10 + gtsrb
		'NOV-12', # gtsrb + btsc
		'NOV-13' # gtsrb + cifar10
		]
		# All outputs for this experiment type go under <root>/novelty_detection.
		path_for_saving_plots = os.path.join(args.path_for_saving_plots, exp_type)
		path_for_load_neptune = exp_type.replace('_', '-') # correcting for loading neptune experiments (Neptune project names use '-')
		project = npte.neptune_init(path_for_load_neptune)
		experiments = project.get_experiments(arr_exp)
		# readouts: per-experiment .npy artifacts downloaded from Neptune
		arr_ml_pred = util.load_artifact('arr_classification_pred.npy', experiments)
		arr_ml_true = util.load_artifact('arr_classification_true.npy', experiments)
		arr_detection_SM = util.load_artifact('arr_detection_SM.npy', experiments)
		arr_detection_true = util.load_artifact('arr_detection_true.npy', experiments)
		arr_reaction_SM = util.load_artifact('arr_reaction_SM.npy', experiments)
		arr_reaction_true = util.load_artifact('arr_reaction_true.npy', experiments)
		# Grouped as (prediction, ground truth) pairs for the plotting helpers.
		readouts_ML = [arr_ml_pred, arr_ml_true]
		readouts_SM_detection = [arr_detection_SM, arr_detection_true]
		readouts_SM_reaction = [arr_reaction_SM, arr_reaction_true]
		#############################
		######## one dataset each time
		#############################
		#'''
		# NOTE(review): `exp_id` and `indices_experiments` are not used in the
		# active code below (only inside a disabled block) — confirm.
		exp_id = 0
		indices_experiments = [0]
		# For each dataset pair, emit one "impact on the system" table and one
		# "monitor performance" table (tables 7-9 and 10-12 respectively).
		for i in range(3):
			# SM's impact on the system
			label = 'table_{}'.format(7+i)
			caption = 'Table {}: comparing the impact of ODIN-based monitors for the datasets divided into ID and OOD.'.format(7+i)
			classes_ID, classes_OOD = classes_ID_OOD[i][0], classes_ID_OOD[i][1]
			eval_sm_impact_on_the_system.plot1([i], classes_ID, classes_OOD, readouts_ML, readouts_SM_reaction, 
				names, label, caption, path_for_saving_plots, is_pytorch=True)
			#############################
			# SM results
			label = 'table_{}'.format(10+i)
			caption = 'Table {}: comparing ODIN-based monitors for the datasets divided into ID and OOD.'.format(10+i)
			eval_sm_performance.plot1([i], readouts_SM_detection, 
				names, label, caption, path_for_saving_plots)
			#############################
			#############################
		#'''
		# The triple-quoted blocks below are intentionally disabled code paths
		# (alternative visualizations kept for reference, toggled by hand).
		'''
		#############################
		# results across datasets: gtsrb + btsc, cifar10 + gtsrb, gtsrb + cifar10
		#############################
		arr_exp_dict = {}
		arr_exp_dict.update({names[0]: [experiments[0]]}) #gtsrb + btsc
		arr_exp_dict.update({names[1]: [experiments[1]]}) #cifar10 + gtsrb
		arr_exp_dict.update({names[2]: [experiments[2]]}) #gtsrb + cifar10
		# SM's impact
		eval_sm_impact_on_the_system.plot2(arr_exp_dict, names, path_for_saving_plots)
		#############################
		# SM results
		eval_sm_performance.plot2(arr_exp_dict, path_for_saving_plots)
		#############################
		#############################
		'''
		#############################
		# Specific metrics for ID x OOD detection from the SM
		#eval_sm_performance.plot3(arr_exp, names, label, caption, path_for_saving_plots, path_for_load_neptune)
		#############################
		#############################
		'''
		# varables for plot_B
		datasets = ['GTSRB', 'CIFAR-10']
		#indices_experiments = {datasets[0]: [0, 1, 2, 3], datasets[1]: [4, 5, 6, 7]}
		indices_experiments = {datasets[0]: [4, 5, 6, 7], datasets[1]: [8, 9, 10, 11]}
		classes_ID = {datasets[0]: 43, datasets[1]: 10}
		label, caption = 'table_99', 'Table 1: MCC for measuring the overall impact of data-based SM in the system.'
		eval_sm_impact_on_the_system.plot1_B(datasets, indices_experiments, classes_ID, readouts_ML, readouts_SM_reaction, 
			names, label, caption, path_for_saving_plots)
		'''
		#plot_pos_neg_comparison.plot(arr_exp, names, caption, path_for_saving_plots)
		'''
		(x_train, y_train), (x_test, y_test) = cifar10.load_data() # mnist.load_data()
		dataset_name = 'CIFAR-10' # 'MNIST'
		model_name = 'resNet_'+dataset_name+'.h5' # 'leNet_'+dataset_name+'.h5'
		#path to load the model
		models_folder = os.path.join("aux_data", "temp")
		model_file = os.path.join(models_folder, model_name)
		pf.visualize_distributions(x_train, y_train, dataset_name, model_file)
		'''
		#pf.visualize_pair_distributions(x_train, y_train, dataset_name, model_file\
		#	x_train_2, y_train_2, dataset_name_2, model_file_2)
	# experiments OOB
	elif args.config_id == 2:
		# Test-set sizes, repeated for each of the 12 OOB variants per pair
		# (12 x gtsrb+btsc, 12 x gtsrb+cifar10, 12 x cifar10+gtsrb = 36 entries).
		# NOTE(review): `instances` is never used in the visible code — confirm.
		a = [26600]*12
		b = [72600]*12
		c = [61800]*12
		instances = np.concatenate((a,b,c), axis=None)
		#print(len(instances))
		# Neptune experiment IDs for the OOB variants of each dataset pair.
		# Each label appears to encode <dim.-reduction method> + <two
		# hyperparameters> (e.g. 'pca + 3 + 1.35') — confirm the exact meaning
		# against the experiment definitions.
		arr_exp1 = [
		## gtsrb + btsc
		'NOV-50', # simple + 17 + 1
		'NOV-51', # simple + 17 + 1.1
		'NOV-52', # simple + 17 + 1.35
		'NOV-53', # simple + 3 + 1
		'NOV-54', # isomap + 3 + 1
		'NOV-55', # pca + 3 + 1
		'NOV-56', # simple + 3 + 1.1
		'NOV-57', # isomap + 3 + 1.1
		'NOV-58', # pca + 3 + 1.1
		'NOV-59', # simple + 3 + 1.35
		'NOV-60', # isomap + 3 + 1.35
		'NOV-61' # pca + 3 + 1.35
		]
		# Human-readable labels, in the same order as `arr_exp1`.
		names1 = ['simple + 17 + 1', 'simple + 17 + 1.1', 'simple + 17 + 1.35', 'simple + 3 + 1', 'isomap + 3 + 1', 'pca + 3 + 1',
		 'simple + 3 + 1.1','isomap + 3 + 1.1', 'pca + 3 + 1.1', 'simple + 3 + 1.35', 'isomap + 3 + 1.35', 'pca + 3 + 1.35']
		arr_exp2 = [
		## gtsrb + cifar10
		'NOV-7', # simple + 3 + 1
		'NOV-8', # isomap + 3 + 1
		'NOV-9', # pca + 3 + 1
		'NOV-23', # simple + 0 + 1.1
		'NOV-24', # isomap + 0 + 1.1
		'NOV-25', # pca + 0 + 1.1
		'NOV-26', # simple + 0 + 1
		'NOV-27', # isomap + 0 + 1
		'NOV-28', # pca + 0 + 1
		'NOV-32', # simple + 0 + 1.35
		'NOV-33', # isomap + 0 + 1.35
		'NOV-34' # pca + 0 + 1.35
		]
		# Human-readable labels, in the same order as `arr_exp2`.
		names2 = ['simple + 3 + 1', 'isomap + 3 + 1', 'pca + 3 + 1', 'simple + 0 + 1.1', 'isomap + 0 + 1.1', 'pca + 0 + 1.1', 'simple + 0 + 1',
		 'isomap + 0 + 1', 'pca + 0 + 1', 'simple + 0 + 1.35', 'isomap + 0 + 1.35', 'pca + 0 + 1.35']
		arr_exp3 = [
		## cifar10 + gtsrb
		'NOV-4', # simple + 3 + 1
		'NOV-5', # isomap + 3 + 1
		'NOV-6', # pca + 3 + 1
		'NOV-41', # simple + 5 + 1
		'NOV-42', # isomap + 5 + 1
		'NOV-43', # pca + 5 + 1
		'NOV-44', # simple + 5 + 1.1
		'NOV-45', # isomap + 5 + 1.1
		'NOV-46', # pca + 5 + 1.1
		'NOV-47', # simple + 5 + 1.35
		'NOV-48', # isomap + 5 + 1.35
		'NOV-49' # pca + 5 + 1.35
		]
		# Human-readable labels, in the same order as `arr_exp3`.
		names3 = ['simple + 3 + 1', 'isomap + 3 + 1', 'pca + 3 + 1', 'simple + 5 + 1', 'isomap + 5 + 1', 'pca + 5 + 1', 'simple + 5 + 1.1',
		 'isomap + 5 + 1.1', 'pca + 5 + 1.1', 'simple + 5 + 1.35', 'isomap + 5 + 1.35', 'pca + 5 + 1.35']
		# All outputs for this experiment type go under <root>/novelty_detection.
		path_for_saving_plots = os.path.join(args.path_for_saving_plots, exp_type)
		path_for_load_neptune = exp_type.replace('_', '-') # correcting for loading neptune experiments (Neptune project names use '-')
		project = npte.neptune_init(path_for_load_neptune)
indices_experiments = list(range(0, 12))
#############################
######## gtsrb + btsc
#############################
dataset = 'GTSRB + BTSC'
exp_id = 0
experiments = project.get_experiments(arr_exp1)
# readouts
arr_ml_pred = util.load_artifact('arr_classification_pred.npy', experiments)
arr_ml_true = util.load_artifact('arr_classification_true.npy', experiments)
arr_detection_SM = util.load_artifact('arr_detection_SM.npy', experiments)
arr_detection_true = util.load_artifact('arr_detection_true.npy', experiments)
arr_reaction_SM = util.load_artifact('arr_reaction_SM.npy', experiments)
arr_reaction_true = util.load_artifact('arr_reaction_true.npy', experiments)
'''
arr_ml_time = util.load_values(experiments)
arr_sm_time = util.load_values(experiments)
arr_total_time = util.load_values(experiments)
arr_total_memory = util.load_values(experiments)
'''
readouts_ML = [arr_ml_pred, arr_ml_true]
readouts_SM_detection = [arr_detection_SM, arr_detection_true]
readouts_SM_reaction = [arr_reaction_SM, arr_reaction_true]
# SM's impact on the system
label = 'table_1'
caption = 'Table 1: comparing the impact of data-based monitors for GTSRB as ID dataset, and BTSC as OOD dataset.'
classes_ID, classes_OOD = classes_ID_OOD[exp_id][0], classes_ID_OOD[exp_id][1]
eval_sm_impact_on_the_system.plot1(indices_experiments, classes_ID, classes_OOD, readouts_ML, readouts_SM_reaction,
names1, label, caption, path_for_saving_plots)
#############################
# SM results
label = 'table_4'
caption = 'Table 4: comparing data-based monitors for GTSRB as ID dataset, and BTSC as OOD dataset.'
eval_sm_performance.plot1(indices_experiments, readouts_SM_detection,
names1, label, caption, path_for_saving_plots)
# Time
#readouts_time = util.load_time_info(experiments, instances, indices_experiments, path_for_saving_plots, dataset, names)
#############################
#############################
#############################
######## gtsrb + cifar10
#############################
dataset = 'GTSRB + CIFAR-10'
exp_id = 1
experiments = project.get_experiments(arr_exp2)
# readouts
arr_ml_pred = util.load_artifact('arr_classification_pred.npy', experiments)
arr_ml_true = util.load_artifact('arr_classification_true.npy', experiments)
arr_detection_SM = util.load_artifact('arr_detection_SM.npy', experiments)
arr_detection_true = util.load_artifact('arr_detection_true.npy', experiments)
arr_reaction_SM = util.load_artifact('arr_reaction_SM.npy', experiments)
arr_reaction_true = util.load_artifact('arr_reaction_true.npy', experiments)
'''
arr_ml_time = util.load_values(experiments)
arr_sm_time = util.load_values(experiments)
arr_total_time = util.load_values(experiments)
arr_total_memory = util.load_values(experiments)
'''
readouts_ML = [arr_ml_pred, arr_ml_true]
readouts_SM_detection = [arr_detection_SM, arr_detection_true]
readouts_SM_reaction = [arr_reaction_SM, arr_reaction_true]
# SM's impact on the system
label = 'table_2'
caption = 'Table 2: comparing the impact of data-based monitors for GTSRB as ID dataset, and CIFAR-10 as OOD dataset.'
classes_ID, classes_OOD = classes_ID_OOD[exp_id][0], classes_ID_OOD[exp_id][1]
eval_sm_impact_on_the_system.plot1(indices_experiments, classes_ID, classes_OOD, readouts_ML, readouts_SM_reaction,
names2, label, caption, path_for_saving_plots)
#############################
# SM results
label = 'table_5'
caption = 'Table 5: comparing data-based monitors for GTSRB as ID dataset, and CIFAR-10 as OOD dataset.'
eval_sm_performance.plot1(indices_experiments, readouts_SM_detection,
names2, label, caption, path_for_saving_plots)
# Time
#readouts_time = util.load_time_info(experiments, instances, indices_experiments, path_for_saving_plots, dataset, names)
#############################
#############################
#############################
######## cifar10 + gtsrb
#############################
dataset = 'CIFAR-10 + GTSRB'
#indices_experiments = list(range(0, 10))
exp_id = 2
experiments = project.get_experiments(arr_exp3)
# readouts
arr_ml_pred = util.load_artifact('arr_classification_pred.npy', experiments)
arr_ml_true = util.load_artifact('arr_classification_true.npy', experiments)
arr_detection_SM = util.load_artifact('arr_detection_SM.npy', experiments)
arr_detection_true = util.load_artifact('arr_detection_true.npy', experiments)
arr_reaction_SM = util.load_artifact('arr_reaction_SM.npy', experiments)
arr_reaction_true = util.load_artifact('arr_reaction_true.npy', experiments)
'''
arr_ml_time = util.load_values(experiments)
arr_sm_time = util.load_values(experiments)
arr_total_time = util.load_values(experiments)
arr_total_memory = util.load_values(experiments)
'''
readouts_ML = [arr_ml_pred, arr_ml_true]
readouts_SM_detection = [arr_detection_SM, arr_detection_true]
readouts_SM_reaction = [arr_reaction_SM, arr_reaction_true]
# SM's impact on the system
label = 'table_3'
caption = 'Table 3: comparing the impact of data-based monitors for CIFAR-10 as ID dataset, and GTSRB as OOD dataset.'
classes_ID, classes_OOD = classes_ID_OOD[exp_id][0], classes_ID_OOD[exp_id][1]
eval_sm_impact_on_the_system.plot1(indices_experiments, classes_ID, classes_OOD, readouts_ML, readouts_SM_reaction,
names3, label, caption, path_for_saving_plots)
#############################
# SM results
label = 'table_6'
caption = 'Table 6: comparing data-based monitors for CIFAR-10 as ID dataset, and GTSRB as OOD dataset.'
eval_sm_performance.plot1(indices_experiments, readouts_SM_detection,
names3, label, caption, path_for_saving_plots)
# Time
#readouts_time = util.load_time_info(experiments, instances, indices_experiments, path_for_saving_plots, dataset, names)
#############################
#############################
'''
#############################
# results across datasets: gtsrb + btsc, cifar10 + gtsrb, gtsrb + cifar10
#############################
arr_exp_dict = {}
arr_exp_dict.update({names[0]: [experiments[0], experiments[4], experiments[8]]}) #oob
arr_exp_dict.update({names[1]: [experiments[1], experiments[5], experiments[9]]}) #oob isomap
arr_exp_dict.update({names[2]: [experiments[2], experiments[6], experiments[10]]}) #oob pca
arr_exp_dict.update({names[3]: [experiments[3], experiments[7], experiments[11]]}) #odin
# SM's impact
#eval_sm_impact_on_the_system.plot2(arr_exp_dict, names, path_for_saving_plots)
#############################
# SM results
#eval_sm_performance.plot2(arr_exp_dict, path_for_saving_plots)
#############################
#############################
#############################
# Specific metrics for ID x OOD detection from the SM
#eval_sm_performance.plot3(arr_exp, names, label, caption, path_for_saving_plots, path_for_load_neptune)
#############################
#############################
# variables for plot_B
datasets = ['GTSRB', 'CIFAR-10']
#indices_experiments = {datasets[0]: [0, 1, 2, 3], datasets[1]: [4, 5, 6, 7]}
indices_experiments = {datasets[0]: [4, 5, 6, 7], datasets[1]: [8, 9, 10, 11]}
classes_ID = {datasets[0]: 43, datasets[1]: 10}
label, caption = 'table_99', 'Table 1: MCC for measuring the overall impact of data-based SM in the system.'
eval_sm_impact_on_the_system.plot1_B(datasets, indices_experiments, classes_ID, readouts_ML, readouts_SM_reaction,
names, label, caption, path_for_saving_plots)
'''
#plot_pos_neg_comparison.plot(arr_exp, names, caption, path_for_saving_plots)
'''
(x_train, y_train), (x_test, y_test) = cifar10.load_data() # mnist.load_data()
dataset_name = 'CIFAR-10' # 'MNIST'
model_name = 'resNet_'+dataset_name+'.h5' # 'leNet_'+dataset_name+'.h5'
#path to load the model
models_folder = os.path.join("aux_data", "temp")
model_file = os.path.join(models_folder, model_name)
pf.visualize_distributions(x_train, y_train, dataset_name, model_file)
'''
#pf.visualize_pair_distributions(x_train, y_train, dataset_name, model_file\
# x_train_2, y_train_2, dataset_name_2, model_file_2)
# alooc
elif args.config_id == 3:
arr_exp = [
## cifar10 + gtsrb
'NOV-62', # rmsprop
'NOV-63', # adam
## gtsrb + cifar10
'NOV-64', # rmsprop
'NOV-65', # adam
## gtsrb + btsc
'NOV-66', # rmsprop
'NOV-67' # adam
]
instances = [61800, 61800, 72600, 72600, 26600, 26600]
path_for_saving_plots = os.path.join(args.path_for_saving_plots, exp_type)
path_for_load_neptune = exp_type.replace('_', '-') # correcting for loading neptune experiments
project = npte.neptune_init(path_for_load_neptune)
experiments = project.get_experiments(arr_exp)
# readouts
arr_ml_pred = util.load_artifact('arr_classification_pred.npy', experiments)
arr_ml_true = util.load_artifact('arr_classification_true.npy', experiments)
arr_detection_SM = util.load_artifact('arr_detection_SM.npy', experiments)
arr_detection_true = util.load_artifact('arr_detection_true.npy', experiments)
arr_reaction_SM = util.load_artifact('arr_reaction_SM.npy', experiments)
arr_reaction_true = util.load_artifact('arr_reaction_true.npy', experiments)
readouts_ML = [arr_ml_pred, arr_ml_true]
readouts_SM_detection = [arr_detection_SM, arr_detection_true]
readouts_SM_reaction = [arr_reaction_SM, arr_reaction_true]
#############################
######## gtsrb + btsc
#############################
dataset = 'CIFAR-10 + GTSRB'
exp_id = 0
names = ['rmsprop', 'adam']
indices_experiments = list(range(0, 2))
# SM's impact on the system
label = 'table_7'
caption = 'Table 7: comparing the impact of alooc-based monitors for CIFAR-10 as ID dataset, and GTSRB as OOD dataset.'
classes_ID, classes_OOD = classes_ID_OOD[exp_id][0], classes_ID_OOD[exp_id][1]
eval_sm_impact_on_the_system.plot1(indices_experiments, classes_ID, classes_OOD, readouts_ML, readouts_SM_reaction,
names, label, caption, path_for_saving_plots)
#############################
# SM results
label = 'table_10'
caption = 'Table 10: comparing alooc-based monitors for CIFAR-10 as ID dataset, and GTSRB as OOD dataset.'
eval_sm_performance.plot1(indices_experiments, readouts_SM_detection,
names, label, caption, path_for_saving_plots)
# Time
readouts_time = util.load_time_info(experiments, instances, indices_experiments, path_for_saving_plots, dataset, names)
#############################
#############################
#############################
######## gtsrb + cifar10
#############################
dataset = 'GTSRB + CIFAR-10'
exp_id = 1
names = ['rmsprop', 'adam']
indices_experiments = list(range(2, 4))
# SM's impact on the system
label = 'table_8'
caption = 'Table 8: comparing the impact of alooc-based monitors for GTSRB as ID dataset, and CIFAR-10 as OOD dataset.'
classes_ID, classes_OOD = classes_ID_OOD[exp_id][0], classes_ID_OOD[exp_id][1]
eval_sm_impact_on_the_system.plot1(indices_experiments, classes_ID, classes_OOD, readouts_ML, readouts_SM_reaction,
names, label, caption, path_for_saving_plots)
#############################
# SM results
label = 'table_11'
caption = 'Table 11: comparing alooc-based monitors for GTSRB as ID dataset, and CIFAR-10 as OOD dataset.'
eval_sm_performance.plot1(indices_experiments, readouts_SM_detection,
names, label, caption, path_for_saving_plots)
# Time
readouts_time = util.load_time_info(experiments, instances, indices_experiments, path_for_saving_plots, dataset, names)
#############################
#############################
#############################
######## cifar10 + gtsrb
#############################
dataset = 'GTSRB + BTSC'
exp_id = 2
names = ['rmsprop', 'adam']
indices_experiments = list(range(4, 6))
# SM's impact on the system
label = 'table_9'
caption = 'Table 9: comparing the impact of alooc-based monitors for GTSRB as ID dataset, and BTSC as OOD dataset.'
classes_ID, classes_OOD = classes_ID_OOD[exp_id][0], classes_ID_OOD[exp_id][1]
eval_sm_impact_on_the_system.plot1(indices_experiments, classes_ID, classes_OOD, readouts_ML, readouts_SM_reaction,
names, label, caption, path_for_saving_plots)
#############################
# SM results
label = 'table_12'
caption = 'Table 12: comparing alooc-based monitors for GTSRB as ID dataset, and BTSC as OOD dataset.'
eval_sm_performance.plot1(indices_experiments, readouts_SM_detection,
names, label, caption, path_for_saving_plots)
# Time
readouts_time = util.load_time_info(experiments, instances, indices_experiments, path_for_saving_plots, dataset, names)
#############################
############################# | [
"raulsenaferreira@gmail.com"
] | raulsenaferreira@gmail.com |
1556be169d22189943f753594145fa1744b6e4c6 | de0341aef4a487d1be271fc2bc3b3b60258ef6b0 | /programmers/Level 3/길 찾기 게임/solve 복사본.py | 1e15158ddbf3d18c418d9dfaebc5cb5c21cbdcb6 | [] | no_license | aver1001/github-practice | 485d8695cd4b9aa374c6b069832b3c0999fc4b6c | 62ab6de80e8246b627b880a7aff5d668b0fea889 | refs/heads/main | 2023-08-24T09:49:35.498578 | 2021-10-13T23:57:18 | 2021-10-13T23:57:18 | 379,813,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,535 | py | preorders = []
postorders = []
class Node:
    """A binary-tree node storing ``data`` and optional child links."""

    def __init__(self, data, left=None, right=None):
        # Children default to None so leaf nodes need no extra arguments.
        self.data = data
        self.left = left
        self.right = right
def preorder(node):
    """Root -> left -> right traversal appending node ids to the
    module-level ``preorders`` list."""
    if node is None:
        return
    # Visit the current node first; its id is stored at data[0].
    preorders.append(node.data[0])
    preorder(node.left)
    preorder(node.right)
def postorder(node):
    """Left -> right -> root traversal appending node ids to the
    module-level ``postorders`` list."""
    if node is None:
        return
    postorder(node.left)
    postorder(node.right)
    # Visit the current node last; its id is stored at data[0].
    postorders.append(node.data[0])
def solution(nodeinfo):
    """Build a binary search tree from node coordinates and return its
    pre-order and post-order traversals.

    Args:
        nodeinfo: list of ``[x, y]`` coordinates; node ids are the
            1-based positions in the input list.

    Returns:
        ``[preorder ids, postorder ids]`` — results are appended to the
        module-level ``preorders``/``postorders`` lists, so repeated
        calls accumulate (NOTE(review): not idempotent).
    """
    table = []
    for idx, (x,y) in enumerate(nodeinfo,start = 1):
        table.append([idx,x,y])
    # Sort top-to-bottom (larger y first), ties left-to-right: the first
    # entry becomes the root and later nodes insert below existing ones.
    table.sort(key = lambda x:(-x[2],x[1]))
    root = Node(table.pop(0))
    for idx,x,y in table:
        cur_node = root
        ## descend by x-coordinate, BST style
        while(True):
            # go left when the new node lies left of the current one
            if cur_node.data[1] > x:
                ## if the left slot is taken, keep descending
                if cur_node.left:
                    cur_node = cur_node.left
                    continue
                else:
                    cur_node.left = Node([idx,x,y])
                    break
            ## go right
            elif cur_node.data[1] < x:
                ## if the right slot is taken, keep descending
                if cur_node.right:
                    cur_node = cur_node.right
                    continue
                else:
                    cur_node.right = Node([idx,x,y])
                    break
    preorder(root)
    postorder(root)
    return [preorders,postorders]
solution([[5,3],[11,5],[13,3],[3,5],[6,1],[1,3],[8,6],[7,2],[2,2]],)
| [
"69618305+aver1001@users.noreply.github.com"
] | 69618305+aver1001@users.noreply.github.com |
afb152b826001990479508de7d7e34cea0e4f637 | 50b886e9565468e74a64a5563ef1d9f65f393fb1 | /problem_5.py | e6250dabb8a7b1d8ffd3404dc87ca4f9f24d817e | [] | no_license | ben-hunter-hansen/ProjectEuler | 8f4a3131a09b95118b0d9336230202e704646e90 | 5a307a71bf12b0f41fb9738334b8a9b446d51859 | refs/heads/master | 2021-01-17T09:01:35.451297 | 2016-03-30T02:09:12 | 2016-03-30T02:09:12 | 37,669,575 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | # Project Euler Problem #5
# -----------------------------------------
# 2520 is the smallest number that can be divided by each of the numbers
# from 1 to 10 without any remainder.
#
# What is the smallest positive number that is
# evenly divisible by all of the numbers from 1 to 20?
from functools import reduce
def divide_evenly_from(n, min, max):
    """Return ``n`` if it is evenly divisible by every integer in
    ``[min, max]`` (inclusive); otherwise return ``None``.

    The truthy-``n`` / falsy-``None`` contract matches the original so
    callers using ``if divide_evenly_from(...):`` keep working.

    Args:
        n (int): candidate number.
        min (int): smallest divisor to check (shadows the builtin, kept
            for interface compatibility).
        max (int): largest divisor to check.
    """
    # The original recursion returned ``n`` at ``max == min`` WITHOUT
    # testing divisibility by ``min`` (off-by-one), and fell through to
    # an implicit None otherwise; both cases are now explicit.
    for divisor in range(min, max + 1):
        if n % divisor != 0:
            return None
    return n
def main():
    """Print the smallest positive integer divisible by all of 1..20.

    The answer is lcm(1, ..., 20). Computing it directly replaces the
    original brute-force scan (hundreds of millions of candidates, each
    with a depth-20 recursion) and the Python-2-only ``print`` statement
    and ``xrange`` that made the script fail on Python 3. The two output
    lines are unchanged.
    """
    from math import gcd  # local import keeps the module header untouched

    # lcm(a, b) = a * b // gcd(a, b); fold across 1..20.
    smallest = reduce(lambda a, b: a * b // gcd(a, b), range(1, 21))
    print("smallest positive number divisible by all numbers 1-20 is:")
    print(smallest)
main()
| [
"ben.hunter.hansen@gmail.com"
] | ben.hunter.hansen@gmail.com |
5f7a882ac493f5606e6abb240272852b980809e0 | bd053d2bf5444ab8f0b8b0ff56772fa75281e38d | /pennylane/ops/qubit/parametric_ops.py | 555e4e926946ab402e54ca0a390ea633b1db97ed | [
"Apache-2.0"
] | permissive | johannesjmeyer/pennylane | bcb762583e95537b04a9b38756369571f957d2e5 | 8f602312baea107d5248267fb3dc1593722810e0 | refs/heads/master | 2023-07-11T18:21:31.086858 | 2021-08-14T19:21:42 | 2021-08-14T19:21:42 | 341,190,636 | 3 | 1 | Apache-2.0 | 2021-06-16T09:01:58 | 2021-02-22T12:19:10 | Python | UTF-8 | Python | false | false | 40,493 | py | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This submodule contains the discrete-variable quantum operations that are the
core parameterized gates.
"""
# pylint:disable=abstract-method,arguments-differ,protected-access
import cmath
import functools
import math
import numpy as np
import pennylane as qml
from pennylane.operation import AnyWires, DiagonalOperation, Operation
from pennylane.ops.qubit.non_parametric_ops import PauliX, PauliY, PauliZ, Hadamard
from pennylane.templates.decorator import template
from pennylane.utils import expand, pauli_eigs
from pennylane.wires import Wires
INV_SQRT2 = 1 / math.sqrt(2)
class RX(Operation):
    r"""RX(phi, wires)
    The single qubit X rotation

    .. math:: R_x(\phi) = e^{-i\phi\sigma_x/2} = \begin{bmatrix}
                \cos(\phi/2) & -i\sin(\phi/2) \\
                -i\sin(\phi/2) & \cos(\phi/2)
            \end{bmatrix}.

    **Details:**

    * Number of wires: 1
    * Number of parameters: 1
    * Gradient recipe: :math:`\frac{d}{d\phi}f(R_x(\phi)) = \frac{1}{2}\left[f(R_x(\phi+\pi/2)) - f(R_x(\phi-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`R_x(\phi)`.

    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_params = 1
    num_wires = 1
    par_domain = "R"
    is_composable_rotation = True
    basis = "X"
    grad_method = "A"
    generator = [PauliX, -1 / 2]

    @classmethod
    def _matrix(cls, *params):
        (theta,) = params
        half = theta / 2
        cos = math.cos(half)
        isin = -1j * math.sin(half)
        # Matrix of exp(-i * theta/2 * X).
        return np.array([[cos, isin], [isin, cos]])

    def adjoint(self):
        # Inverting a rotation negates its angle.
        return RX(-self.data[0], wires=self.wires)

    def _controlled(self, wire):
        # Adding a control wire promotes RX to CRX.
        CRX(*self.parameters, wires=wire + self.wires)

    def single_qubit_rot_angles(self):
        # RX(\theta) = RZ(-\pi/2) RY(\theta) RZ(\pi/2)
        return [np.pi / 2, self.data[0], -np.pi / 2]
class RY(Operation):
    r"""RY(phi, wires)
    The single qubit Y rotation

    .. math:: R_y(\phi) = e^{-i\phi\sigma_y/2} = \begin{bmatrix}
                \cos(\phi/2) & -\sin(\phi/2) \\
                \sin(\phi/2) & \cos(\phi/2)
            \end{bmatrix}.

    **Details:**

    * Number of wires: 1
    * Number of parameters: 1
    * Gradient recipe: :math:`\frac{d}{d\phi}f(R_y(\phi)) = \frac{1}{2}\left[f(R_y(\phi+\pi/2)) - f(R_y(\phi-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`R_y(\phi)`.

    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_params = 1
    num_wires = 1
    par_domain = "R"
    is_composable_rotation = True
    basis = "Y"
    grad_method = "A"
    generator = [PauliY, -1 / 2]

    @classmethod
    def _matrix(cls, *params):
        (theta,) = params
        half = theta / 2
        cos, sin = math.cos(half), math.sin(half)
        # Real-valued rotation matrix exp(-i * theta/2 * Y).
        return np.array([[cos, -sin], [sin, cos]])

    def adjoint(self):
        # Inverting a rotation negates its angle.
        return RY(-self.data[0], wires=self.wires)

    def _controlled(self, wire):
        # Adding a control wire promotes RY to CRY.
        CRY(*self.parameters, wires=wire + self.wires)

    def single_qubit_rot_angles(self):
        # RY(\theta) = RZ(0) RY(\theta) RZ(0)
        return [0.0, self.data[0], 0.0]
class RZ(DiagonalOperation):
    r"""RZ(phi, wires)
    The single qubit Z rotation

    .. math:: R_z(\phi) = e^{-i\phi\sigma_z/2} = \begin{bmatrix}
                e^{-i\phi/2} & 0 \\
                0 & e^{i\phi/2}
            \end{bmatrix}.

    **Details:**

    * Number of wires: 1
    * Number of parameters: 1
    * Gradient recipe: :math:`\frac{d}{d\phi}f(R_z(\phi)) = \frac{1}{2}\left[f(R_z(\phi+\pi/2)) - f(R_z(\phi-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`R_z(\phi)`.

    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_params = 1
    num_wires = 1
    par_domain = "R"
    is_composable_rotation = True
    basis = "Z"
    grad_method = "A"
    generator = [PauliZ, -1 / 2]

    @classmethod
    def _matrix(cls, *params):
        (theta,) = params
        phase = cmath.exp(-0.5j * theta)
        # Diagonal matrix diag(e^{-i theta/2}, e^{i theta/2}).
        return np.array([[phase, 0], [0, phase.conjugate()]])

    @classmethod
    def _eigvals(cls, *params):
        (theta,) = params
        phase = cmath.exp(-0.5j * theta)
        return np.array([phase, phase.conjugate()])

    def adjoint(self):
        # Inverting a rotation negates its angle.
        return RZ(-self.data[0], wires=self.wires)

    def _controlled(self, wire):
        # Adding a control wire promotes RZ to CRZ.
        CRZ(*self.parameters, wires=wire + self.wires)

    def single_qubit_rot_angles(self):
        # RZ(\theta) = RZ(\theta) RY(0) RZ(0)
        return [self.data[0], 0.0, 0.0]
class PhaseShift(DiagonalOperation):
    r"""PhaseShift(phi, wires)
    Arbitrary single qubit local phase shift

    .. math:: R_\phi(\phi) = e^{i\phi/2}R_z(\phi) = \begin{bmatrix}
                1 & 0 \\
                0 & e^{i\phi}
            \end{bmatrix}.

    **Details:**

    * Number of wires: 1
    * Number of parameters: 1
    * Gradient recipe: :math:`\frac{d}{d\phi}f(R_\phi(\phi)) = \frac{1}{2}\left[f(R_\phi(\phi+\pi/2)) - f(R_\phi(\phi-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`R_{\phi}(\phi)`.

    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_params = 1
    num_wires = 1
    par_domain = "R"
    is_composable_rotation = True
    basis = "Z"
    grad_method = "A"
    generator = [np.array([[0, 0], [0, 1]]), 1]

    @classmethod
    def _matrix(cls, *params):
        (phi,) = params
        # Only the |1> amplitude acquires the phase.
        return np.array([[1, 0], [0, cmath.exp(1j * phi)]])

    @classmethod
    def _eigvals(cls, *params):
        (phi,) = params
        return np.array([1, cmath.exp(1j * phi)])

    @staticmethod
    def decomposition(phi, wires):
        # Up to a global phase, a phase shift is an RZ rotation.
        return [RZ(phi, wires=wires)]

    def adjoint(self):
        # Inverting a phase shift negates its angle.
        return PhaseShift(-self.data[0], wires=self.wires)

    def _controlled(self, wire):
        # Adding a control wire gives the controlled phase shift.
        ControlledPhaseShift(*self.parameters, wires=wire + self.wires)

    def single_qubit_rot_angles(self):
        # PhaseShift(\theta) = RZ(\theta) RY(0) RZ(0)
        return [self.data[0], 0.0, 0.0]
class ControlledPhaseShift(DiagonalOperation):
    r"""ControlledPhaseShift(phi, wires)
    A qubit controlled phase shift.

    .. math:: CR_\phi(\phi) = \begin{bmatrix}
                1 & 0 & 0 & 0 \\
                0 & 1 & 0 & 0 \\
                0 & 0 & 1 & 0 \\
                0 & 0 & 0 & e^{i\phi}
            \end{bmatrix}.

    .. note:: The first wire provided corresponds to the **control qubit**.

    **Details:**

    * Number of wires: 2
    * Number of parameters: 1
    * Gradient recipe: :math:`\frac{d}{d\phi}f(CR_\phi(\phi)) = \frac{1}{2}\left[f(CR_\phi(\phi+\pi/2)) - f(CR_\phi(\phi-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`CR_{\phi}(\phi)`.

    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int]): the wire the operation acts on
    """
    num_params = 1
    num_wires = 2
    par_domain = "R"
    is_composable_rotation = True
    basis = "Z"
    grad_method = "A"
    generator = [np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]), 1]

    @classmethod
    def _matrix(cls, *params):
        (phi,) = params
        # Only the |11> amplitude acquires the phase.
        return np.diag([1, 1, 1, cmath.exp(1j * phi)])

    @classmethod
    def _eigvals(cls, *params):
        phase = cmath.exp(1j * params[0])
        return np.array([1, 1, 1, phase])

    @staticmethod
    def decomposition(phi, wires):
        control, target = wires[0], wires[1]
        # Standard CPhase decomposition into single-qubit phases and CNOTs.
        return [
            qml.PhaseShift(phi / 2, wires=control),
            qml.CNOT(wires=wires),
            qml.PhaseShift(-phi / 2, wires=target),
            qml.CNOT(wires=wires),
            qml.PhaseShift(phi / 2, wires=target),
        ]

    def adjoint(self):
        # Inverting the gate negates its angle.
        return ControlledPhaseShift(-self.data[0], wires=self.wires)

    @property
    def control_wires(self):
        # By convention the first wire is the control.
        return Wires(self.wires[0])
CPhase = ControlledPhaseShift
class Rot(Operation):
    r"""Rot(phi, theta, omega, wires)
    Arbitrary single qubit rotation

    .. math::
        R(\phi,\theta,\omega) = RZ(\omega)RY(\theta)RZ(\phi)= \begin{bmatrix}
        e^{-i(\phi+\omega)/2}\cos(\theta/2) & -e^{i(\phi-\omega)/2}\sin(\theta/2) \\
        e^{-i(\phi-\omega)/2}\sin(\theta/2) & e^{i(\phi+\omega)/2}\cos(\theta/2)
        \end{bmatrix}.

    **Details:**

    * Number of wires: 1
    * Number of parameters: 3
    * Gradient recipe: :math:`\frac{d}{d\phi}f(R(\phi, \theta, \omega)) = \frac{1}{2}\left[f(R(\phi+\pi/2, \theta, \omega)) - f(R(\phi-\pi/2, \theta, \omega))\right]`
      where :math:`f` is an expectation value depending on :math:`R(\phi, \theta, \omega)`.
      This gradient recipe applies for each angle argument :math:`\{\phi, \theta, \omega\}`.

    .. note::

        If the ``Rot`` gate is not supported on the targeted device, PennyLane
        will attempt to decompose the gate into :class:`~.RZ` and :class:`~.RY` gates.

    Args:
        phi (float): rotation angle :math:`\phi`
        theta (float): rotation angle :math:`\theta`
        omega (float): rotation angle :math:`\omega`
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_params = 3
    num_wires = 1
    par_domain = "R"
    is_composable_rotation = True
    grad_method = "A"

    @classmethod
    def _matrix(cls, *params):
        phi, theta, omega = params
        c = math.cos(theta / 2)
        s = math.sin(theta / 2)
        # Phase factors contributed by the two RZ rotations.
        sum_minus = cmath.exp(-0.5j * (phi + omega))
        sum_plus = cmath.exp(0.5j * (phi + omega))
        diff_minus = cmath.exp(-0.5j * (phi - omega))
        diff_plus = cmath.exp(0.5j * (phi - omega))
        return np.array(
            [
                [sum_minus * c, -diff_plus * s],
                [diff_minus * s, sum_plus * c],
            ]
        )

    @staticmethod
    def decomposition(phi, theta, omega, wires):
        # Euler-angle decomposition RZ(omega) RY(theta) RZ(phi).
        return [
            RZ(phi, wires=wires),
            RY(theta, wires=wires),
            RZ(omega, wires=wires),
        ]

    def adjoint(self):
        # (RZ(w) RY(t) RZ(p))^-1 = RZ(-p) RY(-t) RZ(-w): reverse order, negate angles.
        phi, theta, omega = self.parameters
        return Rot(-omega, -theta, -phi, wires=self.wires)

    def _controlled(self, wire):
        # Adding a control wire promotes Rot to CRot.
        CRot(*self.parameters, wires=wire + self.wires)

    def single_qubit_rot_angles(self):
        return self.data
class MultiRZ(DiagonalOperation):
    r"""MultiRZ(theta, wires)
    Arbitrary multi Z rotation.

    .. math::
        MultiRZ(\theta) = \exp(-i \frac{\theta}{2} Z^{\otimes n})

    **Details:**

    * Number of wires: Any
    * Number of parameters: 1
    * Gradient recipe: :math:`\frac{d}{d\theta}f(MultiRZ(\theta)) = \frac{1}{2}\left[f(MultiRZ(\theta +\pi/2)) - f(MultiRZ(\theta-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`MultiRZ(\theta)`.

    .. note::

        If the ``MultiRZ`` gate is not supported on the targeted device, PennyLane
        will decompose the gate using :class:`~.RZ` and :class:`~.CNOT` gates.

    Args:
        theta (float): rotation angle :math:`\theta`
        wires (Sequence[int] or int): the wires the operation acts on
    """
    num_params = 1
    num_wires = AnyWires
    par_domain = "R"
    grad_method = "A"

    @classmethod
    def _matrix(cls, theta, n):
        """Matrix representation of a MultiRZ gate.

        Args:
            theta (float): Rotation angle.
            n (int): Number of wires the rotation acts on. This has
                to be given explicitly in the static method as the
                wires object is not available.

        Returns:
            array[complex]: The matrix representation
        """
        # The gate is diagonal, so its matrix is just the eigenvalues on the diagonal.
        multi_Z_rot_eigs = MultiRZ._eigvals(theta, n)
        multi_Z_rot_matrix = np.diag(multi_Z_rot_eigs)
        return multi_Z_rot_matrix

    # Cache for the lazily-built generator; it depends on the per-instance
    # number of wires and so cannot be a simple class attribute.
    _generator = None

    @property
    def generator(self):
        if self._generator is None:
            # Generator Z^{\otimes n}: a diagonal of +/-1 entries, with prefactor -1/2.
            self._generator = [np.diag(pauli_eigs(len(self.wires))), -1 / 2]
        return self._generator

    @property
    def matrix(self):
        # Redefine the property here to pass additionally the number of wires to the ``_matrix`` method
        if self.inverse:
            # The matrix is diagonal, so there is no need to transpose
            return self._matrix(*self.parameters, len(self.wires)).conj()
        return self._matrix(*self.parameters, len(self.wires))

    @classmethod
    def _eigvals(cls, theta, n):
        # Eigenvalues e^{-i theta/2 * lambda} for each +/-1 eigenvalue lambda of Z^{\otimes n}.
        return np.exp(-1j * theta / 2 * pauli_eigs(n))

    @property
    def eigvals(self):
        # Redefine the property here to pass additionally the number of wires to the ``_eigvals`` method
        if self.inverse:
            return self._eigvals(*self.parameters, len(self.wires)).conj()
        return self._eigvals(*self.parameters, len(self.wires))

    @staticmethod
    @template
    def decomposition(theta, wires):
        # CNOT ladder collects the joint parity onto the first wire ...
        for i in range(len(wires) - 1, 0, -1):
            qml.CNOT(wires=[wires[i], wires[i - 1]])
        # ... a single RZ applies the phase ...
        RZ(theta, wires=wires[0])
        # ... and the reversed ladder uncomputes the parity.
        for i in range(len(wires) - 1):
            qml.CNOT(wires=[wires[i + 1], wires[i]])

    def adjoint(self):
        # Inverting the rotation negates its angle.
        return MultiRZ(-self.parameters[0], wires=self.wires)
class PauliRot(Operation):
r"""PauliRot(theta, pauli_word, wires)
Arbitrary Pauli word rotation.
.. math::
RP(\theta, P) = \exp(-i \frac{\theta}{2} P)
**Details:**
* Number of wires: Any
* Number of parameters: 2 (1 differentiable parameter)
* Gradient recipe: :math:`\frac{d}{d\theta}f(RP(\theta)) = \frac{1}{2}\left[f(RP(\theta +\pi/2)) - f(RP(\theta-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`RP(\theta)`.
.. note::
If the ``PauliRot`` gate is not supported on the targeted device, PennyLane
will decompose the gate using :class:`~.RX`, :class:`~.Hadamard`, :class:`~.RZ`
and :class:`~.CNOT` gates.
Args:
theta (float): rotation angle :math:`\theta`
pauli_word (string): the Pauli word defining the rotation
wires (Sequence[int] or int): the wire the operation acts on
"""
num_params = 2
num_wires = AnyWires
do_check_domain = False
par_domain = "R"
grad_method = "A"
_ALLOWED_CHARACTERS = "IXYZ"
_PAULI_CONJUGATION_MATRICES = {
"X": Hadamard._matrix(),
"Y": RX._matrix(np.pi / 2),
"Z": np.array([[1, 0], [0, 1]]),
}
def __init__(self, *params, wires=None, do_queue=True):
super().__init__(*params, wires=wires, do_queue=do_queue)
pauli_word = params[1]
if not PauliRot._check_pauli_word(pauli_word):
raise ValueError(
'The given Pauli word "{}" contains characters that are not allowed.'
" Allowed characters are I, X, Y and Z".format(pauli_word)
)
num_wires = 1 if isinstance(wires, int) else len(wires)
if not len(pauli_word) == num_wires:
raise ValueError(
"The given Pauli word has length {}, length {} was expected for wires {}".format(
len(pauli_word), num_wires, wires
)
)
@staticmethod
def _check_pauli_word(pauli_word):
"""Check that the given Pauli word has correct structure.
Args:
pauli_word (str): Pauli word to be checked
Returns:
bool: Whether the Pauli word has correct structure.
"""
return all(pauli in PauliRot._ALLOWED_CHARACTERS for pauli in pauli_word)
@classmethod
def _matrix(cls, *params):
theta = params[0]
pauli_word = params[1]
if not PauliRot._check_pauli_word(pauli_word):
raise ValueError(
'The given Pauli word "{}" contains characters that are not allowed.'
" Allowed characters are I, X, Y and Z".format(pauli_word)
)
# Simplest case is if the Pauli is the identity matrix
if pauli_word == "I" * len(pauli_word):
return np.exp(-1j * theta / 2) * np.eye(2 ** len(pauli_word))
# We first generate the matrix excluding the identity parts and expand it afterwards.
# To this end, we have to store on which wires the non-identity parts act
non_identity_wires, non_identity_gates = zip(
*[(wire, gate) for wire, gate in enumerate(pauli_word) if gate != "I"]
)
multi_Z_rot_matrix = MultiRZ._matrix(theta, len(non_identity_gates))
# now we conjugate with Hadamard and RX to create the Pauli string
conjugation_matrix = functools.reduce(
np.kron,
[PauliRot._PAULI_CONJUGATION_MATRICES[gate] for gate in non_identity_gates],
)
return expand(
conjugation_matrix.T.conj() @ multi_Z_rot_matrix @ conjugation_matrix,
non_identity_wires,
list(range(len(pauli_word))),
)
    # Lazily-computed generator cache; filled on first access of ``generator``.
    _generator = None
    @property
    def generator(self):
        """Generator of the Pauli-word rotation.

        Returns:
            list: a two-element list ``[matrix, -1/2]``, cached in
            ``self._generator`` after the first computation.
        """
        if self._generator is None:
            pauli_word = self.parameters[1]
            # Simplest case is if the Pauli is the identity matrix
            if pauli_word == "I" * len(pauli_word):
                self._generator = [np.eye(2 ** len(pauli_word)), -1 / 2]
                return self._generator
            # We first generate the matrix excluding the identity parts and expand it afterwards.
            # To this end, we have to store on which wires the non-identity parts act
            non_identity_wires, non_identity_gates = zip(
                *[(wire, gate) for wire, gate in enumerate(pauli_word) if gate != "I"]
            )
            # get MultiRZ's generator
            multi_Z_rot_generator = np.diag(pauli_eigs(len(non_identity_gates)))
            # now we conjugate with Hadamard and RX to create the Pauli string
            conjugation_matrix = functools.reduce(
                np.kron,
                [PauliRot._PAULI_CONJUGATION_MATRICES[gate] for gate in non_identity_gates],
            )
            self._generator = [
                expand(
                    conjugation_matrix.T.conj() @ multi_Z_rot_generator @ conjugation_matrix,
                    non_identity_wires,
                    list(range(len(pauli_word))),
                ),
                -1 / 2,
            ]
        return self._generator
@classmethod
def _eigvals(cls, theta, pauli_word):
# Identity must be treated specially because its eigenvalues are all the same
if pauli_word == "I" * len(pauli_word):
return np.exp(-1j * theta / 2) * np.ones(2 ** len(pauli_word))
return MultiRZ._eigvals(theta, len(pauli_word))
    @staticmethod
    @template
    def decomposition(theta, pauli_word, wires):
        """Decompose the Pauli-word rotation into basis changes and a MultiRZ.

        Each X is conjugated with a Hadamard and each Y with RX(±pi/2),
        mapping the rotation onto a MultiRZ acting on the non-identity wires.

        Args:
            theta (float): rotation angle
            pauli_word (str): the Pauli word, one character per wire
            wires (Sequence[int] or int): the wires the rotation acts on
        """
        # Catch cases when the wire is passed as a single int.
        if isinstance(wires, int):
            wires = [wires]
        # Check for identity and do nothing
        if pauli_word == "I" * len(wires):
            return
        active_wires, active_gates = zip(
            *[(wire, gate) for wire, gate in zip(wires, pauli_word) if gate != "I"]
        )
        # Rotate every active wire into the Z basis.
        for wire, gate in zip(active_wires, active_gates):
            if gate == "X":
                Hadamard(wires=[wire])
            elif gate == "Y":
                RX(np.pi / 2, wires=[wire])
        MultiRZ(theta, wires=list(active_wires))
        # Undo the basis change.
        for wire, gate in zip(active_wires, active_gates):
            if gate == "X":
                Hadamard(wires=[wire])
            elif gate == "Y":
                RX(-np.pi / 2, wires=[wire])
def adjoint(self):
return PauliRot(-self.parameters[0], self.parameters[1], wires=self.wires)
# Four term gradient recipe for controlled rotations
# (Appendix F, https://arxiv.org/abs/2104.05695):
#   df/dphi = c1 [f(phi + a) - f(phi - a)] - c2 [f(phi + b) - f(phi - b)]
# Each entry of the recipe is [coefficient, multiplier, shift].
c1 = INV_SQRT2 * (np.sqrt(2) + 1) / 4
c2 = INV_SQRT2 * (np.sqrt(2) - 1) / 4
a = np.pi / 2
b = 3 * np.pi / 2
four_term_grad_recipe = ([[c1, 1, a], [-c1, 1, -a], [-c2, 1, b], [c2, 1, -b]],)
class CRX(Operation):
    r"""CRX(phi, wires)
    The controlled-RX operator
    .. math::
        \begin{align}
            CR_x(\phi) &=
            \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 1 & 0 & 0\\
            0 & 0 & \cos(\phi/2) & -i\sin(\phi/2)\\
            0 & 0 & -i\sin(\phi/2) & \cos(\phi/2)
        \end{bmatrix}.
    \end{align}
    .. note:: The first wire provided corresponds to the **control qubit**.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 1
    * Gradient recipe: The controlled-RX operator satisfies a four-term parameter-shift rule
      (see Appendix F, https://arxiv.org/abs/2104.05695):
    .. math::
        \frac{d}{d\phi}f(CR_x(\phi)) = c_+ \left[f(CR_x(\phi+a)) - f(CR_x(\phi-a))\right] - c_- \left[f(CR_x(\phi+b)) - f(CR_x(\phi-b))\right]
    where :math:`f` is an expectation value depending on :math:`CR_x(\phi)`, and
    - :math:`a = \pi/2`
    - :math:`b = 3\pi/2`
    - :math:`c_{\pm} = (\sqrt{2} \pm 1)/{4\sqrt{2}}`
    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int]): the wire the operation acts on
    """
    num_params = 1
    num_wires = 2
    par_domain = "R"
    is_composable_rotation = True
    basis = "X"
    grad_method = "A"
    grad_recipe = four_term_grad_recipe
    # Generator: |1><1| (x) X, with prefactor -1/2.
    generator = [
        np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]),
        -1 / 2,
    ]
    @classmethod
    def _matrix(cls, *params):
        # Identity on the control-|0> block; the lower-right 2x2 block is RX(theta).
        theta = params[0]
        c = math.cos(theta / 2)
        # -i*sin(theta/2), written via sin(-x) = -sin(x)
        js = 1j * math.sin(-theta / 2)
        return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, c, js], [0, 0, js, c]])
    @staticmethod
    def decomposition(theta, wires):
        # Two-CNOT decomposition into uncontrolled RZ/RY gates on the target wire.
        decomp_ops = [
            RZ(np.pi / 2, wires=wires[1]),
            RY(theta / 2, wires=wires[1]),
            qml.CNOT(wires=wires),
            RY(-theta / 2, wires=wires[1]),
            qml.CNOT(wires=wires),
            RZ(-np.pi / 2, wires=wires[1]),
        ]
        return decomp_ops
    def adjoint(self):
        # Negating the angle inverts the rotation.
        return CRX(-self.data[0], wires=self.wires)
    @property
    def control_wires(self):
        # The first wire is the control.
        return Wires(self.wires[0])
class CRY(Operation):
    r"""CRY(phi, wires)
    The controlled-RY operator: an RY rotation on the target qubit, applied
    only when the control qubit (the first wire) is in state :math:`|1\rangle`.
    .. math::
        \begin{align}
            CR_y(\phi) &=
            \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 1 & 0 & 0\\
            0 & 0 & \cos(\phi/2) & -\sin(\phi/2)\\
            0 & 0 & \sin(\phi/2) & \cos(\phi/2)
        \end{bmatrix}.
    \end{align}
    **Details:**
    * Number of wires: 2
    * Number of parameters: 1
    * Gradient recipe: a four-term parameter-shift rule
      (see Appendix F, https://arxiv.org/abs/2104.05695):
    .. math::
        \frac{d}{d\phi}f(CR_y(\phi)) = c_+ \left[f(CR_y(\phi+a)) - f(CR_y(\phi-a))\right] - c_- \left[f(CR_y(\phi+b)) - f(CR_y(\phi-b))\right]
    for an expectation value :math:`f` depending on :math:`CR_y(\phi)`, with
    - :math:`a = \pi/2`
    - :math:`b = 3\pi/2`
    - :math:`c_{\pm} = (\sqrt{2} \pm 1)/{4\sqrt{2}}`
    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int]): the wire the operation acts on
    """
    num_params = 1
    num_wires = 2
    par_domain = "R"
    is_composable_rotation = True
    basis = "Y"
    grad_method = "A"
    grad_recipe = four_term_grad_recipe
    # Generator: |1><1| (x) Y, with prefactor -1/2.
    generator = [
        np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, -1j], [0, 0, 1j, 0]]),
        -1 / 2,
    ]

    @classmethod
    def _matrix(cls, *params):
        """Identity on the control-|0> block; RY(theta) on the lower-right block."""
        (theta,) = params
        cos_half = math.cos(theta / 2)
        sin_half = math.sin(theta / 2)
        mat = np.identity(4)
        mat[2, 2] = cos_half
        mat[2, 3] = -sin_half
        mat[3, 2] = sin_half
        mat[3, 3] = cos_half
        return mat

    @staticmethod
    def decomposition(theta, wires):
        """Two half-angle RY rotations on the target, interleaved with CNOTs."""
        half = theta / 2
        return [
            RY(half, wires=wires[1]),
            qml.CNOT(wires=wires),
            RY(-half, wires=wires[1]),
            qml.CNOT(wires=wires),
        ]

    def adjoint(self):
        """The inverse rotation, obtained by negating the angle."""
        (theta,) = self.data
        return CRY(-theta, wires=self.wires)

    @property
    def control_wires(self):
        """The control wire (first wire) as a ``Wires`` object."""
        return Wires(self.wires[0])
class CRZ(DiagonalOperation):
    r"""CRZ(phi, wires)
    The controlled-RZ operator: an RZ rotation on the target qubit, applied
    only when the control qubit is in state :math:`|1\rangle`.
    .. math::
        \begin{align}
             CR_z(\phi) &=
             \begin{bmatrix}
                1 & 0 & 0 & 0 \\
                0 & 1 & 0 & 0\\
                0 & 0 & e^{-i\phi/2} & 0\\
                0 & 0 & 0 & e^{i\phi/2}
            \end{bmatrix}.
    \end{align}
    .. note:: The subscripts of the operations in the formula refer to the wires they act on, e.g. 1 corresponds to the first element in ``wires`` that is the **control qubit**.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 1
    * Gradient recipe: a four-term parameter-shift rule
      (see Appendix F, https://arxiv.org/abs/2104.05695):
    .. math::
        \frac{d}{d\phi}f(CR_z(\phi)) = c_+ \left[f(CR_z(\phi+a)) - f(CR_z(\phi-a))\right] - c_- \left[f(CR_z(\phi+b)) - f(CR_z(\phi-b))\right]
    for an expectation value :math:`f` depending on :math:`CR_z(\phi)`, with
    - :math:`a = \pi/2`
    - :math:`b = 3\pi/2`
    - :math:`c_{\pm} = (\sqrt{2} \pm 1)/{4\sqrt{2}}`
    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int]): the wire the operation acts on
    """
    num_params = 1
    num_wires = 2
    par_domain = "R"
    is_composable_rotation = True
    basis = "Z"
    grad_method = "A"
    grad_recipe = four_term_grad_recipe
    # Generator: |1><1| (x) Z, with prefactor -1/2.
    generator = [
        np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]]),
        -1 / 2,
    ]

    @classmethod
    def _matrix(cls, *params):
        """Diagonal matrix diag(1, 1, e^{-i theta/2}, e^{i theta/2})."""
        (theta,) = params
        phase = cmath.exp(0.5j * theta)
        return np.diag([1, 1, phase.conjugate(), phase])

    @classmethod
    def _eigvals(cls, *params):
        """Eigenvalues [1, 1, e^{-i theta/2}, e^{i theta/2}]."""
        (theta,) = params
        phase = cmath.exp(0.5j * theta)
        return np.array([1, 1, phase.conjugate(), phase])

    @staticmethod
    def decomposition(lam, wires):
        """Two half-angle phase shifts on the target, interleaved with CNOTs."""
        half = lam / 2
        return [
            PhaseShift(half, wires=wires[1]),
            qml.CNOT(wires=wires),
            PhaseShift(-half, wires=wires[1]),
            qml.CNOT(wires=wires),
        ]

    def adjoint(self):
        """The inverse rotation, obtained by negating the angle."""
        (phi,) = self.data
        return CRZ(-phi, wires=self.wires)

    @property
    def control_wires(self):
        """The control wire (first wire) as a ``Wires`` object."""
        return Wires(self.wires[0])
class CRot(Operation):
    r"""CRot(phi, theta, omega, wires)
    The controlled-Rot operator
    .. math:: CR(\phi, \theta, \omega) = \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 1 & 0 & 0\\
            0 & 0 & e^{-i(\phi+\omega)/2}\cos(\theta/2) & -e^{i(\phi-\omega)/2}\sin(\theta/2)\\
            0 & 0 & e^{-i(\phi-\omega)/2}\sin(\theta/2) & e^{i(\phi+\omega)/2}\cos(\theta/2)
        \end{bmatrix}.
    .. note:: The first wire provided corresponds to the **control qubit**.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 3
    * Gradient recipe: The controlled-Rot operator satisfies a four-term parameter-shift rule
      (see Appendix F, https://arxiv.org/abs/2104.05695):
    .. math::
        \frac{d}{d\mathbf{x}_i}f(CR(\mathbf{x}_i)) = c_+ \left[f(CR(\mathbf{x}_i+a)) - f(CR(\mathbf{x}_i-a))\right] - c_- \left[f(CR(\mathbf{x}_i+b)) - f(CR(\mathbf{x}_i-b))\right]
    where :math:`f` is an expectation value depending on :math:`CR(\mathbf{x}_i)`, and
    - :math:`\mathbf{x} = (\phi, \theta, \omega)` and `i` is an index to :math:`\mathbf{x}`
    - :math:`a = \pi/2`
    - :math:`b = 3\pi/2`
    - :math:`c_{\pm} = (\sqrt{2} \pm 1)/{4\sqrt{2}}`
    Args:
        phi (float): rotation angle :math:`\phi`
        theta (float): rotation angle :math:`\theta`
        omega (float): rotation angle :math:`\omega`
        wires (Sequence[int]): the wire the operation acts on
    """
    num_params = 3
    num_wires = 2
    par_domain = "R"
    grad_method = "A"
    # One four-term recipe per parameter (phi, theta, omega).
    grad_recipe = four_term_grad_recipe * 3
    @classmethod
    def _matrix(cls, *params):
        # Identity on the control-|0> block; the lower-right 2x2 block is the
        # single-qubit Rot(phi, theta, omega) matrix (see class docstring).
        phi, theta, omega = params
        c = math.cos(theta / 2)
        s = math.sin(theta / 2)
        return np.array(
            [
                [1, 0, 0, 0],
                [0, 1, 0, 0],
                [
                    0,
                    0,
                    cmath.exp(-0.5j * (phi + omega)) * c,
                    -cmath.exp(0.5j * (phi - omega)) * s,
                ],
                [
                    0,
                    0,
                    cmath.exp(-0.5j * (phi - omega)) * s,
                    cmath.exp(0.5j * (phi + omega)) * c,
                ],
            ]
        )
    @staticmethod
    def decomposition(phi, theta, omega, wires):
        # Two-CNOT decomposition of a controlled single-qubit rotation into
        # uncontrolled RZ/RY gates on the target wire.
        decomp_ops = [
            RZ((phi - omega) / 2, wires=wires[1]),
            qml.CNOT(wires=wires),
            RZ(-(phi + omega) / 2, wires=wires[1]),
            RY(-theta / 2, wires=wires[1]),
            qml.CNOT(wires=wires),
            RY(theta / 2, wires=wires[1]),
            RZ(omega, wires=wires[1]),
        ]
        return decomp_ops
    def adjoint(self):
        # Rot(phi, theta, omega)^-1 = Rot(-omega, -theta, -phi): the angles
        # are negated AND reversed in order.
        phi, theta, omega = self.parameters
        return CRot(-omega, -theta, -phi, wires=self.wires)
class U1(Operation):
    r"""U1(phi)
    U1 gate.
    .. math:: U_1(\phi) = e^{i\phi/2}R_z(\phi) = \begin{bmatrix}
                1 & 0 \\
                0 & e^{i\phi}
            \end{bmatrix}.
    .. note::
        The ``U1`` gate is an alias for the phase shift operation :class:`~.PhaseShift`.
    **Details:**
    * Number of wires: 1
    * Number of parameters: 1
    * Gradient recipe: :math:`\frac{d}{d\phi}f(U_1(\phi)) = \frac{1}{2}\left[f(U_1(\phi+\pi/2)) - f(U_1(\phi-\pi/2))\right]`
      for an expectation value :math:`f` depending on :math:`U_1(\phi)`.
    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_params = 1
    num_wires = 1
    par_domain = "R"
    grad_method = "A"
    # Generator: |1><1| with prefactor +1.
    generator = [np.array([[0, 0], [0, 1]]), 1]

    @classmethod
    def _matrix(cls, *params):
        """Diagonal matrix diag(1, e^{i phi})."""
        (phi,) = params
        return np.diag([1, cmath.exp(1j * phi)])

    @staticmethod
    def decomposition(phi, wires):
        """U1 is exactly a phase shift."""
        return [PhaseShift(phi, wires=wires)]

    def adjoint(self):
        """The inverse gate, obtained by negating the angle."""
        (phi,) = self.data
        return U1(-phi, wires=self.wires)
class U2(Operation):
    r"""U2(phi, lambda, wires)
    U2 gate.
    .. math::
        U_2(\phi, \lambda) = \frac{1}{\sqrt{2}}\begin{bmatrix} 1 & -\exp(i \lambda)
        \\ \exp(i \phi) & \exp(i (\phi + \lambda)) \end{bmatrix}
    The :math:`U_2` gate is related to the single-qubit rotation :math:`R` (:class:`Rot`) and the
    :math:`R_\phi` (:class:`PhaseShift`) gates via the following relation:
    .. math::
        U_2(\phi, \lambda) = R_\phi(\phi+\lambda) R(\lambda,\pi/2,-\lambda)
    .. note::
        If the ``U2`` gate is not supported on the targeted device, PennyLane
        will attempt to decompose the gate into :class:`~.Rot` and :class:`~.PhaseShift` gates.
    **Details:**
    * Number of wires: 1
    * Number of parameters: 2
    * Gradient recipe: :math:`\frac{d}{d\phi}f(U_2(\phi, \lambda)) = \frac{1}{2}\left[f(U_2(\phi+\pi/2, \lambda)) - f(U_2(\phi-\pi/2, \lambda))\right]`
      where :math:`f` is an expectation value depending on :math:`U_2(\phi, \lambda)`.
      This gradient recipe applies for each angle argument :math:`\{\phi, \lambda\}`.
    Args:
        phi (float): azimuthal angle :math:`\phi`
        lambda (float): quantum phase :math:`\lambda`
        wires (Sequence[int] or int): the subsystem the gate acts on
    """
    num_params = 2
    num_wires = 1
    par_domain = "R"
    grad_method = "A"
    @classmethod
    def _matrix(cls, *params):
        # 1/sqrt(2) * [[1, -e^{i lam}], [e^{i phi}, e^{i (phi + lam)}]]
        phi, lam = params
        return INV_SQRT2 * np.array(
            [
                [1, -cmath.exp(1j * lam)],
                [cmath.exp(1j * phi), cmath.exp(1j * (phi + lam))],
            ]
        )
    @staticmethod
    def decomposition(phi, lam, wires):
        # U2(phi, lam) = PhaseShift(phi) PhaseShift(lam) Rot(lam, pi/2, -lam),
        # applied in list order (see the relation in the class docstring).
        decomp_ops = [
            Rot(lam, np.pi / 2, -lam, wires=wires),
            PhaseShift(lam, wires=wires),
            PhaseShift(phi, wires=wires),
        ]
        return decomp_ops
    def adjoint(self):
        # U2(phi, lam)^dagger = U2(pi - lam, pi - phi), angles taken mod 2*pi.
        phi, lam = self.parameters
        new_lam = (np.pi - phi) % (2 * np.pi)
        new_phi = (np.pi - lam) % (2 * np.pi)
        return U2(new_phi, new_lam, wires=self.wires)
class U3(Operation):
    r"""U3(theta, phi, lambda, wires)
    Arbitrary single qubit unitary.
    .. math::
        U_3(\theta, \phi, \lambda) = \begin{bmatrix} \cos(\theta/2) & -\exp(i \lambda)\sin(\theta/2) \\
        \exp(i \phi)\sin(\theta/2) & \exp(i (\phi + \lambda))\cos(\theta/2) \end{bmatrix}
    The :math:`U_3` gate is related to the single-qubit rotation :math:`R` (:class:`Rot`) and the
    :math:`R_\phi` (:class:`PhaseShift`) gates via the following relation:
    .. math::
        U_3(\theta, \phi, \lambda) = R_\phi(\phi+\lambda) R(\lambda,\theta,-\lambda)
    .. note::
        If the ``U3`` gate is not supported on the targeted device, PennyLane
        will attempt to decompose the gate into :class:`~.PhaseShift` and :class:`~.Rot` gates.
    **Details:**
    * Number of wires: 1
    * Number of parameters: 3
    * Gradient recipe: :math:`\frac{d}{d\phi}f(U_3(\theta, \phi, \lambda)) = \frac{1}{2}\left[f(U_3(\theta+\pi/2, \phi, \lambda)) - f(U_3(\theta-\pi/2, \phi, \lambda))\right]`
      where :math:`f` is an expectation value depending on :math:`U_3(\theta, \phi, \lambda)`.
      This gradient recipe applies for each angle argument :math:`\{\theta, \phi, \lambda\}`.
    Args:
        theta (float): polar angle :math:`\theta`
        phi (float): azimuthal angle :math:`\phi`
        lambda (float): quantum phase :math:`\lambda`
        wires (Sequence[int] or int): the subsystem the gate acts on
    """
    num_params = 3
    num_wires = 1
    par_domain = "R"
    grad_method = "A"
    @classmethod
    def _matrix(cls, *params):
        # [[c, -s e^{i lam}], [s e^{i phi}, c e^{i (phi + lam)}]]
        theta, phi, lam = params
        c = math.cos(theta / 2)
        s = math.sin(theta / 2)
        return np.array(
            [
                [c, -s * cmath.exp(1j * lam)],
                [s * cmath.exp(1j * phi), c * cmath.exp(1j * (phi + lam))],
            ]
        )
    @staticmethod
    def decomposition(theta, phi, lam, wires):
        # U3(theta, phi, lam) = PhaseShift(phi) PhaseShift(lam) Rot(lam, theta, -lam),
        # applied in list order (see the relation in the class docstring).
        decomp_ops = [
            Rot(lam, theta, -lam, wires=wires),
            PhaseShift(lam, wires=wires),
            PhaseShift(phi, wires=wires),
        ]
        return decomp_ops
    def adjoint(self):
        # U3(theta, phi, lam)^dagger = U3(theta, pi - lam, pi - phi),
        # phase angles taken mod 2*pi.
        theta, phi, lam = self.parameters
        new_lam = (np.pi - phi) % (2 * np.pi)
        new_phi = (np.pi - lam) % (2 * np.pi)
        return U3(theta, new_phi, new_lam, wires=self.wires)
class IsingXX(Operation):
    r"""IsingXX(phi, wires)
    Ising XX coupling gate, :math:`XX(\phi) = \exp(-i \phi (X \otimes X) / 2)`:
    .. math:: XX(\phi) = \begin{bmatrix}
            \cos(\phi / 2) & 0 & 0 & -i \sin(\phi / 2) \\
            0 & \cos(\phi / 2) & -i \sin(\phi / 2) & 0 \\
            0 & -i \sin(\phi / 2) & \cos(\phi / 2) & 0 \\
            -i \sin(\phi / 2) & 0 & 0 & \cos(\phi / 2)
        \end{bmatrix}.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 1
    * Gradient recipe: :math:`\frac{d}{d\phi}f(XX(\phi)) = \frac{1}{2}\left[f(XX(\phi +\pi/2)) - f(XX(\phi-\pi/2))\right]`
      for an expectation value :math:`f` depending on :math:`XX(\phi)`.
    Args:
        phi (float): the phase angle
        wires (int): the subsystem the gate acts on
    """
    num_params = 1
    num_wires = 2
    par_domain = "R"
    grad_method = "A"
    # Generator: X (x) X, with prefactor -1/2.
    generator = [
        np.array([[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]]),
        -1 / 2,
    ]

    @classmethod
    def _matrix(cls, *params):
        """cos(phi/2) * I - i sin(phi/2) * (X (x) X)."""
        (phi,) = params
        cos_half = math.cos(phi / 2)
        sin_half = math.sin(phi / 2)
        # X (x) X is the anti-diagonal identity, so the matrix has cos(phi/2)
        # on the diagonal and -i sin(phi/2) on the anti-diagonal.
        return cos_half * np.eye(4) - 1j * sin_half * np.fliplr(np.eye(4))

    @staticmethod
    def decomposition(phi, wires):
        """CNOT-conjugated RX rotation on the first wire."""
        return [
            qml.CNOT(wires=wires),
            RX(phi, wires=[wires[0]]),
            qml.CNOT(wires=wires),
        ]

    def adjoint(self):
        """The inverse coupling, obtained by negating the angle."""
        phi = self.parameters[0]
        return IsingXX(-phi, wires=self.wires)
class IsingYY(Operation):
    r"""IsingYY(phi, wires)
    Ising YY coupling gate
    .. math:: \mathtt{YY}(\phi) = \begin{bmatrix}
        \cos(\phi / 2) & 0 & 0 & i \sin(\phi / 2) \\
        0 & \cos(\phi / 2) & -i \sin(\phi / 2) & 0 \\
        0 & -i \sin(\phi / 2) & \cos(\phi / 2) & 0 \\
        i \sin(\phi / 2) & 0 & 0 & \cos(\phi / 2)
    \end{bmatrix}.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 1
    * Gradient recipe: :math:`\frac{d}{d\phi}f(YY(\phi)) = \frac{1}{2}\left[f(YY(\phi +\pi/2)) - f(YY(\phi-\pi/2))\right]`
    where :math:`f` is an expectation value depending on :math:`YY(\phi)`.
    Args:
        phi (float): the phase angle
        wires (int): the subsystem the gate acts on
    """
    num_params = 1
    num_wires = 2
    par_domain = "R"
    grad_method = "A"
    # Generator: Y (x) Y, with prefactor -1/2.
    generator = [
        np.array([[0, 0, 0, -1], [0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0]]),
        -1 / 2,
    ]
    @staticmethod
    def decomposition(phi, wires):
        # CY-conjugated RY rotation on the first wire.
        return [
            qml.CY(wires=wires),
            qml.RY(phi, wires=[wires[0]]),
            qml.CY(wires=wires),
        ]
    @classmethod
    def _matrix(cls, *params):
        # cos(phi/2) * I - i sin(phi/2) * (Y (x) Y); see the class docstring.
        phi = params[0]
        cos = np.cos(phi / 2)
        isin = 1.0j * np.sin(phi / 2)
        return np.array(
            [
                [cos, 0.0, 0.0, isin],
                [0.0, cos, -isin, 0.0],
                [0.0, -isin, cos, 0.0],
                [isin, 0.0, 0.0, cos],
            ],
            dtype=complex,
        )
    def adjoint(self):
        # Negating the angle inverts the coupling.
        (phi,) = self.parameters
        return IsingYY(-phi, wires=self.wires)
class IsingZZ(Operation):
    r"""IsingZZ(phi, wires)
    Ising ZZ coupling gate
    .. math:: ZZ(\phi) = \begin{bmatrix}
        e^{-i \phi / 2} & 0 & 0 & 0 \\
        0 & e^{i \phi / 2} & 0 & 0 \\
        0 & 0 & e^{i \phi / 2} & 0 \\
        0 & 0 & 0 & e^{-i \phi / 2}
    \end{bmatrix}.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 1
    * Gradient recipe: :math:`\frac{d}{d\phi}f(ZZ(\phi)) = \frac{1}{2}\left[f(ZZ(\phi +\pi/2)) - f(ZZ(\phi-\pi/2))\right]`
    where :math:`f` is an expectation value depending on :math:`ZZ(\phi)`.
    Args:
        phi (float): the phase angle
        wires (int): the subsystem the gate acts on
    """
    num_params = 1
    num_wires = 2
    par_domain = "R"
    grad_method = "A"
    # Generator: Z (x) Z, with prefactor -1/2.
    generator = [
        np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]),
        -1 / 2,
    ]
    @staticmethod
    def decomposition(phi, wires):
        # CNOT-conjugated RZ rotation on the second wire.
        return [
            qml.CNOT(wires=wires),
            qml.RZ(phi, wires=[wires[1]]),
            qml.CNOT(wires=wires),
        ]
    @classmethod
    def _matrix(cls, *params):
        # Diagonal matrix with e^{-i phi/2} on even-parity basis states
        # (|00>, |11>) and e^{i phi/2} on odd-parity ones (|01>, |10>).
        phi = params[0]
        pos_phase = np.exp(1.0j * phi / 2)
        neg_phase = np.exp(-1.0j * phi / 2)
        return np.diag([neg_phase, pos_phase, pos_phase, neg_phase])
    def adjoint(self):
        # Negating the angle inverts the coupling.
        (phi,) = self.parameters
        return IsingZZ(-phi, wires=self.wires)
| [
"noreply@github.com"
] | johannesjmeyer.noreply@github.com |
279a76e53340519e4979dbc06d547c8df054147d | 93399b691ad3d1eb26940f31cbe3bfe298c43dfc | /venv/bin/easy_install | e8dd1c4c23d58bd1b1df9b082d4c647b477271a6 | [] | no_license | Rom1-J/projet-transverse | 38d635f38c501874813e5f64131085e2d3c937ab | 4a9ae0914d45a8f14d8cb7bc473a583e432cc50a | refs/heads/master | 2022-04-12T00:55:39.856232 | 2020-02-14T15:07:59 | 2020-02-14T15:07:59 | 239,025,210 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | #!/home/romain/Documents/efrei/L1/S2/projet-transverse/shooter/venv/bin/python
# -*- coding: utf-8 -*-
# Setuptools console-script shim: delegates to easy_install's CLI entry point.
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip a trailing "-script.py"/"-script.pyw"/".exe" suffix from argv[0]
    # so setuptools sees the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"romain.ordi@gmail.com"
] | romain.ordi@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.