index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
985,800 | 7662db886454e78090614f95f5931aedf51aa1af | import datetime
from haystack import indexes
from celery_haystack.indexes import CelerySearchIndex
from .models import ReutersNews, BBCNews, AljazeeraNews, PoliticoNews, EconomistNews, ChristianScienceMonitor
from .models import ChristianScienceMonitor, WikiNews, GuardianNews
from articles.models import Articles
class NewsAdIndex(CelerySearchIndex, indexes.Indexable):
    """Haystack search index over news articles.

    NOTE(review): despite the generic name, get_model() returns BBCNews, and
    this is the only index here built on CelerySearchIndex (asynchronous index
    updates via celery-haystack) — confirm both are intentional.
    """
    # Primary document field; content rendered from the shared template.
    text = indexes.CharField(document=True, use_template=True, template_name="search/indexes/mysites/jobad_text.txt")
    # Title matches are weighted slightly above the other fields.
    title = indexes.CharField(model_attr='title', boost=1.125)
    description = indexes.CharField(model_attr='description')
    url = indexes.CharField(model_attr='url')
    date = indexes.DateTimeField(model_attr='date')

    def get_model(self):
        """Return the Django model this index is built from."""
        return BBCNews
class ReutersAdIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for ReutersNews articles."""
    # Primary document field; content rendered from the shared template.
    text = indexes.CharField(document=True, use_template=True, template_name="search/indexes/mysites/jobad_text.txt")
    # Title matches are weighted slightly above the other fields.
    title = indexes.CharField(model_attr='title', boost=1.125)
    description = indexes.CharField(model_attr='description')
    url = indexes.CharField(model_attr='url')
    date = indexes.DateTimeField(model_attr='date')

    def get_model(self):
        """Return the Django model this index is built from."""
        return ReutersNews
class AljazeeraAdIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for AljazeeraNews articles."""
    # Primary document field; content rendered from the shared template.
    text = indexes.CharField(document=True, use_template=True, template_name="search/indexes/mysites/jobad_text.txt")
    # Title matches are weighted slightly above the other fields.
    title = indexes.CharField(model_attr='title', boost=1.125)
    description = indexes.CharField(model_attr='description')
    url = indexes.CharField(model_attr='url')
    date = indexes.DateTimeField(model_attr='date')

    def get_model(self):
        """Return the Django model this index is built from."""
        return AljazeeraNews
class PoliticoAdIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for PoliticoNews articles."""
    # Primary document field; content rendered from the shared template.
    text = indexes.CharField(document=True, use_template=True, template_name="search/indexes/mysites/jobad_text.txt")
    # Title matches are weighted slightly above the other fields.
    title = indexes.CharField(model_attr='title', boost=1.125)
    description = indexes.CharField(model_attr='description')
    url = indexes.CharField(model_attr='url')
    date = indexes.DateTimeField(model_attr='date')

    def get_model(self):
        """Return the Django model this index is built from."""
        return PoliticoNews
class ChristianScienceAdIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for ChristianScienceMonitor articles.

    NOTE(review): unlike the sibling indexes, the title field here has no
    boost=1.125 — confirm whether that omission is deliberate.
    """
    # Primary document field; content rendered from the shared template.
    text = indexes.CharField(document=True, use_template=True, template_name="search/indexes/mysites/jobad_text.txt")
    title = indexes.CharField(model_attr='title')
    description = indexes.CharField(model_attr='description')
    url = indexes.CharField(model_attr='url')
    date = indexes.DateTimeField(model_attr='date')

    def get_model(self):
        """Return the Django model this index is built from."""
        return ChristianScienceMonitor
class WikiNewsAdIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for WikiNews articles.

    NOTE(review): a second class with this exact name appears later in the
    module and rebinds it — this definition is effectively shadowed.
    """
    # Primary document field; content rendered from the shared template.
    text = indexes.CharField(document=True, use_template=True, template_name="search/indexes/mysites/jobad_text.txt")
    # Title matches are weighted slightly above the other fields.
    title = indexes.CharField(model_attr='title', boost=1.125)
    description = indexes.CharField(model_attr='description')
    url = indexes.CharField(model_attr='url')
    date = indexes.DateTimeField(model_attr='date')

    def get_model(self):
        """Return the Django model this index is built from."""
        return WikiNews
class GuardianAdIndex(indexes.SearchIndex, indexes.Indexable):
    '''
    Haystack search index for GuardianNews articles: the document body is
    rendered from a template and title/description/url/date are pulled
    straight from the model.

    NOTE(review): the original docstring described indexing a JobAd model —
    apparently copy-pasted from another project; get_model() actually
    returns GuardianNews.
    '''
    # Primary document field; content rendered from the shared template.
    text = indexes.CharField(document=True, use_template=True, template_name="search/indexes/mysites/jobad_text.txt")
    # Title matches are weighted slightly above the other fields.
    title = indexes.CharField(model_attr='title', boost=1.125)
    description = indexes.CharField(model_attr='description')
    url = indexes.CharField(model_attr='url')
    date = indexes.DateTimeField(model_attr='date')

    def get_model(self):
        """Return the Django model this index is built from."""
        return GuardianNews
class WikiNewsAdIndex(indexes.SearchIndex, indexes.Indexable):
    '''
    Haystack search index for WikiNews articles.

    NOTE(review): this is a DUPLICATE definition — an identical
    WikiNewsAdIndex appears earlier in the module and this one silently
    rebinds the name (and may double-register with haystack). One of the
    two should be removed.
    '''
    # Primary document field; content rendered from the shared template.
    text = indexes.CharField(document=True, use_template=True, template_name="search/indexes/mysites/jobad_text.txt")
    # Title matches are weighted slightly above the other fields.
    title = indexes.CharField(model_attr='title', boost=1.125)
    description = indexes.CharField(model_attr='description')
    url = indexes.CharField(model_attr='url')
    date = indexes.DateTimeField(model_attr='date')

    def get_model(self):
        """Return the Django model this index is built from."""
        return WikiNews
|
985,801 | 557550a1df24a439ac1cdb898afaf43d1d5bbdb9 | '''# QUESTION 1
alphabets = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
spl_char = ['~', ':', "'", '+', '[', '\\', '@', '^', '{', '%', '(', '-', '"', '*', '|', ',', '&', '<', '`', '}', '.', '_', '=', ']', '!', '>', ';', '?', '#', '$', ')', '/']
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
space = [' ']
array = []
char = []
digit = []
special = []
alpha = []
attempts = int(input("Enter the number of attempts: "))
for i in range(attempts):
string = input(f"Write the line {i+1}: ")
split_string = string.split(" ")
st = list(string)
for letter in st:
if letter in alphabets:
alpha.append(letter)
if letter in numbers:
digit.append(letter)
if letter in spl_char:
special.append(letter)
print("Total Characters: ",len(st))
print("Total Alphabets: ",len(alpha))
print("Total Digits: ",len(digit))
print("Total Special Characters: ",len(special))
print("Total Words : ",len(split_string) )
'''
"""# QUESTION 2
input_string='the brown fox'
array=input_string.split()
for word in array:
new_word = ''
input_string = ''.join(word[0].upper() + word[1:])
print(input_string)
"""
'''#QUESTION 3
input_string = input("Enter the string: ")
array = input_string.split()
simple_array=set(array)
st = ' '.join(simple_array)
print(st)
'''
'''#QUESTION 4
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
array = []
converted_array = []
input_string = input("Enter the string: ")
for num in list(input_string):
if num in numbers:
array.append(num)
for num in array:
converted_num = int(num)
converted_array.append(converted_num)
print(array)
print(sum(converted_array))
'''
'''#QUESTION 5
string = input("Write a sentence: ")
split_string = string.split()
hyphen_string = ';'.join(split_string)
print(hyphen_string)
''' |
985,802 | 80d3878f5bfb041c44dcad6803ec5374069619df | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 18 16:32:40 2017
@author: pragun
"""
def printMove(A='A', B='B'):
    """Announce a single disk move from peg *A* to peg *B*."""
    print('move from {0} to {1}'.format(A, B))


def Towers(n, A='A', B='B', spare='S'):
    """Solve Towers of Hanoi for *n* disks, moving the stack from A to B.

    Side effect: increments the module-level counter ``move`` once per
    single-disk move performed.
    """
    global move  # running move counter lives at module level
    if n == 1:
        printMove(A, B)
        move += 1
        return
    # Park the n-1 smaller disks on the spare peg, move the largest disk,
    # then rebuild the smaller stack on top of it.
    Towers(n - 1, A, spare, B)
    Towers(1, A, B, spare)
    Towers(n - 1, spare, B, A)


move = 0
Towers(10)
print('it will take:', move, 'moves')
985,803 | 6a9c20b415cfd4547b17b8aae98e3cf5835ff907 | """
aep_csm_component.py
Created by NWTC Systems Engineering Sub-Task on 2012-08-01.
Copyright (c) NREL. All rights reserved.
"""
import numpy as np
from openmdao.main.api import Component, Assembly, set_as_top, VariableTree
from openmdao.main.datatypes.api import Int, Bool, Float, Array, VarTree
from fusedwind.lib.utilities import hstack, vstack
from fusedwind.plant_flow.asym import BaseAEPModel, BaseAEPModel_NoFlow
from fusedwind.plant_flow.comp import BaseAEPAggregator, BaseAEPAggregator_NoFlow, CDFBase
from fusedwind.interface import implement_base
###################################################
# AEP where single turbine AEP is input
@implement_base(BaseAEPModel_NoFlow)
class aep_assembly(Assembly):
    """ Basic assembly for aep estimation for an entire wind plant based on the AEP input from one turbine.

    Wraps a single BasicAEP component; plant-level inputs are promoted here
    and wired through to it in configure().
    """

    # variables
    AEP_one_turbine = Float(iotype='in', units='kW*h')

    # parameters
    array_losses = Float(0.059, iotype='in', desc='energy losses due to turbine interactions - across entire plant')
    other_losses = Float(0.0, iotype='in', desc='energy losses due to blade soiling, electrical, etc')
    availability = Float(0.94, iotype='in', desc='average annual availability of wind turbines at plant')
    turbine_number = Int(100, iotype='in', desc='total number of wind turbines at the plant')
    machine_rating = Float(5000.0, iotype='in', desc='machine rating of turbine')

    # outputs
    gross_aep = Float(iotype='out', desc='Gross Annual Energy Production before availability and loss impacts', units='kW*h')
    net_aep = Float(iotype='out', desc='Net Annual Energy Production after availability and loss impacts', units='kW*h')
    capacity_factor = Float(iotype='out', desc='plant capacity factor')

    def configure(self):
        """Instantiate the BasicAEP sub-component and wire all I/O through."""
        super(aep_assembly, self).configure()

        self.add('aep', BasicAEP())
        self.driver.workflow.add(['aep'])

        # inputs
        self.connect('AEP_one_turbine', 'aep.AEP_one_turbine')
        self.connect('array_losses', 'aep.array_losses')
        self.connect('other_losses', 'aep.other_losses')
        self.connect('availability', 'aep.availability')
        self.connect('turbine_number', 'aep.turbine_number')
        # BUGFIX: machine_rating was declared as an assembly input but never
        # forwarded, so BasicAEP always used its 5000.0 default when computing
        # capacity_factor (aep_weibull_assembly below does forward it).
        self.connect('machine_rating', 'aep.machine_rating')

        # outputs
        self.connect('aep.gross_aep', 'gross_aep')
        self.connect('aep.net_aep', 'net_aep')
        self.connect('aep.capacity_factor', 'capacity_factor')
@implement_base(BaseAEPAggregator_NoFlow)
class BasicAEP(Component):
    """ Basic component for aep estimation for an entire wind plant based on the AEP input from one turbine."""

    # in
    AEP_one_turbine = Float(iotype='in', units='kW*h')

    # parameters
    array_losses = Float(0.059, iotype='in', desc='energy losses due to turbine interactions - across entire plant')
    other_losses = Float(0.0, iotype='in', desc='energy losses due to blade soiling, electrical, etc')
    availability = Float(0.94, iotype='in', desc='average annual availability of wind turbines at plant')
    turbine_number = Int(100, iotype='in', desc='total number of wind turbines at the plant')
    machine_rating = Float(5000.0, iotype='in', desc='machine rating of turbine')

    # outputs
    gross_aep = Float(iotype='out', desc='Gross Annual Energy Production before availability and loss impacts', units='kW*h')
    net_aep = Float(iotype='out', desc='Net Annual Energy Production after availability and loss impacts', units='kW*h')
    capacity_factor = Float(iotype='out', desc='plant capacity factor')

    def __init__(self):
        Component.__init__(self)
        # Missing derivatives are treated as zero by the framework.
        self.missing_deriv_policy = 'assume_zero'

    def execute(self):
        """Scale one turbine's AEP to the plant, then apply loss factors."""
        self.gross_aep = self.turbine_number * self.AEP_one_turbine
        # Availability and the two loss terms apply multiplicatively.
        self.net_aep = self.availability * (1-self.array_losses) * (1-self.other_losses) * self.gross_aep
        # NOTE(review): capacity factor uses the single-turbine GROSS AEP over
        # 8760 h at rated power — confirm a net/plant-level figure isn't wanted.
        self.capacity_factor = self.AEP_one_turbine / (8760. * self.machine_rating)

    def list_deriv_vars(self):
        # Derivatives are supplied only w.r.t. AEP_one_turbine.
        inputs = ('AEP_one_turbine',)
        outputs = ('gross_aep', 'net_aep')
        return inputs, outputs

    def provideJ(self):
        # 2x1 Jacobian: [d(gross)/d(AEP_one_turbine); d(net)/d(AEP_one_turbine)].
        J = np.array([[self.turbine_number], [self.availability * (1-self.array_losses) * (1-self.other_losses) * self.turbine_number]])
        return J
################################
# AEP where power curve and environmental conditions are input
@implement_base(CDFBase)
class WeibullCDF(Component):
    """Weibull cumulative distribution function: F(x) = 1 - exp(-(x/A)**k)."""

    # Inputs
    A = Float(iotype='in', desc='scale factor')
    k = Float(iotype='in', desc='shape or form factor')
    x = Array(iotype='in', desc='input curve')

    # Outputs
    F = Array(iotype='out', desc='probabilities out')

    def __init__(self):
        super(WeibullCDF,self).__init__()
        #controls what happens if derivatives are missing
        self.missing_deriv_policy = 'assume_zero'

    def execute(self):
        """Evaluate the CDF element-wise and precompute analytic partials."""
        self.F = 1.0 - np.exp(-(self.x/self.A)**self.k)
        # dF/dx is diagonal: each F[i] depends only on x[i].
        self.d_F_d_x = np.diag(- np.exp(-(self.x/self.A)**self.k) * (1./self.A) * (-self.k * ((self.x/self.A)**(self.k-1.0))))
        # dF/dA = -exp(-(x/A)^k) * k * x^k / A^(k+1), written via (A/x)^(-k-1).
        self.d_F_d_A = - np.exp(-(self.x/self.A)**self.k) * (1./self.x) * (self.k * ((self.A/self.x)**(-self.k-1.0)))
        # dF/dk = exp(-(x/A)^k) * (x/A)^k * ln(x/A).
        self.d_F_d_k = - np.exp(-(self.x/self.A)**self.k) * -(self.x/self.A)**self.k * np.log(self.x/self.A)

    def list_deriv_vars(self):
        inputs = ['x', 'A', 'k']
        outputs = ['F']
        return inputs, outputs

    def provideJ(self):
        # Columns: n partials w.r.t. x (diagonal block), then dF/dA, then dF/dk.
        self.J = hstack((self.d_F_d_x, self.d_F_d_A, self.d_F_d_k))
        return self.J
class RayleighCDF(CDFBase):
    """Rayleigh cumulative distribution function:
    F(x) = 1 - exp(-pi/4 * (x/xbar)**2), parameterised by the mean xbar."""

    # Inputs
    xbar = Float(iotype='in', desc='mean value of distribution')
    x = Array(iotype='in', desc='input curve')

    # Outputs
    F = Array(iotype='out', desc='probabilities out')

    def __init__(self):
        super(RayleighCDF,self).__init__()
        #controls what happens if derivatives are missing
        self.missing_deriv_policy = 'assume_zero'

    def execute(self):
        """Evaluate the CDF element-wise and precompute analytic partials."""
        self.F = 1.0 - np.exp(-np.pi/4.0*(self.x/self.xbar)**2)
        # dF/dx is diagonal: each F[i] depends only on x[i].
        self.d_F_d_x = np.diag(- np.exp(-np.pi/4.0*(self.x/self.xbar)**2) * ((-np.pi/2.0)*(self.x/self.xbar)) * (1.0 / self.xbar))
        # dF/dxbar = -exp(...) * pi/2 * x^2 / xbar^3, written via (xbar/x)^(-3).
        self.d_F_d_xbar = - np.exp(-np.pi/4.0*(self.x/self.xbar)**2) * ((np.pi/2.0)*(self.xbar/self.x)**(-3)) * (1.0 / self.x)

    def list_deriv_vars(self):
        inputs = ['x', 'xbar']
        outputs = ['F']
        return inputs, outputs

    def provideJ(self):
        # Columns: n partials w.r.t. x (diagonal block), then dF/dxbar.
        self.J = hstack((self.d_F_d_x, self.d_F_d_xbar))
        return self.J
@implement_base(BaseAEPModel_NoFlow)
class aep_weibull_assembly(Assembly):
    """ Basic assembly for aep estimation for an entire wind plant with the wind resource and single turbine power curve as inputs."""

    # variables
    A = Float(iotype='in', desc='scale factor')
    k = Float(iotype='in', desc='shape or form factor')
    wind_curve = Array(iotype='in', units='m/s', desc='wind curve')
    power_curve = Array(iotype='in', units='W', desc='power curve (power)')
    machine_rating = Float(units='kW', iotype='in', desc='machine power rating')

    # parameters
    array_losses = Float(0.059, iotype='in', desc = 'energy losses due to turbine interactions - across entire plant')
    other_losses = Float(0.0, iotype='in', desc = 'energy losses due to blade soiling, electrical, etc')
    availability = Float(0.94, iotype='in', desc = 'average annual availability of wind turbines at plant')
    turbine_number = Int(100, iotype='in', desc = 'total number of wind turbines at the plant')

    # outputs
    gross_aep = Float(iotype='out', desc='Gross Annual Energy Production before availability and loss impacts', units='kW*h')
    net_aep = Float(iotype='out', desc='Net Annual Energy Production after availability and loss impacts', units='kW*h')
    capacity_factor = Float(iotype='out', desc='plant capacity factor')

    def configure(self):
        """Wire a Weibull wind-speed CDF into an aep_component workflow."""
        super(aep_weibull_assembly, self).configure()

        self.add('aep', aep_component())
        self.add('cdf', WeibullCDF())
        self.driver.workflow.add(['aep', 'cdf'])

        #inputs
        self.connect('power_curve', 'aep.power_curve')
        self.connect('array_losses', 'aep.array_losses')
        self.connect('other_losses', 'aep.other_losses')
        self.connect('availability', 'aep.availability')
        self.connect('turbine_number', 'aep.turbine_number')
        self.connect('machine_rating','aep.machine_rating')
        self.connect('A','cdf.A')
        self.connect('k','cdf.k')
        # The CDF is evaluated at the power-curve wind speeds.
        self.connect('wind_curve','cdf.x')

        # connections
        self.connect('cdf.F', 'aep.CDF_V')

        # outputs (expression connects apply a 1/1000 unit scaling)
        self.connect('aep.gross_aep / 1000.0', 'gross_aep') #TODO: change aep outputs to Watts at this level
        self.connect('aep.net_aep / 1000.0', 'net_aep')
        self.connect('aep.capacity_factor','capacity_factor')
@implement_base(BaseAEPAggregator_NoFlow)
class aep_component(Component):
    """ Basic component for aep estimation for an entire wind plant with the wind resource and single turbine power curve as inputs."""

    # variables
    CDF_V = Array(iotype='in')  # wind-speed CDF evaluated at the power-curve speeds
    power_curve = Array(iotype='in', units='W', desc='power curve (power)')
    machine_rating = Float(units='kW', iotype='in', desc='machine power rating')

    # parameters
    array_losses = Float(0.059, iotype='in', desc='energy losses due to turbine interactions - across entire plant')
    other_losses = Float(0.0, iotype='in', desc='energy losses due to blade soiling, electrical, etc')
    availability = Float(0.94, iotype='in', desc='average annual availability of wind turbines at plant')
    turbine_number = Int(100, iotype='in', desc='total number of wind turbines at the plant')

    # outputs
    gross_aep = Float(iotype='out', desc='Gross Annual Energy Production before availability and loss impacts', units='kW*h')
    net_aep = Float(iotype='out', desc='Net Annual Energy Production after availability and loss impacts', units='kW*h')
    capacity_factor = Float(iotype='out', desc='plant capacity factor')

    def __init__(self):
        Component.__init__(self)
        #controls what happens if derivatives are missing
        self.missing_deriv_policy = 'assume_zero'

    def execute(self):
        """Integrate the power curve over the wind distribution to get AEP."""
        # trapz(P, CDF) is the expected power over the wind distribution;
        # x 365 x 24 hours gives per-turbine annual energy, scaled by count.
        # NOTE(review): power_curve is declared in W while the comment says
        # kWh — the owning assembly divides by 1000 on connection; confirm.
        self.gross_aep = self.turbine_number * np.trapz(self.power_curve, self.CDF_V)*365.0*24.0 # in kWh
        self.net_aep = self.availability * (1-self.array_losses) * (1-self.other_losses) * self.gross_aep
        self.capacity_factor = self.net_aep / (8760. * self.machine_rating * 1000.0 * self.turbine_number)

    def list_deriv_vars(self):
        inputs = ['CDF_V', 'power_curve']
        outputs = ['gross_aep', 'net_aep']
        return inputs, outputs

    def provideJ(self):
        """Jacobian of (gross_aep, net_aep) w.r.t. (CDF_V, power_curve),
        following the trapezoidal-rule integral used in execute()."""
        P = self.power_curve
        CDF = self.CDF_V
        factor = self.availability * (1-self.other_losses)*(1-self.array_losses)*365.0*24.0 * self.turbine_number

        n = len(P)
        # dAEP/dP: trapezoid weights of the CDF, halved at the end points.
        dAEP_dP = np.gradient(CDF)
        dAEP_dP[0] /= 2
        dAEP_dP[-1] /= 2
        d_gross_d_p = dAEP_dP * 365.0 * 24.0 * self.turbine_number
        d_net_d_p = dAEP_dP * factor

        # dAEP/dCDF: mirrored weights built from the power-curve spacing.
        dAEP_dCDF = -np.gradient(P)
        dAEP_dCDF[0] = -0.5*(P[0] + P[1])
        dAEP_dCDF[-1] = 0.5*(P[-1] + P[-2])
        d_gross_d_cdf = dAEP_dCDF * 365.0 * 24.0 * self.turbine_number
        d_net_d_cdf = dAEP_dCDF * factor

        #loss_factor = self.availability * (1-self.array_losses) * (1-self.other_losses)
        #dAEP_dlossFactor = np.array([self.net_aep/loss_factor])

        # Rows: (gross, net); columns: n CDF entries then n power entries.
        self.J = np.zeros((2, 2*n))
        self.J[0, 0:n] = d_gross_d_cdf
        self.J[0, n:2*n] = d_gross_d_p
        self.J[1, 0:n] = d_net_d_cdf
        self.J[1, n:2*n] = d_net_d_p
        #self.J[0, 2*n] = dAEP_dlossFactor

        return self.J
def example():
    """Run aep_weibull_assembly for a plant of 100 NREL 5 MW reference
    turbines (Weibull A=8.35 m/s, k=2.15) and print gross/net AEP."""
    aeptest = aep_weibull_assembly()

    aeptest.wind_curve = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, \
                           11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0])
    aeptest.power_curve = np.array([0.0, 0.0, 0.0, 187.0, 350.0, 658.30, 1087.4, 1658.3, 2391.5, 3307.0, 4415.70, \
                            5000.0, 5000.0, 5000.0, 5000.0, 5000.0, 5000.0, 5000.0, 5000.0, 5000.0, 5000.0, 5000.0, 5000.0, \
                            5000.0, 5000.0, 0.0])
    aeptest.A = 8.35
    aeptest.k = 2.15
    aeptest.array_losses = 0.059
    aeptest.other_losses = 0.0
    aeptest.availability = 0.94
    aeptest.turbine_number = 100

    aeptest.run()

    # BUGFIX: the original used Python-2 `print` statements, a SyntaxError
    # under Python 3. The single-argument parenthesised form below prints
    # identically under Python 2 and is valid Python 3.
    print("Annual energy production for an offshore wind plant with 100 NREL 5 MW reference turbines.")
    print("AEP gross output (before losses): {0:.1f} kWh".format(aeptest.gross_aep))
    print("AEP net output (after losses): {0:.1f} kWh".format(aeptest.net_aep))


if __name__=="__main__":
    example()
|
985,804 | abb23135b2c35e2e585e0fdc271dcdfc7573a3cc | import pprint
class ResponseObject(object):
    """Expose each top-level field of an HTTP JSON response as an attribute.

    Assumes ``response.json()`` returns a mapping (TODO confirm: a JSON
    array payload would break attribute assignment).
    """

    def __init__(self, response):
        payload = response.json()
        for field, value in payload.items():
            # BUGFIX/cleanup: the original guarded each lookup with
            # ``response[field] if field in response else None`` — always
            # true while iterating the mapping's own keys, so it was dead
            # code; a plain items() iteration is equivalent.
            setattr(self, field, value)

    def json(self):
        """Pretty-print this object's attributes to stdout.

        NOTE(review): pprint.pprint returns None, so this method returns
        None — kept as-is for backward compatibility, but the name is
        misleading for callers expecting a dict/str.
        """
        return pprint.pprint(vars(self))
|
985,805 | 9b14d30eb11a26661637bfe9a24e57a4b17182c1 | # Copyright (C) 2014 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
def main():
    """Minimal manual entry point: validate CLI flags, then report."""
    import argparse

    # No options are defined, so anything on sys.argv beyond -h/--help
    # is rejected with the standard argparse error.
    cli = argparse.ArgumentParser(description="Basic parser.")
    cli.parse_args()
    print("Manual entry point")
|
985,806 | 05dc8cda08dee69094f4c039dbd2a087066faa64 | from django.contrib import admin
from .models import CompanyId, MetricEvent, PermissionBuffer

# Register your models here.
# Expose the app's three models in the Django admin with default
# ModelAdmin options (no custom list display or filters).
admin.site.register(CompanyId)
admin.site.register(MetricEvent)
admin.site.register(PermissionBuffer)
|
985,807 | 811fa1dc649bae684e7ab1ab556283b155fbb18f | import sys
import tkinter
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton
from PyQt5.QtCore import pyqtSlot
class App(QWidget):
    """Main window for 'The Trading Game': a 900x600 Qt widget with six
    action buttons."""

    def __init__(self):
        super().__init__()
        self.title = 'The Trading Game'
        # Window position and size in pixels.
        self.left = 10
        self.top = 10
        self.width = 900
        self.height = 600
        self.initUI()

    def initUI(self):
        """Build and show the main window and its buttons.

        NOTE(review): every button is wired to investwin — presumably the
        Save/Progress/Assets handlers are placeholders to be written;
        confirm. The last two buttons also reuse the same local name.
        """
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)

        button = QPushButton(QIcon('invest.png'), 'Invest', self)
        button.move(100, 70)
        button.clicked.connect(self.investwin)

        savebut = QPushButton(QIcon('save.png'), 'Save', self)
        savebut.move(400, 70)
        savebut.clicked.connect(self.investwin)

        progressbut = QPushButton(QIcon('progress.png'), 'Progress', self)
        progressbut.move(700, 70)
        progressbut.clicked.connect(self.investwin)

        assetsbut = QPushButton(QIcon('assets.png'), 'My Assets', self)
        assetsbut.move(90, 370)
        assetsbut.clicked.connect(self.investwin)

        but = QPushButton(QIcon('progress.png'), 'Progress', self)
        but.move(390, 370)
        but.clicked.connect(self.investwin)

        but = QPushButton(QIcon('progress.png'), 'Progress', self)
        but.move(690, 370)
        but.clicked.connect(self.investwin)

        self.show()

    @pyqtSlot()
    def investwin(self):
        """Open the Invest window as a plain tkinter toplevel."""
        # BUGFIX: the module does `import tkinter`, not
        # `from tkinter import *`, so the bare name Tk raised NameError
        # every time a button was clicked; qualify it.
        root = tkinter.Tk()
        root.geometry('900x600')
        root.title('Invest')
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
|
985,808 | 9cf1f995c2a8449c47efa4c427291db51dd904a6 | # -*- coding: utf-8 -*-
"""
@author: v. sammartano
"""
#Libraries
import csv
import os
def main():
    """Merge every CSV in the current directory into outFile.csv.

    Each source file is expected to start with a ``Date YYYYMMDD`` header
    line; that date is reformatted and prefixed to every data row. Rows
    whose first or 13th character is '*' and rows starting with 'T' are
    skipped.
    """
    location = os.getcwd()
    header = "Date,Time,Voltage,Current,Isolation,Range,SoC,Distance,Fan rpm,Fan Torque,Hyd. Pump rpm,Hyd. Pump Torque,SW Pump rpm,SW Pump Torque,Nozzle,Sidebrushes,WideSweepBrush,TempIGBT-Fan,Fan motor temp, Traction rpm, Traction torque,BMS1 Volts, BMS2 Volts"
    header = header + "\n"
    of = "outFile.csv"
    # BUGFIX: `with` guarantees the output (and each input) file is closed
    # even if an exception escapes; the original leaked both handles.
    with open(of, "w") as outFile:
        outFile.write(header)
        dd = ""  # date prefix; set once a file's "Date" header line is seen
        for file in os.listdir(location):
            if not (file.endswith(".csv") and not file.startswith("outFile")):
                continue
            print("...reading {}".format(file))
            with open(file, newline='') as src:
                for row in csv.reader(src, delimiter=' ', quotechar='|'):
                    line = ', '.join(row)
                    if line[:4] == "Date":
                        d = line[5:13]
                        dd = d[6:9] + "/" + d[4:6] + "/" + d[:4]
                        # BUGFIX: these branches originally ended with a bare
                        # `next`, an expression naming the builtin that does
                        # nothing; `continue` states the intent explicitly.
                        continue
                    elif len(line) > 12 and (line[12] == "*" or line[0] == "*"):
                        # BUGFIX: the original indexed line[12] unguarded and
                        # crashed with IndexError on rows shorter than 13 chars.
                        continue
                    elif line and line[0] == "T":
                        continue
                    else:
                        L = dd + "," + line + "\n"
                        outFile.write(L)
    # NOTE(review): the original printed "No CSV files in here!" after a
    # `raise`, which was unreachable; exceptions now simply propagate.
    print("\nAll files have been merged into: {}".format(of))
if __name__ == "__main__":
main()
input("Any key to exit!")
|
985,809 | 72dac0c99d293e3980365d6adcf36326c97ad8d1 | """
aqui temos um docstring
"""
import cv2
# NOTE: the original imported cv2 twice in a row; the duplicate was dropped.

# Record webcam video to gravacao.avi while showing a grayscale preview;
# press 'q' in the preview window to stop.
captura = cv2.VideoCapture(0)
forc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('gravacao.avi', forc, 20.0, (680, 500))
print(captura.isOpened())

while captura.isOpened():
    ret, frame = captura.read()
    if ret:
        print(captura.get(cv2.CAP_PROP_FRAME_WIDTH))
        print(captura.get(cv2.CAP_PROP_FRAME_HEIGHT))
        out.write(frame)
        # BUGFIX: the original also called out.write(frozenset), passing the
        # builtin frozenset TYPE instead of a frame — removed.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imshow('frame', gray)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

captura.release()
out.release()
# BUGFIX: was cv2.destroyAllWindons() — a typo raising AttributeError on exit.
cv2.destroyAllWindows()
# NOTE(review): frames are written at the camera's native size but the writer
# was opened for 680x500; if they differ the AVI may be invalid — this likely
# explains the original trailing comment ("video not saved in a valid format").
|
985,810 | cc7af0ed26ea42f9faea923fe4008abbfed7a618 | def list_comprehension():
my_list = [4, 10, 3, 1, -4]
# multiple every number in my_list by 2
my_list_2 = []
for num in my_list:
my_list_2.append(num * 2)
print(my_list_2)
my_list_comp = [10 * num for num in my_list]
print(my_list_comp)
import os
# Working with files (read and write)
# mode can be 'r' when the file will only be read,
# 'w' for only writing (an existing file with the same name will be erased),
# and 'a' opens the file for appending; any data written to the file is automatically added to the end.
# 'r+' opens the file for both reading and writing. The mode argument is optional; 'r' will be assumed if it’s omitted.
# path example:
# full path /home/daulet/dev/kbtu/pp2-spring-2020/week-2/week-3
# full path /tmp/example/pp2-spring-2020/week-2/week-3
# relative /pp2-spring-2020/week-2/week-3
# reading from file in relative path
# reading from file in relative path
def read():
    """Read space-separated integers from week-2/data/numbers.txt
    (relative to the current working directory), print them, and return
    their sum. Raises FileNotFoundError if run from another directory."""
    currentPath = os.getcwd() # path where program is running
    print(currentPath)
    # opening and managing resources
    with open(os.path.join(currentPath, 'week-2/data/numbers.txt'), 'r') as file:
        line = file.readline()
        nums = line.split(' ')
        print('This is a list of nums from file: {0}'.format(nums))
        numsSum = 0
        for num in nums:
            numsSum += int(num)
        print('Sum of nums is: {0}'.format(numsSum))
        return numsSum
# ensure that nums is a LIST
def write_to_file(nums: list):
    '''Write the even numbers from *nums*, space-separated, to
    week-2/data/even_numbers.txt under the current working directory.

    Keyword arguments:
    nums -- list of numbers

    Returns: None
    '''
    current_path = os.getcwd()
    target = os.path.join(current_path, 'week-2/data/even_numbers.txt')
    with open(target, 'w') as out:
        for value in nums:
            # only keep even values; value is numeric, not a string
            if value % 2 == 0:
                out.write(str(value) + ' ')
# ensure that dir_path is a string
# ensure that dir_path is a string
def show_files_and_dirs(dir_path: str):
    '''Print the name of every regular file directly inside *dir_path*
    (subdirectories are skipped, not recursed into).

    Keyword arguments:
    dir_path -- path of directory

    Returns: None
    '''
    with os.scandir(dir_path) as entries:
        for entry in entries:
            if entry.is_file():
                print(entry.name)
# F(n) = F(n - 1) + F(n - 2)
# 1, 1, (1 + 1) = 2, (1 + 2) = 3, (2 + 3) = 5, 8, 13, 21, 34, ...
# 1 1 2 3 5 8
# first, second
# first second
# first second
# first second
# ensure that returned param is list
# max > 1
# pros: can get a full list, retrieve value by index
# cons: full list takes more space
def fib1(max: int) -> list:
    """Return every Fibonacci number strictly below *max* as a list.

    Materialises the full sequence, so results are indexable — trades
    memory for random access (contrast with the fib2 generator).
    """
    previous, current = 0, 1
    result = []
    while current < max:
        result.append(current)
        previous, current = current, previous + current
    return result
# generator
# pros: does not need much memory
# cons: cannot retrive value by index
def fib2(max: int):
    """Yield Fibonacci numbers strictly below *max*, one at a time.

    Generator variant of fib1: constant memory, but values cannot be
    retrieved by index.
    """
    previous, current = 0, 1
    while current < max:
        yield current
        previous, current = current, previous + current
def min_max(nums: list) -> tuple:
    """Return a (smallest, largest) tuple for the non-empty list *nums*."""
    smallest = min(nums)
    largest = max(nums)
    return (smallest, largest)
# bonus (1 point): how to make *args typed,
# example: i want args to be only int(s)
def args_example(*args):
    """Print each positional argument on its own line."""
    for arg in args:
        print(arg)
def kwargs_example(**kwargs):
    """Print every keyword argument as a 'Key: ..., value: ...' line.

    NOTE(review): `kwargs` is always a dict (possibly empty) so the
    `is not None` guard never fails; it is kept as-is.
    """
    if kwargs is not None:
        for key, value in kwargs.items():
            print('Key: {0}, value: {1}'.format(key, value))
# params: screen, keyboard, size, manufacturer
# CLASS is only a description
# object in an instance of a CLASS
# params: screen, keyboard, size, manufacturer
class Notebook:
    """Describes a laptop: screen, keyboard, size (inches) and maker.

    The class is only a description; each object is one concrete notebook.
    """

    def __init__(self, screen: str, keyboard: str, size: int, manufacturer: str):
        """Store the notebook's characteristics on this instance."""
        self.screen = screen
        self.keyboard = keyboard
        self.size = size
        self.manufacturer = manufacturer

    def isBig(self) -> bool:
        """True when the screen diagonal exceeds 14 inches."""
        threshold = 14
        return self.size > threshold
class Fib:
    """Iterable over the Fibonacci numbers not exceeding *maximum*."""

    def __init__(self, maximum):
        self.maximum = maximum

    def __iter__(self):
        # (Re)start the sequence each time a new iteration begins.
        self.first = 0
        self.second = 1
        return self

    def __next__(self):
        value = self.second
        if value > self.maximum:
            # Sequence exhausted: the next value would exceed the bound.
            raise StopIteration
        self.first, self.second = self.second, self.first + self.second
        return value
if __name__ == '__main__':
    # Demo driver — most calls are left commented out for classroom use.
    # list_comprehension()
    # result = read()
    # print(result)
    # write_to_file([8, 9, 5, -4, 100, 57, 72, 87, 44, 81])
    # show_files_and_dirs('/home/daulet/dev/kbtu/pp2-spring-2020')
    print(fib1(100))
    # for fibNumber in fib2(100):
    #     print(fibNumber)
    my_list = [8, 0, -5, 10, 43]
    (my_min, my_max) = min_max(my_list)
    #print('Min: {0}, Max: {1}'.format(my_min, my_max))
    # args_example(9, 0, 3, 'paiouqwer', 0.4)
    # kwargs_example(param1 = 'hello', param2 = 1990, param3 = 876)
    notebook1 = Notebook('wide', 'full-sized', 14, 'Lenovo')
    notebook2 = Notebook('normal', 'mini-sized', 17, 'Apple')
    notebook3 = ['wide', 'full-sized', 17, 'IBD'] # no class — just a plain list
    print(notebook1)
    print(notebook1.manufacturer)
    print(notebook2.manufacturer)
    print(notebook1.isBig())
    fib = Fib(1000)
    for fibNum in fib:
        print(fibNum)
|
985,811 | c11b4da467fb2f18b374368a3e6b1c8b0af00843 | # coding: utf-8
def copy(sources, targets):
    """Hard copy: overwrite each target with its source (soft update, tau=1)."""
    return soft_update(sources, targets, tau=1.0)


def soft_update(sources, targets, tau):
    """Polyak-average each target toward its source: t <- tau*s + (1-tau)*t.

    Each target must support .assign(value); sources/targets are paired
    positionally.
    """
    for src, tgt in zip(sources, targets):
        blended = tau * src + (1. - tau) * tgt
        tgt.assign(blended)
|
985,812 | 3938fa67dd91e58c442a2ef35eb2c58183a61bf2 | ../3.0.0/_downloads/ginput_demo_sgskip.py |
985,813 | 70c40ff121310c8f6a5f06eac647d93b7fd8d1ca | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from django.db import models
class TAddress(models.Model):
    """Shipping address belonging to a TUser (table t_address)."""
    address_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=20, blank=True, null=True)
    detail_address = models.CharField(max_length=100, blank=True, null=True)
    cellphone = models.CharField(max_length=20, blank=True, null=True)
    post_code = models.CharField(max_length=6, blank=True, null=True)
    telephone = models.CharField(max_length=11, blank=True, null=True)
    user = models.ForeignKey('TUser', models.DO_NOTHING, blank=True, null=True)

    class Meta:
        db_table = 't_address'
class TCategory(models.Model):
    """Book category tree node (table t_category).

    NOTE(review): parent_id is a plain IntegerField rather than a
    self-referencing ForeignKey — presumably pointing at another
    t_category row; confirm against the schema.
    """
    category_id = models.IntegerField(primary_key=True)
    category = models.CharField(max_length=20, blank=True, null=True)
    parent_id = models.IntegerField(blank=True, null=True)
    level = models.IntegerField(blank=True, null=True)

    class Meta:
        db_table = 't_category'
class TBook(models.Model):
    """Book listing with pricing, publication and stock details (table t_book)."""
    book_id = models.IntegerField(primary_key=True)
    category = models.ForeignKey('TCategory', models.DO_NOTHING, blank=True, null=True)
    book_name = models.CharField(max_length=20, blank=True, null=True)
    book_description = models.CharField(max_length=200, blank=True, null=True)
    author = models.CharField(max_length=60, blank=True, null=True)
    author_information = models.CharField(max_length=200, blank=True, null=True)
    dangdang_price = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True)
    editor = models.CharField(max_length=80, blank=True, null=True)
    press = models.CharField(max_length=40, blank=True, null=True)
    publication_time = models.DateTimeField(blank=True, null=True)
    book_image = models.CharField(max_length=50, blank=True, null=True)
    sales = models.CharField(max_length=20, blank=True, null=True)
    inventory = models.CharField(max_length=20, blank=True, null=True)
    discount = models.DecimalField(max_digits=3, decimal_places=2, blank=True, null=True)
    comment = models.IntegerField(blank=True, null=True)
    book_price = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True)
    page_number = models.IntegerField(blank=True, null=True)
    words = models.IntegerField(blank=True, null=True)
    edition = models.IntegerField(blank=True, null=True)
    format = models.CharField(max_length=10, blank=True, null=True)
    isbn = models.CharField(db_column='ISBN', max_length=30, blank=True, null=True)  # Field name made lowercase.
    print_time = models.DateTimeField(blank=True, null=True)
    impression = models.IntegerField(blank=True, null=True)
    sponsor = models.DecimalField(max_digits=10, decimal_places=2, blank=True, null=True)

    def calculate_discount(self):
        """Return dangdang_price/book_price x 10 as a 2-decimal string.

        NOTE(review): both prices are truncated via int() before dividing,
        which loses the decimal part, and a zero/None book_price raises —
        confirm whether Decimal division was intended.
        """
        return "%.2f" % (int(self.dangdang_price) / int(self.book_price)*10)

    class Meta:
        db_table = 't_book'
class TCar(models.Model):
    """Shopping-cart line: a user/book pair with a quantity (table t_car).

    NOTE(review): the user FK is named `id` (db_column='id'), which shadows
    Django's usual implicit pk name — kept because the column name is fixed
    by the legacy schema.
    """
    car_id = models.AutoField(primary_key=True)
    id = models.ForeignKey('TUser', models.DO_NOTHING, db_column='id', blank=True, null=True)
    book = models.ForeignKey(TBook, models.DO_NOTHING, blank=True, null=True)
    goods_number = models.CharField(max_length=20, blank=True, null=True)

    class Meta:
        db_table = 't_car'
class TOrder(models.Model):
    """Order header: owner, delivery address, total price (table t_order)."""
    id = models.IntegerField(primary_key=True)
    order_id = models.IntegerField(blank=True, null=True)
    create_time = models.DateTimeField(blank=True, null=True)
    price = models.DecimalField(max_digits=14, decimal_places=2, blank=True, null=True)
    user = models.ForeignKey('TUser', models.DO_NOTHING, blank=True, null=True)
    address = models.ForeignKey(TAddress, models.DO_NOTHING, blank=True, null=True)

    class Meta:
        db_table = 't_order'
class TOrderItem(models.Model):
    """Order line item: one book and its count within a TOrder
    (table t_order_item). The order FK is named `id` to match the legacy
    column name."""
    order_item_id = models.IntegerField(primary_key=True)
    book = models.ForeignKey(TBook, models.DO_NOTHING, blank=True, null=True)
    id = models.ForeignKey(TOrder, models.DO_NOTHING, db_column='id', blank=True, null=True)
    count = models.IntegerField(blank=True, null=True)

    class Meta:
        db_table = 't_order_item'
class TUser(models.Model):
    # Account record (inspectdb-generated).
    user_id = models.AutoField(primary_key=True)
    user_name = models.CharField(max_length=20, blank=True, null=True)
    # SECURITY(review): password is stored as a plain 20-char string —
    # consider hashing before this reaches production.
    password = models.CharField(max_length=20, blank=True, null=True)
    email = models.CharField(max_length=20, blank=True, null=True)
    cell_number = models.CharField(max_length=12, blank=True, null=True)
    class Meta:
        db_table = 't_user'
|
985,814 | 3c58e93f2a757ed60640a879c6c2eb9e6caae875 | """
Move the blocks on tower1 to tower3.
"""
def moveDisks(n, origin, destination, buffer):
    """Recursively move the top *n* disks from *origin* to *destination*.

    Classic Tower of Hanoi: move n-1 disks out of the way onto *buffer*,
    move the largest disk, then move the n-1 disks back on top of it.
    Relies on a moveTop(origin, destination) helper defined elsewhere —
    presumably it pops one disk and pushes it (confirm against caller).
    """
    if n <= 0:
        return
    moveDisks(n - 1, origin, buffer, destination)
    moveTop(origin, destination)
    moveDisks(n - 1, buffer, destination, origin)
985,815 | cd9db6cce95f49deed7baae3b3516ddcdf8906d9 | "Encap Tests"
|
985,816 | 2eb5caf229ebc61c9b4cf8430631d3451eba8fdf | import librosa
import librosa.display as display
import matplotlib.pyplot as plt
import numpy as np
import math
import tensorflow as tf
from os import walk, mkdir
from glob import glob
from tf_record_single import convert_to
def _int64_feature(value):
    # Wrap a single integer as a TFRecord scalar int64 feature.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
    # Wrap a single bytes object as a TFRecord scalar bytes feature.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def make_spectrograms_from_mp3s(dirpath):
    """Slice every .wav file in *dirpath* into fixed-length blocks and write
    each block's magnitude spectrogram to a TFRecord via convert_to().

    (The name says mp3s, but the glob actually selects .wav files.)

    :param dirpath: directory containing the input .wav files
    """
    SAMPLING_RATE = 16000
    FFT_SIZE = 1022  # Frequency resolution
    HOP_LENGTH = None  # int(FFT_SIZE/6.5)
    # DURATION = 8.15
    DURATION = 2.0375  # seconds of audio per spectrogram block
    audios = glob(dirpath + '/*.wav')
    for i, audio in enumerate(audios):
        print("{}th audio : ".format(i), audio)
        y, sr = librosa.core.load(audio, sr=SAMPLING_RATE, mono=True)
        # FIX: pass the actual sample rate. The old call
        # librosa.get_duration(y) used librosa's default sr=22050, so the
        # duration (and hence the block count) was under-estimated for this
        # 16 kHz audio. Recent librosa also makes these arguments keyword-only.
        length = int(librosa.get_duration(y=y, sr=SAMPLING_RATE) / DURATION)
        for j, cutpoint in enumerate(range(length)):
            print("{}/{}".format(j, length))
            # NOTE(review): blocks start at 1-second steps but span DURATION
            # seconds, so consecutive blocks overlap — confirm this windowing
            # is intended (a non-overlapping slice would start at
            # int(cutpoint * DURATION * SAMPLING_RATE)).
            block = y[cutpoint * SAMPLING_RATE:int((cutpoint + DURATION) * SAMPLING_RATE)]
            D = librosa.stft(block, n_fft=FFT_SIZE)
            D = np.abs(D)  # D is complex; keep magnitude only
            print(D.shape)
            D = np.expand_dims(D, axis=2)  # add channel axis for TensorFlow
            name = audio[:-4] + '{}.spec'.format(j)
            convert_to(D, name)
if __name__=='__main__':
    # Script entry point: process the single-instrument dataset directory.
    make_spectrograms_from_mp3s('datasets/bass-flute_single')
985,817 | a553a4e0967214e8cd1889b352b72f25381a5f25 | """This submodule handles all interaction with the LALSuite C API.
It provides a class for inputs and a class for outputs and functions for
reading/writing/comparing them. The inputs method has a 'run' method
for calling the C API.
"""
import numpy as np
import glob
import math
import lal_cuda
# Generate mocks for these if we are building for RTD
lal = lal_cuda.import_mock_RTD("lal")
lalsimulation = lal_cuda.import_mock_RTD("lalsimulation")
# Define a set of default model parameters. These are the ones in the
# 'default' fiducial inputs file in the 'data' directory.
chi1_default = 0.1
chi2_default = 0.2
m1_default = 30
m2_default = 30
chip_default = 0.34
thetaJ_default = 1.1
alpha0_default = 1.5
distance_default = 1000
phic_default = np.pi * 0.4
fref_default = 30
flow_default = 20
fhigh_default = 80
def to_string(inputs, outputs):
    """Convert a paired set of inputs/outputs to an ASCII table.

    Fix: the previous docstring claimed a list of strings was returned;
    the function has always returned one multi-line string.

    :param inputs: An instance of the inputs class.
    :param outputs: An associated instance of the outputs class.
    :return: A single multi-line string (header comments plus one row per frequency).
    """
    header = '# Column 01: frequency\n'
    header += '# 02: hp - real\n'
    header += '# 03: hp - imaginary\n'
    header += '# 04: hc - real\n'
    header += '# 05: hc - imaginary\n'
    rows = [
        "%8.2f %12.5e %12.5e %12.5e %12.5e\n" % (f_i, hp_i.real, hp_i.imag, hc_i.real, hc_i.imag)
        for f_i, hp_i, hc_i in zip(inputs.freqs, outputs.hp, outputs.hc)
    ]
    return header + "".join(rows)
def to_binary(inputs, outputs, filename_label=None):
    """Convert a paired set of inputs/outputs to binary files.

    :param inputs: An instance of the inputs class.
    :param outputs: An associated instance of the outputs class.
    :param filename_label: An optional label for the output files.
    :return: None
    """
    # NOTE(review): write() derives the actual filename from filename_label
    # and ignores its first positional argument, so passing the label
    # positionally here is harmless but misleading.
    inputs.write(filename_label, filename_label=filename_label)
    outputs.write(filename_label, filename_label=filename_label)
def calc_frac_diff(x, y):
    """Calculate the fractional difference of *x* relative to *y*.

    Conventions for a zero reference: if y == 0 and x != 0 the difference is
    defined as 1; if both are zero it is 0.

    :param x: Value to check
    :param y: Reference value
    :return: Non-negative fractional difference
    """
    if y == 0:
        # Zero reference: the difference is either total (1) or none (0).
        return 1. if x != 0. else 0.
    return math.fabs((x - y) / y)
def calc_difference_from_reference(inputs, outputs, verbose=True):
    """Look for a match between the given inputs and the stored reference
    inputs. If found return a dictionary with fractional differences from the
    reference outputs, otherwise print a warning to stdout if verbose=True.

    All reference inputs/outputs were computed by running the
    PhenomPCore script of this package against a version of LALSuite
    compiled from the commit with hash "3494e18e6d".

    :param inputs: An instance of the inputs class
    :param outputs: An associated instance of the outputs class
    :param verbose: Boolean controlling whether logging information is reported
    :return: A dict of average/maximum fractional differences for the real and
        imaginary parts of hp and hc, or None when no matching reference
        dataset is found.
    """
    # Get a list of reference input/output files
    filename_ref_inputs = glob.glob(lal_cuda.full_path_datafile("inputs.dat*"))
    filename_ref_outputs = [
        filename_ref_input_i.replace(
            "inputs.dat",
            "outputs.dat") for filename_ref_input_i in filename_ref_inputs]
    # Look to see if the given inputs are in the stored reference inputs
    filename_ref_output = None
    for filename_ref_input_i, filename_ref_output_i in zip(filename_ref_inputs, filename_ref_outputs):
        inputs_i = inputs.read(filename_ref_input_i)
        # Check to see if this set of inputs matches the set that has been passed
        if(inputs_i == inputs):
            # NOTE(review): inputs_ref is assigned but never used below.
            inputs_ref = inputs_i
            filename_ref_output = filename_ref_output_i
            break
    # Perform check if a match has been found
    if(not filename_ref_output):
        lal_cuda.log.warning(
            "Checking could not be performed: reference data set with given inputs (%s) not found." %
            (inputs))
    else:
        if(verbose):
            lal_cuda.log.open('Performing test...')
        # Read reference dataset's outputs
        outputs_ref = outputs.read(filename_ref_output)
        # Compute statistics of difference from test reference:
        # running sums for the averages, running maxima for the peaks.
        hpval_real_diff_avg = 0.
        hpval_imag_diff_avg = 0.
        hcval_real_diff_avg = 0.
        hcval_imag_diff_avg = 0.
        hpval_real_diff_max = 0.
        hpval_imag_diff_max = 0.
        hcval_real_diff_max = 0.
        hcval_imag_diff_max = 0.
        for (hp_i, hc_i, hp_ref_i, hc_ref_i) in zip(outputs.hp, outputs.hc, outputs_ref.hp, outputs_ref.hc):
            hpval_real_diff_i = calc_frac_diff(hp_i.real, hp_ref_i.real)
            hpval_imag_diff_i = calc_frac_diff(hp_i.imag, hp_ref_i.imag)
            hcval_real_diff_i = calc_frac_diff(hc_i.real, hc_ref_i.real)
            hcval_imag_diff_i = calc_frac_diff(hc_i.imag, hc_ref_i.imag)
            hpval_real_diff_avg += hpval_real_diff_i
            hpval_imag_diff_avg += hpval_imag_diff_i
            hcval_real_diff_avg += hcval_real_diff_i
            hcval_imag_diff_avg += hcval_imag_diff_i
            hpval_real_diff_max = max([hpval_real_diff_max, hpval_real_diff_i])
            hpval_imag_diff_max = max([hpval_imag_diff_max, hpval_imag_diff_i])
            hcval_real_diff_max = max([hcval_real_diff_max, hcval_real_diff_i])
            hcval_imag_diff_max = max([hcval_imag_diff_max, hcval_imag_diff_i])
        # Convert the running sums into averages.
        hpval_real_diff_avg /= float(len(outputs.hp))
        hpval_imag_diff_avg /= float(len(outputs.hp))
        hcval_real_diff_avg /= float(len(outputs.hc))
        hcval_imag_diff_avg /= float(len(outputs.hc))
        # Report results
        if(verbose):
            lal_cuda.log.comment(' Average/maximum real(hp) fractional difference: %.2e/%.2e' %
                                 (hpval_real_diff_avg, hpval_real_diff_max))
            lal_cuda.log.comment(' Average/maximum imag(hp) fractional difference: %.2e/%.2e' %
                                 (hpval_imag_diff_avg, hpval_imag_diff_max))
            lal_cuda.log.comment(' Average/maximum real(hc) fractional difference: %.2e/%.2e' %
                                 (hcval_real_diff_avg, hcval_real_diff_max))
            lal_cuda.log.comment(' Average/maximum imag(hc) fractional difference: %.2e/%.2e' %
                                 (hcval_imag_diff_avg, hcval_imag_diff_max))
            lal_cuda.log.close("Done.")
        return {
            'hpval_real_diff_avg': hpval_real_diff_avg,
            'hpval_real_diff_max': hpval_real_diff_max,
            'hpval_imag_diff_avg': hpval_imag_diff_avg,
            'hpval_imag_diff_max': hpval_imag_diff_max,
            'hcval_real_diff_avg': hcval_real_diff_avg,
            'hcval_real_diff_max': hcval_real_diff_max,
            'hcval_imag_diff_avg': hcval_imag_diff_avg,
            'hcval_imag_diff_max': hcval_imag_diff_max}
class outputs(object):
    """This class manages the output (hp and hc complex arrays) from a LALSUite
    model call.

    An instance can be created using the default constructor or the
    :func:`read` method. An instance can be written using the
    :func:`write` method. Equivalence of two instances is defined by
    the element-wise equivalence of their hp and hc arrays.
    """

    def __init__(self, return_from_SimIMRPhenomPFrequencySequence=None, hp=None, hc=None):
        """Create an instance of the outputs class. Optionally pass complex
        arrays hp and hc to initialize from.

        Exactly one initialization source must be given: either both hp and
        hc arrays, or the structure returned from the LALSuite C API.

        :param return_from_SimIMRPhenomPFrequencySequence: The data structure returned from the LALSUite C API.
        :param hp: Complex floating point array
        :param hc: Complex floating point array
        """
        # Idiom fix: `x is None` instead of isinstance(x, type(None)).
        if(isinstance(hp, np.ndarray) and isinstance(hc, np.ndarray) and return_from_SimIMRPhenomPFrequencySequence is None):
            self.hp = hp
            self.hc = hc
        elif(hp is None and hc is None and return_from_SimIMRPhenomPFrequencySequence is not None):
            self.hp = return_from_SimIMRPhenomPFrequencySequence[0].data.data
            self.hc = return_from_SimIMRPhenomPFrequencySequence[1].data.data
        else:
            lal_cuda.log.error("Invalid inputs to SimIMRPhenomPFrequencySequence outputs constructor.")
            exit(1)

    @classmethod
    def read(cls, filename_datafile_in):
        """Create an instance of the outputs class from a binary file.

        :param filename_datafile_in: Filename to read from.
        :return: An instance of the outputs class.
        """
        with open(lal_cuda.full_path_datafile(filename_datafile_in), "rb") as outputs_file:
            # FIX: np.asscalar() was deprecated in NumPy 1.16 and removed in
            # 1.23; index the 1-element array and convert explicitly instead.
            n_freqs = int(np.fromfile(outputs_file, dtype=np.int32, count=1)[0])
            hp = np.fromfile(outputs_file, dtype=np.complex128, count=n_freqs)
            hc = np.fromfile(outputs_file, dtype=np.complex128, count=n_freqs)
        return(cls(hp=hp, hc=hc))

    def write(self, filename_outputs_out, filename_label=None, verbose=True):
        """Write the instance of the output class to a binary file.

        :param filename_outputs_out: Filename to write to (overridden below; see note).
        :param filename_label: Filename modifier.
        :param verbose: Boolean flag indicating whether to write activity to the log.
        :return: None
        """
        # Set filename.  NOTE: the passed filename is always replaced by
        # "outputs.dat[.label]" — kept for backward compatibility.
        if(filename_label):
            filename_outputs_out = "outputs.dat." + filename_label
        else:
            filename_outputs_out = "outputs.dat"
        if(verbose):
            lal_cuda.log.open("Writing outputs to '%s'..." % (filename_outputs_out), end='')
        with open(filename_outputs_out, "wb") as outputs_file:
            # Layout: int32 length, then the hp and hc complex128 arrays.
            np.array([len(self.hp)], dtype=np.int32).tofile(outputs_file)
            self.hp.tofile(outputs_file)
            self.hc.tofile(outputs_file)
        if(verbose):
            lal_cuda.log.close("Done.")

    def __eq__(self, other):
        """Test for equivalence of two sets of outputs."""
        return np.array_equal(self.hp, other.hp) and np.array_equal(self.hc, other.hc)

    def __ne__(self, other):
        """Overrides the default implementation (unnecessary in Python 3)"""
        return not self.__eq__(other)
class inputs(object):
    """Container for the model parameters passed to the LALSuite C API.

    Instances can be constructed directly, read from a binary file with
    :func:`read`, and written with :func:`write`; :func:`run` invokes the
    compiled model and returns an :class:`outputs` instance.  Equivalence
    of two instances is element-wise equality of all packed parameters.
    """

    def __init__(
            self,
            chi1=chi1_default,
            chi2=chi2_default,
            m1=m1_default,
            m2=m2_default,
            chip=chip_default,
            thetaJ=thetaJ_default,
            alpha0=alpha0_default,
            distance=distance_default,
            phic=phic_default,
            fref=fref_default,
            mode=1,
            # NOTE(review): mutable default list — safe only because it is
            # never mutated by this class.
            freqs=[
                flow_default,
                fhigh_default,
                -1],
            freqs_from_range=True,
            convert_units=True):
        """Create an instance of the inputs class, for a given set of model
        parameters.

        :param chi1: See LALSuite documentation for a description of this model parameter.
        :param chi2: See LALSuite documentation for a description of this model parameter.
        :param m1: See LALSuite documentation for a description of this model parameter.
        :param m2: See LALSuite documentation for a description of this model parameter.
        :param chip: See LALSuite documentation for a description of this model parameter.
        :param thetaJ: See LALSuite documentation for a description of this model parameter.
        :param alpha0: See LALSuite documentation for a description of this model parameter.
        :param distance: See LALSuite documentation for a description of this model parameter.
        :param phic: See LALSuite documentation for a description of this model parameter.
        :param fref: See LALSuite documentation for a description of this model parameter.
        :param mode: See LALSuite documentation for a description of this model parameter.
        :param freqs: Frequency array (either an element-wise array, or a 3-element description of range)
        :param freqs_from_range: Set to True if the freqs describes a range, rather than an element-wise array
        :param convert_units: If True, scale m1/m2 by MSUN_SI and distance by
            PC_SI * 100 * 1e6 (presumably solar masses -> kg and a Mpc-based
            distance -> metres — TODO confirm the factor of 100).
        """
        self.chi1 = chi1
        self.chi2 = chi2
        self.m1 = m1
        self.m2 = m2
        self.distance = distance
        self.thetaJ = thetaJ
        self.alpha0 = alpha0
        self.chip = chip
        self.phic = phic
        self.fref = fref
        self.mode = mode
        # Perform unit conversions, if requested
        if(convert_units):
            self.m1 = self.m1 * lal.lal.MSUN_SI
            self.m2 = self.m2 * lal.lal.MSUN_SI
            self.distance = self.distance * lal.lal.PC_SI * 100 * 1e6
        # Generate frequency array
        if(freqs_from_range):
            flow = freqs[0]
            fhigh = freqs[1]
            n_freqs = freqs[2]
            # If n_freqs<1, then assume dfreq=1 (one sample per unit frequency).
            if(n_freqs < 1):
                self.freqs = np.linspace(flow, fhigh, (fhigh - flow) + 1)
                self.n_freqs = len(self.freqs)
            else:
                self.n_freqs = n_freqs
                self.freqs = np.linspace(flow, fhigh, self.n_freqs)
        # If freqs_from_range is false, then assume that freqs specifies a list of frequencies
        else:
            self.freqs = freqs
            self.n_freqs = len(self.freqs)

    def np_floats(self):
        """A numpy array of all floating-point inputs.

        The ordering here defines the on-disk binary layout used by
        read()/write() — do not reorder.

        :return: A numpy array of floats.
        """
        # A numpy-array packaging of the floating-point input parameters
        return np.array([self.chi1, self.chi2, self.chip, self.thetaJ, self.m1, self.m2,
                         self.distance, self.alpha0, self.phic, self.fref], dtype=np.float64)

    def np_ints(self):
        """A numpy array of all integer inputs.

        :return: A numpy array of integers.
        """
        # A numpy-array packaging of the integer input parameters
        return np.array([self.mode, self.n_freqs], dtype=np.int32)

    @classmethod
    def read(cls, filename_datafile_in):
        """Create an instance of the inputs class from a binary file.

        Layout (matching write()): float64 parameter vector, int32 parameter
        vector, then the float64 frequency array.

        :param filename_datafile_in: Filename storing inputs.
        :return: A object of class inputs
        """
        with open(lal_cuda.full_path_datafile(filename_datafile_in), "rb") as inputs_file:
            # Read floating-point parameters
            inputs_np_floats = np.fromfile(inputs_file, dtype=np.float64, count=len(cls().np_floats()))
            chi1 = inputs_np_floats[0]
            chi2 = inputs_np_floats[1]
            chip = inputs_np_floats[2]
            thetaJ = inputs_np_floats[3]
            m1 = inputs_np_floats[4]
            m2 = inputs_np_floats[5]
            distance = inputs_np_floats[6]
            alpha0 = inputs_np_floats[7]
            phic = inputs_np_floats[8]
            fref = inputs_np_floats[9]
            # Read integer-type parameters
            inputs_np_ints = np.fromfile(inputs_file, dtype=np.int32, count=len(cls().np_ints()))
            mode = int(inputs_np_ints[0])
            n_freqs = int(inputs_np_ints[1])
            # Read frequency array
            freqs = np.fromfile(inputs_file, dtype=np.float64, count=n_freqs)
        # convert_units=False because stored values are already in SI units.
        return(cls(chi1=chi1, chi2=chi2, m1=m1, m2=m2, chip=chip, thetaJ=thetaJ, alpha0=alpha0, distance=distance, phic=phic, fref=fref, mode=mode, freqs=freqs, freqs_from_range=False, convert_units=False))

    def write(self, filename_inputs_out, filename_label=None, verbose=True):
        """Write an instance of an object of class inputs to a binary file.

        :param filename_inputs_out: Filename to write to (always overridden
            by "inputs.dat[.label]"; kept for backward compatibility).
        :param filename_label: Filename modifier.
        :param verbose: Boolean flag indicating whether to write activity to the log.
        :return: None
        """
        # Set filename
        if(filename_label):
            filename_inputs_out = "inputs.dat." + filename_label
        else:
            filename_inputs_out = "inputs.dat"
        if(verbose):
            lal_cuda.log.open("Writing inputs to '%s'..." % (filename_inputs_out), end='')
        with open(filename_inputs_out, "wb") as inputs_file:
            self.np_floats().tofile(inputs_file)
            self.np_ints().tofile(inputs_file)
            self.freqs.tofile(inputs_file)
        if(verbose):
            lal_cuda.log.close("Done.")

    def run(self, buf=None, legacy=False):
        """Call the C-compiled model in lalsuite.

        If legacy is true, then assume that the compiled version of
        lalsuite we are using does not have PhenomP buffer support.

        :param buf: A buffer, as generated by the ADACS version of LALSuite
        :param legacy: True if using a version of LALSuite not compiled with the ADACS GPU buffer support
        :return: An instance of the outputs class
        """
        if(legacy):
            return(outputs(return_from_SimIMRPhenomPFrequencySequence=lalsimulation.SimIMRPhenomPFrequencySequence(
                self.freqs,
                self.chi1,
                self.chi2,
                self.chip,
                self.thetaJ,
                self.m1,
                self.m2,
                self.distance,
                self.alpha0,
                self.phic,
                self.fref,
                self.mode,
                None)))
        # ... else, assume that we are working with a version of PhenomP that does have buffer support
        else:
            return(outputs(return_from_SimIMRPhenomPFrequencySequence=lalsimulation.SimIMRPhenomPFrequencySequence(
                self.freqs,
                self.chi1,
                self.chi2,
                self.chip,
                self.thetaJ,
                self.m1,
                self.m2,
                self.distance,
                self.alpha0,
                self.phic,
                self.fref,
                self.mode,
                None,
                buf)))

    def __str__(self):
        """Return a string representation of the parameter set.

        Masses and distance are converted back to 'input' units for display
        (inverse of the convert_units scaling in __init__).
        """
        return "chi1=%e chi2=%e m1=%e m2=%e distance=%e thetaJ=%e alpha0=%e chip=%e phic=%e fref=%e mode=%d freqs=[%e...%e]" % (
            self.chi1, self.chi2, self.m1 / lal.lal.MSUN_SI, self.m2 / lal.lal.MSUN_SI, self.distance / (lal.lal.PC_SI * 100 * 1e6), self.thetaJ, self.alpha0, self.chip, self.phic, self.fref, self.mode, self.freqs[0], self.freqs[-1])

    def __eq__(self, other):
        """Test for equivalence of two sets of inputs."""
        return np.array_equal(
            self.np_floats(),
            other.np_floats()) and np.array_equal(
            self.np_ints(),
            other.np_ints()) and np.array_equal(
            self.freqs,
            other.freqs)

    def __ne__(self, other):
        """Overrides the default implementation (unnecessary in Python 3)"""
        return not self.__eq__(other)
|
985,818 | 769d337527d0b1c94560c5d2291fcf8130091224 | from graph import Graph
def Prim(G, s):
    """Compute a minimum spanning tree of *G* starting from vertex *s*.

    Textbook O(V^2) Prim: repeatedly pick the unprocessed vertex with the
    cheapest known connecting edge and relax its neighbours.  -1 stands in
    for "infinity" / "undefined" throughout.

    :param G: graph object exposing __len__ and an adjacency structure
        G.Adj[u] of (vertex, weight) pairs
    :param s: start vertex index
    :return: (edge list of the MST as (parent, child) pairs, total weight)
    """
    vertex_count = len(G)
    dist = [-1] * vertex_count     # cheapest known edge weight into each vertex
    parents = [-1] * vertex_count  # chosen MST predecessor (-1 = root/none)
    dist[s] = 0
    remaining = list(range(vertex_count))
    while remaining:
        # Select the reachable vertex with the smallest candidate weight.
        best = -1
        best_dist = -1
        for candidate in remaining:
            if dist[candidate] == -1:
                continue
            if best == -1 or dist[candidate] < best_dist:
                best = candidate
                best_dist = dist[candidate]
        # Relax edges out of the selected vertex.
        for neighbour, weight in G.Adj[best]:
            if neighbour in remaining and (dist[neighbour] == -1 or weight < dist[neighbour]):
                dist[neighbour] = weight
                parents[neighbour] = best
        remaining.remove(best)
    edges = [(parent, child) for child, parent in enumerate(parents) if parent != -1]
    return edges, sum(dist)
if __name__ == "__main__":
    # Demo: 9-vertex undirected weighted graph (edges added with
    # directed=False and a weight); run Prim from vertex 'a' and print the
    # MST edges by name plus the total weight.
    G = Graph()
    G.addVertex(N=9, names=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'])
    G.addEdge(0, 1, False, 4)
    G.addEdge(0, 7, False, 8)
    G.addEdge(1, 2, False, 8)
    G.addEdge(1, 7, False, 11)
    G.addEdge(2, 3, False, 7)
    G.addEdge(2, 5, False, 4)
    G.addEdge(2, 8, False, 2)
    G.addEdge(3, 4, False, 9)
    G.addEdge(3, 5, False, 14)
    G.addEdge(4, 5, False, 10)
    G.addEdge(5, 6, False, 2)
    G.addEdge(6, 7, False, 1)
    G.addEdge(6, 8, False, 6)
    G.addEdge(7, 8, False, 7)
    #print(G)
    E, weight = Prim(G, 0)
    E_named = [(G.Names[u], G.Names[v]) for u, v in E]
    print(E_named, weight)
985,819 | 11cf594d2536d1aae0e4392a259f8ae5f00eb50b | from app import db
from flask_login import UserMixin
from User_login_db import get_db
# Reservation defaults: default booking length and the maximum number of guests a table can seat.
DEFAULT_RESERVATION_LENGTH = 1
MAX_TABLE_CAPACITY = 6
class Guest(db.Model):
    # A diner, identified by name and a unique phone number.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), index=True)
    phone_number = db.Column(db.String(64), index=True, unique=True)
    def __repr__(self):
        return '<Guest %r>' % (self.name)
class Table(db.Model):
    # A physical restaurant table and the number of seats it offers.
    id = db.Column(db.Integer, primary_key=True)
    capacity = db.Column(db.Integer, index=True)
class Reservation(db.Model):
    # A booking: which guest, which table, party size, and when.
    id = db.Column(db.Integer, primary_key=True)
    guest_id = db.Column(db.Integer, db.ForeignKey('guest.id'))
    guest = db.relationship('Guest')
    table_id = db.Column(db.Integer, db.ForeignKey('table.id'))
    table = db.relationship('Table')
    num_guests = db.Column(db.Integer, index=True)
    reservation_time = db.Column(db.DateTime, index=True)
class ReservationManager(object):
    # Placeholder for reservation-allocation logic; not implemented yet.
    pass
class User(UserMixin):
    """Application user backed by the `user` table (flask-login compatible)."""

    def __init__(self, id_, name, email, profile_pic):
        self.id = id_
        self.name = name
        self.email = email
        self.profile_pic = profile_pic

    @staticmethod
    def get(user_id):
        """Fetch a user by primary key; return None when no row matches."""
        conn = get_db()
        row = conn.execute(
            "SELECT * FROM user WHERE id = ?", (user_id,)
        ).fetchone()
        if not row:
            return None
        return User(id_=row[0], name=row[1], email=row[2], profile_pic=row[3])

    @staticmethod
    def create(id_, name, email, profile_pic):
        """Insert a new user row and commit immediately."""
        conn = get_db()
        conn.execute(
            "INSERT INTO user (id, name, email, profile_pic)"
            " VALUES (?, ?, ?, ?)",
            (id_, name, email, profile_pic),
        )
        conn.commit()
|
985,820 | 857f6a2f1702e3cdbd397adc9f1a7bd763f58506 | #!/usr/bin/python
#-*- coding: UTF-8 -*-
from django.contrib import admin
from mysite.mmanapp.models import *
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.admin.widgets import FilteredSelectMultiple
class MManUserAdmin(admin.ModelAdmin):
    # Admin list view for users: show nick/id/registration time, filter by id.
    list_display = ('nick', 'userid', 'reg_time')
    list_filter = ('userid',)
class MManPicAdmin(admin.ModelAdmin):
    # Admin list view for pictures: show id/owner/title, filter by picture id.
    list_display = ('pic_id','user','title',)
    list_filter = ('pic_id',)
class MManCommentAdmin(admin.ModelAdmin):
    # Admin list view for comments: show author/picture/time, filter by author.
    list_display = ('userid','pic','time',)
    list_filter = ('userid',)
# Attach the customized admin views above to the default admin site.
admin.site.register(MManUser,MManUserAdmin)
admin.site.register(MManPic,MManPicAdmin)
admin.site.register(MManComment,MManCommentAdmin)
|
985,821 | 5fdb581702afa45c6912cf35e872158fa46f5129 |
# occiput
# Stefano Pedemonte
# Harvard University, Martinos Center for Biomedical Imaging
# Jan 2014, Boston, MA, USA
__all__ = ['RigidTransformationSSD']
from ilang.Models import Model
from ilang.Graphs import ProbabilisticGraphicalModel
from ilang.Samplers import Sampler
from occiput.Core import Image3D
from occiput.Visualization import MultipleVolumes
try:
from NiftyPy.NiftyReg import resample_image_rigid
from NiftyPy.NiftyReg import deriv_intensity_wrt_space_rigid
from NiftyPy.NiftyReg import deriv_intensity_wrt_transformation_rigid
from NiftyPy.NiftyReg import deriv_ssd_wrt_transformation_rigid
from NiftyPy.NiftyReg import gaussian_smoothing
except:
has_NiftyPy = False
print "Please install NiftyPy"
else:
has_NiftyPy = True
import numpy
class ModelRigidSSD(Model):
    """ilang probabilistic model of a rigid transformation under an SSD
    (sum-of-squared-differences) likelihood.

    NOTE(review): both log-probability callbacks below are unfinished stubs.
    """
    variables = {'source':'continuous','target':'continuous','transformation':'continuous'}
    dependencies = [['source','target','directed'],['transformation','target','directed']]
    preferred_samplers = {'transformation':['QuasiNewton_L_BFGS_B']}
    # init
    def init(self):
        pass
    # expose to sampler:
    def log_conditional_probability_transformation(self,T):
        source = self.get_value('source')
        target = self.get_value('target')
        #return -.5*numpy.dot(numpy.dot((x-mu),hessian),(x-mu).T)
        # NOTE(review): the bare name below raises NameError at runtime —
        # it appears to be a deliberate "not implemented" tripwire.
        FIXME
        return 0
    def log_conditional_probability_gradient_transformation(self,T):
        source = self.get_value('source')
        target = self.get_value('target')
        #return -.5*numpy.dot((x-mu),hessian+hessian.T)
        # FIXME: placeholder — returns random values, not a real gradient.
        return numpy.random.rand(6)*4/30000
class RigidTransformationSSD():
    """Rigid (6-parameter) registration of *source* to *target* driven by the
    ModelRigidSSD graphical model and an ilang sampler.

    NOTE(review): Python 2 era code (the module uses a print statement);
    several methods are explicitly unfinished (FIXME markers below).
    """
    def __init__(self,target,source):
        self.target = target
        self.source = source
        # Cache for the source image resampled into target space; invalidated
        # whenever the transformation changes.
        self._resampled_source_cache = None
        self._resampled_source_need_update = True
        self._contruct_ilang_model()
        self._set_transformation(numpy.zeros((1,6)))
    def _contruct_ilang_model(self):
        # Build the graph: target depends on source and the transformation;
        # source and target are observed (given), transformation is sampled.
        self.ilang_model = ModelRigidSSD('RigidSSD')
        self.graph = ProbabilisticGraphicalModel(['target','source','transformation'])
        self.graph.set_nodes_given(['target','source'],True)
        self.graph.set_node_value('source',self.source.get_data())
        self.graph.set_node_value('target',self.target.get_data())
        self.graph.set_node_value('transformation',numpy.zeros((1,6)))
        self.graph.add_dependence(self.ilang_model,{'target':'target','source':'source','transformation':'transformation'})
        self.sampler = Sampler(self.graph)
    def _set_transformation(self,transformation):
        # Store the 6-vector on the graph node and invalidate the cache.
        self.graph.set_node_value('transformation',transformation)
        self._resampled_source_need_update = True
    def _get_transformation(self):
        return self.graph.get_node_value('transformation')
    def estimate_transformation(self,method=None,iterations=30000,trace=True,parameters=None, display_progress=True):
        """Sample/optimize the transformation node; returns the last sample."""
        if method!=None:
            self.sampler.set_node_sampling_method_manual('transformation',method)
        else:
            self.sampler.set_node_sampling_method_auto('transformation')
        last_sample = self.sampler.sample_node('transformation',nsamples=iterations,trace=trace)
        # This is not necessary, but it triggers the cache of the resampled source image:
        self._set_transformation(last_sample)
        return last_sample
    def resample_in_target_space(self,image):
        # FIXME: resampling not implemented — currently returns the image unchanged.
        #print "Resampling .."
        transformation = self._get_transformation()
        return image
    def _get_resampled_source(self):
        #FIXME: remove the next few lines
        # NOTE(review): this early return hands back the *target* whenever the
        # transformation is non-zero — clearly temporary scaffolding.
        if self.transformation.sum() != 0:
            return self.target
        if self._resampled_source_need_update:
            resampled_source = self.resample_in_target_space(self.source)
            self._resampled_source_cache = resampled_source
            self._resampled_source_need_update = False
        else:
            resampled_source = self._resampled_source_cache
        return resampled_source
    def display_in_browser(self,axis=0,shrink=256,rotate=90,subsample_slices=4,scale_factors=None):
        self.display(axis,shrink,rotate,scale_factors,open_browser=True)
    def display(self,axis=0,shrink=256,rotate=90,subsample_slices=4,scale_factors=None,open_browser=False):
        # Show the resampled source overlaid/alongside the target.
        resampled = self.resampled_source
        D = MultipleVolumes([resampled,self.target], axis, shrink, rotate, subsample_slices, scale_factors, open_browser)
        return D.display()
    def _repr_html_(self):
        return self.display()._repr_html_()
    # Property access: reading/assigning .transformation goes through the
    # graph node; .resampled_source is read-only and cached.
    transformation = property(_get_transformation, _set_transformation, None)
    resampled_source = property(_get_resampled_source, None, None)
|
985,822 | 754ebee0b6fab79b3b6e5c2c42d0a3530f3792c1 | {
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "habitats.py",
"provenance": [],
"collapsed_sections": [],
"authorship_tag": "ABX9TyM9sWj41L/DcsxJJ5ZOD6oO",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/witalomonteiro/zookeeper/blob/main/habitats_py.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"metadata": {
"id": "6YzsvZb-EnJw",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "93a3922a-f2f8-465a-800c-6b70025397b6"
},
"source": [
"camel = r\"\"\"\n",
"Switching on the camera in the camel habitat...\n",
" ___.-''''-.\n",
"/___ @ |\n",
"',,,,. | _.'''''''._\n",
" ' | / \\\n",
" | \\ _.-' \\\n",
" | '.-' '-.\n",
" | ',\n",
" | '',\n",
" ',,-, ':;\n",
" ',,| ;,, ,' ;;\n",
" ! ; !'',,,',',,,,'! ; ;:\n",
" : ; ! ! ! ! ; ; :;\n",
" ; ; ! ! ! ! ; ; ;,\n",
" ; ; ! ! ! ! ; ;\n",
" ; ; ! ! ! ! ; ;\n",
" ;,, !,! !,! ;,;\n",
" /_I L_I L_I /_I\n",
"Look at that! Our little camel is sunbathing!\"\"\"\n",
"\n",
"lion = r\"\"\"\n",
"Switching on the camera in the lion habitat...\n",
" ,w.\n",
" ,YWMMw ,M ,\n",
" _.---.._ __..---._.'MMMMMw,wMWmW,\n",
" _.-\"\" ''' YP\"WMMMMMMMMMb,\n",
" .-' __.' .' MMMMW^WMMMM;\n",
" _, .'.-'\"; `, /` .--\"\" :MMM[==MWMW^;\n",
" ,mM^\" ,-'.' / ; ; / , MMMMb_wMW\" @\\\n",
",MM:. .'.-' .' ; `\\ ; `, MMMMMMMW `\"=./`-,\n",
"WMMm__,-'.' / _.\\ F'''-+,, ;_,_.dMMMMMMMM[,_ / `=_}\n",
"\"^MP__.-' ,-' _.--\"\" `-, ; \\ ; ;MMMMMMMMMMW^``; __|\n",
" / .' ; ; ) )`{ \\ `\"^W^`, \\ :\n",
" / .' / ( .' / Ww._ `. `\"\n",
" / Y, `, `-,=,_{ ; MMMP`\"\"-, `-._.-,\n",
" (--, ) `,_ / `) \\/\"\") ^\" `-, -;\"\\:\n",
"The lion is roaring!\"\"\"\n",
"\n",
"deer = r\"\"\"\n",
"Switching on the camera in the deer habitat...\n",
" /| |\\\n",
"`__\\\\ //__'\n",
" || ||\n",
" \\__`\\ |'__/\n",
" `_\\\\ //_'\n",
" _.,:---;,._\n",
" \\_: :_/\n",
" |@. .@|\n",
" | |\n",
" ,\\.-./ \\\n",
" ;;`-' `---__________-----.-.\n",
" ;;; \\_\\\n",
" ';;; |\n",
" ; | ;\n",
" \\ \\ \\ | /\n",
" \\_, \\ / \\ |\\\n",
" |';| |,,,,,,,,/ \\ \\ \\_\n",
" | | | \\ / |\n",
" \\ \\ | | / \\ |\n",
" | || | | | | |\n",
" | || | | | | |\n",
" | || | | | | |\n",
" |_||_| |_| |_|\n",
" /_//_/ /_/ /_/\n",
"Our 'Bambi' looks hungry. Let's go to feed it!\"\"\"\n",
"\n",
"goose = r\"\"\"\n",
"Switching on the camera in the goose habitat...\n",
"\n",
" _\n",
" ,-\"\" \"\".\n",
" ,' ____ `.\n",
" ,' ,' `. `._\n",
" (`. _..--.._ ,' ,' \\ \\\n",
" (`-.\\ .-\"\" \"\"' / ( d _b\n",
" (`._ `-\"\" ,._ ( `-( \\\n",
" <_ ` ( <`< \\ `-._\\\n",
" <`- (__< < :\n",
" (__ (_<_< ;\n",
" `------------------------------------------\n",
"The goose is staring intently at you... Maybe it's time to change the channel?\"\"\"\n",
"\n",
"bat = r\"\"\"\n",
"Switching on the camera in the bat habitat...\n",
"_________________ _________________\n",
" ~-. \\ |\\___/| / .-~\n",
" ~-. \\ / o o \\ / .-~\n",
" > \\\\ W // <\n",
" / /~---~\\ \\\n",
" /_ | | _\\\n",
" ~-. | | .-~\n",
" ; \\ / i\n",
" /___ /\\ /\\ ___\\\n",
" ~-. / \\_/ \\ .-~\n",
" V V\n",
"This bat looks like it's doing fine.\"\"\"\n",
"\n",
"rabbit = r\"\"\"\n",
"Switching on the camera in the rabbit habitat...\n",
" ,\n",
" /| __\n",
" / | ,-~ /\n",
" Y :| // /\n",
" | jj /( .^\n",
" >-\"~\"-v\"\n",
" / Y\n",
" jo o |\n",
" ( ~T~ j\n",
" >._-' _./\n",
" / \"~\" |\n",
" Y _, |\n",
" /| ;-\"~ _ l\n",
"/ l/ ,-\"~ \\\n",
"\\//\\/ .- \\\n",
" Y / Y\n",
" l I !\n",
" ]\\ _\\ /\"\\\n",
"(\" ~----( ~ Y. )\n",
"It looks like we will soon have more rabbits!\"\"\"\n",
"\n",
"\n",
"habitats = [camel, lion, deer, goose, bat, rabbit]\n",
"\n",
"x = 0\n",
"while True:\n",
" x = input(\"Please enter the number of the habitat you would like to view: \")\n",
" if x != 'exit':\n",
" print(f\"{habitats[int(x)]}\")\n",
" else:\n",
" print(\"See you later!\")\n",
" break"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
"Please enter the number of the habitat you would like to view: 0\n",
"\n",
"Switching on the camera in the camel habitat...\n",
" ___.-''''-.\n",
"/___ @ |\n",
"',,,,. | _.'''''''._\n",
" ' | / \\\n",
" | \\ _.-' \\\n",
" | '.-' '-.\n",
" | ',\n",
" | '',\n",
" ',,-, ':;\n",
" ',,| ;,, ,' ;;\n",
" ! ; !'',,,',',,,,'! ; ;:\n",
" : ; ! ! ! ! ; ; :;\n",
" ; ; ! ! ! ! ; ; ;,\n",
" ; ; ! ! ! ! ; ;\n",
" ; ; ! ! ! ! ; ;\n",
" ;,, !,! !,! ;,;\n",
" /_I L_I L_I /_I\n",
"Look at that! Our little camel is sunbathing!\n",
"Please enter the number of the habitat you would like to view: exit\n",
"See you later!\n"
],
"name": "stdout"
}
]
}
]
}
|
985,823 | 10d7debadc2534d515da4804c872c3c2aca5a503 | #!/usr/bin/python
## COraTt703.py
#
# Copyright 2010 Joxean Koret <joxeankoret@yahoo.es>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import sys
import socket
from lib.libexploit import CIngumaModule
"""
Oracle TimesTen 7.02 Remote DOS #3 for Inguma
Tested against Oracle TimesTen 7.02 on GNU/Linux.
"""
name = "oratt70dos3"
brief_description = "Oracle Times Ten 70 infinite loop #3"
type = "dos"
affects = ["Oracle TimesTen 7.02 and 7.03"]
description = """
Oracle TimesTen 7.0X is vulnerable to multiple remote denial of service conditions.
That one exploits a vulnerability found only in Linux versions.
"""
patch = "Fixed in version 7.0.5"
category = "dos"
discoverer = "Joxean Koret"
author = "Joxean Koret <joxeankoret@yahoo.es>"
class COraTt703(CIngumaModule):
    """Inguma exploit module: Oracle TimesTen 7.02 remote DoS #3."""
    target = "" # Main target
    port = 17000  # TimesTen daemon port
    waitTime = 0
    timeout = 1
    exploitType = 1
    # NOTE(review): class-level mutables are shared across all instances.
    services = {}
    results = {}
    # Shadows the builtin name 'dict'; kept for framework compatibility.
    dict = None
    interactive = True

    def run(self):
        # Payload not implemented in this module body; always reports success.
        return True

    def printSummary(self):
        """Called after run() when run() returns True; nothing to report here."""
        pass
|
985,824 | 21983675a9ba198a13ebc7e9cae25b33b080745b | lim = 1000
triangles = {}
m = 2
while m**2 < lim:
for n in range(1, m):
k = 1
perimeter = 2 * k * (m ** 2) + 2 * k * m * n
while perimeter <= lim:
if perimeter not in triangles:
triangles[perimeter] = []
triangle = sorted([k*(m**2 - n**2), k*(m**2 + n**2), 2*k*m*n])
if triangle not in triangles[perimeter]:
triangles[perimeter].append(triangle)
k += 1
perimeter = 2 * k * (m ** 2) + 2 * k * m * n
m += 1
triangle_count = {perimeter: len(triangle_list) for perimeter, triangle_list in triangles.items()}
max_value_key = max(triangle_count, key=triangle_count.get)
print(max_value_key)
|
985,825 | 09b6a48876c58fced1a91b99048197d8bcc7054b | from flask import Flask, render_template, request, url_for
from flask_pymongo import PyMongo
from flask_wtf import FlaskForm
from wtforms import StringField, DecimalField, SelectField, DateField
app = Flask(__name__)
# SECURITY NOTE(review): the secret key and MongoDB credentials are
# hard-coded in source; move them to environment variables or a config
# file kept out of version control, and rotate the exposed credentials.
app.config["SECRET_KEY"] = "dnAD67vf68PcDArJ"
app.config[
    "MONGO_URI"] = "mongodb+srv://lalbe019:0dVfX6Foolm6lhX2@cluster0.pfmkn.mongodb.net/<dbname>?retryWrites=true&w=majority"
mongo = PyMongo(app)
class Expenses(FlaskForm):
    """WTForms form for entering a single expense record."""
    description = StringField("Description")
    # Fixed category vocabulary; must match the category names used by
    # the index view's per-category totals.
    category = SelectField("Category", choices=[
        ("rent", "Rent"),
        ("electricity", "Electricity"),
        ("phone", "Phone"),
        ("groceries", "Groceries"),
        ("entertainment", "Entertainment"),
        ("restaurants", "Restaurants"),
        ("gas", "Gas")])
    cost = DecimalField("Cost")
    # NOTE: expects MM-DD-YYYY input; templates must use the same format.
    date = DateField("Date", format='%m-%d-%Y')
def get_total_expenses(category):
    """Sum the cost of every stored expense in *category* (0.0 if none)."""
    matching = mongo.db.expenses.find({"category": category})
    return sum(float(doc["cost"]) for doc in matching)
@app.route('/')
def index():
    """Render the overview page with the grand total and per-category totals."""
    grand_total = sum(float(doc["cost"]) for doc in mongo.db.expenses.find())
    categories = ["rent", "electricity", "phone", "groceries",
                  "entertainment", "restaurants", "gas"]
    expensesByCategory = [(cat, get_total_expenses(cat)) for cat in categories]
    return render_template("index.html",
                           expenses=grand_total,
                           expensesByCategory=expensesByCategory)
@app.route('/addExpenses', methods=["GET", "POST"])
def addExpenses():
    """Show the expense form; on POST persist the expense and confirm."""
    form = Expenses(request.form)
    if request.method != "POST":
        return render_template("addExpenses.html", form=form)
    document = {
        "description": request.form["description"],
        "category": request.form["category"],
        "cost": float(request.form["cost"]),
        "date": request.form["date"],
    }
    mongo.db.expenses.insert_one(document)
    return render_template("expenseAdded.html")
# Run the development server only when executed directly; the original
# called app.run() unconditionally, which started the server as a side
# effect of merely importing this module.
if __name__ == "__main__":
    app.run()
|
985,826 | 2f91320a35a457592b207a545f09210fc3a039de | number = float(input())
# Read the desired number of decimal places, then print the previously
# read number rounded/padded to that many places.
count = int(input())
print(f'{number:.{count}f}')
|
985,827 | 9470a3fc3afbf2c43b373950690b80566d2c24e3 | # Generated by Django 2.0.2 on 2018-03-06 23:29
from django.db import migrations
class Migration(migrations.Migration):
    """Set human-readable admin names for the EmailVerifyRecord model.

    Auto-generated by Django; applied migrations should not be hand-edited.
    """
    dependencies = [
        ('users', '0003_auto_20180306_1014'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='emailverifyrecord',
            options={'verbose_name': '邮箱验证码', 'verbose_name_plural': '邮箱验证码'},
        ),
    ]
|
985,828 | aad25f9ff7b62341750fbca7705e2ea60cfbe87e | from cr_scene.models import CrEventScene, CrScene
def get_scene_by_id(scene_id, web=True):
    """Resolve a CrScene from an ambiguous identifier.

    Web lookups (web=True) try CrEventScene.cr_scene_instance first, then
    CrEventScene.id, and map the hit to its CrScene. Non-web lookups try
    CrScene.scene_id, then CrScene.id. Returns None when nothing matches
    or scene_id is None.
    """
    if scene_id is None:
        return None
    if not web:
        scene = CrScene.objects.filter(scene_id=scene_id).first()
        if scene is None:
            scene = CrScene.objects.filter(id=scene_id).first()
        return scene
    event = CrEventScene.objects.filter(cr_scene_instance=scene_id).first()
    if event is None:
        event = CrEventScene.objects.filter(id=scene_id).first()
    if event is None:
        return None
    return CrScene.objects.filter(id=event.cr_scene_id).first()
def get_cr_scene_by_id(cr_scene_id, cr_event_id):
    """Prefer resolving via the event-scene link; fall back to the raw id."""
    if not cr_event_id:
        return CrScene.objects.filter(id=cr_scene_id).first()
    link = CrEventScene.objects.filter(id=cr_event_id).first()
    return link.cr_scene if link else None
|
985,829 | 8b9381d1b8b276704836d71617e8f46e8551b35a | from gladier import GladierBaseTool, generate_flow_definition
def https_download_file(**data):
    """Download a file from an HTTPS server into ``file_path``.

    Expected keys in ``data``:
        server_url (str): base URL of the server (required).
        file_name  (str): remote file name (required).
        file_path  (str): local directory to store the file in.
        headers    (dict): optional HTTP headers; '' (the flow default)
                           is normalized to None.

    Returns the full local path of the (possibly pre-existing) file.
    Raises NameError when server_url/file_name are missing and
    requests.HTTPError on a failed download.
    """
    import os

    server_url = data.get('server_url', '')
    file_name = data.get('file_name', '')
    file_path = data.get('file_path', '')
    # Bug fix: the flow default for headers is '', which is not a valid
    # requests headers value; normalize falsy values to None.
    headers = data.get('headers') or None

    # Bug fix: the original compared against None, but the defaults above
    # are '' so the missing-input errors could never fire.
    if not server_url:
        raise NameError('No `server URL` specified')
    if not file_name:
        raise NameError('No `file_name` specified')

    # NOTE: os.path.join kept from the original for URL assembly; assumes
    # POSIX separators.
    file_url = os.path.join(server_url, file_name)
    # exist_ok avoids the check-then-create race of the original.
    os.makedirs(file_path, exist_ok=True)

    full_name = os.path.join(file_path, file_name)
    if not os.path.isfile(full_name):
        # Imported lazily so input-validation errors do not require requests.
        import requests
        r = requests.get(file_url, headers=headers)
        # Bug fix: the original did `raise r.raise_for_status()`, which
        # raises None (a TypeError) for non-200 success codes such as 201.
        r.raise_for_status()
        with open(full_name, 'wb') as fh:  # close the handle deterministically
            fh.write(r.content)
    return full_name
@generate_flow_definition
class HttpsDownloadFile(GladierBaseTool):
    """Gladier tool registering https_download_file as its compute function."""
    compute_functions = [https_download_file]
    # Default flow input; headers defaults to an empty string.
    flow_input = {
        'headers':'',
    }
    # Keys the flow payload must supply before this tool can run.
    required_input = [
        'server_url',
        'file_name',
        'file_path',
        'headers',
        'compute_endpoint'
    ]
|
985,830 | ebfe6e213237f6057b437538308905265fdd2ac2 | from urllib.request import urlretrieve
import os
def create_directory(dirname):
    """Create *dirname* (including parents) if it does not already exist.

    Uses exist_ok=True, which is atomic with respect to concurrent
    creators — the original's exists()-then-makedirs() check could race.
    """
    os.makedirs(dirname, exist_ok=True)
def downloader(urlarray):
    """Download every URL in urlarray[1:] into a folder named after urlarray[0].

    urlarray[0] supplies the folder name (its last '/'-component); the
    remaining entries are fetched and saved as 0.jpg, 1.jpg, ...

    Fixes vs. the original: iterates a slice instead of pop(0), so the
    caller's list is no longer mutated; uses enumerate instead of a manual
    counter; builds paths with os.path.join.
    """
    foldername = urlarray[0].split('/')[-1]
    create_directory(foldername)
    for count, link in enumerate(urlarray[1:]):
        filename = os.path.join(foldername, str(count) + ".jpg")
        urlretrieve(link, filename)
        print("{} is successfully downloaded".format(link))
985,831 | 8c0a7ee4bc3f8bff16ea3ff3002a27e58de95f3c | # Task:
# In the language of your choice, please write a function that takes in a list of unique people and returns a list of the people sorted.
# People have a name, age, and social security number. Their social security number is guaranteed to be unique.
# The people should be sorted by name (alphabetically) and age (oldest to youngest).
# When people have the same name and age, they should be sorted in reverse order of how they are in the original list.
# (When we say “list” you can interpret it as array, list, collection, etc.)
import json
import collections
import argparse
import sys
import pprint
from classes.quick_sort import QuickSort
TEST_FILENAME = 'test_people.json'
def sort_people_from_file():
    """CLI entry point: load a people JSON file, sort it, pretty-print it."""
    # Allow overriding the input file from the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', help="Select a file of people for sorting")
    cli_args = parser.parse_args()
    chosen_file = cli_args.filename or TEST_FILENAME
    # Read, sort and display the data.
    with open(chosen_file) as json_file:
        people = json.load(json_file)
        ordered = sort_people(people)
        print("Sorted people in %s:" % chosen_file)
        pprint.pprint(ordered)
# standardizes the data in preparation for sorting
def transform_to_sort_dimensions(raw_people):
    """Index people as ``{name: {age: [ssn, ...]}}`` for sorting.

    Each person dict must provide 'name', 'age' and 'ssn'. SSNs that share
    a (name, age) pair are appended in input order.

    Idiom fix: dict.setdefault replaces the original three-branch
    membership chain with identical behavior.
    """
    formatted = {}
    for person in raw_people:
        by_age = formatted.setdefault(person['name'], {})
        by_age.setdefault(person['age'], []).append(person['ssn'])
    return formatted
# this returns the results to the i/o format after sorting
def format_sort_results(sorted_people):
    """Flatten ``{name: {age: ssn_list}}`` back into a list of person dicts.

    Mapping iteration order is preserved, so an insertion-ordered dict (or
    OrderedDict) keeps the sorted order intact. A bare value in place of an
    ssn list is tolerated and treated as a single ssn.
    """
    return [
        {"ssn": ssn, "name": name, "age": age}
        for name, ages in sorted_people.items()
        for age, ssns in ages.items()
        for ssn in (ssns if isinstance(ssns, list) else [ssns])
    ]
# sorting helper function to keep primary script clean
def quick_sort_list(unsorted_list, sort_function = None):
    """Sort *unsorted_list* with the project QuickSort helper.

    When *sort_function* is supplied it overrides the sorter's default
    comparison; returns the sorted list.
    """
    sorter = QuickSort()
    if sort_function:
        sorter.sort_function = sort_function
    last_index = len(unsorted_list) - 1
    return sorter.sort(unsorted_list, 0, last_index)
# sorting implementation
def sort_people(raw_people):
    """Sort people by name (ascending) then age (descending).

    People sharing name and age come out in reverse of their input order.
    Input/output format: a list of {"name", "age", "ssn"} dicts.
    """
    #ensure list is not empty
    if not raw_people:
        return []
    # create list of people indexed by name, then indexed by age (see function for format)
    unsorted_people = transform_to_sort_dimensions(raw_people)
    # sort by name, sorts asc by default
    sorted_names_list = quick_sort_list(list(set(unsorted_people.keys())))
    # ensure the final sort order is maintained
    sorted_people = collections.OrderedDict()
    for name in sorted_names_list:
        # get the sorted ages in desc order within a given name
        sorted_ages_list = quick_sort_list(list(set(unsorted_people[name].keys())), lambda a,b: a > b)
        sorted_people[name] = {}
        for age in sorted_ages_list:
            # storing everything in sorted order by name and age
            sorted_people[name][age] = unsorted_people[name][age]
            # reversing original order of SSNs given
            # (note: reverses the shared list in place, mutating
            # unsorted_people's entry as well — intentional here)
            sorted_people[name][age].reverse()
    # returning the results to their original format (totally optional)
    return format_sort_results(sorted_people)
# for readability to put primary code at beginning of file
if __name__ == '__main__':
sort_people_from_file()
|
def rev1(str1):
    """Print the words of *str1* in reverse order: first the reversed word
    list itself, then the words re-joined with single spaces."""
    words = str1.split(" ")[::-1]
    print(words)
    print(" ".join(words))
def rev(str):
    """Print each word of *str* reversed in place, keeping word order.

    Output carries a leading and trailing single space, matching the
    original accumulator's seed and trailing separator.
    """
    flipped = [word[::-1] for word in str.split(" ")]
    print(" " + " ".join(flipped) + " ")
# Demo driver: read one string and show both reversal styles.
str1=input('enter string')
rev1(str1)
rev(str1)
|
985,833 | 8b6374b98acdf3672d25630ea9fd500971ff0903 | # -*- coding: utf-8 -*-
__author__ = 'Jackie'
class Role(object):
    """A combat role with bounded health and a fixed attack value.

    Health is clamped to [0, max_health]. Liveness is derived from current
    health; the original kept a private __is_alive flag that was
    initialized to False even at full health and never read, so it has
    been removed.
    """
    def __init__(self, max_health, atk, name=''):
        self.__max_health = max_health
        self.__cur_health = max_health
        self.__atk = atk
        self.__name = name

    def get_name(self):
        """Return the role's display name."""
        return self.__name

    def is_alive(self):
        """True while current health is above zero."""
        return self.__cur_health > 0

    def get_atk(self):
        """Return the attack value."""
        return self.__atk

    def get_health(self):
        """Return current health."""
        return self.__cur_health

    def add_health(self, v):
        """Heal by *v*, clamped at max health."""
        if self.__cur_health + v < self.__max_health:
            self.__cur_health += v
        else:
            self.__cur_health = self.__max_health  # fully healed

    def rem_health(self, v):
        """Take *v* damage, clamped at zero."""
        self.__cur_health -= v
        if self.__cur_health < 0:
            self.__cur_health = 0

    def __str__(self):
        return "Max: %d, Current: %d, Attack: %d" % (self.__max_health, self.__cur_health, self.__atk)

    def __repr__(self):
        return "Max: %d, Current: %d, Attack: %d" % (self.__max_health, self.__cur_health, self.__atk)
|
985,834 | 4d8ee70476d58656892082ac6207e6572e2096e0 | # Generated by Django 2.2.6 on 2021-04-07 10:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Comment model linked to a post and its author.

    Auto-generated by Django; applied migrations should not be hand-edited.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('posts', '0018_auto_20210406_1716'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(default='ваш текст', help_text='Напишите, что вы думаете по этой теме.', verbose_name='комментарий')),
                ('created', models.DateTimeField(auto_now_add=True, help_text='дата и время публикации комментария на сайте.', verbose_name='дата и время публикации')),
                ('author', models.ForeignKey(help_text='Пользователь оставивший комментарий.', on_delete=django.db.models.deletion.CASCADE, related_name='author_comment', to=settings.AUTH_USER_MODEL, verbose_name='автор')),
                ('post', models.ForeignKey(help_text='Сообщение.', on_delete=django.db.models.deletion.CASCADE, related_name='comment_post', to='posts.Post', verbose_name='пост')),
            ],
        ),
    ]
|
985,835 | 1efd36c4732ea37b1f7b9aa5dc85768752680108 | from datetime import date
from django.db import models
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from wagtail.core.models import Page, Orderable
from wagtail.core.fields import RichTextField
from wagtail.admin.edit_handlers import FieldPanel, MultiFieldPanel, InlinePanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.images.models import Image
from wagtail.search import index
from common.models import CarouselItem, RelatedLink, LinkFields
from modelcluster.fields import ParentalKey
from modelcluster.tags import ClusterTaggableManager
from taggit.models import Tag, TaggedItemBase
# Individual Blog Page
class BlogPageCarouselItem(Orderable, CarouselItem):
    """Orderable carousel item attached to a BlogPage."""
    page = ParentalKey(
        "blog.BlogPage", on_delete=models.CASCADE, related_name="carousel_items"
    )
class BlogPageRelatedLink(Orderable, RelatedLink):
    """Orderable related link attached to a BlogPage."""
    page = ParentalKey(
        "blog.BlogPage", on_delete=models.CASCADE, related_name="related_links"
    )
class BlogPageTag(TaggedItemBase):
    """Through-model connecting taggit tags to BlogPage."""
    content_object = ParentalKey(
        "blog.BlogPage", on_delete=models.CASCADE, related_name="tagged_items"
    )
class BlogPage(Page):
    """A single blog post with a rich-text body, tags and optional feed image."""
    body = RichTextField()
    tags = ClusterTaggableManager(through=BlogPageTag, blank=True)
    date = models.DateField("Post date")
    # Posts may only be created underneath a BlogIndexPage.
    parent_page_types = ["blog.BlogIndexPage"]
    feed_image = models.ForeignKey(
        "wagtailimages.Image",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name="+",
    )
    search_fields = Page.search_fields + [index.SearchField("body")]
    @property
    def blog_index(self):
        # Find closest ancestor which is a blog index
        return self.get_ancestors().type(BlogIndexPage).last()
# Admin panels are attached after the class definition (legacy wagtail style).
BlogPage.content_panels = [
    FieldPanel("title", classname="full title"),
    FieldPanel("date"),
    FieldPanel("body", classname="full"),
    InlinePanel("carousel_items", label="Carousel items"),
]
BlogPage.promote_panels = Page.promote_panels + [
    ImageChooserPanel("feed_image"),
    FieldPanel("tags"),
]
# Blog index page
class BlogIndexPageRelatedLink(Orderable, RelatedLink):
    """Orderable related link attached to a BlogIndexPage."""
    page = ParentalKey(
        "blog.BlogIndexPage", on_delete=models.CASCADE, related_name="related_links"
    )
class BlogIndexPage(Page):
    """Listing page for blog posts with tag filtering and pagination."""
    intro = RichTextField(blank=True)
    subpage_types = ["blog.BlogPage"]
    search_fields = Page.search_fields + [index.SearchField("intro")]
    @property
    def blogs(self):
        # Get list of live blog pages that are descendants of this page
        blogs = BlogPage.objects.live().descendant_of(self)
        # Order by most recent date first
        blogs = blogs.order_by("-date")
        return blogs
    def get_context(self, request):
        """Add the (optionally tag-filtered, paginated) posts to the context."""
        # Get blogs
        blogs = self.blogs
        # Filter by tag
        tag = request.GET.get("tag")
        if tag:
            blogs = blogs.filter(tags__name=tag)
        # Pagination
        page = request.GET.get("page")
        paginator = Paginator(blogs, 10)  # Show 10 blogs per page
        try:
            blogs = paginator.page(page)
        except PageNotAnInteger:
            # Non-numeric ?page= falls back to the first page.
            blogs = paginator.page(1)
        except EmptyPage:
            # Out-of-range ?page= clamps to the last page.
            blogs = paginator.page(paginator.num_pages)
        # Update template context
        context = super(BlogIndexPage, self).get_context(request)
        context["blogs"] = blogs
        return context
# Admin panels for the index page, attached post-definition as above.
BlogIndexPage.content_panels = [
    FieldPanel("title", classname="full title"),
    FieldPanel("intro", classname="full"),
]
BlogIndexPage.promote_panels = Page.promote_panels
# Related links
class RelatedLink(LinkFields):
    """Abstract link-with-title base.

    NOTE(review): this redefines the RelatedLink name imported from
    common.models at the top of the file. The Orderable subclasses above
    bound the imported class at class-creation time, so they are
    unaffected, but any later lookup sees this one — verify which class
    is intended before removing either.
    """
    title = models.CharField(max_length=255, help_text="Link title")
    panels = [FieldPanel("title"), MultiFieldPanel(LinkFields.panels, "Link")]
    class Meta:
        abstract = True
|
985,836 | 2c39004997bb62931acaea1641a20a4f9829a40d | # No shebang
"""
Collection of utilities for processing CCSM/POP model output.
Created by Ivan Lima on Wed Aug 25 10:01:03 EDT 2004
"""
import numpy as N
import numpy.ma as MA
import os, Nio
import numpy.linalg as LA
# Environment switch selecting data directories on the popdiag system;
# raises KeyError at import time if POPDIAG is unset.
POPDIAGPY2 = os.environ['POPDIAG']
if POPDIAGPY2 == 'TRUE':
    mappdir = os.environ['ECODATADIR']+'/mapping'
#else:
#    datadir = '/fiji/home/ivan/data'
# days per month
dpm = N.array([31.,28.,31.,30.,31.,30.,31.,31.,30.,31.,30.,31.])
month_name = {
    1 : 'January' , 2 : 'February' , 3 : 'March' ,
    4 : 'April' , 5 : 'May' , 6 : 'June' ,
    7 : 'July' , 8 : 'August' , 9 : 'September' ,
    10 : 'October' , 11 : 'November' , 12 : 'December'
}
# seconds per day
spd = 60. * 60. * 24.
# degrees -> radians conversion factor
deg2rad = N.pi/180.
# model region indices (negative ids appear to mark enclosed/marginal
# seas — TODO confirm against the region mask files)
region_ind = {
    1 : 'Southern Ocean' ,
    2 : 'Pacific Ocean' ,
    3 : 'Indian Ocean' ,
    -4 : 'Persian Gulf' ,
    -5 : 'Red Sea' ,
    6 : 'Atlantic Ocean' ,
    -7 : 'Mediterranean Sea' ,
    8 : 'Labrador Sea' ,
    9 : 'GIN Sea' ,
    10 : 'Arctic Ocean' ,
    11 : 'Hudson Bay' ,
    -12 : 'Baltic Sea' ,
    -13 : 'Black Sea' ,
    -14 : 'Caspian Sea'
}
# model region indices for new/alternative region mask
region_ind_new = {
    1 : 'Pacific Ocean' ,
    2 : 'Indian Ocean' ,
    3 : 'Atlantic Ocean' ,
}
def read_BEC_region_mask(grid):
    """
    Read BEC sub-regions mask for given grid (gx3v5 gx1v6).
    Returns:
        region_mask : mask
        nreg : number of regions
        region_lname : region long name
        region_sname : region short name
    """
    # NOTE: hard-coded per-user path; requires PyNIO.
    region_mask_file = '/home/ivan/Python/data/BEC_REGION_MASK_%s.nc'%grid
    fpreg = Nio.open_file(region_mask_file, 'r')
    nreg = fpreg.dimensions['nreg']
    region_mask = fpreg.variables['REGION_MASK'][:]
    # get region long names (join the per-character arrays stored in the file)
    region_lname = [''.join(fpreg.variables['REGION_lname'][n,:])
        for n in range(nreg)]
    # get region short names
    region_sname = [''.join(fpreg.variables['REGION_sname'][n,:])
        for n in range(nreg)]
    fpreg.close()
    return region_mask, nreg, region_lname, region_sname
def read_BEC_region_mask_popdiag(grid):
    """
    Read BEC sub-regions mask for given grid (gx3v5 gx1v6).
    Returns:
        region_mask : mask
        nreg : number of regions
        region_lname : region long name
        region_sname : region short name
    """
    # Same as read_BEC_region_mask but reading from the shared popdiag path.
    # region_mask_file = '/CESM/bgcwg/obgc_diag/mapping/model_grid/BEC_REGION_MASK_%s.nc'%grid
    region_mask_file = '/glade/p/cesm/bgcwg/obgc_diag/mapping/model_grid/BEC_REGION_MASK_%s.nc'%grid
    fpreg = Nio.open_file(region_mask_file, 'r')
    nreg = fpreg.dimensions['nreg']
    region_mask = fpreg.variables['REGION_MASK'][:]
    # get region long names
    region_lname = [''.join(fpreg.variables['REGION_lname'][n,:])
        for n in range(nreg)]
    # get region short names
    region_sname = [''.join(fpreg.variables['REGION_sname'][n,:])
        for n in range(nreg)]
    fpreg.close()
    return region_mask, nreg, region_lname, region_sname
def read_new_region_mask():
    """
    Read alternative CCSM/POP region mask with only the Pacific, Indian and
    Atlantic oceans.
    Returns:
        region_mask : mask
        nreg : number of regions
        region_lname : region long name
        region_sname : region short name
    """
    nreg = len(region_ind_new)
    # region_mask_file = '/home/ivan/Python/new_REGION_MASK_gx3v5.nc'
    region_mask_file = '/glade/home/emunoz/Python/mapping/model_grid/new_REGION_MASK_gx3v5.nc'
    fpreg = Nio.open_file(region_mask_file, 'r')
    region_mask = fpreg.variables['REGION_MASK'][:]
    fpreg.close()
    # NOTE(review): relies on Python 2 list-returning dict.values()/keys();
    # under Python 3 these would need list(...) wrappers before N.take.
    region_lname = (N.take(region_ind_new.values(),
        N.argsort(N.abs(region_ind_new.keys()))).tolist())
    region_sname = [region[:3].lower() for region in region_lname]
    return region_mask, nreg, region_lname, region_sname
def read_region_mask(grid):
    """
    Read standard CCSM/POP region mask.
    Returns:
        region_mask : mask
        nreg : number of regions
        region_lname : region long name
        region_sname : region short name
    """
    nreg = len(region_ind)
    # NOTE: hard-coded per-user path; requires PyNIO.
    region_mask_file = '/home/ivan/Python/data/%s.nc'%grid
    fpreg = Nio.open_file(region_mask_file, 'r')
    region_mask = fpreg.variables['REGION_MASK'][:]
    fpreg.close()
    # Region names come from the module-level region_ind table, ordered by
    # |region id| (Python 2 dict.values()/keys() semantics assumed).
    region_lname = (N.take(region_ind.values(),
        N.argsort(N.abs(region_ind.keys()))).tolist())
    region_sname = [region[:3].lower() for region in region_lname]
    return region_mask, nreg, region_lname, region_sname
def read_region_mask_popdiag(grid):
    """
    Read standard CCSM/POP region mask.
    Returns:
        region_mask : mask
        nreg : number of regions
        region_lname : region long name
        region_sname : region short name
    """
    nreg = len(region_ind)
    # Same as read_region_mask but reading from the shared popdiag path.
    # region_mask_file = '/CESM/bgcwg/obgc_diag/mapping/model_grid/%s.nc'%grid
    region_mask_file = '/glade/p/cesm/bgcwg/obgc_diag/mapping/model_grid/%s.nc'%grid
    fpreg = Nio.open_file(region_mask_file, 'r')
    region_mask = fpreg.variables['REGION_MASK'][:]
    fpreg.close()
    region_lname = (N.take(region_ind.values(),
        N.argsort(N.abs(region_ind.keys()))).tolist())
    region_sname = [region[:3].lower() for region in region_lname]
    return region_mask, nreg, region_lname, region_sname
def gc_dist(ref_lon, ref_lat, tlon, tlat):
    """Great-circle distance, in degrees of arc, between the reference
    point (ref_lon, ref_lat) and the point(s) (tlon, tlat).

    ref_lon/ref_lat are scalars; tlon/tlat must be numpy arrays (the
    round-off clamp below uses boolean indexing). Uses the haversine
    formula.
    """
    rad_per_deg = N.pi / 180.0  # same value as the module-level deg2rad
    dlon = (tlon - ref_lon) * rad_per_deg
    dlat = (tlat - ref_lat) * rad_per_deg
    hav = (N.sin(dlat / 2.0) ** 2 +
           N.cos(ref_lat * rad_per_deg) * N.cos(tlat * rad_per_deg) *
           N.sin(dlon / 2.0) ** 2)
    hav[hav > 1] = 1.0  # clamp round-off so arcsin stays defined
    return 2.0 * N.arcsin(N.sqrt(hav)) / rad_per_deg
def find_closest_pt(ref_lon, ref_lat, tlon, tlat):
    """
    Find the [i,j] indices of the closest grid point to a
    given location.
    Input:
        ref_lon = longitude of location
        ref_lat = latitude of location
        tlon = model longitude grid (numpy array)
        tlat = model latitude grid (numpy array)
    Output:
        ii = i index
        jj = j index
    """
    # compute great circle distance from location to model grid points
    dist = gc_dist(ref_lon, ref_lat, tlon, tlat)
    # find j index of closest grid point
    # (take + diagonal selects, per column, the distance at that column's
    # minimizing row — TODO confirm this idiom on non-square grids)
    work = N.take(dist,N.argmin(dist,0),0).diagonal()
    jj = N.argsort(work)[0]
    # find i index of closest grid point (same idiom along the other axis)
    work = N.take(dist,N.argmin(dist,1),1).diagonal()
    ii = N.argsort(work)[0]
    return ii, jj
def find_stn_idx(ref_lon, ref_lat, tlon, tlat):
    """
    Finds the [i,j] indices of the 4 model grid points around
    a given location.
    Input:
        ref_lon = longitude of location
        ref_lat = latitude of location
        tlon = model longitude grid (numpy array)
        tlat = model latitude grid (numpy array)
    Output:
        Ilist = list of i indices (the two closest rows)
        Jlist = list of j indices (the two closest columns)
    """
    # compute great circle distance from location to model grid points
    dist = gc_dist(ref_lon, ref_lat, tlon, tlat)
    # find the indices of the two closest grid points with distinct longitudes
    work = N.take(dist,N.argmin(dist,0),0).diagonal()
    Jlist = N.argsort(work)[:2]
    del work
    # find the indices of the two closest grid points with distinct latitudes
    work = N.take(dist,N.argmin(dist,1),1).diagonal()
    Ilist = N.argsort(work)[:2]
    del work
    return Ilist, Jlist
def find_stn(ref_lon, ref_lat, tlon, tlat):
    """Return the longitudes and latitudes of the 4 model grid points
    surrounding (ref_lon, ref_lat).

    Input:
        ref_lon, ref_lat = location (scalars)
        tlon, tlat       = model longitude/latitude grids (numpy arrays)
    Output:
        lonlist, latlist = numpy arrays holding the 4 surrounding points,
                           ordered i-major / j-minor as in find_stn_idx.
    """
    # indices of the 4 model grid points around the location
    Ilist, Jlist = find_stn_idx(ref_lon, ref_lat, tlon, tlat)
    lonlist = N.array([tlon[i, j] for i in Ilist for j in Jlist])
    latlist = N.array([tlat[i, j] for i in Ilist for j in Jlist])
    return lonlist, latlist
def extract_loc(ref_lon, ref_lat, tlon, tlat, var):
    """
    Extract CCSM/POP model output for a given location (lat, lon).
    It finds the 4 model grid points around the location and computes
    their weighted average (weights = inverse of the distance). If a
    location is next to land, the function returns the weighted
    average of the closest grid points that are not on land.
    Input:
        ref_lon = longitude of position to be extracted (scalar)
        ref_lat = latitude of position to be extracted (scalar)
        tlon = model longitude grid (numpy array)
        tlat = model latitude grid (numpy array)
        var = variable to be extracted (Masked 2-D or 3-D array)
    Output:
        wavg = weighted average (scalar or 1-D array)
    NOTE: this module is Python 2 (print statement below).
    """
    if var.ndim == 3: # 3D variable
        zmax, imax, jmax = var.shape
        threeD = True
    elif var.ndim == 2: # 2D variable
        imax, jmax = var.shape
        threeD = False
    else:
        # Unsupported rank: warn and fall through, returning None.
        print 'extract_loc: check variable dimensions'
        return
    # find the indices of the 4 model grid points around the location
    Ilist, Jlist = find_stn_idx(ref_lon, ref_lat, tlon, tlat)
    # compute great circle distance from location to model grid points
    dist = gc_dist(ref_lon, ref_lat, tlon, tlat)
    dist[dist==0] = 1.e-15 # avoid division by zero
    # arrays to store weights and data to be averaged
    if threeD: # 3D variable
        wghts = MA.zeros((zmax,len(Ilist)*len(Jlist)),float)
        data = MA.zeros((zmax,len(Ilist)*len(Jlist)),float)
        if MA.isMA(var): # mask weights so land points drop out of the average
            dist_m = MA.array(N.resize(dist,var.shape),mask=var.mask)
        else:
            dist_m = N.array(N.resize(dist,var.shape))
    else: # 2D variable
        wghts = MA.zeros((len(Ilist)*len(Jlist)),float)
        data = MA.zeros((len(Ilist)*len(Jlist)),float)
        if MA.isMA(var):
            dist_m = MA.array(dist,mask=var.mask) # mask weights
        else:
            dist_m = N.array(dist)
    # get the 4 model grid points and compute inverse-distance weights
    n = 0
    for i in Ilist:
        for j in Jlist:
            wghts[...,n] = 1./dist_m[...,i,j]
            data[...,n] = var[...,i,j]
            n += 1
    # compute weighted average over the 4 surrounding points
    wavg = MA.average(data,axis=-1,weights=wghts)
    return wavg
def extract_loc_vec(ref_lon, ref_lat, tlon, tlat, indata):
    """
    Vectorized version of extract_loc. Extracts full time series
    simultaneously. Much faster that original version above.
    Inputs:
        ref_lon : longitude of point to be extracted
        ref_lat : latitude of point to be extracted
        tlon : grid longitudes
        tlat : grid latitudes
        indata : array/field to extract point from
    Output:
        wavg : weighted average of the 4 model grid points around position
    """
    # find the indices of the 4 model grid points around the location
    Ilist, Jlist = find_stn_idx(ref_lon, ref_lat, tlon, tlat)
    # compute great circle distance from location to model grid points
    dist = gc_dist(ref_lon, ref_lat, tlon, tlat)
    dist[dist==0] = 1.e-15 # avoid division by zero
    # bounding box of the 4 surrounding points
    ibeg, iend = Ilist.min(), Ilist.max()
    jbeg, jend = Jlist.min(), Jlist.max()
    work = indata[...,ibeg:iend+1,jbeg:jend+1]
    dist = dist[...,ibeg:iend+1,jbeg:jend+1]
    # inverse-distance weights broadcast over the leading (time/depth) axes
    wghts = 1./N.resize(dist,work.shape)
    # flatten the trailing 2x2 window and average it with the weights
    wavg = MA.average(work.reshape(work.shape[:-2]+(-1,)),
        weights=wghts.reshape(work.shape[:-2]+(-1,)),axis=-1)
    return wavg
def pop_remap(x, gridsrc, griddst, method, areatype, fillvalue):
    """
    Remap from one grid to another. Uses remap files produced by SCRIP.
    Mapping is done by wrapped Fortran code so it's fast.
    Input:
        x = 2-D array to be remaped
        gridsrc = source grid
        griddst = destination grid
        method = interpolation method
        areatype = normalization option ('' to omit from the file name)
    Output:
        xout = remaped array (1-D, destination grid size)
    NOTE: Python 2 module (print statement below); requires PyNIO and the
    compiled `remap` Fortran extension.
    """
    import remap # Fortran subroutine
    # create name and path of remap file from user input
    # NOTE: hard-coded per-user SCRIP mapping directory.
    dir = '/home/ivan/Tools/scrip/mapping/scrip1.3/'
    if (len(areatype)==0):
        remap_file = (os.path.join(dir, 'map_' + gridsrc + '_to_'
            + griddst + '_' + method + '.nc'))
    else:
        remap_file = os.path.join(dir, 'map_' + gridsrc + '_to_'
            + griddst + '_' + method + '_' + areatype + '.nc')
    # read remap file
    fpin = Nio.open_file(remap_file,'r')
    src_grid_size = fpin.dimensions['src_grid_size']
    dst_grid_size = fpin.dimensions['dst_grid_size']
    num_wgts = fpin.dimensions['num_wgts']
    num_links = fpin.dimensions['num_links']
    dst_address = fpin.variables['dst_address'][:]
    src_address = fpin.variables['src_address'][:]
    remap_matrix = fpin.variables['remap_matrix'][:]
    fpin.close()
    #nlink, nw = remap_matrix.shape
    xin = N.ravel(x)
    xout = N.ones((dst_grid_size),N.float)
    xout.fill(fillvalue)
    if (len(xin)!=src_grid_size):
        # Size mismatch is only warned about; the remap still proceeds.
        print 'WARNING: input grid size does not match'
    # Fortran is column major so transpose input array
    remap_matrix = N.transpose(remap_matrix)
    nw, nlink = remap_matrix.shape
    # call wrapped Fortran subroutine
    xout = (remap.dpopremap(remap_matrix,dst_address,src_address,xin,
        dst_grid_size,nlink,nw,len(xin),fillvalue))
    return xout
def pop_remap_popdiag(x, gridsrc, griddst, method, areatype, fillvalue):
    """
    Remap from one grid to another. Uses remap files produced by SCRIP.
    Mapping is done by wrapped Fortran code so it's fast.
    Input:
        x = 2-D array to be remaped
        gridsrc = source grid
        griddst = destination grid
        method = interpolation method
        areatype = normalization option ('' to omit from the file name)
    Output:
        xout = remaped array (1-D, destination grid size)
    Same as pop_remap but reads the shared popdiag mapping directory.
    """
    import remap # Fortran subroutine
    # create name and path of remap file from user input
    # dir = '/CESM/bgcwg/obgc_diag/mapping/ncremaps/'
    dir = '/glade/p/cesm/bgcwg/obgc_diag/mapping/ncremaps/'
    if (len(areatype)==0):
        remap_file = (os.path.join(dir, 'map_' + gridsrc + '_to_'
            + griddst + '_' + method + '.nc'))
    else:
        remap_file = os.path.join(dir, 'map_' + gridsrc + '_to_'
            + griddst + '_' + method + '_' + areatype + '.nc')
    # read remap file
    fpin = Nio.open_file(remap_file,'r')
    src_grid_size = fpin.dimensions['src_grid_size']
    dst_grid_size = fpin.dimensions['dst_grid_size']
    num_wgts = fpin.dimensions['num_wgts']
    num_links = fpin.dimensions['num_links']
    dst_address = fpin.variables['dst_address'][:]
    src_address = fpin.variables['src_address'][:]
    remap_matrix = fpin.variables['remap_matrix'][:]
    fpin.close()
    #nlink, nw = remap_matrix.shape
    xin = N.ravel(x)
    xout = N.ones((dst_grid_size),N.float)
    xout.fill(fillvalue)
    if (len(xin)!=src_grid_size):
        # Size mismatch is only warned about; the remap still proceeds.
        print 'WARNING: input grid size does not match'
    # Fortran is column major so transpose input array
    remap_matrix = N.transpose(remap_matrix)
    nw, nlink = remap_matrix.shape
    # call wrapped Fortran subroutine
    xout = (remap.dpopremap(remap_matrix,dst_address,src_address,xin,
        dst_grid_size,nlink,nw,len(xin),fillvalue))
    return xout
def unfold_grid(var):
    """
    Unfold the POP grid, moving the Gulf of Mexico block to its place.

    Pads the longitude axis with 24 zero columns on the left, then moves
    the wrap-around columns of rows 39:68 into that padding and zeros the
    originals.
    Input:
        var : 2-D (lat, lon) or 3-D (depth, lat, lon) array
    Output:
        array widened by 24 columns with the wrapped block relocated
    Raises:
        ValueError for any other dimensionality (the original silently
        fell through and raised an opaque NameError on `work`).
    """
    if var.ndim == 2:  # 2-D variable
        work = N.concatenate((N.zeros((var.shape[0], 24), float), var), 1)
        work[39:68, 0:24] = work[39:68, var.shape[1]:]
        work[39:68, var.shape[1]:] = 0.0
    elif var.ndim == 3:  # 3-D variable
        work = (N.concatenate((N.zeros((var.shape[0], var.shape[1], 24), float),
            var), 2))
        work[:, 39:68, 0:24] = work[:, 39:68, var.shape[2]:]
        work[:, 39:68, var.shape[2]:] = 0.0
    else:
        raise ValueError('unfold_grid expects a 2-D or 3-D array')
    return work
#------------------------------------------------------------------------------
# IO functions
def get_file_year(filepath):
    """Parse the model date stamp out of a history file name.

    File names end in '....YYYY-MM.nc', so the second-to-last dotted field
    is the date stamp. Returns (year, month) as ints.
    """
    basename = os.path.split(filepath)[-1]
    stamp = basename.split('.')[-2]
    year_str, month_str = stamp.split('-')[:2]
    return int(year_str), int(month_str)
def create_file_list(case):
    """
    Create a list of model output files for given case.
    Output: list of file names
    Searches a fixed set of servers/base directories and returns the
    sorted '.nc' files from the first matching directory found.
    NOTE(review): if no candidate directory exists the function falls off
    the loops and implicitly returns None — callers must tolerate that.
    """
    for server in ['bonaire','barbados','caiapo']:
        for basedir in ['data0/ivan/archive','data1/ivan/archive',
            'data2/ivan/archive','data3/ivan/archive',
            '/bonaire/data2/data/SODA-POP','data0',
            '/barbados/data3/CCSM3-BGC']:
            # Each archive family stores its cases under a different layout.
            if 'SODA-POP' in basedir:
                path = os.path.join('/',server,basedir,case)
            elif 'CCSM3-BGC' in basedir:
                path = os.path.join('/',server,basedir,case,'ocn/hist')
            else:
                path = os.path.join('/',server,basedir,case,'ocn2')
            if os.path.isdir(path):
                indir = path
                allfiles = os.listdir(indir)
            else:
                continue
            # First existing directory wins; return its sorted .nc files.
            filelist = [os.path.join(indir,file) for file in allfiles
                if file.endswith('.nc')]
            filelist.sort()
            return filelist
def create_file_list_popdiag(case, workdir):
    """List monthly model output files for *case* found under *workdir*.

    Matches files named '<case>*-MM.nc' for months 01..12 and returns
    their sorted full paths.
    """
    indir = os.path.join('/', workdir)
    month_suffixes = tuple('-%02d.nc' % m for m in range(1, 13))
    paths = [os.path.join(indir, fname) for fname in os.listdir(indir)
             if fname.startswith(case) and fname.endswith(month_suffixes)]
    paths.sort()
    return paths
def create_file_list_period(case, mod_year0, mod_year1, month0=1, month1=12):
    """Slice the case's file list to the span month0/year0 .. month1/year1.

    Defaults to 01/year0 through 12/year1. Uses model years and assumes
    monthly output files; raises ValueError (from list.index) when either
    endpoint is missing from the archive.
    """
    all_files = create_file_list(case)
    stamps = [get_file_year(path) for path in all_files]
    start = stamps.index((mod_year0, month0))  # index of start of period
    stop = stamps.index((mod_year1, month1))   # index of end of period
    return all_files[start:stop + 1]
def read_model_coord_var(case,varname):
    """
    Read coordinate variables (not time varying) from a given model case.
    Output: array
    Reads from the first file of the case's archive, since coordinate
    variables do not change between monthly files.
    """
    filelist = create_file_list(case)
    fpin = Nio.open_file(filelist[0],'r')
    data = fpin.variables[varname][:]
    fpin.close()
    return data
def read_model_var_period_fast(case,varlist,year0,year1,month0=1,month1=12,
    fulldepth=True,zlev=0):
    """
    Read 2-D or 3-D variable from given case for time period
    from mon0/year0 to mon1/year1. If variable is 3-D it reads
    full depth by default. Set fulldepth=False and zlev to
    desired z level index To read values at given depth.
    Test using list comprehensions.
    Output: arrays time and dictionary containing arrays.
    NOTE: Python 2 module (bare print statement below); each list
    comprehension re-opens every file, trading file handles for brevity.
    """
    print ('reading case %-24s (%d/%.2d-%d/%.2d)'%
        (case,year0,month0,year1,month1))
    #print varlist
    filelist = create_file_list_period(case,year0,year1,month0,month1)
    # time axis: one entry per monthly file
    days = N.array([Nio.open_file(file,'r').variables['time'][:]
        for file in filelist])
    vardict = {} # container for variables
    for var in varlist:
        print var
        fpin = Nio.open_file(filelist[0],'r')
        if fpin.variables[var][0,...].ndim == 2: # 2-D field
            vardict[var] = MA.array([Nio.open_file(file,'r').variables[var]
                [0,...] for file in filelist])
        elif fpin.variables[var][0,...].ndim == 3: # 3-D field
            if fulldepth: # read full depth
                vardict[var] = MA.array([Nio.open_file(file,'r').variables[var]
                    [0,...] for file in filelist])
            else: # read level zlev
                vardict[var] = MA.array([Nio.open_file(file,'r').variables[var]
                    [0,zlev,...] for file in filelist])
    return days, vardict
def read_model_var_period(case,varlist,year0,year1,month0=1,month1=12,
        fulldepth=True,zlev=0):
    """
    Read 2-D or 3-D variables from given case for the time period
    from month0/year0 to month1/year1. If a variable is 3-D it reads
    full depth by default. Set fulldepth=False and zlev to the
    desired z level index to read values at a given depth.
    Output: array of times (days) and dictionary of variable arrays.
    """
    print ('reading case %-24s (%d/%.2d-%d/%.2d)'%
            (case,year0,month0,year1,month1))
    filelist = create_file_list_period(case,year0,year1,month0,month1)
    ntime = len(filelist)
    # use the first file to size the output arrays
    fpin = Nio.open_file(filelist[0],'r')
    nz = fpin.dimensions['z_t']
    nlon = fpin.dimensions['nlon']
    nlat = fpin.dimensions['nlat']
    vardict = {} # container for variables
    for var in varlist:
        if fpin.variables[var][0,...].ndim == 2 or not fulldepth:
            vardict[var] = MA.zeros((ntime,nlat,nlon),float)
        elif fpin.variables[var][0,...].ndim == 3 and fulldepth:
            vardict[var] = MA.zeros((ntime,nz,nlat,nlon),float)
    fpin.close()
    days = N.zeros((ntime),float)
    for t in range(ntime):
        fpin = Nio.open_file(filelist[t],'r')
        days[t] = fpin.variables['time'][:]
        for var in varlist:
            if fpin.variables[var][0,...].ndim == 2:
                vardict[var][t,:,:] = fpin.variables[var][0,...]
            elif fpin.variables[var][0,...].ndim == 3:
                if fulldepth:
                    vardict[var][t,:,:,:] = fpin.variables[var][0,...]
                else: # read zlev
                    vardict[var][t,:,:] = fpin.variables[var][0,zlev,...]
            else:
                # was a Python 2 print statement (syntax error in py3)
                print('Unknown number of dimensions.')
                os.sys.exit()
        fpin.close()
    return days, vardict
#------------------------------------------------------------------------------
def grid_area(ulon,ulat):
    """
    Compute grid cell areas in square meters.
    ulon and ulat are 1-D arrays with the cell edges in degrees.
    Note: total Earth area ~ 5.10e+14 m^2.
    Output: array with grid cell areas
    """
    earth_radius = 6371. * 1000.  # radius of Earth in meters
    lon_steps = N.diff(ulon)
    lat_steps = N.diff(ulat)
    # zonal cell width at every latitude edge (meters)
    dx = N.outer(deg2rad * earth_radius * N.cos(deg2rad * ulat), lon_steps)
    # meridional cell height (meters); 1 minute of latitude = 1852 m
    dy = 60. * 1852. * lat_steps
    # average dx over the two bounding latitude edges of each cell
    return (dx[1:] + dx[:-1]) / 2. * dy[:,N.newaxis]
def get_grid_data(grid):
    """
    Read grid dimensions and the 1-D lat & lon axes for a named
    SCRIP mapping grid.
    """
    grid_file = os.path.join('/home/ivan/Tools/scrip/mapping/grids',
                             grid + '.nc')
    fp = Nio.open_file(grid_file,'r')
    nlon, nlat = fp.variables['grid_dims'][:]
    lat_centers = fp.variables['grid_center_lat'][:]
    lon_centers = fp.variables['grid_center_lon'][:]
    fp.close()
    # collapse the 2-D center arrays to 1-D axes
    tlat = N.reshape(lat_centers,(nlat,nlon))[:,0]
    tlon = N.reshape(lon_centers,(nlat,nlon))[0,:]
    return nlon, nlat, tlon, tlat
def get_grid_data_popdiag(grid):
    """
    Read grid dimensions and the 1-D lat & lon axes for a named grid
    (popdiag variant: grids live under the CESM bgcwg diag tree).
    """
    # old location: /CESM/bgcwg/obgc_diag/mapping/grids
    grid_file = os.path.join('/glade/p/cesm/bgcwg/obgc_diag/mapping/grids',
                             grid + '.nc')
    fp = Nio.open_file(grid_file,'r')
    nlon, nlat = fp.variables['grid_dims'][:]
    lat_centers = fp.variables['grid_center_lat'][:]
    lon_centers = fp.variables['grid_center_lon'][:]
    fp.close()
    # collapse the 2-D center arrays to 1-D axes
    tlat = N.reshape(lat_centers,(nlat,nlon))[:,0]
    tlon = N.reshape(lon_centers,(nlat,nlon))[0,:]
    return nlon, nlat, tlon, tlat
def zonal_avg(data,Log=False):
    """
    Compute the zonal average of a field on the POP gx3v5 grid.
    Shape of input data is expected to be either [nfoo,nlat,nlon]
    or [nlat,nlon]. Log=True computes the geometric average.
    Output: arrays zavg and lat
    """
    print('computing zonal average')  # was a Python 2 print statement
    # get lat and lon for new regular grid
    # fpin = Nio.open_file('/home/ivan/Python/data/lat_t.nc','r')
    fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/lat_t.nc','r')
    lat_t = fpin.variables['lat_t'][:]
    lat_t_edges = fpin.variables['lat_t_edges'][:]
    fpin.close()
    # fpin = Nio.open_file('/home/ivan/Python/data/gx3v5.nc','r')
    fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/gx3v5.nc','r')
    lon_t = N.sort(fpin.variables['TLONG'][0,:])
    ulon = N.sort(fpin.variables['ULONG'][0,:])
    lon_t_edges = N.concatenate((ulon,ulon[0,N.newaxis]+360.),0)
    # get gx3v5 lat and lon
    tlon = fpin.variables['TLONG'][:]
    tlat = fpin.variables['TLAT'][:]
    fpin.close()
    # compute area of cells in new regular grid
    area = grid_area(lon_t_edges,lat_t_edges)
    nlat = lat_t.shape[0]
    nlon = lon_t.shape[0]
    if data.ndim == 3:
        new_data = MA.zeros((data.shape[0],nlat,nlon),dtype=float)
    elif data.ndim == 2:
        new_data = MA.zeros((nlat,nlon),dtype=float)
    else:
        print('Check field dimensions')  # was a Python 2 print statement
        sys.exit()
    # geometric mean?
    if Log:
        work = MA.log(data)
    else:
        work = data
    # remap data to new regular grid
    for i in range(nlat):
        for j in range(nlon):
            new_data[:,i,j] = extract_loc(lon_t[j],lat_t[i],tlon,tlat,work)
    # compute zonal average (area-weighted; exp() undoes the log for
    # the geometric mean)
    if Log:
        za_data = (MA.exp(MA.average(new_data,axis=-1,
                weights=N.resize(area,new_data.shape))))
    else:
        za_data = (MA.average(new_data,axis=-1,
                weights=N.resize(area,new_data.shape)))
    return za_data, lat_t
def zonal_avg2(data,Log=False):
    """
    Set up destination grid and averaging weights for a zonal average
    of a field on the POP gx3v5 grid.
    Shape of input data is expected to be either [nfoo,nlat,nlon]
    or [nlat,nlon]. Log=True computes the geometric average.
    Output: lon_t, lat_t, ilist, jlist, wghts2D, wghts3D
    Trying to make it faster.
    The steps are:
    1) set up the destination grid
    2) compute averaging weights for each grid cell
    3) compute normalizing weights for each basin (if required)
    4) compute basin zonal averages
    """
    print('setting up the destination grid')  # was a Python 2 print
    # get lat and lon for new regular grid
    # fpin = Nio.open_file('/home/ivan/Python/data/lat_t.nc','r')
    fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/lat_t.nc','r')
    lat_t = fpin.variables['lat_t'][:]
    lat_t_edges = fpin.variables['lat_t_edges'][:]
    fpin.close()
    # fpin = Nio.open_file('/home/ivan/Python/data/gx3v5.nc','r')
    fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/gx3v5.nc','r')
    lon_t = N.sort(fpin.variables['TLONG'][0,:])
    ulon = N.sort(fpin.variables['ULONG'][0,:])
    lon_t_edges = N.concatenate((ulon,ulon[0,N.newaxis]+360.),0)
    # get gx3v5 lat and lon
    tlon = fpin.variables['TLONG'][:]
    tlat = fpin.variables['TLAT'][:]
    fpin.close()
    # compute area of cells in new regular grid
    area = grid_area(lon_t_edges,lat_t_edges)
    nlat = lat_t.shape[0]
    nlon = lon_t.shape[0]
    print('computing weights for grid cell')  # was a Python 2 print
    ilist = []
    jlist = []
    wghts2D = []
    wghts3D = []
    for i in range(nlat):
        for j in range(nlon):
            i_inds, j_inds = find_stn_idx(lon_t[j], lat_t[i], tlon, tlat)
            ilist.append(i_inds)
            jlist.append(j_inds)
            # BUG FIX: distance must be measured to the same target point
            # (lon_t[j], lat_t[i]) used for the index search above;
            # the old code indexed the longitude axis with i
            dist = gc_dist(lon_t[j], lat_t[i], tlon, tlat)
            # make weights=0 on land
            work2D = 1./MA.array(dist,mask=data[0,...].mask)
            wghts2D.append(MA.filled(N.take(N.take(work2D,i_inds,0),j_inds,1)
                    ,0))
            work3D = 1./MA.array(N.resize(dist,data.shape),mask=data.mask)
            wghts3D.append(MA.filled(N.take(N.take(work3D,i_inds,-2),j_inds,-1)
                    ,0))
    #print 'computing zonal average'
    return lon_t, lat_t, ilist, jlist, wghts2D, wghts3D
def mean_annual_cycle(data):
    """
    Compute the mean annual cycle (monthly climatology) of a variable.
    Assumes data is a masked array with shape [nmonth,nlat,nlon],
    nmonth being a multiple of 12.
    Output: array with shape [12,nlat,nlon]
    """
    n_time, n_lat, n_lon = data.shape
    # regroup the time axis into [nyear, 12] before averaging years
    by_year = MA.reshape(data, (-1, 12, n_lat, n_lon))
    return MA.average(by_year, 0)
def monthly_anom(data):
    """
    Compute monthly anomalies relative to the mean annual cycle.
    Assumes data is a masked array with shape [nmonth,nlat,nlon],
    nmonth being a multiple of 12.
    Output: array with shape [nyear,12,nlat,nlon]
    """
    n_time, n_lat, n_lon = data.shape
    # regroup the time axis into [nyear, 12]
    by_year = MA.reshape(data, (-1, 12, n_lat, n_lon))
    # monthly climatology, then deviation of each year from it
    climatology = MA.average(by_year, 0)
    return by_year - climatology[N.newaxis, ...]
#------------------------------------------------------------------------------
# Tools for multi-variate analysis (EOF, PCA)
def standardize(data,weights,mode=None):
    """
    Standardize data: Xnew = (X - mean) / std.

    mode = 'col': use column-wise (time) means and stds.
    mode = 'row': use row-wise (space) means and stds.
    Otherwise the total space-time mean and std are used.
    Assumes data is a masked array with shape [ntime,nspace] and
    weights is an array with shape [nspace,].

    NOTE on standardization: for a temporal EOF, standardize across
    space (row-wise); for a spatial EOF, standardize across time
    (column-wise). See the original notes by Ivan Lima
    (Thu Mar 17 16:11:56 EDT 2011).
    """
    wght = MA.resize(weights, data.shape)
    if mode == 'row': # standardize each time slice across space
        row_mean = MA.average(data, weights=wght, axis=1)
        centered = data - row_mean[:,N.newaxis]
        row_std = MA.sqrt(MA.average(centered**2, weights=wght, axis=1))
        return centered / row_std[:,N.newaxis]
    if mode == 'col': # standardize each spatial point across time
        col_mean = MA.average(data, weights=wght, axis=0)
        col_std = MA.sqrt(MA.average((data - col_mean)**2, weights=wght,
                axis=0))
        return (data - col_mean) / col_std
    # default: total space-time standardization
    tot_mean = MA.average(data, weights=wght)
    tot_std = MA.sqrt(MA.average((data - tot_mean)**2, weights=wght))
    return (data - tot_mean) / tot_std
def temporal_eof(data):
    """
    Compute EOFs in time and Principal Components in space.
    Assumes input data is a masked array with shape [ntime,nspace].
    Returns (eigvec, pc, var) where var holds the percentage of
    variance explained by each mode.
    """
    mat = N.matrix(data.filled(0))
    # [ntime x ntime] covariance between time slices
    cov = mat * mat.T / mat.shape[1]
    eigval, eigvec = LA.eig(cov)
    # reorder modes by decreasing eigenvalue
    order = N.argsort(eigval)[::-1]
    eigvec = eigvec[:,order]
    eigval = eigval[order]
    # percentage of explained variance per EOF mode
    var = eigval.real / N.sum(eigval.real) * 100.
    # project the data onto the EOFs to get the principal components
    pc = mat.T * eigvec
    # eigvec and pc are matrices, NOT numpy arrays!
    return eigvec, pc, var
def temporal_eof_w(data,weights):
    """
    Compute EOFs in time and Principal Components in space using an
    area-weighted covariance matrix.
    Assumes input data is a masked array with shape [ntime,nspace]
    and weights has shape [nspace,].
    Returns (eigvec, pc, var); eigvec and pc are numpy matrices.
    """
    # weights tiled to the data shape, zeroed on masked (land) points
    wght = MA.filled(MA.array(MA.resize(weights,data.shape),mask=data.mask),0)
    weighted = N.matrix(MA.filled(data*wght,0))
    plain = N.matrix(MA.filled(data,0))
    # weighted [ntime x ntime] covariance matrix
    cov = weighted * plain.T / wght[0,...].sum()
    eigval, eigvec = LA.eig(cov)
    # reorder modes by decreasing eigenvalue
    order = N.argsort(eigval)[::-1]
    eigvec = eigvec[:,order]
    eigval = eigval[order]
    # percentage of explained variance per EOF mode
    var = eigval.real / N.sum(eigval.real) * 100.
    # project the (unweighted) data onto the EOFs
    pc = plain.T * eigvec
    # eigvec and pc are matrices, NOT numpy arrays!
    return eigvec, pc, var
#------------------------------------------------------------------------------
|
# Read two integers and report which is larger.
a = int(input('Give me a number: '))
b = int(input('Give me another number'))
# BUG FIX: the original used the bit-shift operators '>>' and '<<'
# instead of the comparisons '>' and '<', so e.g. a=3, b=2 evaluated
# 3 >> 2 == 0 (falsy) and 3 << 2 == 12 (truthy) and wrongly reported
# 2 as the bigger number.
if a > b:
    print(str(a) + ' is the bigger number.')
elif a < b:
    print(str(b) + ' is the bigger number.')
else:
    print('The numbers are equal!')
|
985,838 | 8bf62e56669d9dfbcfb82b73346d93b7ed16da09 | num = int(input("ป้อนตัวเลข : "))
# Print the text "A" (with quotes) on its own line, num times; the
# '*' repeats the two-character-plus-newline string, and print()
# appends one extra trailing newline after the whole block.
print('"A"\n'*num)
|
985,839 | d84120c58eece9146bebce94c96cae1f3345e334 | # Icons-50 dataset
# https://www.kaggle.com/danhendrycks/icons50?select=Icons-50.npy
'''
Dictionary with keys:
'class', with 10000 elements in {0,1,…,49};
'style', with 10000 elements in {'microsoft', 'apple', …, 'facebook'};
'image' with 10000 3x32x32 images representing the icons;
'rendition' , with 10000 strings where each indicates the icon's version;
'subtype', with 10000 elements which specify the subtype of a class such as 'whale' or 'shark' for the marine animals class.
'''
from Constants import *
from Functions import *
from fastText import *
import numpy as np
import glob
import os
def get_image_folders(path=ICONS50_IMAGES_DIR):
    """
    Return the sorted list of class folders under the Icons-50 image
    directory, skipping the non-class entries README.txt/.DS_Store.
    """
    path = r"{}".format(path)
    all_folders = os.listdir(path)
    # BUG FIX: list.remove() raises ValueError when the entry is
    # absent; drop these only if present so the function also works
    # on checkouts without README.txt or macOS .DS_Store files
    for non_class in ('README.txt', '.DS_Store'):
        if non_class in all_folders:
            all_folders.remove(non_class)
    all_folders.sort()
    return all_folders
def get_image_files(path):
    """Return every .png file (full path) directly inside *path*."""
    raw_path = r"{}".format(path)
    return glob.glob(raw_path + "/*.png")
def searchTerm(dict,class_names,term):
    """
    Collect class and subtype names whose underscore-separated words
    contain *term*. Class-name matches come first, then the dataset's
    per-image 'subtype' entries (which may repeat).
    """
    matches = []
    # term matches class names
    for candidate in class_names:
        if term in candidate.split('_'):
            matches.append(candidate)
    # term matches subtype names
    for candidate in dict['subtype']:
        if term in candidate.split('_'):
            matches.append(candidate)
    return matches
def get_key(dict,icon_name):
    """Return 'subtype' when *icon_name* is one of the dataset's
    subtype entries, otherwise 'class'."""
    return 'subtype' if icon_name in dict['subtype'] else 'class'
def split_at(file, delimiter, n):
    """Drop everything up to (and including) the n-th *delimiter* in
    *file* and return the remainder, delimiters restored."""
    return delimiter.join(file.split(delimiter)[n:])
def searchSource(class_names,class_data,subtype_names,icon_name):
    """
    Locate the folder and image file name for a subtype icon.

    class_names: folder names indexed by class id.
    class_data / subtype_names: parallel per-image lists of class id
    and subtype name from the Icons-50 dictionary.
    Returns (folder, img_name).

    NOTE(review): if icon_name never matches, `folder_number` (and
    possibly `img_name`) stay unbound and a NameError is raised -
    callers are expected to pass a subtype that exists.
    """
    # find folder where the searched subtype is located
    for c, s in zip(class_data, subtype_names):
        if icon_name == s:
            folder_number = c
    folder = class_names[folder_number]
    # find image file name among images in folder
    path = ICONS50_IMAGES_DIR + folder
    imgs = get_image_files(path)
    for img in imgs:
        img_name = split_at(img, '/', 4) # split at fourth bar
        pic_name = split_at(img_name, '_', 2) # split at second underscore
        name, ext = os.path.splitext(pic_name)
        if icon_name == name:
            break
    return folder, img_name
# semi-automatic search - user choice
def semi(term,icon_type):
    """
    Semi-automatic icon search: show all Icons-50 matches for *term*
    and let the user pick one interactively.
    Returns (source, icon_name, icon_type), or None when no icon
    matches the term.
    """
    icons_dict = np.load(ICONS50_NPY_DIR, allow_pickle=True).item()
    # get 'class' names
    class_names = get_image_folders()
    # get 'class' data in dict
    class_data = list(icons_dict['class'])
    # get 'subtype' names
    subtype_names = list(icons_dict['subtype'])
    icon_list = searchTerm(icons_dict,class_names,term)
    if not icon_list:
        return None
    print('Search results for icons with the term',term,':')
    jprint(icon_list)
    # request input of the name of the chosen icon from the list of matched icons
    icon_name = input('Type name of chosen icon: ')
    # key of chosen icon
    key = get_key(icons_dict,icon_name)
    # by default, if matched icon with class name, search for first icon in folder
    if key == 'class':
        imgs = get_image_files(ICONS50_IMAGES_DIR + icon_name)
        # BUG FIX: imgs[0] is already a full path (get_image_files
        # globs with the directory prefix); the old code prepended the
        # directory and class name to it again, yielding a broken path
        source = imgs[0]
    else:
        folder, pic_name = searchSource(class_names,class_data,subtype_names,icon_name)
        source = ICONS50_IMAGES_DIR + folder + '/' + pic_name
    return source, icon_name, icon_type
# automatic search using fastText
def auto(term,type,icon_type):
    """
    Automatic icon search: pick the best Icons-50 match for *term*
    using fastText similarity. type == 'opposite' selects the least
    similar icon (or the 2nd most similar when icon_type != 'least');
    any other type selects the most similar.
    Returns (source, icon_name, icon_type), or None when no icon
    matches the term.
    """
    icons_dict = np.load(ICONS50_NPY_DIR, allow_pickle=True).item()
    # get 'class' names
    class_names = get_image_folders()
    # get 'class' data in dict
    class_data = list(icons_dict['class'])
    # get 'subtype' names
    subtype_names = list(icons_dict['subtype'])
    icon_list = searchTerm(icons_dict,class_names,term)
    if not icon_list:
        return None
    jprint(icon_list)
    if type == 'opposite':
        if icon_type == 'least':
            icon_name = getLeastSimilar(term,icon_list)
        else:
            icon_name = get2MostSimilar(term,icon_list)
    else:
        icon_name = getMostSimilar(term,icon_list)
    print('Icon chosen by fastText: ', icon_name)
    # key of chosen icon
    key = get_key(icons_dict,icon_name)
    # by default, if matched icon with class name, search for first icon in folder
    if key == 'class':
        imgs = get_image_files(ICONS50_IMAGES_DIR + icon_name)
        # BUG FIX: imgs[0] is already a full path (get_image_files
        # globs with the directory prefix); the old code prepended the
        # directory and class name to it again, yielding a broken path
        source = imgs[0]
    else:
        folder, pic_name = searchSource(class_names,class_data,subtype_names,icon_name)
        source = ICONS50_IMAGES_DIR + folder + '/' + pic_name
    return source, icon_name, icon_type
def searchIcons50(term,type,icon_type):
    """Dispatch an Icons-50 search: 'semi' is interactive; every
    other type ('auto', 'opposite', ...) uses the fastText path."""
    if type == 'semi':
        return semi(term,icon_type)
    # auto and opposite go through here
    return auto(term,type,icon_type)
def ladderLength( beginWord, endWord, wordList):
    """
    Length of the shortest transformation sequence from beginWord to
    endWord, changing one letter at a time, where every intermediate
    word must be in wordList (LeetCode 127, "Word Ladder").

    BFS by candidate generation: for each dequeued word, try all 26
    letters at every position and enqueue candidates still present in
    the unvisited set (removing them guarantees each word is expanded
    once). Returns the number of words in the sequence including
    beginWord and endWord, or 0 if endWord is unreachable.

    Complexity: O(L * 26) neighbor generation per word (L = word
    length) - much faster than building an explicit O(n^2) graph.
    Uses collections.deque so each dequeue is O(1) instead of the
    O(n) list.pop(0).
    """
    import string
    from collections import deque
    unvisited = set(wordList)
    if endWord not in unvisited:
        return 0
    queue = deque([(beginWord, 1)])
    while queue:
        word, steps = queue.popleft()
        if word == endWord:
            return steps
        for pos in range(len(word)):
            for letter in string.ascii_lowercase:
                candidate = word[:pos] + letter + word[pos + 1:]
                if candidate in unvisited:
                    unvisited.remove(candidate)
                    queue.append((candidate, steps + 1))
    return 0
def ladderLength1(beginWord, endWord, wordList):
    """
    Word-ladder length via an explicit adjacency graph + BFS.

    Build a graph over wordList where two words are linked when they
    differ in exactly one letter, then BFS outward from every word
    adjacent to beginWord. Queue entries are (word index, steps so
    far); `visit` marks enqueued words so each is processed once.
    Returns the ladder length, or 0 when endWord is unreachable.

    NOTE: graph construction is O(n^2 * L), slower than the
    candidate-generation approach in ladderLength for large lists.
    Uses collections.deque so each dequeue is O(1), not list.pop(0)'s
    O(n).
    """
    from collections import deque

    def check_link(s1, s2):
        # True when s1 and s2 have equal length and differ in exactly
        # one position
        if len(s1) != len(s2):
            return False
        diff_count = 0
        for i in range(len(s1)):
            if s1[i] != s2[i]:
                diff_count += 1
        return diff_count == 1

    graph = [[] for _ in range(len(wordList))]
    for i in range(len(graph)):
        for j in range(i + 1, len(graph)):
            if check_link(wordList[i], wordList[j]):
                graph[i].append(j)
                graph[j].append(i)
    queue = deque()
    visit = {beginWord: 1}
    for i in range(len(wordList)):
        if check_link(beginWord, wordList[i]):
            queue.append((i, 2))
            visit[wordList[i]] = 1
    while queue:
        i, step = queue.popleft()
        if wordList[i] == endWord:
            return step
        for j in graph[i]:
            if wordList[j] not in visit:
                queue.append((j, step + 1))
                visit[wordList[j]] = 1
    return 0
if __name__ == '__main__':
print(ladderLength("hit","cog",["hot","dot","dog","lot","log","cog"])) |
985,841 | d912725de2afeeafe8f39c9acfacbed6d7c9b77d | storeNum = int(input())
# Read one space-separated row per store into a list of lists.
stores = []
for i in range(storeNum):
    line = input().split(" ")
    stores.append(list(line))
# NOTE(review): `count` and `seaLevel` are assigned but never updated,
# and the min() result below is discarded - this looks unfinished.
count = 0
seaLevel = 0
for i in range(storeNum):
    # min() of the (string) entries compares lexicographically; the
    # value is computed but not stored anywhere - presumably WIP.
    min(stores[i])
print(stores) |
985,842 | 96fbccf3e5ab545d8407f2e2c701659aca3bfdfe | import pdb
from time import sleep
import scrapy
from scrapy.selector import Selector
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
class Spider_ICLR18(scrapy.Spider):
    """Scrape accepted ICLR 2018 paper titles from openreview.net.

    The page builds its lists with JavaScript, so a Selenium-driven
    Chrome instance renders it before Scrapy parses the HTML.
    """
    name = "iclr18"
    start_urls = [
        'https://openreview.net/group?id=ICLR.cc/2018/Conference#accepted-oral-papers',
    ]
    def __init__(self):
        # add chrome driver to win10 PATH
        self.driver = webdriver.Chrome()
    def parse(self, response):
        self.driver.get(response.request.url)
        # wait (up to 10s) for the JS-rendered list of oral papers
        WebDriverWait(self.driver, 10).until(
            EC.presence_of_element_located((By.XPATH, "//div[contains(@id, 'accepted-oral-papers')]//h4"))
        )
        selenium_response_text = self.driver.page_source
        hxs = Selector(text=selenium_response_text)
        # the first <a> under each heading holds the title; collect
        # both oral and poster papers
        articles = hxs.xpath("//div[contains(@id, 'accepted-oral-papers')]//h4/a[1]/text()") + hxs.xpath("//div[contains(@id, 'accepted-poster-papers')]//h4/a[1]/text()")
        for article in articles:
            if article.extract().strip() != '':
                yield {
                    'title': article.extract().strip(),
                    'year': '2018',
                    'conf': 'ICLR',
                    # BUG FIX: ICLR is the International Conference on
                    # Learning Representations; the old string was
                    # ICML's long name
                    'conf_long': 'International Conference on Learning Representations'
                }
985,843 | 7f84f457aac0825beffdc08305572b97ad4439ea | from enum import Enum
from .header import MgmtHeader, MgmtGroup, MgmtOp, MgmtErr, CmdBase, RequestBase, ResponseBase
import cbor
class MgmtIdOS(Enum):
    """Command IDs within the OS management group."""
    ECHO = 0
    CONS_ECHO_CTRL = 1
    TASKSTAT = 2
    MPSTAT = 3
    DATETIME_STR = 4
    RESET = 5
class CmdOS(CmdBase):
    """Factory for OS-group management commands (echo, reset, ...)."""
    _group = MgmtGroup.OS
    _group.registerGroupIDs(MgmtIdOS)
    @staticmethod
    def echo(echo_str, seq=0):
        """Build an echo write request carrying *echo_str* as payload 'd'."""
        hdr = MgmtHeader(MgmtOp.WRITE, MgmtGroup.OS, MgmtIdOS.ECHO, seq=seq)
        return CmdOS(hdr, {'d': echo_str})
    @staticmethod
    def reset(seq=0):
        """Build a soft-reset write request (empty payload)."""
        hdr = MgmtHeader(MgmtOp.WRITE, MgmtGroup.OS, MgmtIdOS.RESET, seq=seq)
        return CmdOS(hdr, {})
    @staticmethod
    def cons_echo_ctrl(seq=0):
        raise NotImplementedError('cons_echo_ctrl')
    @staticmethod
    def taskstat(seq=0):
        # BUG FIX: the message previously said 'cons_echo_ctrl'
        # (copy/paste error)
        raise NotImplementedError('taskstat')
class Echo(RequestBase):
    """Request wrapper for the OS-group echo command."""
    def __init__(self, text):
        super().__init__()
        self.text = text  # payload text to be echoed back
    def message(self):
        # build the request only while no response has been recorded;
        # afterwards the exchange is complete and None is returned
        if not self.response_data:
            return CmdOS.echo(self.text)
        return None
    def parse_response(self, rsp):
        """Decode *rsp* bytes into a ResponseBase; raises ValueError
        on a non-echo header or a payload missing the 'r' key."""
        hdr = MgmtHeader.decode(rsp)
        if hdr.op != MgmtOp.WRITE_RSP or hdr.group != MgmtGroup.OS or hdr.id != MgmtIdOS.ECHO:
            raise ValueError('Not a echo command response: {}'.format(str(hdr)))
        # is guaranteed
        # if len(rsp) > hdr.size:
        #     raise ValueError('Echo command response to short: {}'.format(str(hdr)))
        # CBOR-encoded payload follows the fixed-size header
        dec_msg = cbor.loads(rsp[hdr.size:])
        if self.__class__._debug:
            print(dec_msg)
        if not 'r' in dec_msg:
            raise ValueError('Echo response missing \"r\" key: {}'.format(str(dec_msg)))
        self.response_data = ResponseBase(MgmtErr.EOK, dec_msg, dec_msg['r'])
        return self.response_data
    def __str__(self):
        return '{}(text={})'.format(self.__class__.__name__, self.text)
class Reset(RequestBase):
    """Request wrapper for the OS-group soft-reset command."""
    def message(self):
        # build the request only while no response has been recorded
        if not self.response_data:
            return CmdOS.reset()
        return None
    def parse_response(self, rsp):
        """Decode *rsp* bytes into a ResponseBase; raises ValueError
        when the header is not a reset write response."""
        hdr = MgmtHeader.decode(rsp)
        if hdr.op != MgmtOp.WRITE_RSP or hdr.group != MgmtGroup.OS or hdr.id != MgmtIdOS.RESET:
            raise ValueError('Not a reset command response: {}'.format(str(hdr)))
        # is guaranteed
        # if len(rsp) > hdr.size:
        #     raise ValueError('Reset command response to short: {}'.format(str(hdr)))
        # CBOR-encoded payload follows the fixed-size header
        dec_msg = cbor.loads(rsp[hdr.size:])
        if self.__class__._debug:
            print(dec_msg)
        self.response_data = ResponseBase(MgmtErr.EOK, dec_msg)
        return self.response_data
def registerOSCommandArguments(sub_parsers):
    """Attach the OS-group subcommands ('echo', 'reset') to an
    argparse sub-parser collection and return it."""
    echo = sub_parsers.add_parser(
        'echo', help='Send data to a device and display the echoed back data')
    echo.add_argument('text', type=str, default=None)
    sub_parsers.add_parser('reset', help='Perform a soft reset of a device')
    return sub_parsers
|
985,844 | 46f972b9767a8d27bb1f4e72dd7e0eb30e0dfcc7 | import requests
# Create a Session object so the settings below apply to every request.
s = requests.Session()
# Route all traffic through a local proxy (e.g. Fiddler on port 8888).
s.proxies = {
    "http": "http://127.0.0.1:8888/",
    "https": "http://127.0.0.1:8888/"
}
# Use a custom CA certificate for TLS verification.
# # Assumes the certificate lives in the cert/ folder next to the code,
# # named FiddlerRoot.pem - referenced by relative path.
s.verify = r'cert\FiddlerRoot.pem'
# # If the cert folder was processed with c_rehash.pl, the directory
# # itself can be used instead (this overrides the line above).
s.verify = r'cert'
print(s.get('https://httpbin.org/user-agent').text)
|
985,845 | e39b7b2e6cef57a82f53ff0bdd57f80379ce2b64 | # import pyowm
# owm = pyowm.OWM('ef2206ff5da67de63306d0b143e20872') # You MUST provide a valid API key
# # Have a pro subscription? Then use:
# # owm = pyowm.OWM(API_key='your-API-key', subscription_type='pro')
# # Search for current weather in London (Great Britain)
# city = input ('Enter a city: ')
# observation = owm.weather_at_place(city)
# w = observation.get_weather()
# print(w) # <Weather - reference time=2013-12-18 09:20,
# # status=Clouds>
# # Weather details
# print(w.get_wind()['deg']) # {'speed': 4.6, 'deg': 330}
# print(w.get_humidity()) # 87
# print(w.get_temperature('celsius')['temp']) # {'temp_max': 10.5, 'temp': 9.7, 'temp_min': 9.0}
# Search current weather observations in the surroundings of
# lat=22.57W, lon=43.12S (Rio de Janeiro, BR)
# observation_list = owm.weather_around_coords(-22.57, -43.12)
# Напишіть скрипт-гру, яка генерує випадковим чином число з діапазону чисел від 1 до 100 і пропонує користувачу вгадати це число. Програма зчитує числа,
# які вводить користувач і видає користувачу підказки про те чи загадане число більше чи менше за введене користувачем. Гра має тривати до моменту поки користувач
# не введе число, яке загадане програмою, тоді друкує повідомлення привітання.
# (для виконання завдання необхідно імпортувати модуль random, а з нього функцію randint())
#
# from random import randint
# def game (comp_number = randint(1,100), user_number = int(input('Guess a number: '))):
# while user_number != comp_number:
# if user_number < comp_number:
# print("Too small")
# user_number = int(input('Guess a number: '))
# else:
# print("Too big")
# user_number = int(input('Guess a number: '))
# print('You guessed!')
# game()
#2. Напишіть скрипт, який обчислює площу прямокутника a*b, площу трикутника 0.5*h*a, площу кола pi*r**2.
#(для виконання завдання необхідно імпортувати модуль math, а з нього функцію pow() та значення змінної пі).
# import math
# def rectangle (a = int(input("Enter side A: ")), b = int(input("Enter side B: "))):
# print("area is: ", a * b)
# rectangle()
# def triangle (h = float(input("Enter height: ")), a = float(input("Enter base: "))):
# print("area is: ", 0.5 * h *a)
# triangle()
# def circle (r = float(input("Enter radius: "))):
# print("area is: ", math.pi*r*r)
# circle()
|
985,846 | 04d91891a7a3aa8231b4a9cc3c4e718b0edb350a | import random
def gen_pairs(the_names, number = 10):
    """Yield *number* strings pairing two distinct random first names
    drawn from *the_names* (full names; only the first word is used)."""
    for _ in range(number):
        first = random.choice(the_names).split()[0]
        partner = random.choice(the_names).split()[0]
        # re-draw until the partner differs from the first pick
        while partner == first:
            partner = random.choice(the_names).split()[0]
        yield f"{first} teams up with {partner}"
def gen_pairs_theirs():
    """Endlessly yield pairings of two distinct title-cased first
    names drawn from the module-level NAMES list."""
    first_names = [full.split()[0].title() for full in NAMES]
    while True:
        a = b = None
        # sample until the two picks differ (duplicates in NAMES can
        # make random.sample return the same first name twice)
        while a == b:
            a, b = random.sample(first_names, 2)
        yield f'{a} teams up with {b}'
# Demo data: note the deliberate duplicates ('bob belderbos',
# 'julian sequeira' and 'brad pitt' each appear twice).
NAMES = ['arnold schwarzenegger', 'alec baldwin', 'bob belderbos', \
        'julian sequeira', 'sandra bullock', 'keanu reeves', \
        'julbob pybites', 'bob belderbos', 'julian sequeira', \
        'al pacino', 'brad pitt', 'matt damon', 'brad pitt']
print(NAMES)
# Title-case every full name.
capital_names = [name.title() for name in NAMES]
print(capital_names)
# Swap each name to 'last first' order via split-and-concatenate.
swapped_names = [temp.split()[1] + " " + temp.split()[0] for temp in NAMES]
print(swapped_names)
def reverse_first_last_names(name):
    """Return 'last first' for a two-word 'first last' full name."""
    first, last = name.split()
    return ' '.join((last, first))
# Same first/last swap done with the helper function.
swapped_names2 = [reverse_first_last_names(name) for name in NAMES]
print(swapped_names2)
# Drive each generator manually with next().
pairs = gen_pairs(capital_names)
for _ in range(10):
    print(next(pairs))
pairs2 = gen_pairs_theirs()
for _ in range(10):
    print(next(pairs2))
import itertools
# islice is lazy: the bare call below builds an iterator but consumes
# nothing; wrapping it in list() actually pulls 10 more pairs.
itertools.islice(pairs2, 10)
list(itertools.islice(pairs2, 10))
|
985,847 | 01abcfc47b9645cfc2ae20c7ce2d5753fb48351b | import os
from icrawler.builtin import GoogleImageCrawler
from icrawler.builtin import BingImageCrawler
from icrawler.builtin import BaiduImageCrawler
from icrawler.builtin import FlickrImageCrawler
from icrawler.builtin import GreedyImageCrawler
if __name__ == "__main__":
    # CLI wrapper around the icrawler engines: pick an image-search
    # engine and crawl images for a keyword into a target directory.
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-k", "--keyword", action="store", type="string", dest="keyword", default="", help="keyword to search, you should use _ instead of space, example: spiral_galaxy")
    parser.add_option("-d", "--dir", action="store", type="string", dest="dir", default="crawled", help="target directory")
    parser.add_option("-e", "--engine", action="store", type="string", dest="engine", default="google", help="which engine: google, bing, baidu, flickr, greedy")
    parser.add_option("-o", "--offset", action="store", type="string", dest="offset", default="0", help="offset")
    parser.add_option("-l", "--limit", action="store", type="string", dest="limit", default="10000", help="limit")
    (options, args) = parser.parse_args()
    keyword = options.keyword
    engine = options.engine
    dir = options.dir
    offset = int(options.offset)
    limit = int(options.limit)
    # map engine name -> crawler class; unknown engines fall back to Google
    crawler_classes = {
        "google": GoogleImageCrawler,
        "bing": BingImageCrawler,
        "baidu": BaiduImageCrawler,
        "flickr": FlickrImageCrawler,
        "greedy": GreedyImageCrawler,
    }
    crawler_cls = crawler_classes.get(engine, GoogleImageCrawler)
    # BUG FIX: every branch previously passed the literal string 'dir'
    # as root_dir, so the -d option was silently ignored and all images
    # landed in a folder named "dir"
    crawler = crawler_cls(storage={'root_dir': dir})
    # underscores in the keyword stand for spaces on the command line
    crawler.crawl(keyword=keyword.replace("_", " "), offset=offset, max_num=limit)
# bing_crawler = BingImageCrawler('crawl_target/bing')
# bing_crawler.crawl(keyword='spiral galaxy', offset=0, max_num=1000,
# feeder_thr_num=1, parser_thr_num=1, downloader_thr_num=4,
# min_size=None, max_size=None)
# baidu_crawler = BaiduImageCrawler('crawl_target/baidu')
# baidu_crawler.crawl(keyword='spiral galaxy', offset=0, max_num=1000,
# feeder_thr_num=1, parser_thr_num=1, downloader_thr_num=4,
# min_size=None, max_size=None) |
985,848 | c773624c5f30f448b9db619182406351307bb444 | from sys import stdin, stdout
from collections import defaultdict
from math import factorial
# For each input line, count the distinct permutations of its
# characters: len! / (count(c1)! * count(c2)! * ...), i.e. the
# multinomial coefficient over the character frequencies.
for line in stdin:
    line = line.strip()
    cnt = defaultdict(int)
    for c in line:
        cnt[c] += 1
    ans = factorial(len(line))
    for k,v in cnt.items():
        ans //= factorial(v)  # exact: each factorial divides evenly
    stdout.write(str(ans)+'\n')
|
985,849 | b1f49e27d36d97eb4e07a590d5f08d423c04ea0e | from flask import Blueprint, render_template, flash, request, redirect, url_for
from www.extensions import cache
from srmanager.sr import SR
from www.extensions import mongo
main = Blueprint('main', __name__)
@main.route('/')
@cache.cached(timeout=1000)
def home():
    """Render the index page with the current SR topology.

    The response is cached for 1000 seconds, so get_topology() is not
    hit on every request.
    """
    srm = SR()
    topo = []
    # stringify each topology node for template display
    for node in srm.get_topology():
        topo.append(str(node))
    return render_template('index.html', topo=topo)
@main.route('/services')
@cache.cached(timeout=1000)
def services():
    """Render the services page from the Mongo 'services' collection
    (response cached for 1000 seconds)."""
    services = mongo.db.services.find()
    return render_template('services.html', services=services)
|
985,850 | decaf88c28b7ba041c1f845c272dfd2118860d2b | # Module to resize an image
import cv2
# NOTE(review): imread returns None when 'lena.jpg' is missing, and
# the .shape access below would then raise - assumes the file exists
# in the current working directory.
image = cv2.imread('lena.jpg')
cv2.imshow('Original', image)
height, width, _ = image.shape
# Scale so the new width is exactly 100 px, preserving aspect ratio.
proportion = 100.0 / width
new_size = (100, int(height * proportion))
# INTER_AREA is the recommended interpolation when shrinking.
resized_image = cv2.resize(image, new_size, interpolation=cv2.INTER_AREA)
cv2.imshow('Resized image', resized_image)
cv2.waitKey(0)  # block until any key is pressed
|
def take_odd(password):
    """Return the characters of *password* at odd indices (1, 3, 5, ...)."""
    # slicing replaces the manual index-and-append loop; same result
    return password[1::2]
def cut(index_1, length_1, password):
    """Remove the *length_1* characters starting at *index_1* from
    *password* and return the result.

    BUG FIX: the old implementation extracted the substring and then
    removed its FIRST occurrence with str.replace(sub, '', 1), which
    deleted the wrong characters whenever the same substring appeared
    earlier in the string (e.g. cut(2, 1, 'aba') removed index 0's
    'a' instead of index 2's). Slicing removes exactly the requested
    span.
    """
    return password[:index_1] + password[index_1 + length_1:]
def substitute(old, new, password):
    """Replace every occurrence of *old* with *new* in *password*;
    return the sentinel "Nothing to replace!" when *old* is absent."""
    if old not in password:
        return "Nothing to replace!"
    return password.replace(old, new)
string = input()
command = input()
# Process commands until the sentinel "Done"; each command word may be
# followed by space-separated arguments.
while command != "Done":
    data = command.split(maxsplit=1)
    if data[0] == "TakeOdd":
        string = take_odd(string)
        print(string)
    elif data[0] == "Cut":
        index, length = data[1].split()
        index = int(index)
        length = int(length)
        string = cut(index, length, string)
        print(string)
    elif data[0] == "Substitute":
        sub_str, replace_str = data[1].split()
        # substitute() is called twice (once to test for the sentinel,
        # once to apply/print) - harmless since it has no side effects
        if substitute(sub_str, replace_str, string) == "Nothing to replace!":
            print(substitute(sub_str, replace_str, string))
        else:
            string = substitute(sub_str, replace_str, string)
            print(string)
    command = input()
print(f"Your password is: {string}")
|
985,852 | e72f3caf34f59467b15f244b373405d449c5672c | from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
import json
from api.models import Course, Instructor, LabGroup, Student
from api import permissions
class EnrollTest(APITestCase):
    """
    Test cases for POST requests on EnrollView.

    setUp creates one student and one instructor user (with their permission
    groups), one course, and one labgroup with a known enroll key; the
    student starts with labgroup=None so each test can attempt enrollment.
    """
    def setUp(self):
        # create test users
        self.student_username = 'student'
        self.instructor_username = 'teacher'
        self.password = 'test'
        self.student_user = User.objects.create_user(username=self.student_username, password=self.password)
        self.instructor_user = User.objects.create_user(username=self.instructor_username, password=self.password)
        group = permissions.get_or_create_instructor_permissions()
        group.user_set.add(self.instructor_user)
        group = permissions.get_or_create_student_permissions()
        group.user_set.add(self.student_user)
        # requests are made as the student
        self.client.login(username=self.student_username, password=self.password)
        # populate the database; labgroup=None means "not yet enrolled"
        self.student = Student(labgroup=None, user=self.student_user, wwuid='1111111')
        self.student.save()
        self.instructor = Instructor(user=self.instructor_user, wwuid='2222222')
        self.instructor.save()
        self.course = Course(name='test name')
        self.course.save()
        self.labgroup = LabGroup(course=self.course,
                                 instructor=self.instructor,
                                 group_name='A',
                                 term='FALL2018',
                                 enroll_key='ABC')
        self.labgroup.save()
        # retrieve the view
        self.view_name = 'api:enroll'

    def test_enroll(self):
        """
        Tests that a labgroup can be properly enrolled in.
        """
        # request
        request_body = {
            'wwuid': self.student.wwuid,
            'labgroup': self.labgroup.id,
            'enroll_key': self.labgroup.enroll_key
        }
        response = self.client.post(reverse(self.view_name), request_body)
        # test response
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        # test database: the existing student row is now linked to the labgroup
        self.assertEqual(Student.objects.first().user, self.student_user)
        self.assertEqual(Student.objects.first().labgroup, self.labgroup)
        self.assertEqual(Student.objects.first().wwuid, self.student.wwuid)

    def test_enroll_bad_labgroup(self):
        """
        Tests that entering a bad labgroup is properly handled.
        """
        # request with a labgroup id (0) that cannot exist
        request_body = {
            'wwuid': self.student.wwuid,
            'labgroup': 0,
            'enroll_key': self.labgroup.enroll_key
        }
        response = self.client.post(reverse(self.view_name), request_body)
        # test response
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        # test database: the student must remain un-enrolled
        self.assertEqual(Student.objects.first().labgroup, None)

    def test_enroll_bad_key(self):
        """
        Tests that a labgroup is not enrolled in with a bad key.
        """
        # request with an empty (wrong) enroll key
        request_body = {
            'wwuid': self.student.wwuid,
            'labgroup': self.labgroup.id,
            'enroll_key': ''
        }
        response = self.client.post(reverse(self.view_name), request_body)
        # test response
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        # test database: the student must remain un-enrolled
        self.assertEqual(Student.objects.first().labgroup, None)

    def test_missing_parameters(self):
        """
        Tests that a missing parameter causes the request to do nothing.
        """
        # request with no 'labgroup' key at all
        request_body = {
            'wwuid': self.student.wwuid,
            'enroll_key': self.labgroup.enroll_key
        }
        response = self.client.post(reverse(self.view_name), request_body)
        # test response
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        # test database: the student must remain un-enrolled
        self.assertEqual(Student.objects.first().labgroup, None)

    def test_invalid_student(self):
        """
        Tests that entering invalid student does nothing.
        """
        # request
        request_body = {
            'wwuid': '123456789',  # too long
            'labgroup': self.labgroup.id,
            'enroll_key': self.labgroup.enroll_key
        }
        response = self.client.post(reverse(self.view_name), request_body)
        # test response
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        # test database
        # NOTE(review): setUp saved one Student, so this expects the view to
        # have removed/replaced it -- confirm against the EnrollView logic
        self.assertEqual(len(Student.objects.all()), 0)

    def test_enroll_status(self):
        """
        Tests that the enrollment status of a user can be retrieved.
        """
        # enroll request
        request_body = {
            'user': self.student_user,
            'student': self.student,
        }
        self.client.post(reverse(self.view_name), request_body)
        # enroll status request (GET on the same view)
        response = self.client.get(reverse(self.view_name))
        response_body = json.loads(response.content.decode('utf-8'))
        # test response: both the user and the nested student serialization
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response_body['user']['username'], self.student_user.username)
        self.assertEqual(response_body['user']['email'], self.student_user.email)
        self.assertEqual(response_body['user']['first_name'], self.student_user.first_name)
        self.assertEqual(response_body['user']['last_name'], self.student_user.last_name)
        self.assertEqual(response_body['student']['pk'], self.student.id)
        self.assertEqual(response_body['student']['labgroup'], self.student.labgroup)
        self.assertEqual(response_body['student']['user'], self.student.user.id)
        self.assertEqual(response_body['student']['wwuid'], self.student.wwuid)

    def test_enroll_status_not_enrolled(self):
        """
        Tests that no enrollment status is retrieved for an un-enrolled user.
        """
        # un-enroll user by deleting the student row entirely
        self.student.delete()
        # enroll status request
        response = self.client.get(reverse(self.view_name))
        response_body = json.loads(response.content.decode('utf-8'))
        # test response: user info still present, student block is null
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response_body['user']['username'], self.student_user.username)
        self.assertEqual(response_body['user']['email'], self.student_user.email)
        self.assertEqual(response_body['user']['first_name'], self.student_user.first_name)
        self.assertEqual(response_body['user']['last_name'], self.student_user.last_name)
        self.assertEqual(response_body['student'], None)
|
985,853 | b0ac83ee0af6629295e6a786d5df83f20b8f0056 | def choose_class(name):
if name == 'foo':
class Foo(object):
pass
return Foo # 返回的是类,不是类的实例
else:
class Bar(object):
pass
return Bar
MyClass = choose_class('foo')
print(MyClass)  # the function returns a class, not an instance of the class
# <class '__main__.choose_class.<locals>.Foo'>
print(MyClass())  # you can create an object by instantiating that class
# <__main__.choose_class.<locals>.Foo object at 0x000001BFE67CC470>
|
985,854 | 6261cc75790cf6f7e66a44cb4df8e96179aa7145 | # Main file responsible for synchronous stream processing.
from ..matcher.matcher import *
from ..mySDR.mySDR import *
from ..fmDemod.fmDemod import *
from ..myAudio.myAudio import *
from ..db.db import *
import Queue
import threading, time
from ..utils.dbScrape import *
from ..utils.parser import *
import sys, os
from ..execute.executor import *
from numpy import *
from matplotlib.pyplot import *
from Tkinter import Tk, Frame, BOTH, Label
from ttk import Frame, Button, Style
class Example(Frame):
    """Tk frame that wires the HamPi record/process/match pipeline to a
    Process button and a Quit button (Python 2 / Tkinter)."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        self.initUI()
    def run(self):
        # run the pipeline on a worker thread so the Tk mainloop stays responsive
        t_action = threading.Thread(target = self.action, args=())
        t_action.start()
    def action(self):
        # sys.argv[1]: args file parsed into a dict; sys.argv[2]: progress file
        args = parse(sys.argv[1])
        progress = open(sys.argv[2], 'w')
        myMatcher = matcher(args['match_thres'], args['m'], args['fs'])
        myExecutor = executor()
        Q_record = Queue.Queue()
        Q_process = Queue.Queue()
        sdr = mySDR()
        sdr.set_up(args['fs'], args['fc'] - args['offset'], args['gain'])
        # Initiate database responsible for acquiring query data.
        database = db([], args['t'], args['m'], args['cutoff'], sdr, Q_record, fmDemod(), myAudio())
        t_record = threading.Thread(target = database.recordContinously, args = (progress,))
        t_record.start()
        # stagger thread start-up so the producer is ahead of the consumer
        time.sleep(args['process_delay'])
        # Stores data_ds in Q_process.
        t_process = threading.Thread(target = database.processContinously, args = (Q_process, progress, args['buff_size'], args['taps']))
        t_process.start()
        time.sleep(args['matcher_delay'])
        # matching then runs on this worker thread until the process exits
        myMatcher.match_Queue(Q_process, myExecutor, args['diff_thres'])
    def close(self):
        # os._exit (not sys.exit) so the non-daemon worker threads die too
        print 'Exiting...'
        os._exit(0)
    def initUI(self):
        self.parent.title("HamPi")
        self.pack(fill=BOTH, expand=2)
        # self.parent.title("Process button")
        self.style = Style()
        self.style.theme_use("default")
        self.pack(fill=BOTH, expand=1)
        processButton = Button(self, text="Process", command=self.run)
        processButton.place(x=60, y=10)
        quitButton = Button(self, text="Quit", command=self.close)
        quitButton.place(x=140, y=10)
# allow many matplotlib figures without the "too many open figures" warning
rcParams['figure.max_open_warning'] = 200

def usage():
    # NOTE(review): the message lists three arguments but the check below
    # requires exactly two (len(sys.argv) == 3 counting the script name),
    # and only sys.argv[1] and sys.argv[2] are ever read -- confirm intent
    print 'python -m lib.process.py [args_file] [progress_file] [debug_file]'

if (len(sys.argv) != 3):
    usage()
    exit(0)

root = Tk()
root.geometry("300x50")
app = Example(root)
root.mainloop()
# Hackish listener that exits from keyboard interrupt.
#try:
#    while (True):
#        continue
#except KeyboardInterrupt:
#    print 'Exiting...'
#    os._exit(0)
|
985,855 | 934204d15d2beda66159d54980578b34602bb664 | import fire
from WannabeC import WannabeCCompiler

if __name__ == '__main__':
    # expose every WannabeCCompiler method as a CLI command via python-fire
    fire.Fire(WannabeCCompiler)
|
985,856 | 4f4077cca58edf0f9fce60d79b3237b55cb386f9 | from model.debts_model import Debts
class DebtsRepository:
    """CRUD helpers for Debts rows on top of a SQLAlchemy-style session.

    Callers own the session lifecycle (commit/rollback/close); these methods
    only stage changes on the session.
    """

    def __init__(self, session):
        self._session = session

    def insert(self, debts: Debts):
        """Stage a new Debts row for insertion."""
        self._session.add(debts)

    def get_id(self, id):
        """Fetch one Debts row by primary key (None when absent)."""
        return self._session.query(Debts).get(id)

    def get_all(self):
        """Fetch every Debts row as a list."""
        return self._session.query(Debts).all()

    def update(self, update_debt):
        """Stage an updated Debts instance on the session."""
        self._session.add(update_debt)

    def patch_id(self, id, value, description, person_id):
        """Partially update a Debts row; only truthy fields are applied."""
        update_debts = self._session.query(Debts).get(id)
        if value:
            update_debts.value = value
        if description:
            update_debts.description = description
        if person_id:
            update_debts.person_id = person_id
        self._session.add(update_debts)

    def delete_id(self, id):
        """Stage deletion of the Debts row with the given primary key."""
        debts_delete = self._session.query(Debts).get(id)
        self._session.delete(debts_delete)

    def get_debts_by_person_id(self, id):
        """Return the list of Debts rows belonging to one person.

        Bug fix: `.all` was returned without being called, so callers got a
        bound method object instead of the result list.
        """
        return self._session.query(Debts).filter(Debts.person_id == id).all()
|
985,857 | 1b2d3f97cfaff96edb2612bf20fc332673fbc7dc | #!/usr/bin/env python3
import logging
import subprocess
logger = logging.getLogger(__name__)
def run_cmd(args_list):
    """Run an external command and return its (stdout, stderr) as bytes.

    Parameters
    ----------
    args_list : list of str
        The command and its arguments, passed to Popen without a shell.

    Raises
    ------
    RuntimeError
        On a non-zero exit code, with the decoded stderr in the message
        (previously the raw bytes repr, e.g. b'...', was interpolated).
    """
    print('Running system command: {0}'.format(' '.join(args_list)))
    proc = subprocess.Popen(args_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (output, errors) = proc.communicate()
    if proc.returncode:
        # decode stderr so the exception message is readable text
        err_text = errors.decode('utf-8', 'replace') if isinstance(errors, bytes) else errors
        raise RuntimeError(
            'Error running command: %s. Return code: %d, Error: %s' % (
                ' '.join(args_list), proc.returncode, err_text))
    return (output, errors)
|
985,858 | e89982fedc8a2b8f90f4656767c3c51d2b3c9f2d | # Generated by Django 3.2.3 on 2021-06-04 02:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `profile` model:
    rename full_name -> name, drop `website`, add an optional `photo` image.
    """

    dependencies = [
        ('apps', '0004_auto_20210523_1107'),
    ]

    operations = [
        # full_name -> name (column data is preserved by RenameField)
        migrations.RenameField(
            model_name='profile',
            old_name='full_name',
            new_name='name',
        ),
        migrations.RemoveField(
            model_name='profile',
            name='website',
        ),
        # optional profile photo stored under MEDIA_ROOT/images/
        migrations.AddField(
            model_name='profile',
            name='photo',
            field=models.ImageField(blank=True, null=True, upload_to='images/'),
        ),
    ]
|
985,859 | 8a64a0b955f3c04e4c3031e184764cf6c2ca03f2 | # -*- coding:utf-8*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import os.path
import time
time1=time.time()
def MergeTxt(filepath, outfile):
    """Append the contents of every file under *filepath* into filepath+outfile.

    Each merged file is followed by a newline. Fixes over the original:
    the output file (which lives inside the walked tree) is skipped instead
    of being merged into itself, the inner loop no longer shadows the
    `filepath` parameter, and all file handles are closed via `with`.
    """
    out_path = filepath + outfile
    with open(out_path, 'a+') as merged:
        for parent, dirnames, filenames in os.walk(filepath):
            for fname in filenames:
                txt_path = os.path.join(parent, fname)
                # never merge the output file into itself
                if os.path.abspath(txt_path) == os.path.abspath(out_path):
                    continue
                with open(txt_path) as src:
                    merged.write(src.read() + "\n")
if __name__ == '__main__':
    # NOTE(review): hard-coded absolute path; result.txt is written inside
    # the very directory being merged
    filepath="/Users/zhuangzhuanghuang/Code/stock_crawler/a_stock_news/"
    outfile="result.txt"
    MergeTxt(filepath,outfile)
    time2 = time.time()  # elapsed time would be time2 - time1 (never printed)
|
985,860 | 0ed2cbb9f9d59bd2610bc6f9c8e566a8d2268b8c | # 치환문 예
a = 1
b = a+1
print(a, b, sep=' , ')
# 한줄에 동시에 대입하고싶음
# 세미클론으로 치환문을 구분할 수 있다,
e = 3.5 ; f= 5.3
print(e, f)
# 여러 개를 한번에 치환하기
e, f = 3.5, 5.3
print(e,f) # packing unpacking이 일어나는 것
# 같은 값을 여러 변수에 대입
x = y = z = 10
# c 스타일은 지원 x
# x = (y=10) 같은
print(x,y,z)
# 동적 타이핑 : 변수에 새로운 값이 할당되면 값을 버리고 새로운 값으로 치환된다
a = 1 # 동적으로 type이 결정된다. 자바로는 Integer a= 1.
print(a, type(a))
a = 2
print(a, type(a)) # 위에 꺼랑 다른거다 위에 a는 가비지컬렉터가 삭제시킴, 그냥 이름테이블에서 a 쓰는 느낌
a = 'hello'
print(a, type(a))
# 확장 치환문
a = 10
a += 10 # a = a + 10
|
985,861 | 7ed75507a58d17fdf0f331066e5c55e40ac1f727 | import pytz
from django.utils import timezone
from django.contrib.sessions.middleware import SessionMiddleware
from django.utils.deprecation import MiddlewareMixin
class SessionCustomMiddleware(SessionMiddleware):
    """Session middleware that guarantees every visitor has a session and a
    default timezone preference."""
    def process_request(self, request):
        super().process_request(request)
        # force a session into existence for first-time/anonymous visitors so
        # the timezone preference below has somewhere to live
        if not request.session.session_key:
            request.session.cycle_key()
            # seed the default; TimezoneMiddleware reads this key per request
            request.session['django_timezone'] = 'Europe/Moscow'
class TimezoneMiddleware(MiddlewareMixin):
    """Activate the timezone stored in the session for the current request."""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        tz_name = request.session.get('django_timezone')
        if not tz_name:
            # no preference recorded: fall back to the default timezone
            timezone.deactivate()
        else:
            timezone.activate(pytz.timezone(tz_name))
        return self.get_response(request)
|
985,862 | e7b078d153b1a54a42ac95bacab1d22ab57281b1 | from keras.engine import Layer, InputSpec
from keras import initializations, regularizers
from keras import backend as K
import tensorflow as tf
#!!!!Change axis to adapt for different than dim_ordering=theano and backend=tf!!!!
class Bias(Layer):
    '''
    Simple bias layer.

    Adds a single learnable bias vector along `axis` (default 1; see the
    module note about dim_ordering/backend). If `beta_init` is an int, the
    bias is ones-initialized and scaled by that constant factor instead.
    '''
    def __init__(self, axis=1, momentum = 0.9, beta_init='zero', **kwargs):
        self.momentum = momentum  # stored only; round-tripped via get_config
        self.axis = axis
        # integer beta_init means: ones initializer scaled by that constant
        if type(beta_init) == int:
            self.beta_factor = beta_init
            self.beta_init = initializations.get('one')
        else:
            self.beta_init = initializations.get(beta_init)
            self.beta_factor = 1
        super(Bias, self).__init__(**kwargs)
    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        # one bias weight per entry along the chosen axis
        shape = (input_shape[self.axis],)
        self.beta = self.beta_init(shape, name='{}_beta'.format(self.name))
        self.trainable_weights = [self.beta]
    def call(self, x, mask=None):
        input_shape = self.input_spec[0].shape
        # reshape beta so it broadcasts along every axis except self.axis
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]
        b = K.reshape(self.beta, broadcast_shape)*self.beta_factor
        # NOTE(review): debug tf.Print left in -- this logs the full bias on
        # every forward pass; confirm whether it should be removed
        b = tf.Print(b, [b], summarize=2000, message="bias: ")
        out = x + b
        return out
    def get_config(self):
        config = {"momentum": self.momentum}
        base_config = super(Bias, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
985,863 | d48411d0d54ac477b0c104bd69057a3806ae69ad | from django.db import models
class Feature (models.Model):
    """A user-submitted feature request that can be voted on and moderated."""
    creator = models.ForeignKey('account.User')
    title = models.CharField(max_length=120)
    desc = models.TextField('Description')
    approved = models.BooleanField(default=False)  # moderator approval flag
    closed = models.BooleanField(default=False)    # closed for further voting
    votes = models.IntegerField(default=0)
    created = models.DateTimeField(auto_now_add=True)
    def __unicode__ (self):
        # Python 2 / pre-Django-2 string representation
        return self.title
|
985,864 | f93fa0d6d3d9e29540f3cdfeda13712adb1d4b09 | # algorithm quicksort(A, lo, hi):
# if lo < hi then
# p ← pivot(A, lo, hi)
# left, right ← three-way-partition(A, p, lo, hi)
# quicksort(A, lo, left - 1)
# quicksort(A, right, hi)
# procedure three-way-partition(A, pivot, lo, hi):
# l ← lo
# r ← lo
# u ← hi
# while r ≤ u:
# if A[r] < pivot:
# swap A[l] and A[r]
# l ← l + 1
# r ← r + 1
# else if A[r] > pivot:
# swap A[r] and A[u]
# u ← u - 1
# else: // the element is equal to pivot
# r ← r + 1
# return l, r
class AdvQuickSort(object):
    """In-place quicksort using Dutch-national-flag (three-way) partitioning,
    which handles arrays with many duplicate keys efficiently."""

    def __init__(self):
        pass

    def three_way_paritition(self, a, pivot, low, high):
        """Partition a[low..high] into <pivot | ==pivot | >pivot regions.

        Returns (lt, gt): the equal region occupies a[lt:gt].
        """
        lt, scan, gt = low, low, high
        while scan <= gt:
            item = a[scan]
            if item < pivot:
                a[scan], a[lt] = a[lt], a[scan]
                lt += 1
                scan += 1
            elif item > pivot:
                a[scan], a[gt] = a[gt], a[scan]
                gt -= 1
            else:
                # equal to the pivot: leave it in the middle region
                scan += 1
        return lt, scan

    def sort(self, array, low, high):
        """Recursively sort array[low..high] (inclusive bounds) in place."""
        if low >= high:
            return
        left, right = self.three_way_paritition(array, array[high], low, high)
        self.sort(array, low, left - 1)
        self.sort(array, right, high)
# demo: sort a list containing duplicates in place and print it
array = [1, 4, 2, 4, 5, 20, 0, 2, 4, 2, 4, 5]
obj = AdvQuickSort()
obj.sort(array, 0, 11)  # 11 == len(array) - 1 (inclusive upper bound)
print(array)
|
985,865 | ccbbcc5961ff1470f8dadfc206d0037014bf60c2 | # Taxis agent construct a probability distribution for the best spot on the board
# each spot on the board has some values associated with it
import random
import math
import sys
sys.path.insert(0, './../utils')
import configurations as cf
import utilities as ut
from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
class Taxis_agent():
    """Taxis agent: maintains, per pokemon id, a probability grid over board
    positions, and walks toward high-probability cells for the pokemons
    currently visible on its radar (epsilon-greedy exploration otherwise).
    """

    def __init__(self, legal_actions = None, exploration_prob = cf.EXPLORATION_RATE, verbose = cf.AGENT_VERBOSE):
        # iteration counter; drives the decaying learning rate and debug plots
        self.num_iters = 0
        self.exploration_prob = exploration_prob
        self.verbose = verbose
        # the agent has the right to know the board size and the radar radius
        self.radar_radius = cf.MINIMUM_RADAR_SIZE
        self.board_size = cf.WORLD_SIZE
        # fix: the legal_actions callback handed to the constructor used to be
        # silently discarded; bind it when provided (assigning
        # agent.legal_actions externally afterwards still works as before)
        if legal_actions is not None:
            self.legal_actions = legal_actions
        # pokemon_id -> {(x, y): probability that this pokemon appears there}
        self.probs = {}
        # pokemon_id -> radar sighting count, used to generalize preferences
        self.pokemon_count = {}
        # counter grid for positions where pokemons are actually caught
        self.mixed_probs = self._new_prob()

    def _sigmoid(self, x):
        # NOTE(review): this is 1/(1+e^{+x}) (a mirrored sigmoid); all call
        # sites are currently disabled -- kept for experimentation
        return 1.0 / (1 + math.exp(x))

    def _inverse_sigmoid(self, y):
        return - math.log(1.0 / y + 1)

    def _sigmoid_gradient(self, sig):
        return sig * (1.0 - sig)

    def _new_prob(self, zero_out = False):
        """Fresh probability grid over every board cell (0 or the 0.5 prior)."""
        new_prob = {}
        for x in range(self.board_size):
            for y in range(self.board_size):
                new_prob[(x, y)] = 0 if zero_out else 0.5
        return new_prob

    def _normalize_prob(self, prob):
        # in-place normalization so the grid sums to 1
        total = sum(prob.values())
        for position in prob:
            prob[position] /= total

    def _exp_prob(self, prob):
        # in-place exponentiation (softmax-style emphasis)
        for position in prob:
            prob[position] = math.exp(prob[position])

    def _max_in_prob(self, prob):
        """Return the position with the highest probability.

        Bug fix: this used to `return position` (the loop variable, i.e. an
        arbitrary last-iterated key) instead of the tracked best position.
        """
        max_prob = -1
        best_position = None
        for position in prob:
            if prob[position] > max_prob:
                max_prob = prob[position]
                best_position = position
        return best_position

    # borders mark out the four values of the region border;
    # only used to increase the probabilities
    def _increment_probs(self, borders, pokemon_id, learning_rate):
        # lazy construction of this pokemon's grid
        if pokemon_id not in self.probs:
            self.probs[pokemon_id] = self._new_prob()
        (start_x, start_y, end_x, end_y) = borders
        prob = self.probs[pokemon_id]
        for x in range(start_x, end_x):
            for y in range(start_y, end_y):
                prob[(x, y)] += learning_rate

    # position is a single cell; a negative learning_rate decrements it
    def _increment_prob(self, position, pokemon_id, learning_rate):
        if pokemon_id not in self.probs:
            self.probs[pokemon_id] = self._new_prob()
        self.probs[pokemon_id][position] += learning_rate

    def _get_direction(self, source, destination):
        """Pick one move that steps from source toward destination."""
        if source == destination: return random.choice(self.legal_actions())
        actions = []
        if destination[0] > source[0]: actions.append('Right')
        if destination[0] < source[0]: actions.append('Left')
        if destination[1] > source[1]: actions.append('Up')
        if destination[1] < source[1]: actions.append('Down')
        return random.choice(actions)

    # the legal_actions function is provided by the board; no input is needed,
    # the board knows where the agent is now
    def get_action(self, state):
        """Explore randomly with prob exploration_prob; otherwise sample a
        destination from the summed grids of the pokemons on radar and head
        toward it."""
        self.num_iters += 1
        # periodic debug visualization of pokemon 16's learned grid
        if self.num_iters % 10000 == 0:
            image = [[0 for _ in range(self.board_size)] for _ in range(self.board_size)]
            for (x, y) in self.probs[16]:
                image[x][y] = self.probs[16][(x, y)]
            plt.imshow(image, cmap='hot', interpolation='nearest')
            plt.show()
        # explore (or nothing has been learned yet)
        if random.random() < self.exploration_prob or len(self.probs) == 0:
            return random.choice(self.legal_actions())
        (agent_position, radar) = state
        # vote: sum the learned grids of every pokemon on radar, restricted to
        # the radar region around the agent. Possible extensions: weight the
        # pokemons instead of summing, and consider off-radar pokemons too.
        voting_prob = self._new_prob(zero_out = True)
        for pokemon_id in radar:
            if pokemon_id in self.probs:
                (start_x, start_y, end_x, end_y) = ut.get_radar_region(agent_position, self.radar_radius, self.board_size)
                for x in range(start_x, end_x):
                    for y in range(start_y, end_y):
                        voting_prob[(x, y)] += self.probs[pokemon_id][(x, y)]
        if sum(voting_prob.values()) == 0: return random.choice(self.legal_actions())
        self._normalize_prob(voting_prob)
        # sample a destination cell proportionally to the votes
        # (keys() and values() enumerate in a consistent order here)
        destination_selector = stats.rv_discrete(values = (range(self.board_size * self.board_size), voting_prob.values()))
        return self._get_direction(agent_position, voting_prob.keys()[destination_selector.rvs()])

    def incorperate_feedback(self, state, action, reward, pokemons_caught, new_state):
        """Update the grids from what the radar shows and what was caught.

        The game can last forever, so there is no end-state handling.
        """
        (agent_position, radar) = new_state
        if len(radar) > 0:
            borders = ut.get_radar_region(agent_position, self.radar_radius, self.board_size)
            # NOTE(review): computed but unused -- the sighting bump below
            # uses the fixed 0.001 step; confirm which is intended
            learning_rate = 1.0 / math.sqrt(self.num_iters)
            for pokemon_id in radar:
                # count the sighting for this pokemon
                if pokemon_id not in self.pokemon_count: self.pokemon_count[pokemon_id] = 0
                self.pokemon_count[pokemon_id] += 1
                # small bump over the whole nearby region for a sighting
                self._increment_probs(borders, pokemon_id, 0.001)
        for pokemon_id in pokemons_caught:
            print(pokemon_id)
            if pokemon_id not in self.probs:
                self.probs[pokemon_id] = self._new_prob()
            # large, Manhattan-distance-decayed bump around the catch position
            for position in self.mixed_probs:
                mht_dis = abs(agent_position[0] - position[0]) + abs(agent_position[1] - position[1])
                self.probs[pokemon_id][position] += 0.1 * math.exp(- mht_dis)
|
985,866 | b579c007bc1b845e64442fc342068f8f954da35f | #!/usr/bin/env python
from __future__ import division # make sure division yields floating
import sys, re
import argparse
import numpy as np
def main(args):
    """Filter a methylation matrix for signature rows.

    Row layout: 6 leading annotation columns, then one value column per
    group (group names come from the header row). A row is printed when the
    'test' groups (args.u) look unmethylated relative to the row minimum and
    the remaining included groups look methylated, subject to the fraction
    thresholds in args.pu / args.pm / args.pn.
    """
    is_include = {}   # group name -> participates in the comparison at all
    is_test = {}      # group name -> belongs to the unmethylated test set
    for ii, line in enumerate(args.m):
        fields = line.strip().split('\t')
        if (ii == 0):
            # header row: learn the group names and build the include/test maps
            groups = fields[6:]
            for g in groups:
                is_include[g] = True
                if g in args.u.split(','):
                    is_test[g] = True
                else:
                    is_test[g] = False
            # -i (explicit include list) overrides the default all-included
            if args.i != '':
                for g in groups:
                    if g in args.i.split(','):
                        is_include[g] = True
                    else:
                        is_include[g] = False
            # -x excludes groups on top of whatever -i selected
            if args.x != '':
                for g in args.x.split(','):
                    is_include[g] = False
            print(line.strip(), end='\n')
            continue
        # row minimum over included, non-NA value columns
        min_v = np.min([float(fields[k]) for k in range(6, len(fields)) if fields[k] != "NA" and is_include[groups[k-6]]])
        n_notna = len([k for k in range(6, len(fields)) if fields[k] != "NA" and is_include[groups[k-6]]])
        n_included = len([k for k in range(6, len(fields)) if is_include[groups[k-6]]])
        # require enough non-NA coverage among the included groups
        if n_notna / n_included < args.pn:
            continue
        # pass/total counters for the test ("in") and background ("out") groups
        cnt_in_pass = 0.0
        cnt_in_all = 0.0
        cnt_out_pass = 0.0
        cnt_out_all = 0.0
        for jj in range(6, len(fields)):
            g = groups[jj-6]
            if (not is_include[g]) or fields[jj] == "NA":
                continue
            if is_test[g]:
                cnt_in_all += 1
                # "unmethylated": within args.fu of the row minimum
                if float(fields[jj])-min_v < args.fu:
                    cnt_in_pass += 1
            else:
                cnt_out_all += 1
                # print(min_v, fields[jj], args.fm)
                # "methylated": more than args.fm above the row minimum
                if float(fields[jj])-min_v > args.fm:
                    cnt_out_pass += 1
        # print(cnt_in_pass, cnt_in_all, cnt_out_pass, cnt_out_all)
        # need at least one usable value on each side to form the ratios
        if (cnt_in_all == 0 or cnt_out_all == 0):
            continue
        if (cnt_in_pass / cnt_in_all >= args.pu) and (cnt_out_pass / cnt_out_all >= args.pm):
            print(line.strip(), end='\n')
if __name__ == '__main__':
    parser = argparse.ArgumentParser('search methylation signature')
    # '-' default lets the matrix be piped in on stdin
    parser.add_argument('-m', help='methylation matrix', type = argparse.FileType('r'), default='-')
    parser.add_argument('-u', help='signature unmethylation group', default = '')
    parser.add_argument('-x', help='group to ignore', default = '')
    parser.add_argument('-i', help='group to include', default = '')
    parser.add_argument('--fu', help='def of unmethylated', type=float, default = 0.2)
    parser.add_argument('--fm', help='def of methylated', type=float, default = 0.7)
    parser.add_argument('--pu', help='minimum percentage of unmethylated', type=float, default = 1)
    parser.add_argument('--pm', help='minimum percentage of methylated', type=float, default = 1)
    parser.add_argument('--pn', help='minimum fraction of non-na', type=float, default=0.8)
    # dispatch through set_defaults(func=...) for symmetry with subcommand CLIs
    parser.set_defaults(func=main)
    args = parser.parse_args()
    args.func(args)
|
985,867 | 45d4625794f7ba3e364a8963a2a945101151c94e | import datetime
import pandas as pd
import pandas_datareader as web
import plotille
# select start date for correlation window as well as list of tickers
start = datetime.datetime(2020, 1, 1)
end = datetime.datetime(2020, 3, 4)
symbols_list = ['SPY', 'AAPL']
# array to store per-ticker price frames
symbols = []
# pull prices from Yahoo Finance for each symbol in the list defined above
for ticker in symbols_list:
    r = web.DataReader(ticker, 'yahoo', start, end)
    # add a symbol column so frames can be distinguished after concat
    r['Symbol'] = ticker
    symbols.append(r)
# concatenate into one long df and keep only date/close/symbol
df = pd.concat(symbols)
df = df.reset_index()
df = df[['Date', 'Close', 'Symbol']]
print(df.head())
# wide format: one column per symbol, one row per date
df_pivot = df.pivot('Date', 'Symbol', 'Close').reset_index()
stock_table = df_pivot

def short_date(val, chars, delta, left=False):
    # plotille label-formatter signature; only the timestamp value is used
    res = val.strftime('%x')  # locale short date, e.g. 01/02/20
    return res

fig = plotille.Figure()
fig.width = 60
fig.height = 30
# render Timestamp axis labels as short dates instead of raw reprs
fig.register_label_formatter(pd._libs.tslibs.timestamps.Timestamp, short_date)
for symbol in stock_table.columns:
    if(symbol != 'Date'):
        fig.plot(stock_table['Date'], stock_table[symbol], label=symbol)
print(fig.show(legend=True))
|
985,868 | 53124f592c2cf87e4e27d78793648b840a19372f | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
# Create your views here.
def login_page(request):
    """Log a user in from POSTed credentials; render the login form on GET.

    Redirects to /home on success and back to /login on failure.
    """
    if request.method != 'POST':
        return render(request, 'r2n2/login.html')
    user = authenticate(username=request.POST['uname'], password=request.POST['passwd'])
    if user is not None and user.is_active:
        login(request, user)
        # fix: redirect() silently ignores extra positional args for URL
        # targets, so the old {'current_user': ...} dict did nothing; the
        # home view builds its own context from request.user anyway
        return redirect('/home')
    return redirect('/login')
def home_page(request):
    """Render the landing page for the current (possibly anonymous) user."""
    return render(request, 'r2n2/index.html', {'current_user': request.user})
def logout_page(request):
    """Log the current user out and show the login form again."""
    logout(request)
    # NOTE(review): this renders the login template directly, so the browser
    # URL stays on the logout path -- confirm a redirect isn't intended
    return render(request, 'r2n2/login.html')
|
985,869 | a5f4a26d347bbfea4c8a31e974f451c6b891f9c9 | import math
import numpy as np
from numpy.linalg import norm, lstsq, inv, cholesky
from scipy.linalg import toeplitz
from scipy.optimize import least_squares
import matplotlib.pyplot as plt
from sklearn.base import BaseEstimator
def convert_deriv_to_mag(J,dpred):
    """
    Convert jacobian matrix of complex derivatives of predicted values wrt model params
    to a jacobian matrix of derivatives of the magnitudes of the predicted values wrt
    model parameters.
    Requires inputs of both jacobian matrix J and predicted data dpred associated with J.
    (ie since typically this is used for a locally-linearized nonlinear model.)
    Assumes dims NxM for N predicted values and M model parameters
    """
    # FIXME: reverify this equation!
    # Intended formula (per the commented reference below):
    #   Jmag = [conj(m)*J + m*conj(J)] / (2*|m|)   element-wise per row
    # NOTE(review): np.dot(conj(dpred.T), J) contracts over N rather than
    # scaling rows element-wise, and the trailing /2/np.abs(dpred) then
    # broadcasts -- confirm the shapes/intent before relying on this.
    # Jmag = [np.conj(m) * J + m * np.conj(J)] /2/np.abs(m)
    Jmag = np.add( np.dot(np.conj(dpred.T),J) , np.dot(dpred.T,np.conj(J)) ) /2/np.abs(dpred)
    Jmag = np.real(Jmag) # just stripping off the +0.j's and setting type to real
    # That /2/np.abs(dpred) divides element-wise into every column the matrix via Python's broadcasting.
    return Jmag
def convert_deriv_to_inv(J,dpred):
    """
    Convert a jacobian of complex derivatives of predicted values wrt model
    params into the jacobian of the *inverses* of the predicted values wrt
    the same params, via the chain rule d(1/f)/dm = -f'/f**2.

    Requires both the jacobian J and the predicted data dpred that J was
    linearized around (typical for a locally-linearized nonlinear model).
    Assumes dims NxM for N predicted values and M model parameters.
    """
    # the division broadcasts dpred element-wise down every column of J
    denom = np.square(dpred)
    return -J / denom
def convert_deriv_to_log10(J,dpred):
    """
    Convert a jacobian of derivatives of predicted values wrt model params
    into the jacobian of log10 of the predicted values wrt the same params,
    via d(log10 f)/dm = log10(e) * f'/f.

    Requires both the jacobian J and the predicted data dpred that J was
    linearized around. Assumes dims NxM (N predicted values, M params) and
    that dpred and J are already real-valued.
    """
    scale = np.log10(np.exp(1.0))  # log10(e), the chain-rule factor
    # the division broadcasts dpred element-wise down every column of J
    return scale * J / dpred
def separate_cplx(x):
    """Stack the real and imaginary parts of a 1-D or 2-D array along axis 0.

    Returns concatenate((real(x), imag(x)), axis=0), doubling the first
    dimension. Raises ValueError for any other rank (the original printed a
    message and then crashed with UnboundLocalError; both 1-D and 2-D
    branches were also identical, so they are merged here).
    """
    if x.ndim not in (1, 2):
        raise ValueError('separate_cplx: error: input dim is not 1 or 2.')
    return np.concatenate((np.real(x), np.imag(x)), axis=0)
def cplxMSE(a,b):
    """Mean squared error sum(|a-b|^2)/N; handles complex-valued data."""
    diff = np.subtract(a, b)
    # conj-transpose inner product gives sum of squared magnitudes
    total = np.dot(np.conjugate(diff).T, diff)
    # the value is real up to +0j; take the real part and unwrap the
    # singleton array into a plain Python scalar
    return np.real(total / diff.size).item()
def jacfindiff(fwdfunc,x,dx=1.0e-8):
    """Estimate the jacobian of fwdfunc at x by first-order forward
    differences, perturbing one model parameter at a time.

    fwdfunc maps an (M,1) model vector to an (N,) / (N,1) data vector; the
    result J has shape (N, M). Prints one '.' per column as progress.
    """
    # first order fwddiffs calc for jacobian matrix estimation
    M = len(x)
    dx = dx * np.ones((M,1))
    x = x.reshape(len(x),1)
    # Now make dx an exactly representable number vis a vis machine precision
    # so that all error in derivs are from numerator (see Num Rec 2nd ed, sec 5.7).
    # The order of these two statements is deliberate -- do not "simplify".
    temp = x+dx; # note temp is vector of same length as model vector
    dx = temp-x; # Believe it or not this is actually a slightly different dx now,
                 # different by an error of order of machine precision.
                 # This effect may or may not make it into fwdprob input which
                 # could have limited input precision, but in any case will have
                 # an effect via denominator at end of this script.
    mask=np.eye(M);
    F = fwdfunc(x)  # baseline prediction at the unperturbed model
    J = np.ndarray(shape=(len(F),M), dtype=float)
    for j in range(M):
        # perturb only parameter j (mask column selects it)
        d = np.multiply(dx, mask[:,j].reshape(M,1))
        xtmp = x + d
        J[:,j] = (fwdfunc(xtmp) - F) / dx[j]
        print('.', end='')
    return J
#def check_derivs()
def fd_mtx(mlen,type='fwd',order=2,bounds=False):
    """Produces finite difference matrices of specified type & order.

    type = {'fwd','bkwd','ctr'}
    order = {1,2} for first or second diffs
    bounds = {True,False} include/not the boundary rows at top or bottom.
             If True then matrix is square.
    Unknown (type, order) combinations yield an all-zero square matrix,
    matching the original fall-through behavior.
    """
    first_row = np.zeros(mlen)
    first_col = np.zeros(mlen)
    # stencil entries: (type, order) -> ({row index: value}, {col index: value})
    stencils = {
        ('fwd', 1): ({0: -1, 1: 1}, {0: -1}),
        ('fwd', 2): ({0: 1, 1: -2, 2: 1}, {0: 1}),
        ('bkwd', 1): ({0: 1}, {0: 1, 1: -1}),
        ('bkwd', 2): ({0: 1}, {0: 1, 1: -2, 2: 1}),
        ('ctr', 1): ({1: .5}, {1: -.5}),
        ('ctr', 2): ({0: -2, 1: 1}, {0: -2, 1: 1}),
    }
    row_vals, col_vals = stencils.get((type, order), ({}, {}))
    for idx, val in row_vals.items():
        first_row[idx] = val
    for idx, val in col_vals.items():
        first_col[idx] = val
    T = toeplitz(first_col, first_row)
    if bounds == False:
        # boundary rows to drop for each scheme
        drop = {
            ('fwd', 1): (mlen - 1,),
            ('bkwd', 1): (0,),
            ('ctr', 1): (0, mlen - 1),
            ('fwd', 2): (mlen - 2, mlen - 1),
            ('bkwd', 2): (0, 1),
            ('ctr', 2): (0, mlen - 1),
        }.get((type, order))
        if drop is not None:
            T = np.delete(T, drop, axis=0)
    return T
def create_findiff_mtx(mlen,beta):
    """Build the regularization operator: a 2nd-difference matrix with its
    first row removed so the parameter at the bottom interface is not
    smoothed.

    NOTE(review): `beta` is currently unused -- the ridge-regression rows it
    would scale are commented out below; confirm before removing the param.
    """
    L = fd_mtx(mlen,'fwd',2,bounds=False) # 2nd finite diff matrix
    L = np.delete(L, (0), axis=0) # don't smooth first param at btm interface
    #L = np.concatenate((L,beta*np.eye(mlen)),axis=0) # append ridge regr too
    return L
class InvTF(BaseEstimator):
    """Frequentist inversion for weakly nonlinear problems using Tikhonov
    regularization.

    Parameters
    ----------
    fwd_deriv_code : callable
        Forward model; called as ``ypred, J = fwd_deriv_code(m)`` where ``m``
        is the model-parameter vector and ``J`` the Jacobian of the predicted
        data with respect to ``m``.
    minit : array-like
        Initial model-parameter vector.
    alpha : float
        Regularization weight applied to the finite-difference operator.
    beta : float
        Forwarded to ``create_findiff_mtx`` (whose ridge rows are commented
        out, so beta currently has no effect).
    max_nfev : int
        Maximum number of solver iterations / forward evaluations.
    dmtol : float
        Convergence tolerance on ||dm|| (used only by ``alg='mine'``).
    usefindiff : bool
        If True, approximate the Jacobian by finite differences.
    showplot, verbose : bool
        Plotting / console-output switches.
    """
    def __init__(self, fwd_deriv_code, minit, alpha=0.01, beta=50., max_nfev=5,
                 dmtol=0.001, usefindiff=False, showplot=True, verbose=True):
        self.fwd_deriv_code = fwd_deriv_code
        self.minit = minit
        self.max_nfev = max_nfev  # only relevant after expanding this to nonlinear
        self.alpha = alpha
        self.beta = beta
        self.dmtol = dmtol  # only relevant after expanding this to nonlinear
        self.usefindiff = usefindiff
        self.showplot = showplot
        self.verbose = verbose
        #super().__init__(fit_intercept=False, copy_X=True)
        #super(InvT0, self).__init__(self, fit_intercept=False, copy_X=True)

    def fit(self, ymeas, mmeas=None, alg='optls'):  # alg: {'optls','mine'}
        """Invert measured data ``ymeas`` for the model parameters.

        Parameters
        ----------
        ymeas : ndarray
            Measured data vector (column).
        mmeas : ndarray, optional
            True model, used only to compute testMSE for validation.
        alg : {'optls','mine'}
            'optls' uses scipy.optimize.least_squares; 'mine' is a
            hand-rolled Gauss-Newton loop.

        Returns
        -------
        tuple
            (m, cost, misfit, modelnorm, norm(dm), testMSE); for
            alg='optls' the middle three slots are np.nan.
        """
        # The regularized systems being solved, schematically:
        # [ X ]*dm = [ dy ]
        # [ a ]      [ 0  ]    <-- using built-in Ridge model does this
        #
        # [ X ]*dm = [ dy  ]
        # [-a ]      [ a*m ]   <-- but I want this for iterated nonlin problem
        #
        # [ X  ]*dm = [ dy    ]
        # [-aL ]      [ a*L*m ] <-- and more generally I want this (higher-order Tihk)
        #
        # which can be rewritten:
        #   G * dm = D   (and then loop that from m0 with m=m+dm...)
        # X is the Jacobian matrix of derivs of predicted data points wrt model
        # params m, as given by ypred,X=self.fwd_deriv_code(m)...
        if alg=='optls':
            # https://docs.scipy.org/doc/scipy/reference/optimize.html
            # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html
            def fun(m):
                # Residual vector: data misfit stacked on top of alpha*L*m.
                mlen = m.size
                L = create_findiff_mtx(mlen,self.beta)
                ypred,J = self.fwd_deriv_code(m)  # m: model params vector, J: derivs matrix
                resids = ymeas-ypred
                modelfunc = self.alpha * np.dot(L,m)
                modelfunc = modelfunc.reshape(len(modelfunc),1)
                f = np.squeeze(np.concatenate((resids,modelfunc),axis=0))
                return f
            def jac(m):
                # Jacobian of fun: forward-model Jacobian stacked on alpha*L.
                mlen = m.size
                L = create_findiff_mtx(mlen,self.beta)
                ypred,J = self.fwd_deriv_code(m)  # m: model params vector, J: derivs matrix
                Jreg = self.alpha * L
                Jout = np.concatenate((J,Jreg))
                return Jout
            if self.usefindiff:
                # let least_squares finite-difference the Jacobian itself
                jacfn='2-point'
            else:
                jacfn=jac
            if self.verbose:
                verblevel=2
            else:
                verblevel=0
            # NOTE(review): bounds (0., 3.5) are hard-coded — problem specific;
            # confirm they match the expected model-parameter range.
            res = least_squares(fun, np.squeeze(self.minit), jac=jacfn,
                bounds=(0., 3.5), diff_step=None, verbose=verblevel, max_nfev=self.max_nfev,
                method='trf', ftol=1e-08, xtol=1e-08, gtol=1e-08, x_scale=1.0)
                #ftol=1e-4, xtol=1e-1, gtol=1e-8, x_scale=1.0)
                #ftol=1e0, xtol=1e-01, gtol=1e-01, x_scale=1.0)
                #ftol=1e-08, xtol=1e-08, gtol=1e-08, x_scale=1.0)
            if mmeas is not None:
                testMSE = cplxMSE(res.x.reshape(len(res.x),1),mmeas)
            else:
                # NOTE(review): `npl` is presumably an alias of numpy (np.nan);
                # numpy.linalg has no `nan` attribute — confirm the alias.
                testMSE = npl.nan
            ypred,J = self.fwd_deriv_code(res.x.reshape(len(res.x),1))
            # NOTE(review): predictions compared in log10 space below —
            # presumably ymeas is log-scaled; confirm against callers.
            ypred=np.log10(ypred)
            residnorm = norm(ypred-ymeas)
            print('resid norm',residnorm)
            L = create_findiff_mtx(len(self.minit),self.beta)
            print('maxeig JJ',np.real(np.amax(np.linalg.eigvals(np.dot(J.T,J)))))  # J'J has real eigvals but kept cplx type
            print('maxeig LL',np.amax(np.linalg.eigvals(np.dot(L.T,L))))
            if self.showplot:
                f, ax = plt.subplots(1, 2, figsize=(11,4))
                # plot the meas and pred data:
                # print('ypred',ypred)
                # print('ymeas',ymeas)
                ax[0].plot(ypred,'r.-')
                ax[0].plot(ymeas,'k.-')
                ax[0].grid()
                #ax[0].set_ylabel('cost')
                #ax[0].set_xlabel('iterations')
                ax[0].set_title('Measured (blk) and predicted (blu) data')
                # plot the init, true, and final model param vectors:
                ax[1].plot(self.minit,'g.-')
                ax[1].plot(res.x,'r.--')
                ax[1].plot(mmeas,'k.--')
                ax[1].grid()
                #ax[1].set_ylabel('model value')
                #ax[1].set_xlabel('indep var')
                ax[1].set_title('Model vectors (true=blk, init=grn, soln=red)')
            # return m,cost,misfit,modelnorm,norm(dm),testMSE
            return res.x,res.cost,np.nan,np.nan,np.nan,testMSE
        elif alg=='mine':
            # Hand-rolled Gauss-Newton iteration on the stacked system
            #   [ X ; -alpha*L ] dm = [ dy ; alpha*L*m ]
            cost = []
            m = self.minit
            mlen = len(m)
            if self.verbose:
                print('iter alpha cost norm(dd) norm(dm) dmtol')
            for i in range(self.max_nfev):
                ypred,X = self.fwd_deriv_code(m)  # m: model params vector, X: derivs matrix
                if self.usefindiff:
                    def tmpfwdcode(m):
                        return np.squeeze(self.fwd_deriv_code(m)[0])
                    X = jacfindiff(tmpfwdcode,m,dx=1.0e-6)  # dx=1.0e-6 is problem dependent!
                L = create_findiff_mtx(mlen,self.beta)
                G = np.concatenate((X, -self.alpha*L),axis=0)
                D = np.concatenate((ymeas-ypred, self.alpha*np.dot(L,m)),axis=0)
                misfit = cplxMSE(ymeas, ypred)
                modelnorm = norm(np.dot(L,m))**2
                current_cost = misfit + pow(self.alpha,2)*modelnorm
                # least-squares model update dm, then step the model
                dm,res,rnk,sv = lstsq(G,D)
                m = m + dm
                cost.append(current_cost)
                if self.verbose:
                    print('%3d %6.1g %10.3f %10.3f %10.2g %6.3g' %
                        (i, self.alpha, current_cost, norm(ymeas-ypred), norm(dm), self.dmtol))
                if norm(dm) < self.dmtol:
                    # converged: model update negligible
                    break
            self.G = G
            self.ypred = ypred
            if mmeas is not None:
                testMSE = cplxMSE(m,mmeas)
            else:
                # NOTE(review): see the npl.nan note in the 'optls' branch.
                testMSE = npl.nan
            print('maxeig JJ',np.real(np.amax(np.linalg.eigvals(np.dot(X.T,X)))))  # X'X has real eigvals but kept cplx type
            print('maxeig LL',np.amax(np.linalg.eigvals(np.dot(L.T,L))))
            if self.showplot:
                f, ax = plt.subplots(1, 2, figsize=(11,4))
                # plot the cost (ie loss) per iterations:
                ax[0].semilogy(cost,'.-')  # (last element of cost)
                ax[0].grid()
                ax[0].set_ylabel('cost')
                ax[0].set_xlabel('iterations')
                ax[0].set_title('Cost history (misfit^2 + alpha^2*modelnorm^2)')
                # plot the init, true, final, and evolution of model params:
                #print('m',np.squeeze(m.T))
                ax[1].plot(mmeas,'k')
                ax[1].plot(self.minit,'g')
                ax[1].plot(m,'r')
                ax[1].grid()
                #ax[1].set_ylabel('model value')
                ax[1].set_xlabel('indep var')
                ax[1].set_title('Model vectors')
            return m,cost[-1],misfit,modelnorm,norm(dm),testMSE

    def get_hyperparams(self):
        """Return (max_nfev, dmtol, alpha)."""
        return (self.max_nfev, self.dmtol, self.alpha)
class InvTB(BaseEstimator):
    """Bayesian (MAP) inversion for weakly nonlinear problems with a Gaussian
    prior (Tikhonov-style regularization).

    Parameters
    ----------
    fwd_deriv_code : callable
        Forward model; called as ``ypred, J = fwd_deriv_code(m)``.
    minit : array-like
        Initial model vector handed to the solver.
    mprior : array-like
        Prior mean model.
    Cprior : ndarray
        Prior model covariance; a 1-D array is treated as the diagonal.
    lb, ub : float or array-like
        Bound constraints passed to least_squares.
    max_nfev : int
        Maximum forward evaluations.
    dmtol : float
        Used as the ``xtol`` convergence tolerance of least_squares.
    diff_step, usefindiff, showplot, verbose
        See InvTF.
    """
    def __init__(self, fwd_deriv_code, minit, mprior, Cprior, lb=-np.inf, ub=np.inf,
                 max_nfev=5, dmtol=1e-8, diff_step=None, usefindiff=False, showplot=True, verbose=True):
        if Cprior.ndim==1:
            # a vector of variances -> diagonal covariance matrix
            self.Cprior = np.diagflat(Cprior)
        else:
            self.Cprior = Cprior
        self.fwd_deriv_code = fwd_deriv_code
        self.minit = minit
        self.mprior = mprior
        self.lb = lb
        self.ub = ub
        self.max_nfev = max_nfev
        self.xtol = dmtol
        self.diff_step = diff_step
        self.usefindiff = usefindiff
        self.showplot = showplot
        self.verbose = verbose

    def fit(self, ymeas, ysigma, mmeas=None):
        """MAP fit of the model to ``ymeas`` with data std-dev ``ysigma``.

        Parameters
        ----------
        ymeas : ndarray
            Measured data (Nx1 column).
        ysigma : scalar or ndarray
            Data standard deviations; a scalar is broadcast to all points.
        mmeas : ndarray, optional
            True model for computing testMSE.

        Returns
        -------
        tuple
            (res.x, res.cost, residnorm, nan, nan, testMSE, ypred, ymeas, J)
        """
        if np.isscalar(ysigma):
            ysigma = ysigma*np.ones(len(ymeas))
        elif len(ysigma)!=len(ymeas):
            print('InvTB::fit() error: len(ysigma)!=len(ymeas)')
        ysigma = ysigma.reshape(len(ysigma),1)  # ensure it's a Nx1 ndarray not a 1d array
        Dinvsqrt = np.diagflat(1.0/ysigma)  # (data inv covariance mtx)^(-1/2)
        Cinvsqrt = cholesky(inv(self.Cprior)).T  # (model inv covariance mtx)^(-1/2)
        def fun(m):
            # Whitened residuals stacked on the whitened deviation from the prior.
            mlen = m.size
            ypred,J = self.fwd_deriv_code(m)  # m: model params vector, J: derivs matrix
            resids = ypred-ymeas
            resids = np.dot(Dinvsqrt,resids)
            modelfunc = np.dot(Cinvsqrt,np.subtract(m,np.squeeze(self.mprior)))
            modelfunc = modelfunc.reshape(len(modelfunc),1)
            f = np.squeeze(np.concatenate((resids,modelfunc),axis=0))
            return f
        def jac(m):
            mlen = m.size
            ypred,J = self.fwd_deriv_code(m)  # m: model params vector, J: derivs matrix
            # NOTE(review): row-flipping the Jacobian looks suspicious
            # (original comment asked "was this needed?") — confirm.
            J = np.flipud(J)
            J = np.dot(Dinvsqrt,J)
            Jreg = Cinvsqrt
            Jout = np.concatenate((J,Jreg))
            return Jout
        if self.usefindiff:
            jacfn='2-point'
        else:
            jacfn=jac
        if self.verbose:
            verblevel=2
        else:
            verblevel=0
        res = least_squares(fun, np.squeeze(self.minit), jac=jacfn,
            bounds=(self.lb, self.ub), diff_step=self.diff_step, verbose=verblevel,
            max_nfev=self.max_nfev, method='trf', ftol=1e-08, xtol=self.xtol, gtol=1e-08, x_scale=1.0)
        # https://docs.scipy.org/doc/scipy/reference/optimize.html
        # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html
        if mmeas is not None:
            testMSE = cplxMSE(res.x.reshape(len(res.x),1),mmeas)
        else:
            # NOTE(review): npl is presumably an alias of numpy (np.nan);
            # numpy.linalg has no `nan` attribute — confirm the alias.
            testMSE = npl.nan
        ypred,J = self.fwd_deriv_code(res.x.reshape(len(res.x),1))
        resids = ymeas-ypred
        resids = np.dot(Dinvsqrt,resids)
        residnorm = norm(resids)
        #print('resid norm',residnorm)
        #print('maxeig JJ',np.real(np.amax(np.linalg.eigvals(np.dot(J.T,J)))))  # J'J has real eigvals but kept cplx type
        #print('maxeig Cinv',np.amax(np.linalg.eigvals(np.dot(Cinvsqrt.T,Cinvsqrt))))
        if self.showplot:
            f, ax = plt.subplots(1, 2, figsize=(11,4))
            # plot the meas and pred data:
            ax[0].plot(ypred,'r.-')
            ax[0].plot(ymeas,'k.-')
            ax[0].grid()
            ax[0].set_title('Measured (blk) and predicted (red) data')
            # plot the init, true, and final model param vectors:
            ax[1].plot(self.minit,'g.-')
            ax[1].plot(self.mprior,'c')
            ax[1].plot(res.x,'r.--')
            ax[1].plot(mmeas,'k.--')
            ax[1].grid()
            ax[1].set_title('Model vectors (meas=blk, pri=blu, ini=grn, sol=red)')
        return res.x,res.cost,residnorm,np.nan,np.nan,testMSE,ypred,ymeas,J

    def get_hyperparams(self):
        """Return (max_nfev, dmtol)."""
        # Bug fix: __init__ stores dmtol as self.xtol; `self.dmtol` never
        # existed, so the original line raised AttributeError when called.
        return (self.max_nfev, self.xtol)
|
985,870 | c6d33a0c7c54d00f9380ecdbc9cf26fb215609ce | # Train a statistical tagger.
# Mark Amiguous data
# Make a decision tree of each amabigious class
from features import data_or_empty, set_encoder, encode_features,extract_feature
import analytics,corpus
from corpus import load_corpus
import numpy as np
import sys
# Which corpus document to hold out for testing (0, 1 or 2); defaults to 2
# when no command-line argument is supplied, and exits on a non-integer arg.
try:
    TEST = int(sys.argv[1])
except ValueError:
    print("Enter 0,1,2 to specify testing document")
    exit()
except IndexError:
    TEST = 2
class AmbigiousClass:
    """Container for one ambiguity class of the tagger.

    An ambiguity class groups words that share the same set of possible
    tags (e.g. "NN-VB"). It accumulates (context-features, tag) training
    pairs, the set of member words, the per-class label/one-hot encoders
    for the Y side, and the trained classifier.
    """

    def __init__(self, name):
        self.name = name
        self.X, self.Y = [], []
        self.word_list = set()

    def set_encoders(self, le, oh):
        """Store the class-local label and one-hot encoders (Y side only)."""
        self.label_encoder = le
        self.onehot_encoder = oh

    def get_encoder(self):
        # These encoders apply only to Y values; X is encoded with the
        # module-level global encoders.
        return (self.label_encoder, self.onehot_encoder)

    def add_XY(self, X, Y):
        """Append one (features, tag) training pair."""
        self.X.append(X)
        self.Y.append(Y)

    def get_XY(self):
        return (self.X, self.Y)

    def add_word(self, word):
        """Record a word as belonging to this ambiguity class."""
        self.word_list.add(word)

    def get_word(self):
        return self.word_list

    def set_clf(self, clf):
        self.clf = clf

    def get_clf(self):
        return self.clf

    def __str__(self):
        # member words concatenated with no separator (original behavior)
        return "".join(self.word_list)
from sklearn.tree import DecisionTreeClassifier

# Load the training split (TEST selects the held-out document) and the
# per-word tag statistics computed from it.
train = corpus.load_corpus(test = TEST)
statistic = analytics.load_analytics(train)

# For every word, its single most frequent tag in the training data.
heighest_probabilty = {}
for i in statistic:
    heighest_probabilty[i] = max(statistic[i].items(),key=lambda x:x[1])[0]

X_train_raw, Y_train_raw = extract_feature(data=train)
#Global label_encoder to encode X values
global_label_encoder,global_hot_encoder = set_encoder(Y_train_raw)
print("Training Global Classifer ....")
X_train,Y_train = encode_features(X_train_raw,Y_train_raw,global_label_encoder,global_hot_encoder)
# Fallback classifier used when a word has no ambiguity-class model.
global_clf = DecisionTreeClassifier()
global_clf.fit(X_train,Y_train)
print("Completed")
# print(train)

# Identify the ambiguity classes: words with more than one observed tag are
# grouped by the sorted, dash-joined set of their tags (e.g. "NN-VB"), and
# each group collects (context-tags, true-tag) training pairs.
amb_class = {}
for i in train:
    for x,y in enumerate(i):
        #If the word only has one tagging, we don't need a classifier
        if len(statistic[y[0]]) == 1:
            pass
        #If there is an ambiguity, we need a decission tree classifier
        else:
            cls = sorted(statistic[y[0]])
            cls_string = "-".join(cls)
            if cls_string not in amb_class:
                amb_class[cls_string] = AmbigiousClass(cls_string)
            # Features: the labels of the 4 neighbours on each side (the
            # word itself excluded); target: the word's true tag y[1].
            amb_class[cls_string].add_XY(
                (data_or_empty(i, x - 4),
                 data_or_empty(i,x - 3),
                 data_or_empty(i,x - 2),
                 data_or_empty(i, x - 1),
                 data_or_empty(i, x + 1),
                 data_or_empty(i, x + 2),
                 data_or_empty(i, x + 3),
                 data_or_empty(i, x + 4)),
                y[1]
            )
            amb_class[cls_string].add_word(y[0])

amb_classifier = {}
print("Trainning Ambigious Class Classifiers")
for i,j in amb_class.items():
    X_raw ,Y_raw = j.get_XY()
    # print(i,len(X_raw),len(Y_raw), j.get_word())
    # print("*************************************")
    Z = []
    # Per-class encoders for the Y labels; X reuses the global encoders.
    label_encoder,hot_encoder = set_encoder(Y_raw)
    j.set_encoders(label_encoder,hot_encoder)
    #Encoding X and Y using different encodings
    Y = label_encoder.transform(Y_raw)
    Y = hot_encoder.transform(Y.reshape(-1, 1))
    # This is computationally expensive task
    X = np.array([global_label_encoder.transform(i) for i in X_raw])
    # One-hot encode each of the 8 context columns and concatenate them.
    # NOTE(review): the inner loop rebinds `i` (the outer dict-loop key);
    # dict iteration is unaffected, but the shadowing is fragile.
    Z = np.array(global_hot_encoder.transform(X[:, 0].reshape(-1, 1)))
    for i in range(1, len(X.T)):
        Z = np.append(Z, np.array(global_hot_encoder.transform(X[:, i].reshape(-1, 1))), axis=1)
    clf = DecisionTreeClassifier()
    clf.fit(Z,Y)
    j.set_clf(clf)
    #print(Z.shape)
print("Completed")
def get_labels(l, i):
    """Return the label at position ``i`` of ``l``, or the sentinel "EMT"
    when ``i`` falls outside the sentence.

    Bug fix: the original relied on catching IndexError, but Python's
    negative indexing meant that positions before the start of the sentence
    (e.g. i = -2 for the second word's left context) silently returned
    labels from the END of the sentence instead of "EMT".
    """
    if 0 <= i < len(l):
        return l[i]
    return "EMT"
#Here we will classify the text:
def classify_tokenized_sentence(words,itr = 0):
    """Tag a tokenized sentence.

    First assigns each word its most probable training-corpus tag (module
    global ``heighest_probabilty``); unknown words get "UNK". Then runs
    ``itr`` refinement passes in which each ambiguous word is re-tagged by
    its ambiguity-class decision tree using the tags of the 4 context words
    on each side.
    """
    ## Initial Labeling Using Probablistic Tagger
    labeled_string = list()
    for i in words:
        try:
            labeled_string.append(heighest_probabilty[i])
        except KeyError:
            # word never seen in training
            labeled_string.append("UNK")
    # Now Applying the Hybrid Approach
    for i in range(itr):
        # NOTE(review): the inner loop rebinds `i` to the current word; the
        # outer range() iteration is unaffected, but the shadowing is fragile.
        for e, i in enumerate(words):
            # context feature: tags of the 4 neighbours on each side
            X_raw = ((
                get_labels(labeled_string, e - 4),
                get_labels(labeled_string, e - 3),
                get_labels(labeled_string, e - 2),
                get_labels(labeled_string, e - 1),
                get_labels(labeled_string, e + 1),
                get_labels(labeled_string, e + 2),
                get_labels(labeled_string, e + 3),
                get_labels(labeled_string, e + 4),
            ))
            X = global_label_encoder.transform(X_raw)
            X_one_hot = global_hot_encoder.transform(X.reshape(-1, 1))
            X_one_hot = X_one_hot.reshape(1, -1)
            try:
                if len(statistic[i]) == 1:
                    # unambiguous word: keep its only possible tag
                    pass
                else:
                    amb_class_object = amb_class["-".join(sorted(statistic[i].keys()))]
                    clf = amb_class_object.get_clf()
                    # print(X_one_hot.shape)
                    pre = clf.predict(X_one_hot)
                    labeled_string[e] = amb_class_object.get_encoder()[0].inverse_transform([np.argmax(pre)])[0]
            except:
                # NOTE(review): bare except — falls back to the global
                # classifier on ANY failure (unknown word KeyError, encoder
                # ValueError, ...); narrowing the exception types would be safer.
                pre = global_clf.predict(X_one_hot)
                labeled_string[e] = global_label_encoder.inverse_transform([np.argmax(pre)])[0]
    return labeled_string
#Now We Will Do The Testing
test = corpus.load_corpus(test = TEST,last=True)

def _tagging_accuracy(iterations=0):
    """Tag every test sentence with the given number of decision-tree
    refinement passes and return the token-level accuracy (hits / total).

    Factored out of four copy-pasted evaluation loops; behavior unchanged.
    """
    hit = 0
    miss = 0
    for sentence_data in test:
        sentence = [token[0] for token in sentence_data]
        tags_true = [token[1] for token in sentence_data]
        tags_predicted = classify_tokenized_sentence(sentence, iterations)
        for truth, predicted in zip(tags_true, tags_predicted):
            if truth == predicted:
                hit += 1
            else:
                miss += 1
    return hit / (hit + miss)

# Accuracy of the bare probabilistic tagger, then after 1..3 refinement passes.
print("Accuracy for probalistic tagger", _tagging_accuracy(0))
print("Accuracy for 1 iteration of decission tree", _tagging_accuracy(1))
print("Accuracy for 2 iteration of decission tree", _tagging_accuracy(2))
print("Accuracy for 3 iteration of decission tree", _tagging_accuracy(3))
|
985,871 | 78955a1b2751e611aceecdbfa1eff7c77e1b4c8e | from IPython.display import clear_output, Image, display, HTML
import tensorflow as tf
import subprocess
import re
import os
import logging
import ngrok
import urllib.parse
def strip_graph_consts(graph_def, max_const_size=32):
    """
    Strip large constant values from graph_def.

    Returns a copy of ``graph_def`` in which every Const node whose
    tensor_content exceeds ``max_const_size`` bytes is replaced by a short
    placeholder, so the graph can be embedded/visualized cheaply.
    """
    strip_def = tf.GraphDef()
    for n0 in graph_def.node:
        n = strip_def.node.add()
        n.MergeFrom(n0)
        if n.op == 'Const':
            tensor = n.attr['value'].tensor
            size = len(tensor.tensor_content)
            if size > max_const_size:
                # Bug fix: tensor_content is a protobuf `bytes` field;
                # assigning a str raises TypeError on Python 3, so the
                # placeholder must be encoded.
                tensor.tensor_content = ("<stripped %d bytes>" % size).encode()
    return strip_def
def show_graph(graph_def = None, max_const_size=32, height=800):
    """
    Embed a visualization of the Tensorflow Graph inside the jupyter notebook.
    Code from https://stackoverflow.com/a/38192374/995480

    Defaults to the current default graph; large constants are stripped via
    strip_graph_consts before rendering.
    """
    if graph_def is None:
        graph_def = tf.get_default_graph().as_graph_def()
    if hasattr(graph_def, 'as_graph_def'):
        # accept a tf.Graph as well as a GraphDef
        graph_def = graph_def.as_graph_def()
    strip_def = strip_graph_consts(graph_def, max_const_size=max_const_size)
    code = f"""
        <script>
          function load() {{
            document.getElementById("tf-graph").pbtxt = {repr(str(strip_def))};
          }}
        </script>
        <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
        <div style="height: {height}px">
          <tf-graph-basic id="tf-graph"></tf-graph-basic>
        </div>
    """
    # Bug fix: the widget HTML is injected through the <iframe srcdoc="...">
    # attribute, so literal double quotes must be HTML-escaped. The original
    # code.replace('"', '"') was a no-op (mangled from the SO snippet's
    # &quot; replacement), which broke the srcdoc attribute.
    code = code.replace('"', '&quot;')
    iframe = f"""
        <iframe seamless style="width:1200px;height:{height}px;border:0" srcdoc="{code}"></iframe>
    """
    display(HTML(iframe))
class Badge:
    """A shields.io badge renderable in a Jupyter notebook, optionally
    wrapped in a link when ``url`` is given."""

    def __init__(self, subject, status='', color='green', url=None):
        self.url = url
        self.subject = subject
        self.status = status
        self.color = color

    def _repr_html_(self):
        def quote(x):
            # '-' separates the badge fields in the shields.io URL path, so
            # literal dashes must be doubled after percent-encoding.
            return urllib.parse.quote(x).replace('-', '--')

        fields = (quote(self.subject), quote(self.status), quote(self.color))
        img = '<img src="https://img.shields.io/badge/%s-%s-%s.svg">' % fields
        if self.url is None:
            return f'{img}'
        return f'<a href="{self.url}" target="_blank">{img}</a>'
logger = logging.getLogger('tensorboard-server')
class Server:
    """Wraps a TensorBoard subprocess bound to a random local port and
    exposes it publicly through an ngrok HTTP tunnel."""

    # one shared Server per absolute logdir (see Server.of)
    instances = {}

    @staticmethod
    def of(logdir="tensorboard"):
        """Return the shared, non-closeable Server for ``logdir``, creating
        it on first use."""
        logdir = os.path.abspath(logdir)
        if logdir not in Server.instances:
            Server.instances[logdir] = Server(logdir, closeable=False)
        return Server.instances[logdir]

    def __init__(self, logdir="tensorboard", closeable = True):
        self.closeable = closeable
        self.logdir = os.path.abspath(logdir)
        os.makedirs(self.logdir, exist_ok=True)
        logger.debug(f'Starting tensorboard process for {self.logdir}')
        # --port 0 lets tensorboard pick any free port; we parse the actual
        # address from its stderr banner below.
        self.process = subprocess.Popen(["tensorboard", "--logdir", self.logdir, "--host", '0.0.0.0', "--port", "0"], stderr=subprocess.PIPE)
        try:
            self.version = None
            self.host = None
            self.port = None
            self.ngrok = None
            for line in self.process.stderr:
                line = line.decode("utf-8").strip()
                # Fix: raw string — '\d' in a plain literal is an invalid
                # escape sequence (DeprecationWarning, an error in future
                # Python versions).
                match = re.match(r'TensorBoard (.*) at http://([^:]+):(\d+) .*', line)
                if match:
                    self.version = match.group(1)
                    self.host = match.group(2)
                    self.port = match.group(3)
                    break
            if self.port is None or self.version is None:
                raise Exception("tensorboard didn't bind to a local address!?")
            self.ngrok = ngrok.Http(host=self.host, port=self.port)
            self.private_url = f'http://{self.host}:{self.port}'
            # Fix: use the module's named logger (was logging.debug, which
            # bypassed the 'tensorboard-server' logger configured above).
            logger.debug(f'TensorBoard running at {self.public_url}')
        except:
            # Initialization failed: reap the tunnel and child process,
            # then re-raise the original error.
            logger.warning('Initialization error, killing tensorboard process')
            if self.ngrok is not None:
                self.ngrok.close()
            self.process.kill()
            raise

    @property
    def runs(self):
        """Names of the run subdirectories currently in the logdir."""
        return os.listdir(self.logdir)

    @property
    def public_url(self):
        """Public ngrok URL of the running TensorBoard."""
        return self.ngrok.public_url

    def run_url(self, run):
        """Public URL filtered to a single run."""
        return f'{self.public_url}?run={run}'

    def clear(self):
        """Delete every run directory under the logdir."""
        for run in self.runs:
            # Fix: named logger (was logging.debug).
            logger.debug(f'Deleting run {run}')
            subprocess.check_call(["rm", "-Rf", f"{self.logdir}/{run}"])

    def close(self):
        """Shut down the tunnel and tensorboard (no-op for shared instances)."""
        if self.closeable:
            self.ngrok.close()
            self.process.kill()

    def badge(self, run=None):
        """Notebook badge linking to the public TensorBoard (or one run)."""
        if run is None:
            return Badge('tensorboard', 'all', color='yellow', url=self.public_url)
        else:
            return Badge('tensorboard', run, color='green', url=self.run_url(run))

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        self.close()

    def __repr__(self):
        return f'Server(logdir={repr(self.logdir)})'

    def __str__(self):
        return f'Server({self.public_url} -> {repr(self.logdir)})'
|
985,872 | 60d8c90c34bb8a4b31d31df22118ea0be0e43f92 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 10 09:45:07 2018
@author: wfsha
"""
import pandas as pd
import numpy as np
# Load the spot list; 'coordi' is assumed to hold "lon lat,lon lat,..."
# polygon vertex strings — TODO confirm the CSV format.
spotlist = pd.read_csv('C:/Users/wfsha/Desktop/list.csv',names=['id','spot','coordi'])
# NOTE(review): this assigns the numpy function object `np.array` itself to
# every cell of the new column — a placeholder only; cells are overwritten below.
spotlist['co'] = np.array
for li in range(len(spotlist['id'])):
    # NOTE(review): chained indexing assignment (df[col][row] = ...) can
    # trigger pandas' SettingWithCopy behavior; confirm .loc isn't needed.
    spotlist['co'][li] = []
    for i in spotlist['coordi'][li].split(','):
        print(i.split())
        # each comma-separated pair split on whitespace -> [lon, lat] strings
        spotlist['co'][li].append(i.split())
def IsPtInPoly(aLon, aLat, pointList):
    """Ray-casting point-in-polygon test.

    Casts a horizontal ray from (aLon, aLat) toward decreasing longitude and
    counts crossings with the polygon's edges; an odd count means the point
    is inside. ``pointList`` is a sequence of (lon, lat) vertex pairs whose
    values may be numbers or numeric strings.
    """
    n_vertices = len(pointList)
    if n_vertices < 3:
        # fewer than three vertices cannot enclose any area
        return False
    crossings = 0
    for idx in range(n_vertices):
        lon1 = float(pointList[idx][0])
        lat1 = float(pointList[idx][1])
        nxt = (idx + 1) % n_vertices  # wrap the last vertex back to the first
        lon2 = float(pointList[nxt][0])
        lat2 = float(pointList[nxt][1])
        # Does this edge straddle the ray's latitude? The interval is
        # half-open so a vertex exactly on the ray is counted only once.
        straddles = (lat1 <= aLat < lat2) or (lat2 <= aLat < lat1)
        if straddles and abs(lat1 - lat2) > 0:
            # longitude where the edge crosses the ray's latitude
            cross_lon = lon1 - ((lon1 - lon2) * (lat1 - aLat)) / (lat1 - lat2)
            if cross_lon < aLon:
                crossings += 1
    return crossings % 2 != 0
|
985,873 | 1b2bc8b1d204cd8d81278fa248abb56f89df63bb | from django.shortcuts import render, HttpResponse
from .forms import FileUploadFormForm, FileUploadFormModelForm
def index(request):
    """Render the upload form (GET) and persist an uploaded file (POST).

    Uses the ModelForm variant so the file is saved through the model's
    FileField storage; the commented-out handle_file_upload shows the
    manual (non-model) saving approach.
    """
    if request.method == 'POST':
        #
        # This Is The Basic Form Method
        #
        # form = FileUploadFormForm(request.POST, request.FILES)
        form = FileUploadFormModelForm(request.POST, request.FILES)
        if form.is_valid():
            #
            # The Use Of This Function Is Describe Below
            #
            # handle_file_upload(request.FILES['file'])
            form.save()
            return HttpResponse('Successfully Uploaded')
    else:
        form = FileUploadFormModelForm()
    # On an invalid POST, `form` is the bound form carrying validation
    # errors, so re-rendering it here shows them to the user.
    return render(request, 'fileupload_form/form.html', {
        'form': form
    })
#
# This Function Is Used To Upload File In A Specific Location Outside Of Database Models
#
# def handle_file_upload(file):
# FILE_PATH = 'Example.txt'
# FILE_MODE = 'wb+'
# with open(FILE_PATH, FILE_MODE) as destination:
# for chunk in file.chunks():
# destination.write(chunk)
|
985,874 | 1fbee19032fecb8191f0eb99993a7d32a95d61de | from flask import Flask, render_template, request, redirect, session
app = Flask(__name__)
app.secret_key = 'keep it secret, keep it safe, hush hush!'
@app.route('/')
def counter():
    """Render the counter page, bumping the per-session visit counter.

    Every redirect back to '/' from the other routes passes through here,
    so each of those routes implicitly gains one extra increment.
    """
    if 'counter' in session:
        session['counter'] += 1
    else:
        # first visit: start at 0 so the page shows 0, not 1
        session['counter'] = 0
    return render_template("index.html")
@app.route('/destroy')
def destroy():
    """Wipe the whole session (counter included) and return to the page."""
    session.clear()
    return redirect("/")
@app.route('/reset')
def reset():
    """Reset the counter to 0 (the redirect through '/' then shows 1)."""
    if 'counter' in session:
        session['counter'] = 0
    return redirect("/")
@app.route('/plusTwo')
def plusTwo():
    """Increase the counter by 2 overall: +1 here, plus the +1 that
    counter() applies when the redirect lands back on '/'."""
    if 'counter' in session:
        session['counter'] += 1
    return redirect("/")
@app.route('/yourNumber', methods=['POST'])
def yourNumber():
    """Add the submitted number to the counter.

    Adds (n - 1) here because the redirect to '/' increments once more in
    counter(), for a net change of +n. Robustness fix: use session.get so a
    direct POST before any visit to '/' no longer raises KeyError.
    """
    session['counter'] = session.get('counter', 0) + int(request.form['yourNumber']) - 1
    return redirect("/")
if __name__ == "__main__":
app.run(debug=True)
|
985,875 | 1395390cf133c8d3f5089c024567a6f86917608c | from __future__ import print_function
from keras.callbacks import LambdaCallback, ModelCheckpoint,ReduceLROnPlateau
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import Dropout
from keras.layers import LSTM
from keras.layers import RNN
from keras.utils.data_utils import get_file
import numpy as np
import random
import sys
import io, os
from keras.utils import np_utils
def sample(preds, temperature=1.0):
    """Draw one index from the probability array ``preds`` after temperature
    rescaling.

    Lower temperatures sharpen the distribution toward the most likely
    index; higher temperatures flatten it. Returns the sampled index.
    """
    logits = np.log(np.asarray(preds).astype('float64')) / temperature
    scaled = np.exp(logits)
    scaled = scaled / np.sum(scaled)  # renormalize after rescaling
    drawn = np.random.multinomial(1, scaled, 1)
    return np.argmax(drawn)
def on_epoch_end(epoch, logs):
    """Keras LambdaCallback hook: print sample text generated by the model.

    Invoked at the end of each epoch; reads the module globals ``text``,
    ``maxlen``, ``chars``, ``char_indices``, ``indices_char`` and ``model``.
    """
    print()
    print('----- Generating text after Epoch: %d' % epoch)
    # random seed passage from the training text
    start_index = random.randint(0, len(text) - maxlen - 1)
    for diversity in [0.2, 0.5, 1.0]:
        print('----- diversity:', diversity)
        generated = ''
        sentence = text[start_index: start_index + maxlen]
        generated += sentence
        print('----- Generating with seed: "' + sentence + '"')
        sys.stdout.write(generated)
        for i in range(500):
            # one-hot encode the current context window
            x_pred = np.zeros((1, maxlen, len(chars)))
            for t, char in enumerate(sentence):
                x_pred[0, t, char_indices[char]] = 1.
            preds = model.predict(x_pred, verbose=0)[0]
            next_index = sample(preds, diversity)
            next_char = indices_char[next_index]
            generated += next_char
            # slide the window one character forward
            sentence = sentence[1:] + next_char
            sys.stdout.write(next_char)
            sys.stdout.flush()
        print()
print_callback = LambdaCallback(on_epoch_end=on_epoch_end)
def generate_text(length, diversity):
    """Generate ``length`` characters from the trained model.

    Seeds from a random slice of the training ``text`` and samples each
    next character at the given ``diversity`` (temperature). Returns the
    seed plus the generated continuation. Reads the module globals
    ``text``, ``maxlen``, ``chars``, ``char_indices``, ``indices_char``
    and ``model``.
    """
    # Get random starting text
    start_index = random.randint(0, len(text) - maxlen - 1)
    generated = ''
    sentence = text[start_index: start_index + maxlen]
    generated += sentence
    for i in range(length):
        # one-hot encode the current context window
        x_pred = np.zeros((1, maxlen, len(chars)))
        for t, char in enumerate(sentence):
            x_pred[0, t, char_indices[char]] = 1.
        preds = model.predict(x_pred, verbose=0)[0].astype('float64')
        next_index = sample(preds, diversity)
        next_char = indices_char[next_index]
        generated += next_char
        sentence = sentence[1:] + next_char
    return generated
# ---- data preparation ----
text = open('input_data.txt', 'r').read()
chars = sorted(list(set(text)))
print('total chars: ', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))

maxlen = 100  # characters of context per training sample
step = 3      # stride between successive samples
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
    sentences.append(text[i: i + maxlen])
    next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))

# One-hot encode inputs and targets. Fix: `np.bool` was a deprecated alias
# for the builtin `bool` and was removed in NumPy 1.24; use `bool` directly
# (identical dtype, no behavior change).
x = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)
y = np.zeros((len(sentences), len(chars)), dtype=bool)
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        x[i, t, char_indices[char]] = 1
    y[i, char_indices[next_chars[i]]] = 1

# ---- model: single LSTM layer + softmax over the character vocabulary ----
model = Sequential()
model.add(LSTM(256, input_shape=(maxlen, len(chars))))
model.add(Dropout(0.1))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')

# checkpoint the best-by-loss weights; shrink the LR when loss plateaus
filepath="new_input-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.2,
                              patience=1, min_lr=0.001)
callbacks = [print_callback, checkpoint, reduce_lr]
model.fit(x, y, batch_size=128, epochs=100, callbacks=callbacks)
print(generate_text(500, 0.5)) |
985,876 | 122391fb521322df1d6d61329cbaf66dff2c3cd1 | import csv
from unittest import TestCase
from DB.schema_definition import Author, Post, Claim_Tweet_Connection, DB, Claim
from commons.commons import convert_str_to_unicode_datetime
from preprocessing_tools.fake_news_word_classifier.fake_news_word_classifier import FakeNewsClassifier
class TestFakeNewsClassifier(TestCase):
    def setUp(self):
        """Create a fresh DB fixture before each test."""
        self._db = DB()
        self._db.setUp()
        # per-test containers; presumably consumed by the _add_* helper
        # methods defined elsewhere in this class — TODO confirm
        self._posts = []
        self._author = None
    def tearDown(self):
        """Release the DB session so the next test gets a clean connection."""
        self._db.session.close()
def test_classify_by_dictionary_1_FN_1_FP(self):
self._add_author(u'author')
self._add_claim(u'post0', u'the claim', "2017-06-10 05:00:00", u'TRUE')
self._add_post(u"post1", u"1 liar bad word joke", "2017-06-12 05:00:00")
self._add_post(u"post2", u"no bad words untrue at all liar", "2017-06-12 05:00:00")
self._add_post(u"post3", u"no joke bad words at all laugh", "2017-06-12 05:00:00")
self._add_post(u"post4", u" liar no didnt actually bad words at all", "2017-06-12 05:00:00")
self._add_claim_tweet_connection(u"post0", u"post1")
self._add_claim_tweet_connection(u"post0", u"post2")
self._add_claim_tweet_connection(u"post0", u"post3")
self._add_claim_tweet_connection(u"post0", u"post4")
self._add_author(u'author_guid')
self._add_claim(u'post5', u'the claim', "2017-06-10 05:00:00", u'FALSE')
self._add_post(u"post6", u"1 bad word at all", "2017-06-12 05:00:00")
self._add_post(u"post7", u"no bad words at all", "2017-06-12 05:00:00")
self._add_post(u"post8", u"no bad words at all", "2017-06-12 05:00:00")
self._add_post(u"post9", u"no bad words at all", "2017-06-12 05:00:00")
self._add_claim_tweet_connection(u"post5", u"post6")
self._add_claim_tweet_connection(u"post5", u"post7")
self._add_claim_tweet_connection(u"post5", u"post8")
self._add_claim_tweet_connection(u"post5", u"post9")
self._db.session.commit()
self.fake_news_feature_classifier = FakeNewsClassifier(self._db)
self.fake_news_feature_classifier.setUp()
self.fake_news_feature_classifier.execute()
output_file_path = self.fake_news_feature_classifier._output_path + '/fake_news_classifier_results.csv'
output_file = open(output_file_path, 'r')
reader = csv.DictReader(output_file)
output_data = reader.next()
self.assertAlmostEqual(float(output_data['FN (think good but bad)']), 1)
self.assertAlmostEqual(float(output_data['FP (think bad but good)']), 1)
self.assertAlmostEqual(float(output_data['accuracy']), 0.0)
self.assertAlmostEqual(float(output_data['AUC']), 0.0)
def test_classify_by_dictionary_1_FN_1_FP_and_ignore_1(self):
self._add_author(u'author')
self._add_claim(u'post0', u'the claim', "2017-06-10 05:00:00", u'TRUE')
self._add_post(u"post1", u"1 liar bad word joke", "2017-06-12 05:00:00")
self._add_post(u"post2", u"no bad words untrue at all liar", "2017-06-12 05:00:00")
self._add_post(u"post3", u"no joke bad words at all laugh", "2017-06-12 05:00:00")
self._add_post(u"post4", u" liar no didnt actually bad words at all", "2017-06-12 05:00:00")
self._add_claim_tweet_connection(u"post0", u"post1")
self._add_claim_tweet_connection(u"post0", u"post2")
self._add_claim_tweet_connection(u"post0", u"post3")
self._add_claim_tweet_connection(u"post0", u"post4")
self._add_author(u'author_guid')
self._add_claim(u'post5', u'the claim', "2017-06-10 05:00:00", u'FALSE')
self._add_post(u"post6", u"1 bad word at all", "2017-06-12 05:00:00")
self._add_post(u"post7", u"no bad words at all", "2017-06-12 05:00:00")
self._add_post(u"post8", u"no bad words at all", "2017-06-12 05:00:00")
self._add_post(u"post9", u"no bad words at all", "2017-06-12 05:00:00")
self._add_claim_tweet_connection(u"post5", u"post6")
self._add_claim_tweet_connection(u"post5", u"post7")
self._add_claim_tweet_connection(u"post5", u"post8")
self._add_claim_tweet_connection(u"post5", u"post9")
self._add_claim(u'post10', u'the claim', "2017-06-10 05:00:00", u'unknown')
self._add_post(u"post11", u"1 liar bad word joke", "2017-06-12 05:00:00")
self._add_post(u"post12", u"no bad words untrue at all liar", "2017-06-12 05:00:00")
self._add_post(u"post13", u"no joke bad words at all laugh", "2017-06-12 05:00:00")
self._add_post(u"post14", u" liar no didnt actually bad words at all", "2017-06-12 05:00:00")
self._add_claim_tweet_connection(u"post10", u"post11")
self._add_claim_tweet_connection(u"post10", u"post12")
self._add_claim_tweet_connection(u"post10", u"post13")
self._add_claim_tweet_connection(u"post10", u"post14")
self._db.session.commit()
self.fake_news_feature_classifier = FakeNewsClassifier(self._db)
self.fake_news_feature_classifier.setUp()
self.fake_news_feature_classifier.execute()
output_file_path = self.fake_news_feature_classifier._output_path + '/fake_news_classifier_results.csv'
output_file = open(output_file_path, 'r')
reader = csv.DictReader(output_file)
output_data = reader.next()
self.assertAlmostEqual(float(output_data['FN (think good but bad)']), 1)
self.assertAlmostEqual(float(output_data['FP (think bad but good)']), 1)
self.assertAlmostEqual(float(output_data['accuracy']), 0.0)
self.assertAlmostEqual(float(output_data['AUC']), 0.0)
def test_classify_by_dictionary_0_FN_0_FP(self):
self._add_author(u'author')
self._add_claim(u'post0', u'the claim', "2017-06-10 05:00:00", u'FALSE')
self._add_post(u"post1", u"1 liar bad word joke", "2017-06-12 05:00:00")
self._add_post(u"post2", u"no bad words untrue at all liar", "2017-06-12 05:00:00")
self._add_post(u"post3", u"no joke bad words at all laugh", "2017-06-12 05:00:00")
self._add_post(u"post4", u" liar no didnt actually bad words at all", "2017-06-12 05:00:00")
self._add_claim_tweet_connection(u"post0", u"post1")
self._add_claim_tweet_connection(u"post0", u"post2")
self._add_claim_tweet_connection(u"post0", u"post3")
self._add_claim_tweet_connection(u"post0", u"post4")
self._add_author(u'author_guid')
self._add_claim(u'post5', u'the claim', "2017-06-10 05:00:00", u'TRUE')
self._add_post(u"post6", u"1 bad word at all", "2017-06-12 05:00:00")
self._add_post(u"post7", u"no bad words at all", "2017-06-12 05:00:00")
self._add_post(u"post8", u"no bad words at all", "2017-06-12 05:00:00")
self._add_post(u"post9", u"no bad words at all", "2017-06-12 05:00:00")
self._add_claim_tweet_connection(u"post5", u"post6")
self._add_claim_tweet_connection(u"post5", u"post7")
self._add_claim_tweet_connection(u"post5", u"post8")
self._add_claim_tweet_connection(u"post5", u"post9")
self._db.session.commit()
self.fake_news_feature_classifier = FakeNewsClassifier(self._db)
self.fake_news_feature_classifier.setUp()
self.fake_news_feature_classifier.execute()
output_file_path = self.fake_news_feature_classifier._output_path + '/fake_news_classifier_results.csv'
output_file = open(output_file_path, 'r')
reader = csv.DictReader(output_file)
output_data = reader.next()
self.assertAlmostEqual(float(output_data['FN (think good but bad)']), 0)
self.assertAlmostEqual(float(output_data['FP (think bad but good)']), 0)
self.assertAlmostEqual(float(output_data['accuracy']), 1.0)
self.assertAlmostEqual(float(output_data['AUC']), 1.0)
def test_classify_by_dictionary_1_FN_0_FP_3_claims(self):
    """Three claims: one FALSE with bad-word posts, one TRUE with clean posts,
    and one FALSE with clean posts (the false negative). Expect 1 FN, 0 FP,
    accuracy 2/3 and AUC 0.75."""
    self._add_author(u'author')
    self._add_claim(u'post0', u'the claim', "2017-06-10 05:00:00", u'FALSE')
    self._add_post(u"post1", u"1 liar bad word joke", "2017-06-12 05:00:00")
    self._add_post(u"post2", u"no bad words untrue at all liar", "2017-06-12 05:00:00")
    self._add_post(u"post3", u"no joke bad words at all laugh", "2017-06-12 05:00:00")
    self._add_post(u"post4", u" liar no didnt actually bad words at all", "2017-06-12 05:00:00")
    self._add_claim_tweet_connection(u"post0", u"post1")
    self._add_claim_tweet_connection(u"post0", u"post2")
    self._add_claim_tweet_connection(u"post0", u"post3")
    self._add_claim_tweet_connection(u"post0", u"post4")
    self._add_author(u'author_guid')
    self._add_claim(u'post5', u'the claim', "2017-06-10 05:00:00", u'TRUE')
    self._add_post(u"post6", u"1 bad word at all", "2017-06-12 05:00:00")
    self._add_post(u"post7", u"no bad words at all", "2017-06-12 05:00:00")
    self._add_post(u"post8", u"no bad words at all", "2017-06-12 05:00:00")
    self._add_post(u"post9", u"no bad words at all", "2017-06-12 05:00:00")
    self._add_claim_tweet_connection(u"post5", u"post6")
    self._add_claim_tweet_connection(u"post5", u"post7")
    self._add_claim_tweet_connection(u"post5", u"post8")
    self._add_claim_tweet_connection(u"post5", u"post9")
    self._add_author(u'author_guid')
    self._add_claim(u'post10', u'the claim', "2017-06-10 05:00:00", u'FALSE')
    self._add_post(u"post11", u"1 bad word at all", "2017-06-12 05:00:00")
    self._add_post(u"post12", u"no bad words at all", "2017-06-12 05:00:00")
    self._add_post(u"post13", u"no bad words at all", "2017-06-12 05:00:00")
    self._add_post(u"post14", u"no bad words at all", "2017-06-12 05:00:00")
    self._add_claim_tweet_connection(u"post10", u"post11")
    self._add_claim_tweet_connection(u"post10", u"post12")
    self._add_claim_tweet_connection(u"post10", u"post13")
    self._add_claim_tweet_connection(u"post10", u"post14")
    self._db.session.commit()
    self.fake_news_feature_classifier = FakeNewsClassifier(self._db)
    self.fake_news_feature_classifier.setUp()
    self.fake_news_feature_classifier.execute()
    output_file_path = self.fake_news_feature_classifier._output_path + '/fake_news_classifier_results.csv'
    # 'with' closes the handle (the original leaked it); next(reader) replaces
    # the Python-2-only reader.next(), keeping the test 2/3 compatible.
    with open(output_file_path, 'r') as output_file:
        reader = csv.DictReader(output_file)
        output_data = next(reader)
    self.assertAlmostEqual(float(output_data['FN (think good but bad)']), 1)
    self.assertAlmostEqual(float(output_data['FP (think bad but good)']), 0)
    self.assertAlmostEqual(float(output_data['accuracy']), 0.666666, places=4)
    self.assertAlmostEqual(float(output_data['AUC']), 0.75)
def test_classify_by_dictionary_0_FN_1_FP_3_claims(self):
    """Three claims: one FALSE with bad-word posts, one TRUE with clean posts,
    and one TRUE with bad-word posts (the false positive). Expect 0 FN, 1 FP,
    accuracy 2/3 and AUC 0.75."""
    self._add_author(u'author')
    self._add_claim(u'post0', u'the claim', "2017-06-10 05:00:00", u'FALSE')
    self._add_post(u"post1", u"1 liar bad word joke", "2017-06-12 05:00:00")
    self._add_post(u"post2", u"no bad words untrue at all liar", "2017-06-12 05:00:00")
    self._add_post(u"post3", u"no joke bad words at all laugh", "2017-06-12 05:00:00")
    self._add_post(u"post4", u" liar no didnt actually bad words at all", "2017-06-12 05:00:00")
    self._add_claim_tweet_connection(u"post0", u"post1")
    self._add_claim_tweet_connection(u"post0", u"post2")
    self._add_claim_tweet_connection(u"post0", u"post3")
    self._add_claim_tweet_connection(u"post0", u"post4")
    self._add_claim(u'post5', u'the claim', "2017-06-10 05:00:00", u'TRUE')
    self._add_post(u"post6", u"1 bad word at all", "2017-06-12 05:00:00")
    self._add_post(u"post7", u"no bad words at all", "2017-06-12 05:00:00")
    self._add_post(u"post8", u"no bad words at all", "2017-06-12 05:00:00")
    self._add_post(u"post9", u"no bad words at all", "2017-06-12 05:00:00")
    self._add_claim_tweet_connection(u"post5", u"post6")
    self._add_claim_tweet_connection(u"post5", u"post7")
    self._add_claim_tweet_connection(u"post5", u"post8")
    self._add_claim_tweet_connection(u"post5", u"post9")
    self._add_claim(u'post10', u'the claim', "2017-06-10 05:00:00", u'TRUE')
    self._add_post(u"post11", u"1 liar bad word joke", "2017-06-12 05:00:00")
    self._add_post(u"post12", u"no bad words untrue at all liar", "2017-06-12 05:00:00")
    self._add_post(u"post13", u"no joke bad words at all laugh", "2017-06-12 05:00:00")
    self._add_post(u"post14", u" liar no didnt actually bad words at all", "2017-06-12 05:00:00")
    self._add_claim_tweet_connection(u"post10", u"post11")
    self._add_claim_tweet_connection(u"post10", u"post12")
    self._add_claim_tweet_connection(u"post10", u"post13")
    self._add_claim_tweet_connection(u"post10", u"post14")
    self._db.session.commit()
    self.fake_news_feature_classifier = FakeNewsClassifier(self._db)
    self.fake_news_feature_classifier.setUp()
    self.fake_news_feature_classifier.execute()
    output_file_path = self.fake_news_feature_classifier._output_path + '/fake_news_classifier_results.csv'
    # 'with' closes the handle (the original leaked it); next(reader) replaces
    # the Python-2-only reader.next(), keeping the test 2/3 compatible.
    with open(output_file_path, 'r') as output_file:
        reader = csv.DictReader(output_file)
        output_data = next(reader)
    self.assertAlmostEqual(float(output_data['FN (think good but bad)']), 0)
    self.assertAlmostEqual(float(output_data['FP (think bad but good)']), 1)
    self.assertAlmostEqual(float(output_data['accuracy']), 0.666666, places=4)
    self.assertAlmostEqual(float(output_data['AUC']), 0.75)
def _add_author(self, author_guid):
    """Create and persist a test Author; remember it in self._author so
    subsequent _add_post calls attribute posts to it."""
    author = Author()
    author.author_guid = author_guid
    author.author_full_name = u'test author'
    author.author_screen_name = author_guid
    author.name = u'test'
    author.domain = u'tests'
    author.statuses_count = 0
    author.created_at = u"2017-06-14 05:00:00"
    self._db.add_author(author)
    self._author = author
def _add_post(self, title, content, date_str, domain=u'Microblog', post_type=None):
    """Create and persist a Post authored by the most recently added author.

    The title doubles as post_id and guid, so it must be unique per test.
    """
    post = Post()
    post.author = self._author.author_guid
    post.author_guid = self._author.author_guid
    post.content = content
    post.title = title
    post.domain = domain
    post.post_id = title
    post.guid = post.post_id
    post.date = convert_str_to_unicode_datetime(date_str)
    post.created_at = post.date
    post.post_type = post_type
    self._db.addPost(post)
    self._posts.append(post)
def _add_claim_tweet_connection(self, claim_id, post_id):
    """Link an existing claim to an existing post via a Claim_Tweet_Connection row."""
    connection = Claim_Tweet_Connection()
    connection.claim_id = claim_id
    connection.post_id = post_id
    self._db.add_claim_connections([connection])
    pass
def _add_claim(self, claim_id, content, date_str, post_type=None):
    """Create and persist a Claim; post_type carries the verdict ('TRUE'/'FALSE')."""
    claim = Claim()
    claim.claim_id = claim_id
    claim.verdict = post_type
    claim.title = claim_id
    claim.description = content
    claim.verdict_date = convert_str_to_unicode_datetime(date_str)
    claim.url = u"claim url"
    # NOTE(review): the claim is stored through addPost() -- presumably the DB
    # wrapper exposes one generic insert; confirm this is intended.
    self._db.addPost(claim)
|
class Patent(object):
    """Plain data holder for a patent-medicine record; all fields default to
    empty/None and are expected to be filled in by the caller."""
    patent_id = ""
    patent_name = ""
    medicine_components = None
    patent_form = None
    patent_major = None
    patent_functions = None

    def __init__(self):
        pass
985,878 | 4b30a29c67cbdf7b76f10f040b88aeff8f713317 | from .BeamOrientation import BeamOrientation
class BeamOrientationArray(list[BeamOrientation]):
    # NOTE(review): subscripting the builtin list as a base class requires
    # Python 3.9+; on older interpreters this raises at import time.
    def findAt(self):
        """Stub -- presumably should locate a BeamOrientation by position; TODO implement."""
        pass
|
985,879 | cce0686386eac7ba0dcbbde5098e88a6bca555bb | import sys
import os
import re
backlog = []
def main(argv):
    """Walk a photo tree and clean up sync-duplicate files named
    '<name>.<ext>_NNN'.  A duplicate with the same size as the original is
    deleted; otherwise it is renamed to '<name>_NNN.<ext>' for later review.

    argv is accepted for interface compatibility but unused.
    """
    source = r'f:\Family Fotky'
    print('Source path: {}'.format(source))
    # Matches names ending in '.<ext>_NNN' (underscore + three digits), e.g. 'img.jpg_001'.
    extension = re.compile(r'^.*\..+_\d{3}$')
    for root, dirs, files in os.walk(source):
        duplicates = [duplicate for duplicate in files if extension.match(duplicate.lower())]
        for duplicate in duplicates:
            duplicate_path = os.path.join(root, duplicate)
            orig_path = os.path.join(root, duplicate[:-4])  # strip the '_NNN' suffix
            if os.path.getsize(duplicate_path) == os.path.getsize(orig_path):
                print('Safe to delete {}. Original is the same'.format(duplicate_path))
                os.remove(duplicate_path)
            else:
                # Rebuild the name as '<name>_NNN.<ext>'.  os.path.splitext replaces
                # the fixed [-8:] slicing, which silently produced wrong names for
                # extensions that are not exactly three characters (e.g. '.jpeg').
                stem = duplicate[:-4]                  # '<name>.<ext>'
                name_part, ext = os.path.splitext(stem)
                new_duplicate_name = name_part + duplicate[-4:] + ext
                print('Will rename {} to {}. Original is different.'.format(duplicate, new_duplicate_name))
                os.rename(duplicate_path, os.path.join(root, new_duplicate_name))
if __name__ == '__main__':
main(sys.argv)
|
985,880 | 8624b1722587a070d614add986464a915fc12676 | import pytest
import aiohttp
from aiohttp import web
async def test_async_with_session(loop):
    """A ClientSession used as an async context manager is closed on exit."""
    # NOTE(review): the loop= argument is a legacy aiohttp 2.x API -- verify pinned version.
    async with aiohttp.ClientSession(loop=loop) as session:
        pass
    assert session.closed
async def test_close_resp_on_error_async_with_session(loop, test_server):
    """A response whose body read fails must not return its connection to the pool."""
    async def handler(request):
        return web.Response()
    app = web.Application(loop=loop)
    app.router.add_get('/', handler)
    server = await test_server(app)
    async with aiohttp.ClientSession(loop=loop) as session:
        with pytest.raises(RuntimeError):
            async with session.get(server.make_url('/')) as resp:
                # Poison the payload so the read below raises.
                resp.content.set_exception(RuntimeError())
                await resp.read()
        # The errored connection must be dropped, not pooled for reuse.
        assert len(session._connector._conns) == 0
async def test_release_resp_on_normal_exit_from_cm(loop, test_server):
    """A fully-read response releases its connection back to the pool."""
    async def handler(request):
        return web.Response()
    app = web.Application(loop=loop)
    app.router.add_get('/', handler)
    server = await test_server(app)
    async with aiohttp.ClientSession(loop=loop) as session:
        async with session.get(server.make_url('/')) as resp:
            await resp.read()
        # One healthy connection should now be pooled.
        assert len(session._connector._conns) == 1
async def test_non_close_detached_session_on_error_cm(loop, test_server):
    """An error inside the request context manager must not close the
    implicitly created session."""
    async def handler(request):
        return web.Response()
    app = web.Application(loop=loop)
    app.router.add_get('/', handler)
    server = await test_server(app)
    # NOTE(review): module-level aiohttp.get() was removed in aiohttp 3.x -- verify version.
    cm = aiohttp.get(server.make_url('/'), loop=loop)
    session = cm._session
    assert not session.closed
    with pytest.raises(RuntimeError):
        async with cm as resp:
            resp.content.set_exception(RuntimeError())
            await resp.read()
    assert not session.closed
async def test_close_detached_session_on_non_existing_addr(loop):
    """A connection failure must close the implicitly created session."""
    cm = aiohttp.get('http://non-existing.example.com', loop=loop)
    session = cm._session
    assert not session.closed
    with pytest.raises(Exception):
        await cm
    assert session.closed
|
985,881 | 907a7df5d8652c164acf5c3d2d5d3e4b090f9876 | from benchmark_reader import Benchmark
from benchmark_reader import select_files
# where to find the corpus
path_to_corpus = '../benchmark/original/test'
# initialise Benchmark object
b = Benchmark()
# collect xml files
files = select_files(path_to_corpus)
# load files to Benchmark
b.fill_benchmark(files)
# Gather the distinct entities (subjects and objects), then print them one
# per line with underscores replaced by spaces.
entities = set()
for entry in b.entries:
    # NOTE(review): only the FIRST modified triple of each entry is used;
    # entries with several triples contribute just that one -- confirm intended.
    entity = entry.modifiedtripleset.triples[0].s
    entities.add(entity)
    entity = entry.modifiedtripleset.triples[0].o
    entities.add(entity)
#print(entities)
for entity in entities:
    print(entity.replace('_',' '))
|
985,882 | 348f8bfcec9dc4e5e32091cd4efd03a935655533 | def print_timesTable() :
for i in range(2,10):
print("===",i,"===")
for j in range(1,10):
print(i,"x",j,"=",i*j)
print_timesTable()
#https://wikidocs.net/24
# Demo of the core list mutation/query methods.
num = [1,2,3,4]
num.append(5)        # add at the end
print(num)
num.insert(0,0)      # insert before index 0
print(num)
num.extend([6,7])    # append every element of another iterable
print(num)
# 'yello' spelling kept as-is: it is runtime data the demo prints.
color = ['red','blue','yello','red']
print(color.index('red'))    # first index of 'red'
print(color.index('red',1))  # first index of 'red' at/after position 1
print(color.count('red'))
print(color.pop())           # removes and returns the last element
color.sort()
print(color)
color.remove('blue')         # removes the first occurrence
print(color)
985,883 | f2360f39c0cb2df54d51629ab0eeee067dd86cd8 | import json
import geojson
from django.test import TestCase
from django.urls import reverse
def randomFeature():
    """Return a GeoJSON Feature dict with a random Polygon geometry and empty properties."""
    return {
        "type":"Feature",
        "geometry": geojson.utils.generate_random("Polygon"),
        "properties":{}
    }
def returnData(inner):
    """Decorator for test-client calls: reduce the response to
    (status_code, parsed_json).

    The body is decoded with json.loads; if it is not valid JSON (empty body,
    HTML error page, ...) data is None instead.
    """
    def wrapper(*args, **kwargs):
        r = inner(*args, **kwargs)
        try:
            data = json.loads(r.content)
        # json.JSONDecodeError subclasses ValueError; TypeError covers a
        # non-string body.  The original bare 'except' also swallowed
        # KeyboardInterrupt/SystemExit.
        except (TypeError, ValueError):
            data = None
        return r.status_code, data
    return wrapper
def returnContent(inner):
    """Decorator for test-client calls: reduce the response to
    (status_code, raw content)."""
    def wrapper(*args, **kwargs):
        response = inner(*args, **kwargs)
        return response.status_code, response.content
    return wrapper
class ApiTestCase(TestCase):
    """
    Terse testing of the API through call-methods.

    Each helper wraps one REST endpoint; the decorators reduce the Django
    test-client response to (status_code, json) or (status_code, raw bytes).
    """
    @returnContent
    def shapes_post(self,**kwargs):
        return self.client.post(reverse("shape-list"),kwargs,content_type="application/json")

    @returnContent
    def shapes_put(self,pk,**kwargs):
        return self.client.put(reverse("shape-detail",args=(pk,)),kwargs,content_type="application/json")

    @returnContent
    def shapes_delete(self,pk):
        return self.client.delete(reverse("shape-detail",args=(pk,)))

    @returnData
    def project_status(self,pk):
        return self.client.get(reverse("projectstatus",kwargs={"project":pk}))

    @returnData
    def project_assigned(self):
        return self.client.get(reverse("assigned"))

    @returnContent
    def nonanswer(self,project):
        return self.client.post(reverse("nonanswer",kwargs={"project":project}))

    @returnData
    def shapes_get(self,project=None):
        url = reverse("shape-list")
        if project:
            # NOTE(review): the query parameter is named 'country' although the
            # argument is called 'project' -- confirm the API expects 'country'.
            url += f"?country={project}"
        return self.client.get(url)

    @returnData
    def shapes_detail(self,pk):
        return self.client.get(reverse("shape-detail",args=(pk,)))
|
985,884 | a41632de1f510d0577eeae1d3b3c8482b00d6914 | import csv
from datetime import datetime
import io
import logging
import os
import re
import traceback
from utils.event_source_map import event_source_map
class CsvFormatter(logging.Formatter):
    """logging.Formatter that renders each record as one CSV row:
    Time, Level, Event Source, Message, Exc Info."""

    def __init__(self):
        # NOTE(review): super().__init__() is never called; acceptable only
        # because format() below bypasses the base-class machinery -- confirm.
        self.output = io.StringIO()
        self.writer = csv.writer(self.output, quoting=csv.QUOTE_ALL)
        # Header row written once, at formatter creation time.
        self.writer.writerow(["Time", "Level", "Event Source", "Message", "Exc Info"])
        self.now = datetime.now().strftime("%m-%d-%Y")  # date only, fixed at startup
        self.event_source_map = event_source_map

    def format(self, record):
        try:
            exc_type, exc_value, tb = record.exc_info
            # Flatten the traceback into a single CSV-safe field (strip newlines/commas).
            tb = " ".join(traceback.format_tb(tb)).replace("\n",'').replace(",",'').strip()
            exc_info = f"{exc_type} {exc_value} {tb}"
        except (AttributeError, TypeError) as e:
            # Every caller is expected to log with exc_info=True.
            # (Typo 'becuase' is in the runtime message; left untouched here.)
            raise ValueError(f"{e} raised by logging formatter, likely becuase a logger didn't set exc_info=True")
        record_name = record.name
        record_message = record.msg  # raw msg: %-style args are NOT interpolated
        if record_name == '__main__':
            # Recover the real source name from the message text.
            # NOTE(review): offset 35 assumes a fixed message prefix -- verify.
            record_name = record_message[35:record_message.index(":")]
        record_name = record_name.replace("events.","")
        event_source = self.event_source_map[record_name]
        self.writer.writerow(
            [self.now, record.levelname, event_source, record_message, exc_info])
        # Return the rendered row and reset the shared buffer for the next record.
        data = self.output.getvalue()
        self.output.truncate(0)
        self.output.seek(0)
        return data.strip()
|
985,885 | efc61574c543e3aafb0b1ea30714204602c3561f | import pygame
from defs import *
import numpy as np
import random
class Plataform():
    """Paddle controlled by a neural net ('brain').  Catches balls for points
    and accumulates a fitness score used by the neuro-evolution loop."""

    def __init__(self, gameDisplay, brain):
        self.gameDisplay = gameDisplay;
        self.x = PLATAFORM_POSITION[0]
        self.y = PLATAFORM_POSITION[1]
        self.plat_color = self.random_color()
        self.rect = self.draw()
        self.points = 0
        self.brain = brain            # object exposing predict(input) -> (left, right)
        self.fitness = 0
        self.is_alive = True
        self.last_catched_ball = None # avoids double-counting the same ball

    def move_left(self):
        # Move left, clamped at the window edge.
        speed = PLATAFORM_SPEED
        if(self.x - speed < 0):
            self.x = 0
        else:
            self.x = self.x - speed

    def move_right(self):
        # Move right, clamped so the paddle stays fully on screen.
        speed = PLATAFORM_SPEED
        if(self.x + speed > WINDOW_WIDTH - PLATAFORM[0]):
            self.x = WINDOW_WIDTH - PLATAFORM[0]
        else:
            self.x = self.x + speed

    def draw(self):
        # Draw at the current position and cache the resulting rect.
        self.rect = pygame.draw.rect(
            self.gameDisplay,
            self.plat_color,
            pygame.Rect(self.x, self.y, PLATAFORM[0], PLATAFORM[1]))
        return self.rect

    def random_color(self):
        # Random opaque RGB tuple.
        rgbl=[random.randint(0,255),random.randint(0,255),random.randint(0,255)]
        return tuple(rgbl)

    def update(self):
        # Redraw only while still in play.
        if(self.lost_game() == False):
            self.draw()

    def lost_game(self):
        return self.is_alive == False;

    def kill(self):
        self.is_alive = False

    def reset(self):
        # Restore the initial state for a new round/generation.
        self.is_alive = True
        self.last_catched_ball = None
        self.points = 0
        self.fitness = 0

    def get_points(self):
        return self.points

    def get_rect(self):
        return self.rect

    def try_to_catch(self, ball):
        # Already credited for this ball; nothing to do.
        if(self.last_catched_ball == ball):
            return
        if(self._catched_the_ball(ball)):
            self.last_catched_ball = ball
            self.points += 1
            return
        # Missed the ball: record final fitness and leave the game.
        self.set_fitness(ball)
        self.kill()

    def set_fitness(self, ball):
        # Points dominate; horizontal closeness to the missed ball breaks ties.
        ball_points = self.points * WINDOW_WIDTH
        if(ball.center_x > self.x):
            self.fitness = ball_points + (WINDOW_WIDTH - (ball.center_x - self.x))
        else:
            self.fitness = ball_points + (WINDOW_WIDTH - (self.x - ball.center_x))

    def _catched_the_ball(self, ball):
        return pygame.Rect(self.x, self.y, PLATAFORM[0], PLATAFORM[1]).colliderect(ball.get_rect())

    def predict(self, ballGenerator):
        # Net input: horizontal offset paddle-center -> ball, and the ball height.
        ball = ballGenerator.ball
        input = np.array([[(self.x + PLATAFORM[0] / 2) - ball.center_x],[ ball.center_y]])
        left, right = self.brain.predict(input)
        if(left > right):
            self.move_left()
        else:
            self.move_right()
985,886 | 9fc23e62ee714c089fea23707f42141fff6e8e67 | import pyautogui
# Switch to the previously focused application ('command' suggests the macOS
# app switcher -- TODO confirm target OS), then send a single 'z' keystroke.
pyautogui.hotkey('command', 'tab')
pyautogui.press('z')
985,887 | 2bd763ab20585d60e215fd00dad0fbb2617ba619 | import logging
import re
import certifi
from ssl import create_default_context
from elasticsearch import Elasticsearch
from copy import copy
from sokannonser import settings
from sokannonser.repository.ontology import Ontology
log = logging.getLogger(__name__)
OP_NONE = ''
OP_PLUS = '+'
OP_MINUS = '-'
class TextToConcept(object):
    """Maps a free-text query to ontology concepts (skills, occupations,
    traits, locations), splitting each group by the +/- operator that may
    prefix a term in the query."""

    # Ontology concept-type keys (Swedish: competence, occupation, ability, place).
    COMPETENCE_KEY = 'KOMPETENS'
    OCCUPATION_KEY = 'YRKE'
    TRAIT_KEY = 'FORMAGA'
    LOCATION_KEY = 'PLATS'
    REMOVED_TAG = '<removed>'

    def __init__(self, ontologyhost='localhost', ontologyport=9200,
                 ontologyindex='narvalontology', ontologyuser=None, ontologypwd=None):
        log.info('Creating TextToConcept')
        self.client = self.create_elastic_client(ontologyhost, ontologyport, ontologyuser,
                                                 ontologypwd)
        self.ontologyindex = ontologyindex
        self.ontology = None  # built lazily by get_ontology()
        if settings.ES_HOST != 'localhost':
            # Cache ontology directly unless it's a local call (tests or docker build)
            self.get_ontology()

    def get_ontology(self):
        """Lazily build and cache the Ontology instance."""
        if self.ontology is None:
            log.info('Creating Ontology, ontologyindex: %s' % self.ontologyindex)
            self.ontology = Ontology(client=self.client,
                                     index=self.ontologyindex,
                                     annons_index=settings.ES_INDEX,
                                     concept_type=None,
                                     include_misspelled=True)
            log.info('Done creating Ontology, ontologyindex: %s' % self.ontologyindex)
        return self.ontology

    @staticmethod
    def create_elastic_client(host, port, user, pwd):
        """Build an Elasticsearch client; HTTPS + basic auth when credentials are given."""
        log.info('Creating ontology elasticclient, host: %s, port: %s, user: %s' % (
            host, port, user))
        if user and pwd:
            context = create_default_context(cafile=certifi.where())
            client = Elasticsearch([host], port=port,
                                   use_ssl=True, scheme='https',
                                   ssl_context=context,
                                   http_auth=(user, pwd))
        else:
            client = Elasticsearch([{'host': host, 'port': port}])
        return client

    # Matches a '+' or '-' operator at the start of the text or of any word.
    RE_PLUS_MINUS = re.compile(r"((^| )[+-])", re.UNICODE)

    def clean_plus_minus(self, text):
        """Strip leading +/- operators so flashText can match the bare words."""
        return self.RE_PLUS_MINUS.sub(" ", text).strip()

    def text_to_concepts(self, text):
        """Return a dict of concept lists keyed by type and operator
        ('skill', 'skill_must', 'skill_must_not', ...)."""
        # Note: Remove eventual '+' and '-' in every freetext query word since flashText is configured
        # so it can't find words starting with minus/hyphen.
        searchtext = self.clean_plus_minus(text)
        text_lower = text.lower()
        ontology_concepts_orig = self.get_ontology().get_concepts(searchtext, concept_type=None,
                                                                  span_info=True)
        ontology_concepts = [c[0] for c in ontology_concepts_orig]
        # print(ontology_concepts)
        # Trailing blank lets '<op><term> ' match a term at the end of the query.
        text_lower_plus_blank_end = text_lower + ' '
        for concept in ontology_concepts:
            # print(concept)
            # Recover the operator by looking the term up in the ORIGINAL text.
            concept_term = concept['term']
            if '-' + concept_term + ' ' in text_lower_plus_blank_end:
                concept['operator'] = OP_MINUS
            elif '+' + concept_term + ' ' in text_lower_plus_blank_end:
                concept['operator'] = OP_PLUS
            else:
                concept['operator'] = OP_NONE
        # Partition the concepts into type x operator buckets.
        skills = [c for c in ontology_concepts
                  if self.filter_concepts(c, self.COMPETENCE_KEY, OP_NONE)]
        occupations = [c for c in ontology_concepts
                       if self.filter_concepts(c, self.OCCUPATION_KEY, OP_NONE)]
        traits = [c for c in ontology_concepts
                  if self.filter_concepts(c, self.TRAIT_KEY, OP_NONE)]
        locations = [c for c in ontology_concepts
                     if self.filter_concepts(c, self.LOCATION_KEY, OP_NONE)]
        skills_must = [c for c in ontology_concepts
                       if self.filter_concepts(c, self.COMPETENCE_KEY, OP_PLUS)]
        occupations_must = [c for c in ontology_concepts
                            if self.filter_concepts(c, self.OCCUPATION_KEY, OP_PLUS)]
        traits_must = [c for c in ontology_concepts
                       if self.filter_concepts(c, self.TRAIT_KEY, OP_PLUS)]
        locations_must = [c for c in ontology_concepts
                          if self.filter_concepts(c, self.LOCATION_KEY, OP_PLUS)]
        skills_must_not = [c for c in ontology_concepts
                           if self.filter_concepts(c, self.COMPETENCE_KEY, OP_MINUS)]
        occupations_must_not = [c for c in ontology_concepts
                                if self.filter_concepts(c, self.OCCUPATION_KEY, OP_MINUS)]
        traits_must_not = [c for c in ontology_concepts
                           if self.filter_concepts(c, self.TRAIT_KEY, OP_MINUS)]
        locations_must_not = [c for c in ontology_concepts
                              if self.filter_concepts(c, self.LOCATION_KEY, OP_MINUS)]
        result = {'skill': skills,
                  'occupation': occupations,
                  'trait': traits,
                  'location': locations,
                  'skill_must': skills_must,
                  'occupation_must': occupations_must,
                  'trait_must': traits_must,
                  'location_must': locations_must,
                  'skill_must_not': skills_must_not,
                  'occupation_must_not': occupations_must_not,
                  'trait_must_not': traits_must_not,
                  'location_must_not': locations_must_not}
        return result

    @staticmethod
    def filter_concepts(concept, concept_type, operator):
        """True when the concept has both the requested type and operator."""
        if concept['type'] == concept_type and concept['operator'] == operator:
            return True
        else:
            return False
|
985,888 | a7bdcfe35ee2782ea12e033812e242f8b22e38dc | import re
from util import *
import wl_data as wl
import matcher
help_command_color = '1;37'
def command_format(cmd):
    """Render *cmd* as the user should type it: '(gdb) wl<cmd>' when running
    under GDB, '$ <cmd>' in a plain shell."""
    if check_gdb():
        return '(gdb) ' + color(help_command_color, 'wl' + cmd)
    else:
        return '$ ' + color(help_command_color, cmd)
class Command:
    """A single named CLI command: its argument hint, handler and help text."""

    def __init__(self, name, arg, func, help_text):
        self.name = name    # full lower-case command name
        self.arg = arg      # argument placeholder shown in help, or None
        self.func = func    # handler called with the raw argument string
        self.help = help_text

    def matches(self, command):
        """True when *command* is a case-insensitive prefix of this command's name."""
        return self.name.startswith(command.lower())
class Session:
    """Interactive debugger session for Wayland message traces: tracks
    per-connection message history, filters/prints messages as they arrive,
    stops on breakpoint matches and dispatches user commands."""

    def __init__(self, display_matcher, stop_matcher, output):
        assert display_matcher
        assert stop_matcher
        self.current_connection_id = None
        self.connection_list = []
        self.connections = {}  # connection id -> wl.Connection
        self.commands = [
            Command('help', '[COMMAND]', self.help_command,
                'Show this help message, or get help for a specific command'),
            Command('show', '[MATCHER] [~ COUNT]', self.show_command,
                'Show messages matching given matcher (or show all messages, if no matcher provided)\n' +
                'Append "~ COUNT" to show at most the last COUNT messages that match\n' +
                'See ' + command_format('help matcher') + ' for matcher syntax'),
            Command('filter', '[MATCHER]', self.filter_command,
                'Show the current output filter matcher, or add a new one\n' +
                'See ' + command_format('help matcher') + ' for matcher syntax'),
            Command('breakpoint', '[MATCHER]', self.break_point_command,
                'Show the current breakpoint matcher, or add a new one\n' +
                'Use an inverse matcher (^) to disable existing breakpoints\n' +
                'See ' + command_format('help matcher') + ' for matcher syntax'),
            Command('matcher', '[MATCHER]', self.matcher_command,
                'Parse a matcher, and show it unsimplified'),
            Command('connection', '[CONNECTION]', self.connection_command,
                'Show Wayland connections, or switch to another connection'),
            Command('resume', None, self.continue_command,
                'Resume processing events\n' +
                'In GDB you can also use the continue gdb command'),
            Command('quit', None, self.quit_command,
                'Quit the program'),
        ]
        self.is_stopped = False
        self.should_quit = False
        self.display_matcher = display_matcher
        self.stop_matcher = stop_matcher
        self.out = output

    def set_stopped(self, val):
        self.is_stopped = val

    def stopped(self):
        return self.is_stopped

    def quit(self):
        return self.should_quit

    def message(self, connection_id, message):
        """Record a message; print it if the display filter matches and stop
        the session if the breakpoint matcher matches."""
        if message == None:
            return
        self.is_stopped = False
        if not connection_id in self.connections:
            self.out.warn('connection_id ' + repr(connection_id) + ' never explicitly created')
            self.open_connection(connection_id)
        self.connections[connection_id].message(message)
        # Only messages of the currently selected connection are shown.
        if connection_id == self.current_connection_id:
            if self.display_matcher.matches(message):
                message.show(self.out)
            if self.stop_matcher.matches(message):
                self.out.show(color('1;37', '    Stopped at ') + str(message).strip())
                self.is_stopped = True

    def open_connection(self, connection_id):
        """Register a new connection; the first one becomes current."""
        self.close_connection(connection_id)
        if not self.connections:
            self.current_connection_id = connection_id
            self.out.show(color('1;32', 'First connection ' + repr(connection_id)))
        else:
            self.out.show(color('1;32', 'New connection ' + repr(connection_id)))
        self.connections[connection_id] = wl.Connection()
        self.connection_list.append(self.connections[connection_id])

    def close_connection(self, connection_id):
        # No-op if the id is unknown; connection_list intentionally keeps the entry.
        if connection_id in self.connections:
            del self.connections[connection_id]
            self.out.show(color('1;31', 'Closed connection ' + repr(connection_id)))

    def show_messages(self, matcher, cap=None):
        """Print all recorded messages of the current connection that match."""
        self.out.show('Messages that match ' + str(matcher) + ':')
        matching, matched, didnt_match, not_searched = self._get_matching(matcher, cap)
        # NOTE(review): 'matching' is a reversed() iterator and is always truthy,
        # so this branch is dead code; also 'self.messages' does not exist
        # (messages live on the per-connection objects) -- the assert below
        # would raise AttributeError if the branch were ever reached.
        if not matching:
            if not self.connections:
                self.out.show(' ╰╴ No messages yet')
            else:
                assert didnt_match == len(self.messages)
                self.out.show(' ╰╴ None of the ' + color('1;31', str(didnt_match)) + ' messages so far')
        else:
            for message in matching:
                message.show(self.out)
            self.out.show(
                '(' +
                color(('1;32' if matched > 0 else '37'), str(matched)) + ' matched, ' +
                color(('1;31' if didnt_match > 0 else '37'), str(didnt_match)) + ' didn\'t' +
                (', ' + color(('37'), str(not_searched)) + ' not checked' if not_searched != 0 else '') +
                ')')

    def _get_matching(self, matcher, cap=None):
        """Scan the current connection's messages newest-first, collecting up
        to *cap* matches; returns (matches-in-order, matched, didnt_match,
        not_searched) counts."""
        if cap == 0:
            cap = None
        didnt_match = 0
        acc = []
        messages = self.connections[self.current_connection_id].messages
        for message in reversed(messages):
            if matcher.matches(message):
                acc.append(message)
                if cap and len(acc) >= cap:
                    break
            else:
                didnt_match += 1
        return (reversed(acc), len(acc), didnt_match, len(messages) - len(acc) - didnt_match)

    def command(self, command):
        """Parse 'name [argument]' and dispatch to the matching Command."""
        assert isinstance(command, str)
        command = command.strip()
        # Split off the first word only; the remainder is the raw argument.
        args = re.split('\s', command, 1)
        if len(args) == 0:
            return False
        cmd = args[0].strip()
        arg = None if len(args) < 2 else args[1].strip()
        if cmd == '':
            assert not arg
            self.out.error('No command specified')
            cmd = 'help'
        if cmd == 'w':  # in case they use GDB style commands when not in GDB
            return self.command(arg)
        cmd = self._get_command(cmd)
        if cmd:
            self.out.log('Got ' + cmd.name + ' command' + (' with \'' + arg + '\'' if arg else ''))
            cmd.func(arg)

    def _get_command(self, command):
        """Resolve a (possibly abbreviated) command name to a unique Command or None."""
        found = []
        for c in self.commands:
            if c.name.startswith(command):
                found.append(c)
        if len(found) == 1:
            return found[0]
        else:
            if len(found) > 1:
                self.out.error('\'' + command + '\' could refer to multiple commands: ' + ', '.join(c.name for c in found))
            else:
                self.out.error('Unknown command \'' + command + '\'')
            return None

    def help_command(self, arg):
        """Show general help, help for one command, or matcher help."""
        if arg:
            if arg.startswith('wl'):
                arg = arg[2:].strip()
            if arg == 'matcher':
                import matcher
                matcher.print_help()
                return
            else:
                cmd = self._get_command(arg)
                if cmd:
                    start = command_format(cmd.name) + ': '
                    body = cmd.help.replace('\n', '\n' + ' ' * len(no_color(start)))
                    self.out.show(start + body)
                    return
        self.out.show('Usage: ' + command_format('<COMMAND> [ARGUMENT]'))
        self.out.show('Commands can be abbreviated (down to just the first unique letter)')
        self.out.show('Help with matcher syntax: ' + command_format('help matcher'))
        self.out.show('Commands:')
        for c in self.commands:
            s = c.name
            if c.arg:
                s += ' ' + c.arg
            self.out.show('  ' + command_format(s))

    # Old can be None
    def parse_and_join(self, new_unparsed, old):
        """Parse a matcher string and AND/OR-join it onto *old*; on a parse
        error report it and return *old* unchanged."""
        try:
            parsed = matcher.parse(new_unparsed)
            if old:
                return matcher.join(parsed, old).simplify()
            else:
                return parsed.simplify()
        except RuntimeError as e:
            self.out.error('Failed to parse "' + new_unparsed + '":\n    ' + str(e))
            return old

    def filter_command(self, arg):
        """Show or extend the output filter matcher."""
        if arg:
            self.display_matcher = self.parse_and_join(arg, self.display_matcher)
            self.out.show('Only showing messages that match ' + str(self.display_matcher))
        else:
            self.out.show('Output filter: ' + str(self.display_matcher))

    def break_point_command(self, arg):
        """Show or extend the breakpoint matcher."""
        if arg:
            self.stop_matcher = self.parse_and_join(arg, self.stop_matcher)
            self.out.show('Breaking on messages that match: ' + str(self.stop_matcher))
        else:
            self.out.show('Breakpoint matcher: ' + str(self.stop_matcher))

    def matcher_command(self, arg):
        """Debug aid: show a matcher unsimplified, simplified and round-tripped."""
        if arg:
            try:
                parsed = matcher.parse(arg)
                unsimplified_str = str(parsed)
                self.out.show('Unsimplified: ' + unsimplified_str)
                self.out.show('  Simplified: ' + str(parsed.simplify()))
                self.out.show('    Reparsed: ' + str(matcher.parse(unsimplified_str).simplify()))
            except RuntimeError as e:
                self.out.error('Failed to parse "' + arg + '":\n    ' + str(e))
        else:
            self.out.show('No matcher to parse')

    def show_command(self, arg):
        """Show recorded messages; optional '~ COUNT' caps the result."""
        cap = None
        if arg:
            args = arg.split('~')
            if len(args) == 2:
                try:
                    cap = int(args[1])
                except ValueError:
                    self.out.error('Expected number after \'~\', got \'' + args[1] + '\'')
                    return
            m = self.parse_and_join(args[0], None)
            if not m:
                return
        else:
            m = matcher.always
        self.show_messages(m, cap)

    def connection_command(self, arg):
        """List connections, or switch the current one."""
        if arg:
            arg = no_color(arg)
            if arg in self.connections:
                self.current_connection_id = arg
                self.out.show('Switched to connection ' + repr(arg))
            else:
                self.out.error(repr(arg) + ' is not a known connection')
        for k, v in self.connections.items():
            if k == self.current_connection_id:
                name = color('1;37', ' > ' + k)
            else:
                name = color('37', '   ' + k)
            self.out.show(name + ' (' + str(len(v.messages)) + ' messages)')

    def continue_command(self, arg):
        self.is_stopped = False
        self.out.log('Continuing...')

    def quit_command(self, arg):
        self.should_quit = True
# This module is a library; refuse to run as a script.
if __name__ == '__main__':
    print('File meant to be imported, not run')
    exit(1)
|
985,889 | da47948c1c1a03fc4d03b0d07c4da62381fd56a2 | import pandas as pd
# Reads per-recipe ingredient sheets from an ODS workbook, computes each
# recipe's nutrition per 100g from the 'Données nutritionnelles' lookup sheet,
# and writes everything back with a 'Total' summary sheet.
sheets = []
with pd.ExcelFile("/home/ccalleri/Documents/Charlotte/AutomtisationInfosNutritionelles/données nutritionnelles.ods") as xls:
    print(xls.sheet_names)
    sheets = xls.sheet_names
    dfs = [pd.read_excel(xls, sheet_name, engine="odf") for sheet_name in xls.sheet_names]
    donnees_nutritionnelles = pd.read_excel(xls, sheet_name="Données nutritionnelles", engine="odf")
ingredients_by_recettes_ordered = []
total_nutrition_by_recette = {
    'Énergie (kJ)' : [],
    'Energie (kcal)' : [],
    'Matières grasses (g)': [],
    'dont acides gras saturés (g)' : [],
    'Glucides (g)' : [],
    'dont sucres (g)' : [],
    'Protéines (g)' : [],
    'Sel (g)' : []
}
# NOTE(review): xls.sheet_names is used after the 'with' block -- assumes
# pandas keeps the attribute available once the file is closed; confirm.
for df, sheet in zip(dfs, xls.sheet_names) :
    if sheet not in('Données nutritionnelles', 'Total') :
        # Ingredients listed by decreasing mass, as required on labels.
        df = df.sort_values(by=["masse (g)"], ascending=False)
        ingredients = ','.join(df["Ingrédients"])
        ingredients_by_recettes_ordered.append(ingredients)
        df["Liste des ingrédients"]= ""
        # NOTE(review): .loc[[0]] selects the row whose ORIGINAL index label is 0,
        # not necessarily the first row after sort_values -- confirm intended.
        df.loc[[0],"Liste des ingrédients"] = ingredients
        total_nutrition = {
            'Énergie (kJ)' : 0,
            'Energie (kcal)' : 0,
            'Matières grasses (g)': 0,
            'dont acides gras saturés (g)' : 0,
            'Glucides (g)' : 0,
            'dont sucres (g)' : 0,
            'Protéines (g)' : 0,
            'Sel (g)' : 0
        }
        # Pour chaque ingredient et pour chaque masse associée
        # Multiplier l'ensemble des caracteristique nutritionnelles de l'ingredient par la quantité d'ingrédient dans 100g de produit
        # ajouter l'ensemble au caracteristiques nutritionelles de la recette"""
        for ingredient,masse in zip(df.Ingrédients, df['masse (g)']) :
            ingredient_nutrition = donnees_nutritionnelles[donnees_nutritionnelles['Pour 100g'] == ingredient]
            if ingredient_nutrition.empty :
                print(f"Ingredient not found: {ingredient} in: {sheet}")
            else :
                for carac in total_nutrition :
                    proportion = masse / df['masse (g)'].sum()
                    total_nutrition[carac] += ingredient_nutrition.iloc[0][carac] * proportion
        for carac in total_nutrition_by_recette :
            total_nutrition_by_recette[carac].append(total_nutrition[carac])
total_df = pd.DataFrame(
    {'Recette': xls.sheet_names[1:-1],
     'Ingredients ordonnés' : ingredients_by_recettes_ordered
    } | total_nutrition_by_recette  # dictionnary merge python >3.9
)
writer = pd.ExcelWriter("/home/ccalleri/Documents/Charlotte/AutomtisationInfosNutritionelles/données nutritionnelles.ods", engine='odf')
for df,sheet in zip(dfs,sheets):
    if sheet != 'Total' :
        df.to_excel(writer, sheet_name=sheet, index=False)
total_df.to_excel(writer, sheet_name='Total', index=False)
# NOTE(review): ExcelWriter.save() was removed in pandas 2.x; a
# 'with pd.ExcelWriter(...)' block is the supported form -- verify pandas version.
writer.save()
writer.close()
|
985,890 | fd3a23714a0e3899e21fb275ffa8099277d73615 | class Solution(object):
def slowestKey(self, releaseTimes, keysPressed):
"""
:type releaseTimes: List[int]
:type keysPressed: str
:rtype: str
"""
prev = 0
output = (0, '')
for key,time in zip(keysPressed, releaseTimes):
output = max(output, (time-prev, key))
prev = time
return output[1]
|
985,891 | c5ddd9c7dabd38ed20f047443e24ccbc207389ca | # -*- coding: utf-8 -*-
'''
Created on Nov 7, 2010
@author: morten
'''
import unittest
from Output.TableOutput.TableOutput import TableOutput
#from Datatypes.ActivityData import ActivityData
from testpackage.Utilities.TestdataSupport.TableOutput import * #@UnusedWildImport
# test data
class Test(unittest.TestCase):
def testConstruction(self):
    ''' TableOutput : constructs the TableOutput class '''
    # Smoke test: constructing must not raise.
    TableOutput( IterableObject )
    pass
def testGetTextile(self):
    ''' TableOutput : test building the textile table '''
    TO = TableOutput( IterableObject )
    Result = TO.GetTextileTable()
    # Expected fixture comes from testpackage.Utilities.TestdataSupport.
    self.assertEqual( Result, ObjectAsTextile )
def testGetTextileNoHeader(self):
''' TableOutput : test building the textile table without headers '''
TO = TableOutput( IterableObject, IncludeHeader=False )
Result = TO.GetTextileTable()
self.assertEqual( Result, ObjectAsTextileNoHeader )
def testGetTextileWithCSums(self):
''' TableOutput : test building the textile table with column sums'''
TO = TableOutput( IterableObject, IncludeColumnSums=True )
Result = TO.GetTextileTable()
self.assertEqual( Result, ObjectAsTextileColumnSum )
def testGetTextileWithRSums(self):
''' TableOutput : test building the textile table with Row sums'''
TO = TableOutput( IterableObject, IncludeRowSums=True )
Result = TO.GetTextileTable()
self.assertEqual( Result, ObjectAsTextileRowSum )
def testGetTextileWithRAndCSums(self):
''' TableOutput : test building the textile table with Row and column sums'''
TO = TableOutput( IterableObject, IncludeRowSums=True, IncludeColumnSums=True )
Result = TO.GetTextileTable()
self.assertEqual( Result, ObjectAsTextileRowAndColumnSum )
def testGetTextileWithPreparation(self):
''' TableOutput : test building the textile table with Row and column sums and preparation'''
TO = TableOutput( IterableObjectEasy, IncludeRowSums=True, IncludeColumnSums=True,
IncludePreperation=True )
Result = TO.GetTextileTable( StartWeek=1, EndWeek=3 )
self.assertEqual( Result, ObjectAsTextilePreparation )
# TODO: auto extraction of weeks would be nice
# def testExtractWeeksFromData(self):
# ''' TableOutput : test using weeks from ItObject '''
# TO = TableOutput( IterableObject, AutoWeeks=True )
# Result = TO.GetTextileTable()
# self.assertEqual( Result, ObjectAsTextile )
# self.assertEqual( TO.GetWeeks(), WeeksInObject )
def testGetHtml(self):
''' TableOutput : test the output converted to HTML '''
TO = TableOutput( IterableObject )
HTML = TO.GetHtmlTable()
self.assertEqual( HTML, ObjectAsHtml )
def testGetTextileWithExtra(self):
''' TableOutput : test building the textile table with extra iterator '''
TO = TableOutput( IterableObject, ItObjectExtra=IterableObjectExtra1 )
Result = TO.GetTextileTable()
self.assertEqual( Result, ObjectAsTextileExtra1 )
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    # Run the whole TableOutput suite when this file is executed directly.
    unittest.main()
|
985,892 | 8c8a2c7707f8a15794ed8b521a1b4857d9a07f2a | class PeakFinder:
def __init__(self, threshold):
self.threshold = threshold
self.last = 0
self.name = f'nrm_peak{threshold}'
def is_peak(self, value):
difference = value - self.last
previous = self.last
self.last = value
if difference > (self.threshold * previous):
return True
return False
|
985,893 | 9e6a4908fcff92a103985a83abc057aa7a8af3be | """
You have to create different functions for Sam's college of cartoon. Please find the
functions list below -
• Give me a random cartoon character: - Function 1
    • This function should take N arguments, where N is not fixed and ranges from 0 to
      many. This function should return a random character from the N arguments.
    • For example: if the arguments are "Dora", "Shin Chan", "Poke mon" etc., this
      function should return any one of the above characters (e.g. "Dora"), chosen at random.
    • If the argument length for the function is 0, then this function should return False
      (boolean) as output.
• Swap the cartoon character: - Function 2
    • This function should call Function 1 (above) and if Function 1 returns False, then this
      function should also return False.
    • Else, get the character and swap the cases of its letters (upper case to lower case
      and vice versa).
    • For example: if Function 1 gives you "Dora", then the output should be "dORA".
    • Return the swapped output as the result.
• Multiply the swap: - Function 3
    • This function should take 2 arguments: the first one is cartoon_character and the second
      one is multiplier. If the user does not specify a multiplier value it should default to 3;
      else, if the user specified any value, take that value into account.
    • Multiply the cartoon_character (first argument) by the multiplier value given.
    • Example: if cartoon_character is "Dora" and multiplier is 5, then DoraDoraDoraDoraDora
      should be the output.
• Main function: - Function 4
    • Create a function with the name main().
    • Call Function 2; if it is not returning False, pass the output of Function 2 as the first
      parameter to Function 3, get the output from F3 and print it.
    • If the call to Function 2 gives False, print the message "Oops! No cartoon selected".
"""
import random
#selects a random Character name
def catroon_character(*N):
    """Return one of the given cartoon names, chosen uniformly at random.

    Returns False (per the assignment spec) when called with no arguments.
    The previous check `N == None` could never be true -- *N is always a
    tuple -- so zero arguments crashed random.choice with IndexError
    instead of returning False.
    """
    if not N:
        return False
    return random.choice(N)
#gets the character_name and reverse it and also change it 2 upper -> lower ; lower -> upper
def swap_character():
    """Pick a random character and return a (name, case-swapped name) tuple.

    Returns False when Function 1 returns False (no character available).
    Per the assignment spec, "Dora" must become "dORA": only the letter
    case is swapped.  The previous implementation also reversed the string
    (result[::-1]), which the spec does not ask for.  The tuple return is
    kept unchanged so main() can keep indexing [0] and [1].
    """
    result = catroon_character("Shin Chan","Dora","PokeMon","Naruto Uzumaki")
    if result == False:
        return False
    return result, result.swapcase()
#multiply the character_name by given times
def multiply_the_swap(swaped_character, multiplier=3):
    """Repeat the given character name `multiplier` times (default 3)."""
    return multiplier * swaped_character
#swaped_char will be a list {character_name,its swaped_name}
def main():
    """Print the multiplied character name and its swapped form, or an error.

    swaped_character is a (name, swapped_name) tuple from swap_character(),
    or False when no character was selected.
    """
    swaped_character = swap_character()
    # Test the stored result -- the original called swap_character() a second
    # time here, drawing a different random character than the one printed.
    if swaped_character != False:
        print(multiply_the_swap(swaped_character[0], 5))
        print("Its Swaped value: ", swaped_character[1])
    else:
        # Fixed message: the original printed a garbled string with a stray
        # 'the message "' prefix.
        print("Oops! No cartoon selected")
N = main() |
985,894 | 0a99291e6e994b39f605ca4a34fb686638eea9ae | import sys
from konlpy.tag import Komoran
# Morpheme-frequency counter: takes a Korean sentence as argv[1], tags it with
# Komoran, run-length counts identical (token, POS) pairs in the sorted list,
# and prints the counts (most frequent first) ASCII-escaped for the terminal.
komoran = Komoran()
sentences = sys.argv[1]
#sentences = '사과, 감자, 이것은 테스트입니다'
#s = sentences.encode('utf8')
morphs = komoran.pos(sentences) #pos: Every mophs
#morphs = komoran.nouns(sentences) # nouns: N
morphs.sort()
i=0
k=0
# nat[k] = [count, morph]: one row per distinct morph, filled below.
nat = [[0]*2 for _ in range(len(morphs))]
while i<len(morphs)-1:
    nat[k][1]=morphs[i] #nat[k][1]=morphs
    nat[k][0]+=1 #nat[k][0]=cnt
    if morphs[i]!=morphs[i+1]:
        k+=1
    i+=1
# NOTE(review): the loop stops at len(morphs)-1, so the final morph in the
# sorted list is never added to its run's count.
nat.sort(reverse=True)
i=0
while i<len(morphs): #del[0,0]
    # Trim the unused [0, 0] placeholder rows, which sort to the tail.
    if nat[i][0]==0:
        del nat[i:len(nat)]
        break
    i+=1
diet = str(nat)
i=0
while i<len(diet):
    # NOTE(review): this condition is always true -- `'['or']'or...` evaluates
    # to the first non-empty literal -- and str.strip returns a new string that
    # is discarded, so this loop has no effect on `diet`.
    if diet[i]=='['or']'or'('or')':
        diet.strip(diet[i])
        #i-=1
    i+=1
s=str(nat).encode('ascii', 'xmlcharrefreplace')
print(s)
#while i < len(nat):
#    print(nat[i], nat[i+1], nat[i+2], nat[i+3])
#    i+=4
|
985,895 | 00f92d04ad58a2fa45ff40b00e2a71ead2a26048 | from pyalgotrade import dataseries
from pyalgotrade import technical
# An EventWindow is responsible for making calculations using a window of values.
class Accumulator(technical.EventWindow):
    """Event window whose value is the sum of the samples in the window."""

    def getValue(self):
        # No result until the window holds its full complement of values.
        if not self.windowFull():
            return None
        return self.getValues().sum()
# Build a sequence based DataSeries.
seqDS = dataseries.SequenceDataSeries()
# Wrap it with a filter that will get fed as new values get added to the underlying DataSeries.
accum = technical.EventBasedFilter(seqDS, Accumulator(3))
# Put in some values.
for i in range(0, 50):
    seqDS.append(i)
# Get some values.
print(accum[0]) # Not enough values yet.
print(accum[1]) # Not enough values yet.
print(accum[2]) # Ok, now we should have at least 3 values (0 + 1 + 2).
print(accum[3])
# Get the last value, which should be equal to 49 + 48 + 47.
print(accum[-1])
|
985,896 | 701838cf9766eeb7bae2ec249037e3da65d8c587 | from distance_functions.handle_numbers import find_numbers
import roman
import re
def get_physical_information(physical_description: str):
    """Parse a bibliographic physical-description string.

    Returns a tuple (pages, is_multiple_volume, volume_nr):
    - pages: first roman-numeral page count extracted via find_numbers/roman
      (0 when none was found or the multi-volume path was taken),
    - is_multiple_volume: 1 when the description indicates several volumes,
    - volume_nr: detected number of volumes (capped heuristically at < 60).

    Changes vs. the original: regex patterns are raw strings (non-raw '\\d+'
    emits SyntaxWarning on modern Python) and the candidate-number
    comprehension is computed once instead of twice; behavior is unchanged.
    """
    pages, volume_nr, is_multiple_volume = [], 1, 0
    description_splitted = physical_description.split('. -')
    if len(description_splitted) > 1:
        # Each ':'-bearing segment usually describes one physical volume.
        multi_volume_indication = [string for string in description_splitted if ':' in string]
        volume_nr = len(multi_volume_indication)
        if volume_nr:
            is_multiple_volume = 1
        if not volume_nr:
            description_splitted = [description_splitted[0]]
    if len(description_splitted) == 1:
        description = description_splitted[0].split(':')[0].split(';')[0]
        # Explicit "vol." / "Bd." marker: read the volume count directly.
        if re.findall(r'(?:^|\W)[vV][.oO]', description) or re.findall(r'(?:^|\W)[bB][.dD]', description):
            is_multiple_volume = 1
            candidate_numbers = [int(number) for number in re.findall(r'\d+', description)[:2] if int(number) < 60]
            volume_nr = max(candidate_numbers) if candidate_numbers else 0
        else:
            pages = [[roman.fromRoman(number) for number in number_list[:1]] for number_list in find_numbers(description)][1]
    return pages[0] if pages else 0, is_multiple_volume, volume_nr
|
985,897 | 866981f75ceb97525cf33b198cb66a7e3e99b454 | from django.shortcuts import render
from django.http import HttpResponse,HttpResponseRedirect
from .models import Client,Manager
from information.models import *
from django.contrib import auth
# Create your views here.
# def re(response):
# return response
def index(request):
    # Login view.  GET renders the login form with placeholder labels; POST
    # matches the submitted credentials against the Client and Manager tables
    # and redirects clients to /mainpage and managers to /managerpage,
    # recording identity in cookies.
    # NOTE(review): passwords are stored/compared in plaintext and lookup
    # iterates every row -- consider hashed passwords and .filter()/.get().
    if request.method == "GET":
        username = '用户名'
        passwords = '密码'
        values = '/'
        return render(request,'login/login.html',{'username':username,'passwords':passwords,'values':values})
    else:
        username = request.POST.get('username', '')
        passwords = request.POST.get('passwords', '')
        isc = False  # True when the matched account is a Client
        user = ''
        # Linear scan over all clients for a username match.
        for c in Client.objects.all():
            if c.cname == username:
                user = c
                isc = True
        if user == '':
            # No client matched: fall back to manager accounts.
            for m in Manager.objects.all():
                if m.mname == username:
                    user = m
        if user =='':
            # Unknown username: re-render the login form with defaults.
            passwords = '密码'
            values = '/'
            return render(request,'login/login.html', {'username':username,'passwords':passwords,'values':values})
        elif isc:
            if user.cpasswords == passwords:
                response = HttpResponseRedirect('/mainpage')
                response.set_cookie('username',username)
                response.set_cookie('state', 'client')
                # response.set_cookie('isentertainment', 1)
                return response
                # re(response)
                # return render(request,'managerpage/managerpage.html',{'isentertainment':1})
            else:
                # Wrong client password: surface the error via the placeholder.
                passwords = '密码错误'
                values = username
                username = '用户名'
                return render(request,'login/login.html',
                              {'username':username,'passwords':passwords,'values':values})
        else:
            if user.mpasswords == passwords:
                manager = Manager.objects.get(mname=username)
                house = ManagerHouse.objects.get(manager=manager.pk).house
                category = house.hcategory
                # Managers of entertainment venues get an extra flag cookie.
                isentertainment = 1 if category == '娱乐场所' else 0
                response = HttpResponseRedirect('/managerpage')
                response.set_cookie('username', username)
                response.set_cookie('state', 'manager')
                response.set_cookie("isentertainment",isentertainment)
                return response
            else:
                passwords = '密码错误'
                values = username
                username = '用户名'
                return render(request, 'login/login.html',
                              {'username': username, 'passwords': passwords, 'values': values})
def register(request):
    # Registration view.  GET renders an empty form with placeholder labels;
    # POST validates the fields and creates a Client row.
    if request.method == "GET":
        username = '用户名'
        passwords1 = '密码'
        passwords2 = '密码'
        gender = '性别'
        age = '年龄'
        email = '邮箱'
        phonenum = '号码'
        state = '身份'
        return render(request, 'login/register.html',
                      {'username': username, 'passwords1': passwords1, 'passwords2': passwords2, 'gender': gender,
                       'age': age, 'email': email, 'phonenum': phonenum, 'state': state})
    else:
        username = request.POST.get('username', '')
        passwords1 = request.POST.get('passwords1','')
        passwords2 = request.POST.get('passwords2','')
        gender = request.POST.get('gender','')
        age = request.POST.get('age','')
        email = request.POST.get('email','')
        phonenum = request.POST.get('phonenum','')
        # state = request.POST.get('state','')
        # Reject blank fields or mismatched passwords by re-rendering the form.
        if username == '' or passwords1 == '' or passwords2 == '' or age == '' or email == '' or phonenum == '' or passwords2 != passwords1:
            return render(request,'login/register.html',{'username':username,'passwords1':passwords1,'passwords2':passwords2,'gender':gender,
                                                         'age':age,'email':email,'phonenum':phonenum})
        try:
            c = Client(cname=username,cpasswords=passwords1,cgender=gender,cage=age,cemail=email,cphonenum=phonenum)
            c.save()
        except:
            # NOTE(review): this bare except assumes the save failed because of
            # a duplicate username; any other DB error is reported the same way.
            return render(request, 'login/register.html',
                          {'username': '用户名已被占用', 'passwords1': passwords1, 'passwords2': passwords2,
                           'gender': gender,
                           'age': age, 'email': email, 'phonenum': phonenum})
        # else:
        #     try:
        #         m = Manager(mname=username, mpasswords=passwords1, mgender=gender, mage=age, memail=email, mphonenum=phonenum)
        #         m.save()
        #     except:
        #         return render(request, 'login/register.html',
        #                       {'username': '用户名已被占用', 'passwords1': passwords1, 'passwords2': passwords2,
        #                        'gender': gender,
        #                        'age': age, 'email': email, 'phonenum': phonenum, 'state': state})
        return render(request, 'login/register_success.html')
|
985,898 | 086b1f51f20921d0d2461f74a07983294e66fcde | #Embedded file name: ui/cocoa/selective_sync.py
from __future__ import absolute_import
import os
import time
from objc import YES, NO, ivar
from Foundation import NSFileTypeForHFSTypeCode, NSMakeRect, NSMakeSize, NSMaxX, NSMaxY, NSObject, NSPoint, NSRect, NSSize, NSZeroRect
from AppKit import NSApp, NSBackingStoreBuffered, NSBezierPath, NSBrowser, NSBrowserCell, NSButtonCell, NSBrowserNoColumnResizing, NSBrowserUserColumnResizing, NSClosableWindowMask, NSCompositeSourceOver, NSFocusRingTypeNone, NSFont, NSImage, NSMatrix, NSNotificationCenter, NSTextField, NSTitledWindowMask, NSResizableWindowMask, NSSmallControlSize, NSSwitchButton, NSView, NSViewHeightSizable, NSViewWidthSizable, NSWindow, NSWindowDidBecomeKeyNotification, NSWindowDidResignKeyNotification, NSWorkspace
from PyObjCTools import AppHelper
from .constants import ENTER_KEY, ESCAPE_KEY
from .dropbox_controls import DropboxSheetErrorFactory
from .dropbox_menu import DropboxNSMenu
from .dynamic_layouts import align_center_to_offset, height_for_fixed_width, BOTTOM_BUTTON_BORDER, HORIZ_BUTTON_BORDER, HORIZ_BUTTON_SPACING, NEARBY_CONTROL_BORDER, STATICTEXT_TO_BUTTON_BASELINE_ADJUSTMENT
from ..common.selective_sync import CachingLazySelectiveSyncUI, selsync_strings, failed_unignores_message_from_failures
from dropbox.gui import message_sender, event_handler, spawn_thread_with_name
from dropbox.trace import unhandled_exc_handler, TRACE
import build_number
import objc
# Minimum number of seconds between UI refreshes triggered by remote events.
REMOTE_EVENT_CHILLOUT = 1
# Pixel height of one browser row.
BROWSER_ROW_HEIGHT = 18
# Default window widths for the three-column and single-column layouts.
DEFAULT_ADVANCED_WIDTH = 630
DEFAULT_SIMPLE_WIDTH = 450
# 15 visible rows plus a 2px border.
DEFAULT_BROWSER_HEIGHT = 15 * BROWSER_ROW_HEIGHT + 2
# Gap between the cell's left edge and its embedded checkbox.
CHECK_CELL_LEFT_PADDING = 3
class SelectiveSyncBrowserCell(NSBrowserCell):
    # Browser cell that draws a tri-state checkbox, a folder icon and the
    # folder title for one sync path, delegating icon/title/check-state
    # queries to a callback target (the browser delegate).

    @objc.typedSelector('i@:{NSPoint=ff}')
    @event_handler
    def isViewRelativePointInCheckCell_(self, view_point):
        # Hit-test helper: True when the point falls over the checkbox area.
        return view_point.x < self._checkCell.cellSize().width + CHECK_CELL_LEFT_PADDING

    def setPath_callbackTarget_(self, path, callbackTarget):
        # Attach this cell to a path and build its embedded tri-state check button.
        self._path = path
        self._callbackTarget = callbackTarget
        self._checkCell = NSButtonCell.alloc().init()
        self._checkCell.setButtonType_(NSSwitchButton)
        self._checkCell.setControlSize_(NSSmallControlSize)
        self._checkCell.setTitle_('')
        self._checkCell.setAllowsMixedState_(YES)

    def path(self):
        # Returns None for cells never configured via setPath_callbackTarget_.
        if hasattr(self, '_path'):
            return self._path

    @event_handler
    def cellSizeForBounds_(self, bounds):
        # Grow the default cell size to make room for the checkbox.
        ret = super(SelectiveSyncBrowserCell, self).cellSizeForBounds_(bounds)
        if hasattr(self, '_path'):
            checksize = self._checkCell.cellSize()
            ret.width += checksize.width
            ret.height = max(ret.height, checksize.height)
        return ret

    @objc.typedSelector('v@:')
    @event_handler
    def invalidateFolderTag(self):
        # Drop the cached icon so the next draw re-queries the callback target.
        if hasattr(self, '_image'):
            del self._image

    @event_handler
    def drawWithFrame_inView_(self, frame, view):
        if hasattr(self, '_path'):
            # Lazily pull icon, title and check state from the delegate on the
            # first draw (and again after invalidateFolderTag()).
            if not hasattr(self, '_image'):
                self._image = self._callbackTarget.imageForPath_(self._path)
                self.setTitle_(self._callbackTarget.titleForPath_(self._path))
                self._checkCell.setIntValue_(self._callbackTarget.stateForPath_(self._path))
            x = frame.origin.x
            y = frame.origin.y + 1
            height = frame.size.height - 1
            if self.state() or self.isHighlighted():
                highlight_rect = NSMakeRect(x, y, frame.size.width, height)
                if view.needsToDrawRect_(highlight_rect):
                    self.highlightColorInView_(view).set()
                    NSBezierPath.bezierPathWithRect_(highlight_rect).fill()
            # Draw left-to-right: checkbox, icon, then the default text cell.
            x += CHECK_CELL_LEFT_PADDING
            checkCellSize = self._checkCell.cellSize()
            check_cell_rect = NSMakeRect(x, y, checkCellSize.width, height)
            if view.needsToDrawRect_(check_cell_rect):
                self._checkCell.drawWithFrame_inView_(check_cell_rect, view)
            x += checkCellSize.width - 1
            imageSize = self._image.size()
            image_rect = NSMakeRect(x, y, imageSize.width, imageSize.height)
            if view.needsToDrawRect_(image_rect):
                self._image.drawInRect_fromRect_operation_fraction_(image_rect, NSZeroRect, NSCompositeSourceOver, 1.0)
            x += imageSize.width + 4
            rest_of_cell_rect = NSMakeRect(x, y, frame.size.width - x, height)
            if view.needsToDrawRect_(rest_of_cell_rect):
                super(SelectiveSyncBrowserCell, self).drawWithFrame_inView_(rest_of_cell_rect, view)
        else:
            super(SelectiveSyncBrowserCell, self).drawWithFrame_inView_(frame, view)

    @objc.typedSelector(NSButtonCell.intValue.signature)
    @event_handler
    def valueForToggle(self):
        # The value a click should set: unchecked -> checked; checked or
        # mixed -> unchecked.
        if self._checkCell.intValue() == 0:
            return 1
        return 0

    @objc.typedSelector(NSButtonCell.intValue.signature)
    @event_handler
    def intValue(self):
        return self._checkCell.intValue()

    @objc.typedSelector(NSButtonCell.setIntValue_.signature)
    @event_handler
    def setIntValue_(self, intValue):
        # User-initiated change: update the checkbox and notify the delegate.
        if intValue != self._checkCell.intValue():
            self._checkCell.setIntValue_(intValue)
            self._callbackTarget.checkState_fromPath_(intValue, self._path)

    def setIntValueFromForest_(self, intValue):
        # Model-initiated change (no delegate callback); returns YES when the
        # displayed state actually changed.
        if intValue != self._checkCell.intValue():
            self._checkCell.setIntValue_(intValue)
            return YES
        else:
            return NO

    @objc.typedSelector(NSButtonCell.setHighlighted_.signature)
    @event_handler
    def setCheckHighlighted_(self, boolean):
        self._checkCell.setHighlighted_(boolean)

    @objc.typedSelector(NSButtonCell.isHighlighted.signature)
    @event_handler
    def isCheckHighlighted(self):
        return self._checkCell.isHighlighted()
class SelectiveSyncBrowserMatrix(NSMatrix):
    # NSMatrix subclass that routes mouse presses and the spacebar to the
    # embedded checkbox of a SelectiveSyncBrowserCell instead of changing
    # the row selection.

    @event_handler
    def mouseDown_(self, event):
        the_cell = self.selectiveSyncCellForMouseEvent_(event)
        if the_cell:
            # Press began on a checkbox: highlight it and swallow the event.
            self.down_in_cell = the_cell
            the_cell.setCheckHighlighted_(YES)
            self.setNeedsDisplay_(YES)
        else:
            super(SelectiveSyncBrowserMatrix, self).mouseDown_(event)
            self.down_in_cell = None

    @event_handler
    def mouseDragged_(self, event):
        # Track the drag like a standard button: un-highlight when the cursor
        # leaves the pressed checkbox, re-highlight when it returns.
        the_cell = self.selectiveSyncCellForMouseEvent_(event)
        if self.down_in_cell is not None:
            if self.down_in_cell != the_cell:
                if self.down_in_cell.isCheckHighlighted():
                    self.down_in_cell.setCheckHighlighted_(NO)
                    self.setNeedsDisplay_(YES)
            elif not self.down_in_cell.isCheckHighlighted():
                self.down_in_cell.setCheckHighlighted_(YES)
                self.setNeedsDisplay_(YES)

    @event_handler
    def mouseUp_(self, event):
        if self.down_in_cell is None:
            super(SelectiveSyncBrowserMatrix, self).mouseUp_(event)
        else:
            the_cell = self.selectiveSyncCellForMouseEvent_(event)
            # Only toggle when the release happens over the same checkbox.
            if self.down_in_cell == the_cell:
                the_cell.setIntValue_(the_cell.valueForToggle())
                the_cell.setCheckHighlighted_(NO)
                self.setNeedsDisplay_(YES)
            self.down_in_cell = None

    @event_handler
    def keyDown_(self, event):
        # Key code 49 is the spacebar: arm a toggle on the keyboard-focused cell.
        self.spacebar_cell = None
        if event.keyCode() == 49:
            cell = self.keyCell()
            if cell:
                cell.setCheckHighlighted_(YES)
                self.setNeedsDisplay_(YES)
                self.spacebar_cell = cell
            return
        super(SelectiveSyncBrowserMatrix, self).keyDown_(event)

    @event_handler
    def keyUp_(self, event):
        # Complete the spacebar toggle armed in keyDown_.
        if event.keyCode() == 49 and self.spacebar_cell is not None:
            self.spacebar_cell.setIntValue_(self.spacebar_cell.valueForToggle())
            self.spacebar_cell.setCheckHighlighted_(NO)
            self.setNeedsDisplay_(YES)
            self.spacebar_cell = None
            return
        super(SelectiveSyncBrowserMatrix, self).keyUp_(event)

    @objc.typedSelector('@@:@')
    @event_handler
    def selectiveSyncCellForMouseEvent_(self, event):
        # Map a mouse event to the cell whose checkbox it hits, else None.
        window_point = event.locationInWindow()
        view_point = self.convertPoint_fromView_(window_point, self.window().contentView())
        successful, row, column = self.getRow_column_forPoint_(None, None, view_point)
        if successful:
            the_cell = self.cellAtRow_column_(row, column)
            if the_cell.isViewRelativePointInCheckCell_(view_point):
                return the_cell

    @event_handler
    def cellSize(self):
        # Enforce the minimum browser row height.
        ret = super(SelectiveSyncBrowserMatrix, self).cellSize()
        return NSSize(ret.width, max(ret.height, BROWSER_ROW_HEIGHT))
class SelectiveSyncBrowserDelegate(NSObject):
    # NSBrowser delegate backed by the selective-sync "forest" model: it
    # populates columns, answers per-path icon/title/check-state queries for
    # the cells, and applies invalidations coming from remote file events.

    syncEngine = ivar('syncEngine')

    def __new__(cls, browser, forest, reloadInvalidState):
        return SelectiveSyncBrowserDelegate.alloc().initWithBrowser_andForest_reloadInvalidState_(browser, forest, reloadInvalidState)

    def initWithBrowser_andForest_reloadInvalidState_(self, browser, forest, reloadInvalidState):
        self = super(SelectiveSyncBrowserDelegate, self).init()
        if self is None:
            return
        from dropbox.mac.internal import get_resources_dir
        self.default_width = None
        # Frozen builds ship icons in the app resources; dev builds read ./icons.
        icons_path = get_resources_dir() if hasattr(build_number, 'frozen') else u'icons/'
        self.images = {}
        for key, icon in (('dropbox', 'DropboxFolderIcon_leopard.icns'),
         ('shared', 'shared_leopard.icns'),
         ('public', 'public_leopard.icns'),
         ('photos', 'photos_leopard.icns'),
         ('sandbox', 'sandbox_leopard.icns'),
         ('camerauploads', 'camerauploads_leopard.icns')):
            image = NSImage.alloc().initByReferencingFile_(os.path.join(icons_path, icon))
            image.setSize_((16, 16))
            image.setFlipped_(YES)
            image.recache()
            self.images[key] = image
        # NOTE(review): images_path is computed but never used in this method.
        images_path = get_resources_dir() if hasattr(build_number, 'frozen') else u'images/mac'
        # Generic folder icon comes from the system workspace.
        folder_image = NSWorkspace.sharedWorkspace().iconForFileType_(NSFileTypeForHFSTypeCode('fldr'))
        folder_image.setFlipped_(YES)
        folder_image.setSize_(NSMakeSize(16, 16))
        self.images['folder'] = folder_image
        self.forest = forest
        self.browser_reloadAdvancedView_(browser, self.forest.advanced_view)
        self.reloadInvalidState = reloadInvalidState
        TRACE('initialized %r', self.forest)
        self.browser = browser
        return self

    def checkState_fromPath_(self, checkState, path):
        # A checkbox changed: push the new state into the model, then resync
        # every visible cell (parent/child states may have cascaded).
        self.forest.set_check_state_from_ui(path, checkState)
        try:
            for x in range(self.browser.firstVisibleColumn(), self.browser.lastVisibleColumn() + 1):
                matrix = self.browser.matrixInColumn_(x)
                needs_update = NO
                for y in range(matrix.numberOfRows()):
                    cell = matrix.cellAtRow_column_(y, 0)
                    if cell.setIntValueFromForest_(self.stateForPath_(cell.path())):
                        needs_update = YES
                if needs_update:
                    matrix.setNeedsDisplay_(YES)
        except:
            unhandled_exc_handler()
        self.reloadInvalidState()

    def imageForPath_(self, path):
        image_tag = self.forest.image_tag_for_path(path)
        return self.images[image_tag]

    def titleForPath_(self, path):
        return self.forest.title_for_path(path)

    def stateForPath_(self, path):
        return self.forest.check_state_for_path(path)

    @objc.typedSelector('v@:@B')
    @event_handler
    def browser_reloadAdvancedView_(self, browser, advancedView):
        # Reset invalidation bookkeeping and switch the browser between the
        # resizable multi-column (advanced) and fixed single-column layout.
        self.last_ui_invalidation = None
        self.pending_paths_to_invalidate = {}
        if advancedView:
            browser.setColumnResizingType_(NSBrowserUserColumnResizing)
            browser.setHasHorizontalScroller_(YES)
        else:
            browser.setColumnResizingType_(NSBrowserNoColumnResizing)
            browser.setHasHorizontalScroller_(NO)
        if browser.respondsToSelector_('setAutohidesScroller:'):
            browser.setAutohidesScroller_(YES)

    def intelligentlyRefreshMatrix_withDirChildren_restoreSelection_(self, matrix, dirChildren, restoreSelection):
        # Rebuild a column's cells from (path, num_children) pairs.  Returns
        # the restored row index, -1 when a previous selection vanished, or
        # -2 when there was no selection to restore.
        if restoreSelection:
            selected = matrix.selectedCell()
            if selected:
                selected = selected.path().lower()
        else:
            selected = False
        matrix.renewRows_columns_(len(dirChildren), 1)
        to_select = -1
        for i, dir_child in enumerate(dirChildren):
            path, num_children = dir_child
            cell = SelectiveSyncBrowserCell.alloc().init()
            cell.setPath_callbackTarget_(path, self)
            cell.setLeaf_(not num_children)
            try:
                ctxmenu = self.forest.context_menu_for_path(path)
            except:
                unhandled_exc_handler()
            else:
                cell.setMenu_(DropboxNSMenu.menuWithDropboxMenuDescriptor_(ctxmenu))
            matrix.putCell_atRow_column_(cell, i, 0)
            if selected and path.lower() == selected:
                to_select = i
        if to_select > -1:
            matrix.selectCellAtRow_column_(to_select, 0)
            return to_select
        if selected:
            return -1
        return -2

    @objc.typedSelector('v@:@')
    @event_handler
    def invalidateUICallback_(self, paths_to_invalidate):
        # Coalesce remote-event invalidations (rate-limited by
        # REMOTE_EVENT_CHILLOUT) and refresh the affected columns in place.
        TRACE('++ remote events told me to invalidate the following folders: %r' % (paths_to_invalidate,))
        self.pending_paths_to_invalidate.update(paths_to_invalidate)
        incoming = time.time()
        if self.last_ui_invalidation is not None and incoming - self.last_ui_invalidation < REMOTE_EVENT_CHILLOUT:
            # Too soon: queue a delayed re-run that drains the pending dict.
            TRACE('chilling out for a bit')
            AppHelper.callLater(REMOTE_EVENT_CHILLOUT - (incoming - self.last_ui_invalidation), self.invalidateUICallback_, {})
            return
        self.last_ui_invalidation = incoming
        last_selected = None
        for x in range(0, self.browser.lastColumn() + 1):
            dir_children_for_refresh = None
            if x == 0:
                # Column 0 shows the Dropbox root; look it up among pending paths.
                for path in self.pending_paths_to_invalidate:
                    if path.ns_rel()[1] == '/':
                        dir_children_for_refresh = self.pending_paths_to_invalidate[path]
                        break
            else:
                selected = self.browser.matrixInColumn_(x - 1).selectedCell()
                dir_children_for_refresh = self.pending_paths_to_invalidate.get(selected.path().lower()) if selected else None
            matrix = self.browser.matrixInColumn_(x)
            if matrix:
                if dir_children_for_refresh:
                    TRACE('+++++ refreshing: %r, %r' % (x, dir_children_for_refresh))
                    ret = self.intelligentlyRefreshMatrix_withDirChildren_restoreSelection_(matrix, dir_children_for_refresh, True)
                    matrix.sizeToCells()
                    matrix.setNeedsDisplay_(YES)
                    if ret > -1:
                        last_selected = (ret, x)
                    elif ret == -1:
                        # The previously-selected row disappeared: fall back to
                        # the last good selection (or the root) and stop here.
                        if last_selected is not None:
                            self.browser.selectRow_inColumn_(last_selected[0], last_selected[1])
                        else:
                            self.browser.setPath_(self.browser.pathSeparator())
                        break
                needs_display = False
                for y in range(matrix.numberOfRows()):
                    cell = matrix.cellAtRow_column_(y, 0)
                    path = cell.path().lower()
                    if path in self.pending_paths_to_invalidate:
                        # A path's child count changed: flip its leaf state and
                        # force its icon to be re-fetched on the next draw.
                        new_state = NO if self.pending_paths_to_invalidate[path] else YES
                        if cell.isLeaf() != new_state:
                            TRACE('<> refreshing leaf state for %r' % (path,))
                            cell.setLeaf_(new_state)
                            cell.invalidateFolderTag()
                            needs_display = True
                            if cell == matrix.selectedCell():
                                self.browser.selectRow_inColumn_(y, x)
                                break
                if needs_display:
                    matrix.setNeedsDisplay_(YES)
        self.pending_paths_to_invalidate = {}

    @objc.typedSelector('v@:')
    @event_handler
    def removeInvalidationCallbacks(self):
        # Detach this delegate from the model's event hooks (teardown).
        self.forest.clear_invalidate_ui_callback()
        self.forest.remove_remote_file_event_callback()

    def browser_isColumnValid_(self, browser, column):
        return YES

    def browser_createRowsForColumn_inMatrix_(self, browser, column, matrix):
        # NSBrowser delegate hook: populate a newly-exposed column from the
        # forest (children of the selection in the previous column, or root).
        try:
            if column > 0:
                cell = browser.selectedCellInColumn_(column - 1)
                dir_children = self.forest.dir_children_for_path(cell.path())
            else:
                dir_children = self.forest.dir_children_for_path(self.forest.get_root_paths()[0])
            self.intelligentlyRefreshMatrix_withDirChildren_restoreSelection_(matrix, dir_children, False)
        except:
            unhandled_exc_handler()

    def browser_titleOfColumn_(self, browser, column):
        return ''

    def browser_shouldSizeColumn_forUserResize_toWidth_(self, browser, column, userResize, suggestedWidth):
        # Advanced view: three equal columns; simple view: one full-width column.
        try:
            if self.forest.advanced_view:
                if self.default_width is None:
                    self.default_width = browser.frame().size.width / 3 - 1
                return self.default_width
            return browser.frame().size.width - 1
        except:
            unhandled_exc_handler()
class SelectiveSyncBrowser(NSBrowser):
    # NSBrowser wired to the selective-sync matrix/cell classes; repaints
    # itself when its window gains or loses key status so the check cells
    # render in the correct active/inactive appearance.

    forest = ivar('forest')
    delegate = ivar('delegate')

    def __new__(cls):
        return SelectiveSyncBrowser.alloc().init()

    @event_handler
    def init(self):
        self = super(SelectiveSyncBrowser, self).initWithFrame_(NSZeroRect)
        if self is None:
            return
        self.setMatrixClass_(SelectiveSyncBrowserMatrix)
        self.setCellClass_(SelectiveSyncBrowserCell)
        return self

    def allowsTypeSelect(self):
        return YES

    def dealloc(self):
        # Balance the observers registered in viewWillMoveToWindow_.
        try:
            NSNotificationCenter.defaultCenter().removeObserver_(self)
        except Exception:
            unhandled_exc_handler()
        super(SelectiveSyncBrowser, self).dealloc()

    @event_handler
    def viewWillMoveToWindow_(self, window):
        # Observe key-window transitions of the hosting window.
        if not window:
            return
        notification_center = NSNotificationCenter.defaultCenter()
        notification_center.addObserver_selector_name_object_(self, 'windowChangedKeyNotification:', NSWindowDidBecomeKeyNotification, window)
        notification_center.addObserver_selector_name_object_(self, 'windowChangedKeyNotification:', NSWindowDidResignKeyNotification, window)

    @objc.typedSelector('v@:@')
    @event_handler
    def windowChangedKeyNotification_(self, notification):
        self.setNeedsDisplay_(YES)
class SelectiveSyncSheet(NSWindow):
    # Modal sheet window whose entire content is a SelectiveSyncView.

    def __new__(cls, dropbox_app, initial_ignore_list = None, take_action = False, callback = None, remote = False):
        return SelectiveSyncSheet.alloc().initWithDropboxApp_initialIgnoreList_takeAction_callback_remote_(dropbox_app, initial_ignore_list, take_action, callback, remote)

    def initWithDropboxApp_initialIgnoreList_takeAction_callback_remote_(self, dropbox_app, initial_ignore_list, take_action, callback, remote):
        self = super(SelectiveSyncSheet, self).initWithContentRect_styleMask_backing_defer_(NSZeroRect, NSTitledWindowMask | NSClosableWindowMask | NSResizableWindowMask, NSBackingStoreBuffered, YES)
        if self is None:
            return
        try:
            self.innerView = SelectiveSyncView(dropbox_app, initial_ignore_list, take_action, callback, remote)
            self.contentView().addSubview_(self.innerView)
            # The inner view dictates both the minimum and the initial sheet size.
            self.setContentMinSize_(self.innerView.frame().size)
            self.setContentSize_(self.innerView.frame().size)
            self.setReleasedWhenClosed_(NO)
        except:
            unhandled_exc_handler()
        return self

    def beginSheetForWindow_(self, window):
        # Delegate sheet presentation to the inner view.
        try:
            self.innerView.beginSheetForWindow_(window)
        except:
            unhandled_exc_handler()
class SelectiveSyncView(NSView):
syncEngine = ivar('syncEngine')
forest = ivar('forest')
browser = ivar('browser')
infoLabel = ivar('infoLabel')
VERT_TEXT_SPACING = 14
def __new__(cls, dropbox_app, initial_ignore_list = None, take_action = False, callback = None, remote = False):
return SelectiveSyncView.alloc().initWithDropboxApp_initialIgnoreList_takeAction_callback_remote_(dropbox_app, initial_ignore_list, take_action, callback, remote)
def initWithDropboxApp_initialIgnoreList_takeAction_callback_remote_(self, dropbox_app, initial_ignore_list, take_action, callback, remote):
self = super(SelectiveSyncView, self).initWithFrame_(NSZeroRect)
if self is None:
return
self._initial_ignore_list = initial_ignore_list
self._callback = callback
self._take_action = take_action
self._remote = remote
self.setAutoresizingMask_(NSViewWidthSizable | NSViewHeightSizable)
self._dropbox_app = dropbox_app
self.initBrowser(self._remote)
self.initButtons()
f = NSFont.systemFontOfSize_(NSFont.smallSystemFontSize())
self.infoLabel = NSTextField.createLabelWithText_font_(selsync_strings.info, f)
self.addSubview_(self.infoLabel)
self.reloadInvalidState()
self.layoutForWidth_(DEFAULT_ADVANCED_WIDTH if self.forest.advanced_view else DEFAULT_SIMPLE_WIDTH)
return self
def initButtons(self):
if not self.forest.advanced_view:
self._switchButton = self.addNormalRoundButtonWithTitle_action_(selsync_strings.switch_to_advanced_view, self.switchToAdvancedView_)
else:
self._switchButton = None
self._cancelButton = self.addNormalRoundButtonWithTitle_action_(selsync_strings.cancel_button, self.windowCancel_)
self._cancelButton.setKeyEquivalent_(ESCAPE_KEY)
self._okButton = self.addNormalRoundButtonWithTitle_action_(selsync_strings.window_ok_button, self.windowOk_)
def initBrowser(self, remote):
se = self._dropbox_app.sync_engine if not remote else self._dropbox_app.mbox.sync_engine
self.forest = CachingLazySelectiveSyncUI(self._dropbox_app, se, self._dropbox_app.dropbox_url_info, use_tri_state_checks=True, initial_directory_ignore_set=self._initial_ignore_list)
if self._dropbox_app.pref_controller['selsync_advanced_view_hint']:
self.forest.advanced_view = True
self.browser = SelectiveSyncBrowser()
self.browser.setTitled_(NO)
self.browser.setFocusRingType_(NSFocusRingTypeNone)
self.browserDelegate = SelectiveSyncBrowserDelegate(self.browser, self.forest, self.reloadInvalidState)
self.forest.set_invalidate_ui_callback(lambda events: AppHelper.callAfter(self.browserDelegate.invalidateUICallback_, events))
self.browser.setDelegate_(self.browserDelegate)
self.addSubview_(self.browser)
def reloadInvalidState(self):
if self.forest.invalid():
self._okButton.setEnabled_(YES)
self._okButton.setKeyEquivalent_(ENTER_KEY)
else:
self._okButton.setEnabled_(NO)
def layoutForWidth_(self, width):
height_needed = BOTTOM_BUTTON_BORDER
button_row_height = 0
button_row_width = 0
if self._switchButton is not None:
button_row_height = max(button_row_height, NSMaxY(self._switchButton.frame()))
button_row_width += NSMaxX(self._switchButton.frame()) + HORIZ_BUTTON_BORDER
self._switchButton.setFrameOrigin_(NSPoint(HORIZ_BUTTON_BORDER, height_needed))
self._okButton.setFrameOrigin_(NSPoint(0, height_needed))
button_row_height = max(button_row_height, NSMaxY(self._okButton.frame()))
button_row_width += NSMaxX(self._okButton.frame()) + HORIZ_BUTTON_BORDER
self._cancelButton.setFrameOrigin_(NSPoint(0, height_needed))
button_row_height = max(button_row_height, NSMaxY(self._cancelButton.frame()))
button_row_width += NSMaxX(self._cancelButton.frame()) + HORIZ_BUTTON_BORDER
height_needed += button_row_height + NEARBY_CONTROL_BORDER
width = max(width, button_row_width)
self.browser.setFrame_(NSRect((0, height_needed), (width, DEFAULT_BROWSER_HEIGHT)))
self.browser.setWidth_ofColumn_(width / (3 if self.forest.advanced_view else 1) - 1, -1)
height_needed += self.browser.frame().size.height
top_of_browser = height_needed
height_needed += self.VERT_TEXT_SPACING
height_needed += height_for_fixed_width(self.infoLabel, width)
height_needed += self.VERT_TEXT_SPACING
self.top_padding = height_needed - top_of_browser
self.setFrameSize_(NSSize(width, height_needed))
    @event_handler
    def setFrameSize_(self, newSize):
        """Apply a new frame size and keep the children pinned.

        Right-aligns OK (with Cancel to its left), stretches the browser
        vertically using the top_padding computed in layoutForWidth_,
        and re-centers the info label just below the top edge.
        """
        super(SelectiveSyncView, self).setFrameSize_(newSize)
        self._okButton.alignRightToOffset_(newSize.width - HORIZ_BUTTON_BORDER)
        self._cancelButton.alignRightToOffset_(newSize.width - HORIZ_BUTTON_BORDER - self._okButton.frame().size.width - HORIZ_BUTTON_SPACING)
        self.browser.setFrameSize_(NSSize(newSize.width, newSize.height - self.browser.frame().origin.y - self.top_padding))
        self.infoLabel.setFrameOrigin_(NSPoint(0, newSize.height - self.VERT_TEXT_SPACING - self.infoLabel.frame().size.height))
        align_center_to_offset(self.infoLabel, newSize.width / 2)
@event_handler
def resizeWithOldSuperviewSize_(self, oldBoundsSize):
new_size = self.superview().frame().size
self.setFrameSize_(new_size)
if not self.forest.advanced_view:
self.browser.setWidth_ofColumn_(new_size.width - 1, 0)
    @objc.typedSelector('v@:@i@')
    @event_handler
    def selsyncSheetDidEnd_returnCode_contextInfo_(self, sheet, returnCode, contextInfo):
        # didEndSelector passed to beginSheet... below: just dismiss the
        # sheet window; returnCode and contextInfo are unused.
        sheet.orderOut_(self)
    @objc.typedSelector('v@:@')
    @event_handler
    def beginSheetForWindow_(self, window):
        """Present this view's window as a sheet attached to `window`.

        The parent window is remembered in self.relevantWindow so that
        windowOk_ and confirmationCancel can target it again later.
        """
        self.relevantWindow = window
        NSApp().beginSheet_modalForWindow_modalDelegate_didEndSelector_contextInfo_(self.window(), self.relevantWindow, self, self.selsyncSheetDidEnd_returnCode_contextInfo_.selector, 0)
    @objc.typedSelector('v@:@')
    @event_handler
    def switchToAdvancedView_(self, sender):
        """Switch the browser from simple to advanced (three-column) view.

        One-way transition: the switch button is removed, the window is
        widened if necessary, and the forest/browser reload in advanced
        mode.
        """
        self._switchButton.removeFromSuperview()
        self._switchButton = None
        current_frame = self.window().frame()
        if current_frame.size.width < DEFAULT_ADVANCED_WIDTH:
            # Grow symmetrically so the window stays visually centered.
            diff = DEFAULT_ADVANCED_WIDTH - current_frame.size.width
            current_frame.size.width = DEFAULT_ADVANCED_WIDTH
            current_frame.origin.x -= diff / 2
            self.window().setFrame_display_animate_(current_frame, YES, YES)
        # Equal thirds for column 0 and (presumably) the remaining/default
        # columns via index -1 -- confirm NSBrowser semantics.
        self.browser.setWidth_ofColumn_(DEFAULT_ADVANCED_WIDTH / 3 - 1, 0)
        self.browser.setWidth_ofColumn_(DEFAULT_ADVANCED_WIDTH / 3 - 1, -1)
        self.forest.set_advanced_view(True)
        self.browserDelegate.browser_reloadAdvancedView_(self.browser, True)
        self.browser.loadColumnZero()
    @objc.typedSelector('v@:@')
    @event_handler
    def windowOk_(self, sender):
        """OK button action: confirm (or directly finish) the selection.

        reloadInvalidState only enables the button while forest.invalid()
        is true, hence the assert. With _take_action set, a confirmation
        alert is shown before anything is applied; otherwise the sheet
        simply ends.
        """
        assert self.forest.invalid(), "button shouldn't have been clickable"
        NSApp().endSheet_(self.window())
        if self.forest.advanced_view:
            # Remember that the user has already discovered advanced view.
            self._dropbox_app.pref_controller.update({'selsync_advanced_view_hint': True})
        if self._take_action:
            DropboxSheetErrorFactory.sharedInstance().alertForWindow_withCaption_message_onSuccess_onCancel_successLabel_cancelLabel_(self.relevantWindow, selsync_strings.confirmation_caption, self.forest.get_confirmation_message(), self.confirmationOkay, self.confirmationCancel, selsync_strings.confirmation_ok_button, selsync_strings.cancel_button)
        else:
            self.endShowSheet()
        if self._callback:
            self._callback(self)
    @objc.typedSelector('v@:@')
    @event_handler
    def windowCancel_(self, sender):
        # Cancel button action: dismiss the sheet without applying changes.
        NSApp().endSheet_(self.window())
        self.endShowSheet()
    @objc.typedSelector('v@:')
    @event_handler
    def confirmationCancel(self):
        # User backed out of the confirmation alert: re-present the
        # selective sync sheet on the original parent window.
        self.beginSheetForWindow_(self.relevantWindow)
    @objc.typedSelector('v@:')
    @event_handler
    def confirmationOkay(self):
        """User confirmed: apply the ignore-list changes off the main thread.

        _take_action(True) marks the launcher busy. The forest's changes
        are written to the appropriate sync engine (mbox engine when
        _remote, the main engine otherwise) on a PREP_SELSYNC thread;
        both result handlers are marshalled back to the main thread via
        AppHelper.callAfter.
        """
        self._take_action(True)
        message_sender(spawn_thread_with_name('PREP_SELSYNC'), on_success=message_sender(AppHelper.callAfter)(self.prepSelsyncFinishedWithFailures_), on_exception=message_sender(AppHelper.callAfter)(self.prepSelsyncFinishedWithException_andExcInfo_), block=False, handle_exceptions=True, dont_post=lambda : False)(self.browserDelegate.forest.write_changes_to_sync_engine)(self._dropbox_app.sync_engine if not self._remote else self._dropbox_app.mbox.sync_engine)
        self.endShowSheet()
    @objc.typedSelector('v@:')
    @event_handler
    def endShowSheet(self):
        # Tear-down after the sheet goes away: stop the browser delegate
        # from receiving further forest invalidation callbacks.
        self.browserDelegate.removeInvalidationCallbacks()
    @objc.typedSelector('v@:@')
    @event_handler
    def prepSelsyncFinishedWithFailures_(self, failures):
        # Success path of the PREP_SELSYNC worker (runs on the main
        # thread): clear the busy state and, if some unignores failed,
        # surface them in an alert.
        self._take_action(False)
        if failures:
            DropboxSheetErrorFactory.sharedInstance().alertForWindow_withCaption_message_onSuccess_onCancel_(None, selsync_strings.unignore_error_caption, failed_unignores_message_from_failures(failures, self._dropbox_app.default_dropbox_path), lambda : None, None)
    @objc.typedSelector('v@:@@')
    @event_handler
    def prepSelsyncFinishedWithException_andExcInfo_(self, exc, exc_info):
        # Failure path of the PREP_SELSYNC worker: clear the busy state and
        # show a generic error alert; exc/exc_info are not surfaced in the UI.
        self._take_action(False)
        DropboxSheetErrorFactory.sharedInstance().alertForWindow_withCaption_message_onSuccess_onCancel_successLabel_cancelLabel_(None, selsync_strings.really_bad_error_caption, selsync_strings.really_bad_error_message, lambda : None, None, selsync_strings.really_bad_error_button, None)
    def ignoreList(self):
        # Expose the forest's current ignore set so callers (e.g. the
        # launcher's sheetCallback_) can cache it between invocations.
        return self.forest.current_directory_ignore_set
class SelectiveSyncLauncher(NSView):
    """Button (with an optional explanatory label) that opens the
    selective sync sheet.

    Caches the ignore list the user last chose (current_ignore_list) and
    invokes an optional action callback when the sheet completes.
    """
    def __new__(cls, dropbox_app, take_action = True, hide_text = False, width = 0):
        # PyObjC idiom: funnel Python-style construction into the
        # Objective-C alloc/init designated initializer below.
        return cls.alloc().initWithDropboxApp_takeAction_hideText_width_(dropbox_app, take_action, hide_text, width)
    @objc.typedSelector('v@:@@@@')
    @event_handler
    def initWithDropboxApp_takeAction_hideText_width_(self, dropbox_app, take_action, hide_text, width):
        """Designated initializer.

        dropbox_app -- application object (prefs, UI state, sync engines)
        take_action -- when truthy, the sheet actually applies changes
        hide_text   -- suppress the explanatory label next to the button
        width       -- wrap width for the label (0 disables wrapping)
        """
        self = super(SelectiveSyncLauncher, self).initWithFrame_(NSZeroRect)
        if self is None:
            # Cocoa init failed; propagate nil per Objective-C convention.
            return
        self._dropbox_app = dropbox_app
        self._action = None
        self._take_action = take_action
        self.current_ignore_list = None
        self._width = width
        self.listingProgress = None
        if not hide_text:
            self.info_label = NSTextField.createLabelWithText_(selsync_strings.prefs_launch_label)
            self.addSubview_(self.info_label)
        else:
            self.info_label = None
        self.launch_button = self.addNormalRoundButtonWithTitle_action_(selsync_strings.prefs_launch_button, self.launch_)
        # Only clickable once the client is linked.
        self.setEnabled_(self._dropbox_app.ui_kit.post_link)
        self.sizeToFit()
        return self
    @objc.typedSelector('v@:@')
    @event_handler
    def launch_(self, sender):
        # Launch-button action.
        self.showSelectiveSyncSheet()
    @objc.typedSelector('v@:c')
    @event_handler
    def setEnabled_(self, enabled):
        self.launch_button.setEnabled_(enabled)
    @objc.typedSelector('v@:c')
    @event_handler
    def setBusy_(self, busy):
        # While busy, show the "working" title and disable the button.
        # Passed to SelectiveSyncSheet as its take_action hook.
        self.launch_button.setTitle_(selsync_strings.prefs_working_button if busy else selsync_strings.prefs_launch_button)
        self.launch_button.setEnabled_(not busy)
    @objc.typedSelector('v@:')
    @event_handler
    def endShowSheet(self):
        # Hook for subclasses; no-op here.
        pass
    @objc.typedSelector('v@:')
    @event_handler
    def beginShowSheet(self):
        # Hook for subclasses; no-op here.
        pass
    @event_handler
    @objc.typedSelector('v@:')
    def showSelectiveSyncSheet(self):
        """Create and present the selective sync sheet on our window.

        When not taking action, the previously chosen ignore list is fed
        back in so the sheet reopens with the same selection.
        """
        TRACE('showing user selective sync')
        self.beginShowSheet()
        ignore_list = self.current_ignore_list if not self._take_action else None
        self.selectiveSyncSheet = SelectiveSyncSheet(self._dropbox_app, initial_ignore_list=ignore_list, take_action=self._take_action and self.setBusy_, callback=self.sheetCallback_)
        self.selectiveSyncSheet.beginSheetForWindow_(self.window())
    @objc.typedSelector('v@:f')
    @event_handler
    def sizeToFit(self):
        # NOTE(review): the selector type 'v@:f' declares a float argument
        # and a void return, but the method takes no argument and returns
        # total_size -- confirm the intended signature.
        height_needed = 0
        if self.info_label:
            self.info_label.setFrameOrigin_(NSPoint(0, height_needed + STATICTEXT_TO_BUTTON_BASELINE_ADJUSTMENT))
            if self._width:
                # Leave room for the button when wrapping the label.
                self.info_label.wrapToWidth_(self._width - self.launch_button.frame().size.width)
            total_size = self.info_label.placeButtonToRight(self.launch_button)
        else:
            total_size = self.launch_button.frame().size
        self.setFrameSize_(total_size)
        return total_size
    def sheetCallback_(self, sheet):
        # Remember the user's final ignore list and notify any listener.
        self.current_ignore_list = sheet.ignoreList()
        if self._action:
            self._action(self)
    def setAction_(self, action):
        # Register a callback invoked (with self) after the sheet completes.
        self._action = action
import matplotlib.pyplot as plt
import math
import os
import re
import sys
# Make the project's src/ directory importable: locate the
# "General_electrochemistry" repo root within the current working
# directory's path and append its "src" subfolder to sys.path.
# (Original code shadowed the builtin `dir` and used a full list
# comprehension where only the first match index is needed.)
cwd_parts = os.getcwd().split("/")
# index() returns the first occurrence, matching the original loc[0];
# raises ValueError when run from outside the repo tree.
repo_root_idx = cwd_parts.index("General_electrochemistry")
source_loc = "/".join(cwd_parts[:repo_root_idx + 1] + ["src"])
sys.path.append(source_loc)
print(sys.path)
from single_e_class_unified import single_electron
from EIS_class import EIS
from EIS_optimiser import EIS_genetics
import numpy as np
import pints
from pandas import read_csv
# Location of the raw experimental CSV exports. NOTE(review): files,
# header and footer are not used anywhere below -- possibly leftovers
# from an earlier fitting script; confirm before removing.
data_loc="/home/henney/Documents/Oxford/Experimental_data/Alice/Galectin_31_7/"
files=os.listdir(data_loc)
header=6
footer=2
# Candidate equivalent-circuit models for the EIS fits. Tuples like
# ("Q1", "alpha1") appear to denote CPE elements (magnitude + exponent),
# lists series combinations, and "p_N" keys parallel branches --
# confirm against the EIS_class circuit grammar.
model_dict={"CPE":{"z1":"R0", "z2":{"p_1":("Q1", "alpha1"),"p_2":["R1", "W1"]}},
"2CPE":{"z1":"R0", "z2":{"p_1":("Q1", "alpha1"),"p_2":["R1", ("Q2", "alpha2")]}},
#"C":{"z1":"R0", "z2":{"p_1":"C2","p_2":["R1", "W1"]}}
}
# Free parameter names per model.
names={"CPE":["R0", "R1", "Q1", "alpha1", "W1"],
"2CPE":["R0", "R1", "Q1", "alpha1" ,"Q2", "alpha2"]
}
# Plot styling per model.
marker_dict={"CPE":{"line":"-", "marker":"o"},
"2CPE":{"line":"--", "marker":"v"},
}
model_names=list(names.keys())
# Fitting bounds per parameter: [lower, upper].
boundaries={"R0":[0, 1e4,],
"R1":[1e-6, 1e6],
"C2":[0,1],
"Q2":[0,1],
"alpha2":[0,1],
"Q1":[0,1],
"alpha1":[0,1],
"W1":[0,1e6]}
# Pulls the trailing concentration (e.g. "0.05") out of filenames like
# "...0.05.csv".
get_conc=re.compile(r"0\.[0-9]+(?=\.csv$)")
# Layout of the results file loaded below:
#concentration
#->Repeat
#---->Data
#---->Model
#------->Data type
#---------->Parameter values
#---------->Generated fits
# Fit results produced by the companion fitting script; see the layout
# sketch above for the nesting.
results_file = np.load("alice_fitting_results_2.npy", allow_pickle=True).item()
# Order the concentration keys numerically (keys are strings like "0.05").
concentrations = sorted(results_file.keys(), key=float)
num_repeats = 3
modes = ["bode", "nyquist"]
plot_class = EIS()
# Repeat index ("1".."3") -> experiment folder name, used for panel titles.
repeat_dict = {
    "1": "2023-07-14_SPE-P-DS_1",
    "2": "2023-07-27_SPE-P-DS_1",
    "3": "2023-07-27_SPE-P-DS_5",
}
# For every (model, mode) pair, build one figure: rows are experimental
# repeats, left column bode plots, right column nyquist plots; raw data
# is drawn as scatter, the fitted model as a line on top.
for model in ["CPE", "2CPE"]:
    for mode in modes:
        plots = plt.subplots(num_repeats, 2)
        for repeat in range(0, num_repeats):
            current_axes = plots[1]
            bode_axes = current_axes[repeat, 0]
            bode_twinx = bode_axes.twinx()
            nyquist_axes = current_axes[repeat, 1]
            # Title work is loop-invariant per repeat, so it is hoisted
            # out of the concentration loop (original re-set it each pass).
            repeat_str = str(repeat + 1)
            bode_axes.set_title(repeat_dict[repeat_str])
            nyquist_axes.set_title(repeat_dict[repeat_str])
            for i in range(0, len(concentrations)):
                # "0" is the blank (no-analyte) control.
                text = "Blank" if concentrations[i] == "0" else concentrations[i]
                # NOTE(review): the original set no_labels=False in BOTH
                # branches of `if i==0 and repeat==0`, so the condition was
                # dead; a single constant preserves that behavior. If only
                # the first panel was meant to carry labels, flip this for
                # i == 0 and repeat == 0.
                no_labels = False
                # Original chained `fitting_data=simulated_data=...`
                # assignments were pointless aliasing; dropped.
                fitting_data = results_file[concentrations[i]][repeat_str]["Data"]
                plot_frequencies = results_file[concentrations[i]][repeat_str]["Frequencies"]
                # Raw data: scatter only (line=False).
                plot_class.bode(fitting_data, plot_frequencies, compact_labels=True, ax=bode_axes, twinx=bode_twinx, lw=1, no_labels=no_labels, alpha=0.75, scatter=1, markersize=10, line=False)
                plot_class.nyquist(fitting_data, ax=nyquist_axes, label=text, orthonormal=False, lw=1, alpha=0.75, scatter=1, markersize=10, line=False)
                # Fitted model for this (model, mode): drawn as a line.
                simulated_data = results_file[concentrations[i]][repeat_str][model][mode]["Fit"]
                plot_class.bode(simulated_data, plot_frequencies, compact_labels=True, ax=bode_axes, twinx=bode_twinx, no_labels=no_labels)
                plot_class.nyquist(simulated_data, ax=nyquist_axes, orthonormal=False, label=text)
        # Single legend on the top-right panel covers all concentrations.
        plots[1][0, -1].legend()
        plots[0].set_size_inches(9, 9)
        plots[0].subplots_adjust(left=0.09,
                                 right=0.965,
                                 bottom=0.05,
                                 top=0.97,
                                 wspace=0.35,
                                 hspace=0.35)
        plots[0].savefig("{0}-Model_{1}_Mode_fits.png".format(model, mode), dpi=500)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.