text stringlengths 38 1.54M |
|---|
import sys, os
import requests
import urllib
import types
import re
import math
from operator import itemgetter
def last_word(sequence):
    """Return the 'last word' built from sequence (Code Jam 2016 Qual A rule).

    Starting from the first character, each subsequent character is appended
    when it is strictly smaller than the current first character, otherwise
    prepended (ties prepend, which is what maximizes the word).

    Args:
        sequence: the input string (may be empty).
    Returns:
        The resulting word ("" for empty input -- original crashed here).
    """
    if not sequence:
        return ""
    word = sequence[0]
    for ch in sequence[1:]:
        if ord(word[0]) > ord(ch):
            word = word + ch
        else:
            word = ch + word
    return word


def main():
    """Read 'A-large (1).in', solve every case, write 'jammer_1a_1.txt'."""
    # Context managers guarantee the handles are closed even on error.
    with open("A-large (1).in", "r") as fin:
        allconts = fin.read().split("\n")
    case_count = int(allconts[0])
    results = [last_word(allconts[i + 1]) for i in range(case_count)]
    with open("jammer_1a_1.txt", "w") as fout:
        for case_no, answer in enumerate(results, start=1):
            fout.write("Case #" + str(case_no) + ": " + answer + "\n")


if __name__ == "__main__":
    # Guard so importing this module (e.g. for testing) does no file I/O.
    main()
|
from pal.writer.printer.printer import PrinterWriter
class NonePrinterWriter(PrinterWriter):
    """A PrinterWriter that emits nothing (null-object implementation).

    Both declaration hooks are deliberate no-ops; use this writer when
    printer declarations should be suppressed entirely.
    """

    def declare_fieldset_printer(self, outfile, register, fieldset):
        # Intentionally emit no fieldset printer declaration.
        pass

    def declare_field_printer(self, outfile, register, field):
        # Intentionally emit no field printer declaration.
        pass
|
import sys
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QApplication, QDialog
from PyQt5.uic import loadUi
from PyQt5 import QtWidgets, uic, QtCore
from PyQt5.QtGui import QPixmap
from mysql.connector import Error
from datetime import datetime
import mysql.connector
from user_module import CL_userModule
class CL_user(QtWidgets.QDialog):
    """Dialog for creating and modifying SYS_USER records.

    SECURITY NOTE(review): database credentials are hard-coded in
    _FN_CONNECT(); move them to configuration before shipping.
    """

    def __init__(self):
        super(CL_user, self).__init__()

    def _FN_CONNECT(self):
        """Open a new PosDB connection (caller closes cursor, then connection)."""
        return mysql.connector.connect(host='localhost', database='PosDB',
                                       user='root', password='password',
                                       port='3306')

    def FN_LOAD_MODIFY(self):
        """Load the modify-user form, populate widgets, and wire signals."""
        loadUi('../Presentation/modifyUser.ui', self)
        self.FN_GET_USERS()
        self.FN_GET_USERID()
        self.FN_GET_USER()
        self.CMB_userName.currentIndexChanged.connect(self.FN_GET_USER)
        self.BTN_modifyUser.clicked.connect(self.FN_MODIFY_USER)
        self.CMB_branch.addItems(["1", "2", "3"])
        self.CMB_userType.addItems(["1", "2", "3"])
        self.CMB_userStatus.addItems(["0", "1"])

    def FN_LOAD_CREATE(self):
        """Load the create-user form, populate widgets, and wire signals."""
        loadUi('../Presentation/createUser.ui', self)
        self.setWindowTitle('Users')
        self.BTN_createUser.clicked.connect(self.FN_CREATE_USER)
        self.CMB_branch.addItems(["1", "2", "3"])
        self.CMB_userType.addItems(["1", "2", "3"])
        self.CMB_userStatus.addItems(["0", "1"])

    def FN_GET_USER(self):
        """Fill the form widgets with the record of the selected user."""
        self.FN_GET_USERID()
        self.id = self.LB_userID.text()
        connection = self._FN_CONNECT()
        mycursor = connection.cursor()
        try:
            mycursor.execute("select * from SYS_USER where user_id = %s",
                             (self.id,))
            record = mycursor.fetchone()
            print(record)
            # NOTE(review): column positions (2=name, 4=fullname, 5=hr id,
            # 1=branch, 11=type, 10=status) taken from the original code --
            # confirm against the SYS_USER schema.
            self.LE_name.setText(record[2])
            self.LE_fullName.setText(record[4])
            self.LE_hrId.setText(record[5])
            self.CMB_branch.setCurrentText(record[1])
            self.CMB_userType.setCurrentText(record[11])
            self.CMB_userStatus.setCurrentText(record[10])
            # Read rowcount BEFORE closing (the original read it after close).
            print(mycursor.rowcount, "record retrieved.")
        finally:
            # Close the cursor first, then its connection (original order
            # was reversed, which is fragile).
            mycursor.close()
            connection.close()

    def FN_MODIFY_USER(self):
        """Persist the edited user back to SYS_USER, then close the dialog."""
        self.id = self.LE_id.text()
        self.name = self.LE_name.text()
        self.password = self.LE_password.text()
        self.branch = self.CMB_branch.currentText()
        self.fullName = self.LE_fullName.text()
        self.hrId = self.LE_hrId.text()
        self.userType = self.CMB_userType.currentText()
        self.status = self.CMB_userStatus.currentText()
        connection = self._FN_CONNECT()
        mycursor = connection.cursor()
        try:
            changeDate = str(datetime.today().strftime('%Y-%m-%d-%H:%M-%S'))
            # USER_CHENGED_BY is the actual (misspelled) column name -- keep it.
            sql = "UPDATE SYS_USER set USER_NAME= %s , USER_PASSWORD= %s , BRANCH_NO = %s, USER_FULLNAME = %s , USER_HR_ID = %s, USER_CHANGED_ON = %s , USER_CHENGED_BY = %s, USER_STATUS = %s, USER_TYPE_ID = %s where USER_id= %s "
            val = (self.name, self.password, self.branch, self.fullName,
                   self.hrId, changeDate, CL_userModule.user_name, self.status,
                   self.userType, self.id)
            print(val)
            mycursor.execute(sql, val)
            connection.commit()
            print(mycursor.rowcount, "record Modified.")
        finally:
            mycursor.close()
            connection.close()
        self.close()

    def FN_GET_USERS(self):
        """Fill the user-name combo with all user names, ordered by id."""
        connection = self._FN_CONNECT()
        mycursor = connection.cursor()
        try:
            # Read-only SELECT: no commit required.
            mycursor.execute("SELECT USER_NAME FROM SYS_USER order by USER_ID asc")
            for row in mycursor.fetchall():
                self.CMB_userName.addItems([row[0]])
        finally:
            mycursor.close()
            connection.close()

    def FN_GET_USERID(self):
        """Look up the id of the selected user name and show it in LB_userID."""
        self.user = self.CMB_userName.currentText()
        connection = self._FN_CONNECT()
        mycursor = connection.cursor()
        try:
            mycursor.execute("SELECT USER_ID FROM SYS_USER WHERE USER_NAME = %s",
                             (self.user,))
            myresult = mycursor.fetchone()
            # str() guards against USER_ID coming back as an int -- setText
            # requires a string (the original passed the raw value).
            self.LB_userID.setText(str(myresult[0]))
        finally:
            mycursor.close()
            connection.close()

    def FN_CREATE_USER(self):
        """Insert a new SYS_USER row with the next available id, then close."""
        self.name = self.LE_name.text()
        self.password = self.LE_password.text()
        self.branch = self.CMB_branch.currentText()
        self.fullName = self.LE_fullName.text()
        self.hrId = self.LE_hrId.text()
        self.userType = self.CMB_userType.currentText()
        self.status = self.CMB_userStatus.currentText()
        connection = self._FN_CONNECT()
        mycursor = connection.cursor()
        try:
            # get max userid
            mycursor.execute("SELECT max(USER_ID) FROM SYS_USER")
            myresult = mycursor.fetchone()
            if myresult[0] is None:
                # NOTE(review): "1" as a string vs int below -- presumably the
                # column is varchar or the driver coerces; confirm.
                self.id = "1"
            else:
                self.id = int(myresult[0]) + 1
            creationDate = str(datetime.today().strftime('%Y-%m-%d-%H:%M-%S'))
            print(creationDate)
            sql = "INSERT INTO SYS_USER (USER_ID, BRANCH_NO, USER_NAME, USER_PASSWORD, USER_FULLNAME, USER_HR_ID, USER_CREATED_ON, USER_CREATED_BY, USER_CHANGED_ON, USER_CHENGED_BY,USER_STATUS, USER_TYPE_ID) VALUES ( %s, %s, %s, %s,%s, %s, %s, %s, %s, %s, %s, %s)"
            val = (self.id, self.branch, self.name, self.password,
                   self.fullName, self.hrId, creationDate,
                   CL_userModule.user_name, '', '', self.status, self.userType)
            mycursor.execute(sql, val)
            connection.commit()
            print(mycursor.rowcount, "record inserted.")
        finally:
            mycursor.close()
            connection.close()
        self.close()
|
#class to list
def salClassToList():
    """Append a salary for each class number in ``classNo`` to ``salary``.

    Fixes the original's missing ':' on the inner ``for`` (a syntax error).

    NOTE(review): relies on module-level globals ``classNo``, ``classlenth``,
    ``minSal``, ``maxSal`` and ``salary`` defined elsewhere -- confirm they
    exist before calling.  The appended amount scales with ``i`` (the position
    in classNo), not with the matched class ``j`` -- presumably intended, but
    worth verifying.
    """
    for i in range(len(classNo)):
        for j in range(classlenth):
            if classNo[i] == j:
                salary.append(minSal + (maxSal - minSal) * (i + 1))
|
'''
Longest decreasing subsequence
Given array of ints, find the longest subsequence that has all values in decreasing order.
Also return the values themselves.
Examples:
Input: arr[] = [15, 27, 14, 38, 63, 55, 46, 65, 85]
Output: 3
Explanation: The longest decreasing sub sequence is [63, 55, 46]
Input: arr[] = [50, 3, 10, 7, 40, 80]
Output: 3
Explanation: The longest decreasing subsequence is [50, 10, 7]
https://www.geeksforgeeks.org/longest-decreasing-subsequence/
'''
class Prob:
    """Longest strictly decreasing subsequence (see module docstring)."""

    @staticmethod
    def longestDecreasingSubsequence(array):
        """Return [length, values] of the longest strictly decreasing subsequence.

        Standard O(n^2) dynamic programming:
        longestUpToVal[i]    = best subsequence length ending at array[i]
        longestDecIndices[i] = index of the previous element on that path

        Args:
            array: list of comparable values (may be empty).
        Returns:
            [length, subsequence values]; [0, []] for empty input
            (the original raised IndexError on an empty array).
        """
        if not array:
            return [0, []]
        longestUpToVal = [1] * len(array)
        longestDecIndices = [None] * len(array)
        longestSeqInd = 0  # index where the best subsequence so far ends
        for i, inputVal in enumerate(array):
            for j in range(i):
                # Extend the chain ending at j when array[j] can precede array[i].
                if array[j] > inputVal and longestUpToVal[j] + 1 > longestUpToVal[i]:
                    longestUpToVal[i] = longestUpToVal[j] + 1
                    longestDecIndices[i] = j
            # (debug print removed -- it ran once per element)
            if longestUpToVal[i] > longestUpToVal[longestSeqInd]:
                longestSeqInd = i
        # Walk back through predecessor links to recover the values in order.
        longestSubseq = []
        tmpInd = longestSeqInd
        while tmpInd is not None:
            longestSubseq.insert(0, array[tmpInd])
            tmpInd = longestDecIndices[tmpInd]
        return [max(longestUpToVal), longestSubseq]

    @staticmethod
    def test1():
        """Smoke test; expected answer is [3, [63, 55, 46]]."""
        # array = [50, 3, 10, 7, 40, 80]
        array = [15, 27, 14, 38, 63, 55, 46, 65, 85]
        ans = Prob.longestDecreasingSubsequence(array)
        print("test1: ans: ", ans)


Prob.test1()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
11. Replace tabs with spaces
Replace each tab character with a single space.  (Verify the result with the
sed, tr, or expand commands.)
"""
import sys
import fileinput

if __name__ == '__main__':
    # fileinput.input("-") reads from standard input, like the original.
    for line in fileinput.input("-"):
        # One space per tab; str.replace is a single C-level pass and replaces
        # the original per-character list comprehension.
        print(line.replace("\t", " "), end='')
from uxhub.models import Comment, ChangingComment, User, Milestone, ChangingMilestone, Issue, ChangingIssue
def create_comment_event(pk):
    """Snapshot the current state of a comment into a ChangingComment event."""
    source = Comment.objects.get(pk=pk)
    event = ChangingComment(
        description=source.description,
        issues=source.issues,
        author=source.author,
        comment=source,
    )
    event.save()
def create_milestone_event(pk, auth_user):
    """Record a ChangingMilestone snapshot of a milestone, attributed to auth_user."""
    author = User.objects.get(auth_user=auth_user)
    snapshot = Milestone.objects.get(pk=pk)
    ChangingMilestone(
        milestones=snapshot,
        name=snapshot.name,
        projects=snapshot.projects,
        start_date=snapshot.start_date,
        end_date=snapshot.end_date,
        author=author,
    ).save()
def create_issue_event(pk, auth_user):
    """Record a ChangingIssue snapshot for an issue and copy its assignees."""
    author = User.objects.get(auth_user=auth_user)
    tracked = Issue.objects.get(pk=pk)
    event = ChangingIssue(issues=tracked, new_state=tracked.state, author=author)
    event.save()
    # M2M fields can only be set after the event row exists.
    event.assignees.set(tracked.assignee.all())
|
# coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.bitmovin_response import BitmovinResponse
from bitmovin_api_sdk.models.mjpeg_video_configuration import MjpegVideoConfiguration
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
from bitmovin_api_sdk.encoding.configurations.video.mjpeg.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.configurations.video.mjpeg.mjpeg_video_configuration_list_query_params import MjpegVideoConfigurationListQueryParams
class MjpegApi(BaseApi):
    # REST wrapper for the /encoding/configurations/video/mjpeg endpoints.
    @poscheck_except(2)
    def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
        # type: (str, str, str, BitmovinApiLoggerBase) -> None
        super(MjpegApi, self).__init__(
            api_key=api_key,
            tenant_org_id=tenant_org_id,
            base_url=base_url,
            logger=logger
        )
        # Sub-API for custom data attached to MJPEG configurations.
        self.customdata = CustomdataApi(
            api_key=api_key,
            tenant_org_id=tenant_org_id,
            base_url=base_url,
            logger=logger
        )

    def create(self, mjpeg_video_configuration, **kwargs):
        # type: (MjpegVideoConfiguration, dict) -> MjpegVideoConfiguration
        """Create MJPEG Codec Configuration

        :param mjpeg_video_configuration: The MJPEG Codec Configuration to be created
        :type mjpeg_video_configuration: MjpegVideoConfiguration, required
        :return: MJPEG Video Configuration
        :rtype: MjpegVideoConfiguration
        """
        return self.api_client.post(
            '/encoding/configurations/video/mjpeg',
            mjpeg_video_configuration,
            type=MjpegVideoConfiguration,
            **kwargs
        )

    def delete(self, configuration_id, **kwargs):
        # type: (string_types, dict) -> BitmovinResponse
        """Delete MJPEG Codec Configuration

        :param configuration_id: Id of the codec configuration
        :type configuration_id: string_types, required
        :return: Id of the codec configuration
        :rtype: BitmovinResponse
        """
        return self.api_client.delete(
            '/encoding/configurations/video/mjpeg/{configuration_id}',
            path_params={'configuration_id': configuration_id},
            type=BitmovinResponse,
            **kwargs
        )

    def get(self, configuration_id, **kwargs):
        # type: (string_types, dict) -> MjpegVideoConfiguration
        """MJPEG Codec Configuration Details

        :param configuration_id: Id of the codec configuration
        :type configuration_id: string_types, required
        :return: MJPEG Video Configuration
        :rtype: MjpegVideoConfiguration
        """
        return self.api_client.get(
            '/encoding/configurations/video/mjpeg/{configuration_id}',
            path_params={'configuration_id': configuration_id},
            type=MjpegVideoConfiguration,
            **kwargs
        )

    def list(self, query_params=None, **kwargs):
        # type: (MjpegVideoConfigurationListQueryParams, dict) -> MjpegVideoConfiguration
        """List MJPEG Configurations

        :param query_params: Query parameters
        :type query_params: MjpegVideoConfigurationListQueryParams
        :return: List of MJPEG codec configurations
        :rtype: MjpegVideoConfiguration
        """
        return self.api_client.get(
            '/encoding/configurations/video/mjpeg',
            query_params=query_params,
            pagination_response=True,
            type=MjpegVideoConfiguration,
            **kwargs
        )
|
from django.db import transaction
from django.db.models.query import QuerySet, ValuesListQuerySet, ValuesQuerySet
from django.db.models.query_utils import deferred_class_factory
from django.db.models.sql import UpdateQuery
from django.db.models.sql.compiler import SQLUpdateCompiler, SQLCompiler
from django.db.models.sql.constants import MULTI
from django.db import connections
class SQLUpdateReturningCompiler(SQLUpdateCompiler):
    """UPDATE compiler that tacks a RETURNING clause onto the statement."""

    def as_sql(self):
        """Compile the UPDATE, then append ``RETURNING <columns>``."""
        base_sql, params = super(SQLUpdateReturningCompiler, self).as_sql()
        returning_cols = ', '.join(self.get_returning_columns())
        return base_sql.rstrip() + ' RETURNING ' + returning_cols, params

    def get_returning_columns(self):
        # Same column list a plain SELECT over this query would produce.
        return self.get_columns(False)

    def execute_sql(self, result_type):
        # Deliberate grandparent call: bypass SQLUpdateCompiler.execute_sql so
        # the SQLCompiler implementation hands back the result rows.
        return super(SQLUpdateCompiler, self).execute_sql(result_type)
class UpdateReturningQuery(UpdateQuery):
    """UpdateQuery that compiles through SQLUpdateReturningCompiler."""

    compiler_class = SQLUpdateReturningCompiler

    def get_compiler(self, using=None, connection=None):
        """Build our RETURNING-aware compiler for the given alias/connection."""
        if using is None and connection is None:
            raise ValueError('Need either using or connection')
        if using:
            # An explicit alias wins over a passed-in connection.
            connection = connections[using]
        return self.compiler_class(self, connection, using)
class UpdateReturningMethods(object):
    """
    Extends querysets with methods to return rows from sql updates
    (PostgreSQL-style ``UPDATE ... RETURNING``).
    """

    def _clone(self, klass=None, setup=False, **kwargs):
        """Changing a given klass to the matching update_returning one.

        Keeps these mixin methods available after only()/defer()/values()/
        values_list() chaining, which would otherwise clone into the stock
        queryset classes.
        """
        overwrites = {
            'QuerySet': UpdateReturningQuerySet,
            'ValuesQuerySet': UpdateReturningValuesQuerySet,
            'ValuesListQuerySet': UpdateReturningValuesListQuerySet,
        }
        if klass and klass.__name__ in overwrites:
            klass = overwrites[klass.__name__]
        return super(UpdateReturningMethods, self)._clone(klass, setup, **kwargs)

    def update_returning(self, **kwargs):
        """
        An update that returns the rows that have been updated as an iterator.

        The type of the returned objects can be controlled by preceding
        queryset methods, like in normal querysets.  Preceding methods that
        change the type of result items are "only", "defer", "values_list"
        and "values"; if none of those is used the result items will be full
        model instances.

        For example model.objects.values_list('id', flat=True)
        .update_returning(published=True) will return an iterator with the
        ids of the changed objects.

        NOTE: this is a generator -- the UPDATE does not execute until the
        result is iterated (use update_returning_list() for eager execution).
        """
        self._for_write = True
        query = self.query.clone(UpdateReturningQuery)
        query.add_update_values(kwargs)
        # Manage the transaction ourselves unless the caller already does.
        if not transaction.is_managed(using=self.db):
            transaction.enter_transaction_management(using=self.db)
            forced_managed = True
        else:
            forced_managed = False
        try:
            cursor = query.get_compiler(self.db).execute_sql(MULTI)
            if forced_managed:
                transaction.commit(using=self.db)
            else:
                transaction.commit_unless_managed(using=self.db)
        finally:
            if forced_managed:
                transaction.leave_transaction_management(using=self.db)
        self._result_cache = None  # cached results are stale after an update
        result_factory = self._returning_update_result_factory()
        # execute_sql(MULTI) yields chunks of rows; flatten them.
        for rows in cursor:
            for row in rows:
                yield result_factory(row)

    def update_returning_list(self, **kwargs):
        """Eager variant of update_returning(): returns a list."""
        return list(self.update_returning(**kwargs))

    def _returning_update_result_factory(self):
        # Default: hand rows back unchanged; subclasses override.
        return lambda x: x
class UpdateReturningQuerySet(UpdateReturningMethods, QuerySet):

    def _returning_update_result_factory(self):
        """Returns a mapper function to convert the iterated rows into model
        instances or deferred model instances, depending on the use of
        "only" or "defer".
        """
        fill_cache = False  # always False for now!
        only_load = self.query.get_loaded_field_names()
        fields = self.model._meta.fields
        load_fields = []
        if only_load:
            for field, model in self.model._meta.get_fields_with_model():
                if model is None:
                    model = self.model
                try:
                    if field.name in only_load[model]:
                        # Add a field that has been explicitly included
                        load_fields.append(field.name)
                except KeyError:
                    # Model wasn't explicitly listed in the only_load table
                    # Therefore, we need to load all fields from this model
                    load_fields.append(field.name)
        skip = None
        if load_fields:
            # Partition fields into loaded (init_list) and deferred (skip).
            skip = set()
            init_list = []
            for field in fields:
                if field.name not in load_fields:
                    skip.add(field.attname)
                else:
                    init_list.append(field.attname)
            model_cls = deferred_class_factory(self.model, skip)
        assert self._for_write, "_for_write must be True"
        db = self.db
        if skip:
            # Deferred instance: construct by keyword from the loaded fields.
            factory = lambda row: model_cls(**dict(zip(init_list, row)))
        else:
            # Full instance: row columns are in field order.
            model = self.model
            factory = lambda row: model(*row)

        def mapper(row):
            obj = factory(row)
            # Mark the instance as loaded-from-db on the right alias.
            obj._state.db = db
            obj._state.adding = False
            return obj

        return mapper
class UpdateReturningValuesQuerySet(UpdateReturningMethods, ValuesQuerySet):
    """values() variant: each returned row becomes a field-name -> value dict."""

    def _returning_update_result_factory(self):
        names = self.field_names

        def row_to_dict(row):
            return dict(zip(names, row))

        return row_to_dict
class UpdateReturningValuesListQuerySet(UpdateReturningMethods, ValuesListQuerySet):
    """values_list() variant: yields tuples, or bare values when flat=True."""

    def _returning_update_result_factory(self):
        # flat=True with a single field unwraps each row to its only value.
        if self.flat and len(self._fields) == 1:
            return lambda row: row[0]
        return tuple
|
"""High-level methods to obtain information about accounts."""
from typing import Any, Dict, Union, cast
from xrpl.clients import Client, XRPLRequestFailureException
from xrpl.models.requests import AccountInfo
from xrpl.models.response import Response
def does_account_exist(address: str, client: Client) -> bool:
    """
    Query the ledger for whether the account exists.

    Args:
        address: the account to query.
        client: the network client used to make network calls.

    Returns:
        Whether the account exists on the ledger.

    Raises:
        XRPLRequestFailureException: if the transaction fails.
    """
    try:
        get_account_info(address, client)
    except XRPLRequestFailureException as error:
        # "actNotFound" is rippled's error code for a missing account.
        if error.error == "actNotFound":
            return False
        raise
    return True
def get_next_valid_seq_number(address: str, client: Client) -> int:
    """
    Query the ledger for the next available sequence number for an account.

    Args:
        address: the account to query.
        client: the network client used to make network calls.

    Returns:
        The next valid sequence number for the address.
    """
    account_root = get_account_root(address, client)
    return cast(int, account_root["Sequence"])
def get_balance(address: str, client: Client) -> int:
    """
    Query the ledger for the balance of the given account.

    Args:
        address: the account to query.
        client: the network client used to make network calls.

    Returns:
        The balance of the address.
    """
    account_root = get_account_root(address, client)
    return int(account_root["Balance"])
def get_account_root(address: str, client: Client) -> Dict[str, Union[int, str]]:
    """
    Query the ledger for the AccountRoot object associated with a given address.

    Args:
        address: the account to query.
        client: the network client used to make network calls.

    Returns:
        The AccountRoot dictionary for the address.
    """
    info = get_account_info(address, client)
    account_data = cast(Dict[str, Any], info.result)["account_data"]
    return cast(Dict[str, Union[int, str]], account_data)
def get_account_info(address: str, client: Client) -> Response:
    """
    Query the ledger for account info of given address.

    Args:
        address: the account to query.
        client: the network client used to make network calls.

    Returns:
        The account info for the address.

    Raises:
        XRPLRequestFailureException: if the rippled API call fails.
    """
    request = AccountInfo(
        account=address,
        ledger_index="validated",
    )
    response = client.request(request)
    if not response.is_successful():
        raise XRPLRequestFailureException(cast(Dict[str, Any], response.result))
    return response
|
#################################################################################
# FOQUS Copyright (c) 2012 - 2023, by the software owners: Oak Ridge Institute
# for Science and Education (ORISE), TRIAD National Security, LLC., Lawrence
# Livermore National Security, LLC., The Regents of the University of
# California, through Lawrence Berkeley National Laboratory, Battelle Memorial
# Institute, Pacific Northwest Division through Pacific Northwest National
# Laboratory, Carnegie Mellon University, West Virginia University, Boston
# University, the Trustees of Princeton University, The University of Texas at
# Austin, URS Energy & Construction, Inc., et al. All rights reserved.
#
# Please see the file LICENSE.md for full copyright and license information,
# respectively. This file is also available online at the URL
# "https://github.com/CCSI-Toolset/FOQUS".
#################################################################################
import os
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtWidgets import (
QApplication,
QTableWidget,
QTableWidgetItem,
QComboBox,
QCheckBox,
QMessageBox,
QAbstractItemView,
QSpinBox,
QFileDialog,
)
from PyQt5.QtGui import QColor
import numpy as np
from foqus_lib.framework.uq.SampleData import *
from foqus_lib.framework.uq.LocalExecutionModule import *
class InputPriorTable(QTableWidget):
    """Table widget for editing input-variable priors/distributions in FOQUS UQ.

    The column layout depends on the mode passed to init().
    """

    # presumably emitted when a row's type combobox changes (gated by
    # useTypeChangedSignal) -- confirm against updatePriorTableRow.
    typeChanged = pyqtSignal()
    pdfChanged = pyqtSignal()  # emitted after a cell value is edited (see change())
    # Usage modes; init(mode=...) selects the column layout for each.
    SIMSETUP, RSANALYSIS, INFERENCE, OUU, ODOE = list(range(5))
def __init__(self, parent=None):
    """Construct the (empty) table; call init() afterwards to populate it."""
    super(InputPriorTable, self).__init__(parent)
    self.typeItems = []  # labels for the per-row "type" combobox
    self.format = "%g"  # numeric format for table entries in UQ Toolbox
    self.paramColWidth = 126  # fixed width for the p1/p2 parameter columns
    self.labelWidth = 62
    self.paramWidth = 62
    self.obsTableValues = {}  # (row, col) -> last observed cell text
    self.sampleFiles = []
    self.dispSampleFiles = []
    self.sampleNumInputs = []
    self.epistemicMode = False
    self.rsEvalMode = False
    self.useTypeChangedSignal = True
    # You must call init() separately.
    # Put code to upgrade from QTableWidget to InputPriorTable here as well as init table
def init(self, data, mode, wizardMode=False, viewOnly=False):
    """Populate the table from a SampleData-like object.

    Args:
        data: SampleData-like object (getInputNames/Types/Distributions/...).
        mode: one of SIMSETUP, RSANALYSIS, INFERENCE, OUU, ODOE.
        wizardMode: wizard variant (fewer columns in INFERENCE mode).
        viewOnly: render widgets disabled (view-only ensembles).

    NOTE(review): indentation was reconstructed during review -- nesting of
    the viewOnly styling branches was inferred from context; confirm against
    version control.
    """
    self.blockSignals(True)
    self.data = data
    self.mode = mode
    # self.simSetup = (mode == InputPriorTable.SIMSETUP)
    # self.inferenceTable = (mode == InputPriorTable.INFERENCE)
    # self.rsAnalysis = (mode == InputPriorTable.RSANALYSIS)
    # self.ouu = (mode == InputPriorTable.OUU)
    self.wizardMode = wizardMode  # RSANALYSIS mode
    self.viewOnly = viewOnly  # SIMSETUP mode : Generate ensemble
    # populate prior table
    inVarNames = data.getInputNames()
    inVarTypes = data.getInputTypes()
    nInputs = data.getNumInputs()
    nVariableInputs = inVarTypes.count(Model.VARIABLE)
    self.dist = data.getInputDistributions()
    dist = self.dist
    self.setupLB()
    self.setupUB()
    self.defaults = data.getInputDefaults()
    self.setupDists()
    # Column layout depends on the mode the table is used in.
    if self.mode == InputPriorTable.INFERENCE:
        col_index = {"name": 0, "type": 1, "check": 2, "value": 3}
        if not wizardMode:
            col_index.update({"pdf": 4, "p1": 5, "p2": 6, "min": 7, "max": 8})
    elif self.mode == InputPriorTable.SIMSETUP:
        col_index = {
            "name": 0,
            "type": 1,
            "value": 2,
            "min": 3,
            "max": 4,
            "pdf": 5,
            "p1": 6,
            "p2": 7,
        }
    elif self.mode == InputPriorTable.RSANALYSIS:  # RS Analysis
        col_index = {
            "name": 0,
            "type": 1,
            "value": 2,
            "pdf": 3,
            "p1": 4,
            "p2": 5,
            "min": 6,
            "max": 7,
        }
    elif self.mode == InputPriorTable.ODOE:  # ODOE
        col_index = {
            "name": 0,
            "type": 1,
            "value": 2,
            "pdf": 3,
            "p1": 4,
            "p2": 5,
            "min": 6,
            "max": 7,
        }
    else:  # OUU
        col_index = {
            "check": 0,
            "name": 1,
            "type": 2,
            "scale": 3,
            "min": 4,
            "max": 5,
            "value": 6,
            "pdf": 7,
            "p1": 8,
            "p2": 9,
        }
    self.col_index = col_index
    flowsheetFixed = data.getInputFlowsheetFixed()
    # rowCount = 0
    if self.mode == InputPriorTable.SIMSETUP:
        rowCount = nInputs
    else:
        rowCount = nVariableInputs
    # for i in xrange(nInputs):
    #     if not flowsheetFixed[i] and inVarTypes[i] != Model.FIXED:
    #         rowCount += 1
    self.setRowCount(rowCount)
    self.setColumnCount(len(col_index))
    if self.mode == InputPriorTable.RSANALYSIS:
        self.setColumnHidden(col_index["type"], True)
        self.setColumnHidden(col_index["value"], True)
    r = 0  # row index
    for i in range(nInputs):
        # do not add fixed input variables to table
        if self.mode != InputPriorTable.SIMSETUP and inVarTypes[i] == Model.FIXED:
            continue
        if not dist:
            # No saved distributions: default every input to UNIFORM.
            dtype = Distribution.UNIFORM
            d = Distribution(dtype)
        else:
            d = dist[i]  # distribution
            dtype = d.getDistributionType()  # distribution type
        if dtype == Distribution.SAMPLE:
            # Sample-based distribution: bounds come from the sample file.
            sampleFile, sampleIndex = d.getParameterValues()
            if sampleFile.endswith(".csv"):
                data = LocalExecutionModule.readSampleFromCsvFile(sampleFile, False)
                sampleData = data.getInputData()
            else:
                data = LocalExecutionModule.readDataFromSimpleFile(sampleFile)
                sampleData = data[0]
            # compute min/max from sample file
            sdata = sampleData[:, sampleIndex - 1]
            # TO DO: insert error handling for if sampleData file does not exist or if incorrect # of columns
            xmin = np.min(sdata)
            xmax = np.max(sdata)
            sampleIndex += 1
        else:
            xmin = self.lb[i]
            xmax = self.ub[i]
        p1val, p2val = d.getParameterValues()  # distribution parameter values
        p1name, p2name = Distribution.getParameterNames(
            dtype
        )  # distribution parameter names
        nameMask = ~Qt.ItemIsEnabled
        # add input name
        item = QTableWidgetItem(inVarNames[i])
        flags = item.flags()
        item.setFlags(flags & nameMask)
        item.setForeground(Qt.black)
        self.setItem(r, col_index["name"], item)
        # add type
        comboFixed = False
        if "type" in col_index:
            combobox = QComboBox()
            combobox.addItems(self.typeItems)
            if self.mode == InputPriorTable.SIMSETUP:
                if inVarTypes[i] == Model.FIXED:
                    combobox.setCurrentIndex(1)
            combobox.setProperty("row", r)
            combobox.setProperty("col", col_index["type"])
            combobox.setMinimumContentsLength(8)
            combobox.currentIndexChanged[int].connect(self.updatePriorTableRow)
            if self.viewOnly:
                combobox.setEnabled(False)
            self.setCellWidget(r, col_index["type"], combobox)
            if combobox.currentText() == "Fixed":
                comboFixed = True
            if self.mode == InputPriorTable.ODOE:
                combobox.removeItem(1)
        # add display checkbox
        if "check" in col_index:
            chkbox = QCheckBox("")
            if self.mode == InputPriorTable.OUU:
                chkbox.setChecked(False)
            else:
                chkbox.setChecked(True)
            self.setCellWidget(r, col_index["check"], chkbox)
        # add fixed value column
        if "value" in col_index:
            if self.defaults[i] is None:
                s = ""
            else:
                s = self.format % self.defaults[i]
            if inVarTypes[i] == Model.FIXED or comboFixed:
                item = QTableWidgetItem(s)
                if self.viewOnly:
                    # Disabled cell, but keep the text readable.
                    flags = item.flags()
                    item.setFlags(flags & ~Qt.ItemIsEnabled)
                    item.setForeground(Qt.black)
                    item.setBackground(Qt.white)
                item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
                self.setItem(r, col_index["value"], item)
                self.obsTableValues[(r, col_index["value"])] = s
            else:
                self.clearCell(r, col_index["value"], s, createItem=True)
        # add scale column
        if "scale" in col_index:
            self.clearCell(r, col_index["scale"], createItem=True)
        # add distribution
        if "pdf" in col_index:
            combobox = QComboBox()
            distNames = Distribution.fullNames
            # if self.mode in (InputPriorTable.INFERENCE, InputPriorTable.OUU):
            #     distNames = distNames[0:-1] # omit SAMPLE (not currently supported)
            combobox.addItems(distNames)
            combobox.setCurrentIndex(dtype)
            combobox.setProperty("row", r)
            combobox.setProperty("col", col_index["pdf"])
            combobox.currentIndexChanged[int].connect(self.updatePriorTableRow)
            combobox.setMinimumContentsLength(10)
            typeCombo = self.cellWidget(r, col_index["type"])
            if self.viewOnly:
                combobox.setEnabled(False)
            else:
                # Enable the pdf combo only for variable-like rows (or when
                # the type column is hidden, e.g. RSANALYSIS mode).
                text = typeCombo.currentText()
                if (
                    "type" in col_index and self.isColumnHidden(col_index["type"])
                ) or text in ["Variable", "Aleatory", "UQ: Continuous (Z4)"]:
                    combobox.setEnabled(True)
                else:
                    combobox.setEnabled(False)
            self.setCellWidget(r, col_index["pdf"], combobox)
        # add param1
        if "p1" in col_index:
            if p1name is not None:
                self.activateCell(r, col_index["p1"], "", True)
                self.activateParamCell(r, 1, p1name, p1val)
            else:
                self.clearParamCell(r, 1)
        # add param2
        if "p2" in col_index:
            if p2name is not None:
                self.activateCell(r, col_index["p2"], "", True)
                self.activateParamCell(r, 2, p2name, p2val)
            else:
                self.clearParamCell(r, 2)
        # add min/max
        if dtype == Distribution.UNIFORM or self.mode == InputPriorTable.SIMSETUP:
            c = Qt.white
        else:
            c = Qt.lightGray  # gray bounds for non-uniform distributions
        if "min" in col_index:
            s = self.format % xmin
            if inVarTypes[i] == Model.FIXED or comboFixed:
                self.clearCell(r, col_index["min"], s, createItem=True)
            else:
                item = QTableWidgetItem(s)
                if self.viewOnly:
                    flags = item.flags()
                    item.setFlags(flags & ~Qt.ItemIsEnabled)
                    item.setForeground(Qt.black)
                if len(s.strip()) == 0:
                    item.setBackground(Qt.red)  # missing bound -> flag in red
                else:
                    item.setBackground(c)
                item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
                self.setItem(r, col_index["min"], item)
                self.obsTableValues[(r, col_index["min"])] = s
        if "max" in col_index:
            s = self.format % xmax
            if inVarTypes[i] == Model.FIXED or comboFixed:
                self.clearCell(r, col_index["max"], s, createItem=True)
            else:
                item = QTableWidgetItem(s)
                if self.viewOnly:
                    flags = item.flags()
                    item.setFlags(flags & ~Qt.ItemIsEnabled)
                    item.setForeground(Qt.black)
                if len(s.strip()) == 0:
                    item.setBackground(Qt.red)
                else:
                    item.setBackground(c)
                item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
                self.setItem(r, col_index["max"], item)
                self.obsTableValues[(r, col_index["max"])] = s
        r = r + 1  # increment row
    self.resizeColumns()
    self.cellChanged.connect(self.change)
    self.blockSignals(False)
def setupLB(self):
    """Cache input lower bounds, plus the subset for VARIABLE inputs."""
    types = self.data.getInputTypes()
    self.lb = self.data.getInputMins()
    self.lbVariable = [
        bound
        for idx, bound in enumerate(self.lb)
        if types[idx] == Model.VARIABLE
    ]
def setupUB(self):
    """Cache input upper bounds, plus the subset for VARIABLE inputs."""
    types = self.data.getInputTypes()
    self.ub = self.data.getInputMaxs()
    self.ubVariable = [
        bound
        for idx, bound in enumerate(self.ub)
        if types[idx] == Model.VARIABLE
    ]
def setupDists(self):
    """Cache the distributions belonging to VARIABLE inputs only.

    Fixes the original's ``== None`` identity test (use ``is None``).
    """
    if self.dist is None:
        self.distVariable = None
    else:
        inVarTypes = self.data.getInputTypes()
        self.distVariable = [
            d
            for i, d in enumerate(self.dist)
            if inVarTypes[i] == Model.VARIABLE
        ]
def resizeColumns(self):
    """Resize to contents, then pin the parameter columns to a fixed width."""
    self.resizeColumnsToContents()
    cols = self.col_index
    for key in ("p1", "p2"):
        if key in cols:
            self.setColumnWidth(cols[key], self.paramColWidth)
def change(self, row, col, hideError=False):  # check values
    """Validate an edited cell, color it, and warn the user on bad input.

    Red background = non-numeric value or min >= max; white = valid.
    Out-of-bounds values are tolerated (message prepared but not shown).

    NOTE(review): indentation reconstructed during review -- the attachment
    of the trailing white/normalize branch and the final resize/emit was
    inferred from context; confirm against version control.
    """
    item = self.item(row, col)
    if item is not None:
        text = item.text()
        # Ignore spurious cellChanged signals that don't change the text.
        if (row, col) in self.obsTableValues and text == self.obsTableValues[
            (row, col)
        ]:
            return
        self.obsTableValues[(row, col)] = text
        if len(text) > 0 or (
            "min" in self.col_index
            and col in (self.col_index["min"], self.col_index["max"])
        ):
            if "min" in self.col_index:
                minItem = self.item(row, self.col_index["min"])
                maxItem = self.item(row, self.col_index["max"])
            showMessage = False
            outOfBounds = False
            minMoreThanMax = False
            if not self.isnumeric(text):
                showMessage = True
                message = "Value must be a number!"
                outOfBounds = True
            else:
                value = float(item.text())
                # Out-of-bounds is tolerated: message assembled but flags
                # left False, so no warning and no red highlight.
                if value < self.lbVariable[row] or value > self.ubVariable[row]:
                    showMessage = False
                    message = "Value outside bounds. Your response surface will be extrapolating, which could lead to lower accuracy. Your new bounds will not be saved to the flowsheet."
                    outOfBounds = False
                if (
                    "min" in self.col_index
                    and "max" in self.col_index
                    and col in (self.col_index["min"], self.col_index["max"])
                ):
                    if minItem is not None and maxItem is not None:
                        minVal = float(minItem.text())
                        maxVal = float(maxItem.text())
                        if minVal >= maxVal:
                            minMoreThanMax = True
                            showMessage = True
                            message = (
                                "Minimum value must be less than maximum value!"
                            )
            if showMessage and not hideError:
                msgbox = QMessageBox()
                msgbox.setWindowTitle("UQ/Opt GUI Warning")
                msgbox.setText(message)
                msgbox.setIcon(QMessageBox.Warning)
                response = msgbox.exec_()
                self.setFocus()
                if outOfBounds:
                    # item.setForeground(QColor(192,0,0))
                    item.setBackground(QColor(255, 0, 0))
                    self.setCurrentCell(row, col)
                elif minMoreThanMax:
                    # minItem.setForeground(QColor(192,0,0))
                    # maxItem.setForeground(QColor(192,0,0))
                    minItem.setBackground(QColor(255, 0, 0))
                    maxItem.setBackground(QColor(255, 0, 0))
            else:
                # Valid entry: restore normal coloring.
                # item.setForeground(QColor(0,0,0))
                item.setBackground(QColor(255, 255, 255))
                if "min" in self.col_index and col in (
                    self.col_index["min"],
                    self.col_index["max"],
                ):
                    if minItem is not None and maxItem is not None:
                        minVal = float(minItem.text())
                        maxVal = float(maxItem.text())
                        if self.mode == InputPriorTable.SIMSETUP:
                            # minItem.setForeground(QColor(0,0,0))
                            # maxItem.setForeground(QColor(0,0,0))
                            minItem.setBackground(QColor(255, 255, 255))
                            maxItem.setBackground(QColor(255, 255, 255))
                        else:
                            if (
                                minVal < self.ubVariable[row]
                                and maxVal > self.lbVariable[row]
                            ):
                                # minItem.setForeground(QColor(0,0,0))
                                # maxItem.setForeground(QColor(0,0,0))
                                minItem.setBackground(QColor(255, 255, 255))
                                maxItem.setBackground(QColor(255, 255, 255))
        self.resizeColumns()
        self.pdfChanged.emit()
def setAleatoryEpistemicMode(self, on):
    """Toggle aleatory/epistemic mode.

    The type and value columns are shown only while the mode is enabled;
    the distribution columns (pdf, p1, p2, min, max) are always shown.
    Every row is refreshed afterwards with the typeChanged signal muted.
    """
    if self.epistemicMode == on:
        return  # already in the requested mode
    self.epistemicMode = on
    cols = self.col_index
    # Type/value columns are visible only in epistemic mode.
    for key in ("type", "value"):
        self.setColumnHidden(cols[key], not on)
    # Distribution columns stay visible either way.
    for key in ("pdf", "p1", "p2", "min", "max"):
        self.setColumnHidden(cols[key], False)
    # Refresh every row without emitting typeChanged per row.
    self.useTypeChangedSignal = False
    for r in range(self.rowCount()):
        self.updateRow(r, cols["type"])
    self.useTypeChangedSignal = True
def setSolventFitMode(self, on):
    """Restrict (on) or restore (off) the distribution choices offered by
    each row's pdf combobox for SolventFit.

    NOTE(review): assumes the combobox items mirror Distribution.fullNames
    in order -- confirm against the code that builds the table.
    """
    distNames = Distribution.fullNames
    if "pdf" in self.col_index:
        for r in range(self.rowCount()):
            combobox = self.cellWidget(r, self.col_index["pdf"])
            # Change distributions
            count = combobox.count()
            if on:  # solvent fit. Only use first 3 items and fifth
                if count > 4:
                    index = combobox.currentIndex()
                    # Current selection is about to be removed: reset to item 0.
                    if index == 3 or index > 4:
                        combobox.setCurrentIndex(0)
                    # Drop everything past the fifth item, then the fourth.
                    for i in range(count - 5):
                        combobox.removeItem(5)
                    combobox.removeItem(3)
            else:
                if count < len(distNames):
                    # Re-insert the fourth item, then append the rest.
                    combobox.insertItem(3, distNames[3])
                    for name in distNames[count + 1 :]:
                        combobox.addItem(name)
def setRSEvalMode(self, on):
    """Toggle response-surface evaluation mode.

    In RS-eval mode only the value column is visible; otherwise the type
    and distribution columns are shown instead. Rows are refreshed with
    the typeChanged signal muted.
    """
    if self.rsEvalMode == on:
        return  # already in the requested mode
    self.rsEvalMode = on
    cols = self.col_index
    self.setColumnHidden(cols["value"], not on)
    # Everything except the value column is hidden while in RS-eval mode.
    for key in ("type", "pdf", "p1", "p2", "min", "max"):
        self.setColumnHidden(cols[key], on)
    # Refresh every row without emitting typeChanged per row.
    self.useTypeChangedSignal = False
    for r in range(self.rowCount()):
        self.updateRow(r, cols["type"])
    self.useTypeChangedSignal = True
def updatePriorTableRow(self):
    """Slot: refresh the table row that owns the widget which fired."""
    sender = self.sender()  # the pdf combobox that emitted the signal
    self.updateRow(sender.property("row"), sender.property("col"))
def updateRow(self, r, c):
    """Refresh row *r* after a change in column *c*.

    Enables/disables the value, min/max, scale, pdf and parameter cells so
    they match the row's current type selection and distribution. The
    cellChanged handler is disconnected for the duration to avoid
    re-entrant updates, and reconnected on every exit path.
    """
    try:
        self.cellChanged.disconnect(self.change)
    except:  # already disconnected: another update is in progress, bail out
        return
    col_index = self.col_index
    # get selected row of simulationTable
    data = self.data
    inVarNames = list(data.getInputNames())
    if "pdf" in col_index:
        pdfcombo = self.cellWidget(r, col_index["pdf"])
    # Type was changed
    if "type" in col_index and c == col_index["type"]:
        if self.useTypeChangedSignal:
            self.typeChanged.emit()
        combobox = self.cellWidget(r, col_index["type"])
        cbtext = combobox.currentText()
        if self.mode != InputPriorTable.OUU and "check" in col_index:
            # Disable view checkbox if not variable parameter
            checkbox = self.cellWidget(r, col_index["check"])
            checkbox.setEnabled(cbtext == "Variable")
        # Value column: which of value/min/max are editable depends on type.
        if cbtext == "Fixed" or self.rsEvalMode:
            self.activateCell(r, col_index["value"])
            if "min" in col_index:
                self.clearMinMax(r)
        elif cbtext in [
            "Epistemic",
            "Opt: Primary Continuous (Z1)",
            "Opt: Primary Discrete (Z1d)",
            "Opt: Recourse (Z2)",
        ]:
            self.activateCell(r, col_index["value"])
            self.activateMinMax(r, inVarNames)
        elif cbtext == "UQ: Discrete (Z3)":
            self.clearCell(r, col_index["value"])
            self.clearMinMax(r)
        else:
            self.clearCell(r, col_index["value"])
            if "min" in col_index:
                self.activateMinMax(r, inVarNames)
        # Scale column: only primary optimization variables get a scale.
        if "scale" in col_index:
            if "Primary" in cbtext:
                self.activateCell(r, col_index["scale"])
            else:
                self.clearCell(r, col_index["scale"])
        # PDF columns: enabled for variable-like types or when type is hidden.
        if "pdf" in col_index:
            if self.isColumnHidden(col_index["type"]) or cbtext in [
                "Variable",
                "Aleatory",
                "UQ: Continuous (Z4)",
            ]:
                pdfcombo.setEnabled(True)
            else:
                pdfcombo.setEnabled(False)
                self.clearParamCell(r, 1)
                self.clearParamCell(r, 2)
        self.cellChanged.connect(self.change)
        return
    if "pdf" in col_index:
        # update the row in inputPrior_table
        d = pdfcombo.currentText()  # distribution type
        d = Distribution.getEnumValue(d)
        dist = Distribution(d)
        d1name, d2name = Distribution.getParameterNames(
            d
        )  # distribution parameter names
        # TO DO: handle the case 'd == Distribution.SAMPLE'
        if d == Distribution.UNIFORM:
            # clear and deactivate param1/param2
            self.clearParamCell(r, 1)
            self.clearParamCell(r, 2)
            self.activateMinMax(r, inVarNames)
        elif d == Distribution.SAMPLE:
            self.activateFileCells(r)
            self.clearMinMax(r)
        else:
            # Other distributions: seed parameter cells from the default
            # distribution for this row, if one of the same type exists.
            value1 = None
            value2 = None
            dists = self.dist
            if dists is not None:
                defaultDist = dists[r]
                if (
                    defaultDist is not None
                    and d == defaultDist.getDistributionType()
                ):
                    (value1, value2) = defaultDist.getParameterValues()
            self.activateParamCell(r, 1, d1name, value1)
            if d2name is None:
                self.clearParamCell(r, 2)
            else:
                self.activateParamCell(r, 2, d2name, value2)
            if self.mode != InputPriorTable.SIMSETUP:
                self.clearMinMax(r)
            self.setColumnWidth(col_index["p1"], self.paramColWidth)
            self.setColumnWidth(col_index["p2"], self.paramColWidth)
        if "pdf" in col_index and c == col_index["pdf"]:
            self.pdfChanged.emit()
    self.resizeColumns()
    self.cellChanged.connect(self.change)
def clearCell(self, row, col, text=None, createItem=False):
    """Grey out and disable the item at (row, col).

    Optionally installs a fresh empty item first (createItem) and/or sets
    its text, right-aligning text that parses as a number.
    """
    if createItem:
        item = QTableWidgetItem("")
        self.setItem(row, col, item)
    else:
        item = self.item(row, col)
    item.setBackground(Qt.lightGray)
    if text is not None:
        item.setText(text)
        try:
            float(text)
        except ValueError:
            pass  # non-numeric text keeps the default alignment
        else:
            item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
    # Strip the enabled flag so the cell is read-only and greyed.
    item.setFlags(item.flags() & ~Qt.ItemIsEnabled)
def activateCell(self, row, col, text=None, createItem=False):
    """Enable the item at (row, col) with a white background.

    Optionally installs a fresh empty item first (createItem); *text* is
    applied only when the cell is currently empty, right-aligned when it
    parses as a number.
    """
    if createItem:
        item = QTableWidgetItem("")
        self.setItem(row, col, item)
    else:
        item = self.item(row, col)
    item.setBackground(Qt.white)
    if text is not None and not item.text():
        item.setText(text)
        try:
            float(text)
        except ValueError:
            pass  # non-numeric text keeps the default alignment
        else:
            item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
    # Make the cell selectable, enabled and editable.
    item.setFlags(
        item.flags() | Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
    )
def clearParamCell(self, row, paramNum):
    """Remove any widget from the p1/p2 parameter cell and grey it out."""
    col = self.col_index["p1"] if paramNum == 1 else self.col_index["p2"]
    self.removeCellWidget(row, col)
    self.clearCell(row, col, createItem=True)
def activateParamCell(self, row, paramNum, text, value=None):
    """Turn the p1/p2 cell into a 2-cell inner table.

    The inner table holds a disabled label (*text*) next to an editable
    value cell, optionally pre-filled with *value* (read-only when the
    table is view-only). Returns the inner QTableWidget.
    """
    col_index = self.col_index
    if paramNum == 1:
        col = col_index["p1"]
    else:  # assume param 2
        col = col_index["p2"]
    self.activateCell(row, col)
    # clear and activate: build the disabled label item
    nameMask = ~Qt.ItemIsEnabled
    pname = QTableWidgetItem(text)
    pname.setBackground(Qt.white)
    pname.setForeground(Qt.black)
    flags = pname.flags()
    pname.setFlags(flags & nameMask)
    # add 2-cell table (reuse an existing one unless a file combobox is there)
    cellTable = self.cellWidget(row, col)
    if isinstance(cellTable, QComboBox):  # combo from file selection
        self.removeCellWidget(row, col)
        cellTable = None
    if cellTable is None:
        cellTable = QTableWidget(self)
        self.setCellWidget(row, col, cellTable)
    cellTable.clear()
    cellTable.setRowCount(1)
    cellTable.setColumnCount(2)
    cellTable.horizontalHeader().setVisible(False)
    cellTable.verticalHeader().setVisible(False)
    # Remember where this inner table lives so slots can find the row/col.
    cellTable.setProperty("row", row)
    cellTable.setProperty("col", col)
    cellTable.setItem(0, 0, pname)
    if value is not None:
        pval = QTableWidgetItem(str(value))
        cellTable.setItem(0, 1, pval)
        if self.viewOnly:
            flags = pval.flags()
            pval.setFlags(flags & ~Qt.ItemIsEnabled)
            pval.setForeground(Qt.black)
    cellTable.setColumnWidth(0, self.labelWidth)
    cellTable.setColumnWidth(1, self.paramWidth)
    cellTable.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
    cellTable.cellChanged.connect(self.paramChange)
    cellTable.setEditTriggers(
        QAbstractItemView.AllEditTriggers
    )  # Allow single click to edit
    return cellTable
def paramChange(self):
    """Slot for edits inside a distribution-parameter inner table.

    Validates the edited value against the selected distribution (the
    second parameter must be > 0 for normal/lognormal/triangle; any value
    must be >= 0 for lognormal/gamma/beta/weibull), pops a warning and
    turns the cell red when invalid, white when valid.

    NOTE(review): *dist* is the pdf combobox INDEX compared against
    Distribution enum values -- assumes item order matches the enum;
    confirm against the table construction code.
    """
    cellTable = self.sender()
    cellTable.cellChanged.disconnect()  # avoid re-entry while validating
    row = cellTable.property("row")
    col = cellTable.property("col")
    pdfCombo = self.cellWidget(row, self.col_index["pdf"])
    dist = pdfCombo.currentIndex()
    outOfBounds = False
    showMessage = False
    item = cellTable.item(0, 1)
    # Value must not be less than 0
    if item is not None and item.text():
        if self.isnumeric(item.text()):
            value = float(item.text())
            if col == self.col_index["p2"] and dist in (
                Distribution.NORMAL,
                Distribution.LOGNORMAL,
                Distribution.TRIANGLE,
            ):
                if value <= 0:
                    message = "Value must be greater than 0! Please fix it."
                    showMessage = True
                    outOfBounds = True
            if dist in (
                Distribution.LOGNORMAL,
                Distribution.GAMMA,
                Distribution.BETA,
                Distribution.WEIBULL,
            ):
                if value < 0:
                    message = "Value must not be negative! Please fix it."
                    showMessage = True
                    outOfBounds = True
        else:
            message = "Entry is not a number! Please fix it."
            showMessage = True
            outOfBounds = True
    if showMessage:
        msgbox = QMessageBox()
        msgbox.setWindowTitle("UQ/Opt GUI Warning")
        msgbox.setText(message)
        msgbox.setIcon(QMessageBox.Warning)
        response = msgbox.exec_()
    if outOfBounds:
        # item.setForeground(QColor(192,0,0))
        item.setBackground(QColor(255, 0, 0))
        cellTable.setFocus()
    elif item is not None and item.text():
        # item.setForeground(QColor(0,0,0))
        item.setBackground(QColor(255, 255, 255))
    cellTable.cellChanged.connect(self.paramChange)
    self.pdfChanged.emit()
def activateFileCells(self, row):
    """Switch the row's p1/p2 cells to sample-file mode.

    p1 becomes a file-selection combobox ("Select File" / known sample
    files / "Browse..."); p2 becomes an "Input #" spinbox, disabled until
    a file is actually selected.
    """
    col_index = self.col_index
    self.activateCell(row, col_index["p1"])
    self.activateCell(row, col_index["p2"])
    # File combo
    combobox = self.cellWidget(row, col_index["p1"])
    if isinstance(combobox, QTableWidget):  # cell table from other PDFs
        self.removeCellWidget(row, col_index["p1"])
        combobox = None
    if combobox is None:
        combobox = QComboBox()
        self.setCellWidget(row, col_index["p1"], combobox)
    items = ["Select File"]
    items.extend([os.path.basename(f) for f in self.dispSampleFiles])
    items.append("Browse...")
    # Update existing entries in place, then append any new ones.
    # Bug fix: the old guard "if i < combobox.count:" compared an int to
    # the bound method object, which raises TypeError on Python 3; the
    # slice already limits i to the existing item count.
    for i, label in enumerate(items[: combobox.count()]):
        combobox.setItemText(i, label)
    combobox.addItems(items[combobox.count() :])
    combobox.setProperty("row", row)
    combobox.currentIndexChanged[int].connect(self.setFile)
    # Index spinbox in p2
    cellTable = self.activateParamCell(row, 2, "Input #")
    spinbox = cellTable.cellWidget(0, 1)
    if spinbox is None:
        spinbox = QSpinBox()
        cellTable.setCellWidget(0, 1, spinbox)
    spinbox.setMinimum(1)
    if self.sampleNumInputs:
        spinbox.setMaximum(self.sampleNumInputs[0])
    # No concrete file chosen yet -> the input index is meaningless.
    if combobox.currentText() in ("Browse...", "Select File"):
        cellTable.setEnabled(False)
    else:
        cellTable.setEnabled(True)
def isSamplePDFChosen(self):
    """Return True if any row's pdf combobox is set to the SAMPLE entry."""
    sample_name = Distribution.getFullName(Distribution.SAMPLE)
    pdf_col = self.col_index["pdf"]
    return any(
        self.cellWidget(r, pdf_col).currentText() == sample_name
        for r in range(self.rowCount())
    )
def setFile(self):
    """Slot for the sample-file combobox in the p1 column.

    Three combobox states are handled: "Browse..." opens a file dialog and
    registers the chosen sample file (converting CSV to PSUADE simple
    format), "Select File" disables the input-index cell, and a named file
    enables the index spinbox with that file's input count.
    """
    col_index = self.col_index
    combobox = self.sender()
    combobox.blockSignals(True)  # avoid re-entry while we mutate the combo
    currentRow = combobox.property("row")
    text = combobox.currentText()
    if text == "Browse...":
        if platform.system() == "Windows":
            allFiles = "*.*"
        else:
            allFiles = "*"
        fname, _ = QFileDialog.getOpenFileName(
            self,
            "Load Sample file",
            "",
            "Psuade Simple Files (*.smp);;CSV (Comma delimited) (*.csv);;All files (%s)"
            % allFiles,
        )
        if len(fname) == 0:  # Cancelled
            combobox.setCurrentIndex(0)
            combobox.blockSignals(False)
            return
        elif fname in self.sampleFiles:
            # Already registered: just select it and refresh the spinbox max.
            index = self.sampleFiles.index(fname) + 1
            combobox.setCurrentIndex(index)
            table = self.cellWidget(currentRow, col_index["p2"])
            table.setEnabled(True)
            spinbox = table.cellWidget(0, 1)
            spinbox.setMaximum(self.sampleNumInputs[index - 1])
        else:
            ## try: # Full PSUADE format
            ## data = LocalExecutionModule.readSampleFromPsuadeFile(fname)
            ## inputData = data.getInputData()
            ## numInputs = data.getNumInputs()
            ## except:
            dispFName = fname
            try:  # Simple format
                if fname.endswith(".csv"):
                    # CSV is converted to a .smp file in the working folder.
                    data = LocalExecutionModule.readDataFromCsvFile(
                        fname, askForNumInputs=False
                    )
                    Common.initFolder(LocalExecutionModule.dname)
                    newFileName = (
                        LocalExecutionModule.dname
                        + os.sep
                        + os.path.basename(fname)[:-4]
                        + ".smp"
                    )
                    LocalExecutionModule.writeSimpleFile(newFileName, data[0])
                    fname = newFileName
                else:
                    data = LocalExecutionModule.readDataFromSimpleFile(fname)
                inputData = data[0]
                numInputs = inputData.shape[1]
            except:  # Invalid file
                import traceback
                traceback.print_exc()
                msgbox = QMessageBox()
                msgbox.setWindowTitle("UQ/Opt GUI Warning")
                msgbox.setText(
                    "File format not recognized! File must be in PSUADE simple format."
                )
                msgbox.setIcon(QMessageBox.Warning)
                msgbox.exec_()
                combobox.setCurrentIndex(0)
                combobox.blockSignals(False)
                return
            # File format good
            self.sampleFiles.append(fname)
            self.dispSampleFiles.append(os.path.basename(dispFName))
            self.sampleNumInputs.append(numInputs)
            index = len(self.sampleFiles)
            combobox.setCurrentIndex(0)  # Prevent calling twice with Browse...
            combobox.insertItem(index, os.path.basename(dispFName))
            combobox.setCurrentIndex(index)
            # Mirror the newly registered file into every other row's combo.
            for row in range(self.rowCount()):
                if row != currentRow:
                    combo = self.cellWidget(row, col_index["p1"])
                    if combo is not None and isinstance(combo, QComboBox):
                        currentIndex = combo.currentIndex()
                        combo.setCurrentIndex(0)
                        combo.insertItem(index, os.path.basename(dispFName))
                        combo.setCurrentIndex(currentIndex)
            # Set max index number
            table = self.cellWidget(currentRow, col_index["p2"])
            table.setEnabled(True)
            spinbox = table.cellWidget(0, 1)
            spinbox.setMaximum(numInputs)
    elif text == "Select File":
        table = self.cellWidget(currentRow, col_index["p2"])
        table.setEnabled(False)
    else:  # File selected
        index = combobox.currentIndex()
        table = self.cellWidget(currentRow, col_index["p2"])
        table.setEnabled(True)
        spinbox = table.cellWidget(0, 1)
        spinbox.setMaximum(self.sampleNumInputs[index - 1])
        # Set index to next value if previous row has same file selected
        if (
            currentRow > 0
            and self.cellWidget(currentRow - 1, col_index["pdf"]).currentText()
            == "Sample"
        ):
            if (
                self.cellWidget(currentRow - 1, col_index["p1"]).currentIndex()
                == index
            ):
                prevRowTable = self.cellWidget(currentRow - 1, col_index["p2"])
                prevRowSpinbox = prevRowTable.cellWidget(0, 1)
                spinbox.setValue(prevRowSpinbox.value() + 1)
    combobox.blockSignals(False)
    self.pdfChanged.emit()
def clearMinMax(self, row):
    """Grey out and disable the min and max cells of a row."""
    for key in ("min", "max"):
        self.clearCell(row, self.col_index[key])
def activateMinMax(self, row, inVarNames):
    """Enable the min/max cells of a row, seeding them with the variable's
    default bounds formatted with self.format."""
    cols = self.col_index
    var_name = self.item(row, cols["name"]).text()
    k = inVarNames.index(var_name)
    self.activateCell(row, cols["min"], self.format % self.lb[k])
    self.activateCell(row, cols["max"], self.format % self.ub[k])
def makeAllFixed(self):
    """Set every row's type combobox to combobox index 1 ("Fixed")."""
    self.setAllToType(1)

def makeAllVariable(self):
    """Set every row's type combobox to combobox index 0 ("Variable")."""
    self.setAllToType(0)

def setAllToType(self, value):
    """Select combobox index *value* in the type column of every row."""
    numRows = self.rowCount()
    for row in range(numRows):
        combobox = self.cellWidget(row, self.col_index["type"])
        combobox.setCurrentIndex(value)
def setCheckedToType(self, type):
    """Apply *type* (a type-combobox string or an integer index) to every
    checked row, then uncheck and re-enable each checkbox.

    Raises Exception when a string *type* is not one of self.typeItems.
    """
    QApplication.processEvents()
    col_index = self.col_index
    if isinstance(type, str):  # String
        if type not in self.typeItems:
            raise Exception("setCheckedToType value is not among accepted values")
    for r in range(self.rowCount()):
        checkbox = self.cellWidget(r, col_index["check"])
        combo = self.cellWidget(r, col_index["type"])
        if checkbox.isChecked():
            if isinstance(type, str):  # String
                combo.setCurrentIndex(self.typeItems.index(type))
            else:  # Integer index
                combo.setCurrentIndex(type)
            checkbox.setChecked(False)
            checkbox.setEnabled(True)
    QApplication.processEvents()
def getNumDesignVariables(self):
    """Count rows whose type combobox reads exactly "Design"."""
    type_col = self.col_index["type"]
    return sum(
        1
        for row in range(self.rowCount())
        if self.cellWidget(row, type_col).currentText() == "Design"
    )
def getNumVariables(self):
    """Count rows whose type combobox reads exactly "Variable"."""
    type_col = self.col_index["type"]
    return sum(
        1
        for row in range(self.rowCount())
        if self.cellWidget(row, type_col).currentText() == "Variable"
    )
def getMins(self):
    """Return the lower-bound list for all inputs."""
    return self.lb

def getMaxs(self):
    """Return the upper-bound list for all inputs."""
    return self.ub

def getFixedVariables(self):
    """(names, row indices) of inputs typed "Fixed"."""
    return self.getVariablesWithType("Fixed")

def getDesignVariables(self):
    """(names, row indices) of inputs typed "Design"."""
    return self.getVariablesWithType("Design")

def getEpistemicVariables(self):
    """(names, row indices) of inputs typed "Epistemic"."""
    return self.getVariablesWithType("Epistemic")

def getPrimaryVariables(self):
    """(names, row indices) of primary optimization (Z1) inputs.

    NOTE(review): getVariablesWithType uses a substring match, so "Z1"
    also matches "Opt: Primary Discrete (Z1d)" rows -- confirm intended.
    """
    return self.getVariablesWithType("Z1")

def getRecourseVariables(self):
    """(names, row indices) of recourse (Z2) inputs."""
    return self.getVariablesWithType("Z2")

def getUQDiscreteVariables(self):
    """(names, row indices) of UQ discrete (Z3) inputs."""
    return self.getVariablesWithType("Z3")

def getContinuousVariables(self):
    """(names, row indices) of UQ continuous (Z4) inputs."""
    return self.getVariablesWithType("Z4")
def getVariablesWithType(self, typeString):
    """Return (names, row indices) of rows whose type text CONTAINS
    *typeString*. Both lists are empty when there is no type column."""
    cols = self.col_index
    names, indices = [], []
    if "type" in cols:
        for row in range(self.rowCount()):
            if typeString in self.cellWidget(row, cols["type"]).currentText():
                names.append(self.item(row, cols["name"]).text())
                indices.append(row)
    return names, indices
def getShowInputList(self):
    """Return row indices whose view checkbox is present, enabled and checked."""
    check_col = self.col_index["check"]
    shown = []
    for row in range(self.rowCount()):
        box = self.cellWidget(row, check_col)
        if box is not None and box.isEnabled() and box.isChecked():
            shown.append(row)
    return shown
def getDistribution(self, row):
    """Build a Distribution object from the row's pdf selection.

    For a SAMPLE distribution, param1 is the sample file path and param2
    the input-index spinbox value; otherwise both parameters are read as
    floats from the inner parameter tables.
    """
    col_index = self.col_index
    combobox = self.cellWidget(row, col_index["pdf"])
    distName = combobox.currentText()
    dtype = Distribution.getEnumValue(distName)
    widget = self.cellWidget(row, col_index["p1"])
    param1 = None
    param2 = None
    if widget:
        if dtype == Distribution.SAMPLE:  # file
            # Combobox index 0 is "Select File", hence the -1 offset.
            param1 = self.sampleFiles[widget.currentIndex() - 1]
        else:
            param1 = float(widget.item(0, 1).text())
    cellTable = self.cellWidget(row, col_index["p2"])
    if cellTable:
        if dtype == Distribution.SAMPLE:
            param2 = cellTable.cellWidget(0, 1).value()
        else:
            param2 = float(cellTable.item(0, 1).text())
    d = Distribution(dtype)
    d.setParameterValues(param1, param2)
    return d
@staticmethod
def isnumeric(str):
if len(str.strip()) == 0:
return False
try:
float(str)
return True
except ValueError:
return False
def checkValidInputs(self):
    """Validate every row of the table.

    Returns (True, None) when all rows pass, or (False, message)
    identifying the first invalid entry. Variable/Z4 rows are validated
    according to their selected distribution; Fixed rows just need a
    numeric value; other types (e.g. Design) are always accepted.
    """
    b = False
    nInputs = self.rowCount()
    col_index = self.col_index
    for i in range(nInputs):
        inputName = self.item(i, col_index["name"]).text()
        type = "Variable"
        if "type" in col_index:
            combobox = self.cellWidget(i, col_index["type"])
            type = combobox.currentText()
        if type == "Variable" or "Z4" in type:
            if "pdf" in col_index:
                combobox = self.cellWidget(i, col_index["pdf"])
                distName = combobox.currentText()
                dtype = Distribution.getEnumValue(distName)
                if self.mode == InputPriorTable.SIMSETUP:
                    # Simulation setup: bounds must be numeric and ordered.
                    xmin = self.item(i, col_index["min"])
                    xmax = self.item(i, col_index["max"])
                    if (
                        (xmin is not None)
                        and self.isnumeric(xmin.text())
                        and (xmax is not None)
                        and self.isnumeric(xmax.text())
                    ):
                        minVal = float(xmin.text())
                        maxVal = float(xmax.text())
                        if minVal >= maxVal:
                            return (
                                False,
                                "Minimum value is not less than max value for %s!"
                                % inputName,
                            )
                        b = True
                    else:
                        return (
                            False,
                            "Min or max value for %s is not a number!" % inputName,
                        )
                if dtype == Distribution.UNIFORM:
                    # Uniform: bounds must be numeric, ordered, and overlap
                    # the variable's own [lbVariable, ubVariable] range.
                    xmin = self.item(i, col_index["min"])
                    xmax = self.item(i, col_index["max"])
                    if (
                        (xmin is not None)
                        and self.isnumeric(xmin.text())
                        and (xmax is not None)
                        and self.isnumeric(xmax.text())
                    ):
                        minVal = float(xmin.text())
                        maxVal = float(xmax.text())
                        if (
                            minVal >= self.ubVariable[i]
                            or maxVal <= self.lbVariable[i]
                            or minVal >= maxVal
                        ):
                            return (
                                False,
                                "Minimum value is not less than max value for %s!"
                                % inputName,
                            )
                        b = True
                    else:
                        return (
                            False,
                            "Min or max value for %s is not a number!" % inputName,
                        )
                elif dtype == Distribution.LOGNORMAL:  # Lognormal mean less than 0
                    cellTable = self.cellWidget(i, col_index["p1"])
                    param1 = cellTable.item(0, 1)
                    if (param1 is not None) and self.isnumeric(param1.text()):
                        if float(param1.text()) < 0:
                            return (
                                False,
                                "Mean value for %s cannot be negative!" % inputName,
                            )
                        b = True
                    else:
                        return (
                            False,
                            "Mean value for %s is not a number!" % inputName,
                        )
                elif dtype == Distribution.EXPONENTIAL:
                    # Exponential: single numeric lambda parameter.
                    cellTable = self.cellWidget(i, col_index["p1"])
                    param1 = cellTable.item(0, 1)
                    if (param1 is not None) and self.isnumeric(param1.text()):
                        b = True
                    else:
                        return (
                            False,
                            "Lambda value for %s is not a number!" % inputName,
                        )
                elif (
                    dtype == Distribution.GAMMA
                    or dtype == Distribution.BETA
                    or dtype == Distribution.WEIBULL
                ):  # Parameters less than 0
                    cellTable = self.cellWidget(i, col_index["p1"])
                    param1 = cellTable.item(0, 1)
                    cellTable = self.cellWidget(i, col_index["p2"])
                    param2 = cellTable.item(0, 1)
                    if (
                        (param1 is not None)
                        and self.isnumeric(param1.text())
                        and (param2 is not None)
                        and self.isnumeric(param2.text())
                    ):
                        if float(param1.text()) < 0 or float(param2.text()) < 0:
                            return (
                                False,
                                "Distribution parameter value for %s cannot be negative!"
                                % inputName,
                            )
                        b = True
                    else:
                        return (
                            False,
                            "Distribution parameter value for %s is not a number!"
                            % inputName,
                        )
                elif dtype == Distribution.SAMPLE:
                    # Sample distribution: a concrete file must be chosen.
                    combo = self.cellWidget(i, col_index["p1"])
                    text = combo.currentText()
                    if text == "Browse..." or text == "Select File":
                        return False, "No file selected for %s!" % inputName
                    b = True
                else:
                    # Remaining distributions: both parameters must be numeric.
                    cellTable = self.cellWidget(i, col_index["p1"])
                    param1 = cellTable.item(0, 1)  # param1 value
                    cellTable = self.cellWidget(i, col_index["p2"])
                    param2 = cellTable.item(0, 1)  # param2 value
                    if (
                        (param1 is not None)
                        and self.isnumeric(param1.text())
                        and (param2 is not None)
                        and self.isnumeric(param2.text())
                    ):
                        b = True
                    else:
                        return (
                            False,
                            "Distribution parameter value for %s is not a number!"
                            % inputName,
                        )
            else:
                b = True
        elif type == "Fixed":
            value = self.item(i, col_index["value"])
            if value is not None and self.isnumeric(value.text()):
                value = float(value.text())
                b = True
            else:
                return False, "Fixed value for %s is not a number!" % inputName
        else:  # Design
            b = True
    return b, None
def getTableValues(self):
    """Collect the table contents into a list of per-input dicts.

    Each dict has "name" and "type" plus, for non-fixed inputs (or all
    inputs in RS analysis outside epistemic mode), the pdf type with its
    parameters and bounds; "value" holds the fixed value, or None when
    the value cell is empty.
    """
    nInputs = self.rowCount()
    col_index = self.col_index
    values = [None] * nInputs
    for i in range(nInputs):
        inType = "Variable"
        value = {}
        if "name" in col_index:
            item = self.item(i, col_index["name"])
            value["name"] = item.text()
        if "type" in col_index:
            combobox = self.cellWidget(i, col_index["type"])
            inType = combobox.currentText()
            value["type"] = inType
        if (
            self.mode == InputPriorTable.RSANALYSIS and not self.epistemicMode
        ) or inType != "Fixed":
            if (
                "pdf" in col_index
                and self.cellWidget(i, col_index["pdf"]).isEnabled()
            ):
                combobox = self.cellWidget(i, col_index["pdf"])
                distName = combobox.currentText()
                dtype = Distribution.getEnumValue(distName)
                # Default bound text maps back to the stored bound value to
                # avoid round-tripping through the display format.
                xminText = self.item(i, col_index["min"]).text()
                if xminText == self.format % self.lbVariable[i]:
                    xmin = self.lbVariable[i]
                else:
                    xmin = float(xminText)
                xmaxText = self.item(i, col_index["max"]).text()
                # Bug fix: compare the max cell against the formatted UPPER
                # bound (was lbVariable, so the default max text never
                # matched and was always re-parsed with float()).
                if xmaxText == self.format % self.ubVariable[i]:
                    xmax = self.ubVariable[i]
                else:
                    xmax = float(xmaxText)
                widget = self.cellWidget(i, col_index["p1"])
                if widget:
                    if dtype == Distribution.SAMPLE:  # file
                        param1 = self.sampleFiles[widget.currentIndex() - 1]
                    else:
                        param1 = float(widget.item(0, 1).text())
                cellTable = self.cellWidget(i, col_index["p2"])
                if cellTable:
                    if dtype == Distribution.SAMPLE:
                        param2 = cellTable.cellWidget(0, 1).value()
                    else:
                        param2 = float(cellTable.item(0, 1).text())
            else:  # No pdf setting. Use default PDFs from data
                if self.distVariable is None or len(self.distVariable) == 0:
                    dtype = Distribution.UNIFORM
                    param1 = None
                    param2 = None
                else:
                    dtype = self.distVariable[i].getDistributionType()
                    param1, param2 = self.distVariable[i].getParameterValues()
                xmin = self.lbVariable[i]
                xmax = self.ubVariable[i]
            value.update({"pdf": dtype})
            if dtype == Distribution.UNIFORM:
                value.update(
                    {"param1": None, "param2": None, "min": xmin, "max": xmax}
                )
            elif dtype == Distribution.EXPONENTIAL:
                value.update(
                    {"param1": param1, "param2": None, "min": None, "max": None}
                )
            elif dtype is not None:
                value.update(
                    {"param1": param1, "param2": param2, "min": None, "max": None}
                )
        fixedVal = self.item(i, col_index["value"])
        if fixedVal.text() == "":
            value["value"] = None
        else:
            value["value"] = float(fixedVal.text())
        values[i] = value
    return values
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from collections import namedtuple
import exhibitionist.log
from exhibitionist.providers.IProvider import IProvider
GET_WS_URL_ATTR = "get_ws_url"
logger = exhibitionist.log.getLogger(__name__)
def WSMsg(msg_type,payload=None,**kwds):
# messages from the client to the server
d=dict(msg_type=msg_type,payload=payload)
d.update(kwds)
return d
class WebSocketProvider(IProvider):
    """Provider that exposes a websocket endpoint URL to view contexts."""

    name = "websocket"

    def __init__(self):
        # Set by register_with_server(); None until then.
        self.server = None

    def register_with_server(self, server):
        """Remember *server* and register this addon's HTTP handlers with it."""
        import exhibitionist.providers.websocket.handlers as handlers
        self.server = server
        server.add_handler(handlers)  # register addon's HTTP http_handlers with server

    def stop(self):
        """Nothing to tear down for this provider."""
        pass

    def populate_context(self, context):
        """Attach a get_ws_url() callable to the view context."""
        def get_ws_url():
            return self.server.get_view_url("WebSocketEvents")
        setattr(context, GET_WS_URL_ATTR, get_ws_url)

    def subscribe(self, h):
        """No-op: this provider has no subscription mechanism."""
        pass

    def is_handler(self, h):
        # no specific handlers for this provider
        # could define a decorator and a predicate if needed
        return False
|
from django.urls import path
from quiz_app.views.answer import CreateAnswer, UpdateAnswer
from quiz_app.views.question import question, QuestionDetail
from quiz_app.views.index import quiz_index, QuizDetail
app_name = 'quiz'
urlpatterns = [
    path('<int:quiz_id>', quiz_index, name='index'),
    # NOTE(review): the next three routes all share name='question', so
    # reverse('quiz:question') resolves only to the last match -- confirm
    # this is intended before relying on URL reversing for these views.
    path('<int:quiz_id>/question/<int:question_id>', question, name='question'),
    path('<int:quiz_id>/question/detail/<int:question_id>/', question, name='question'),
    path('<int:quiz_id>/question/<int:question_id>/edit', question, name='question'),
    path('detail/<int:pk>/', QuizDetail.as_view(), name='quiz detail'),
    path('question/<int:pk>/', QuestionDetail.as_view(), name='question update'),
    path('answer/create', CreateAnswer.as_view(), name='create answer'),
    path('answer/update/<int:pk>', UpdateAnswer.as_view(), name='update answer'),
]
|
# Recursive maze solving
dirs = [(0, 1), (1, 0), (0, -1), (-1, 0)]  # the four candidate moves (row/col deltas)
def mark(maze, pos):
    """Mark cell *pos* of *maze* as visited (2).

    Bug fix: the original indexed maze[pos[0][pos[1]]], i.e. subscripted
    the integer pos[0], which raises TypeError.
    """
    maze[pos[0]][pos[1]] = 2
def passable(maze, pos):
    """Return True when cell *pos* of *maze* is open and unvisited (0).

    Bug fix: the original indexed maze[pos[0][pos[1]]], i.e. subscripted
    the integer pos[0], which raises TypeError.
    """
    return maze[pos[0]][pos[1]] == 0
def find_path(maze, pos, end):
    """Recursive depth-first search from *pos* to *end*.

    Marks each visited cell, prints the path in reverse order while the
    recursion unwinds, and returns True when *end* was reached.
    """
    mark(maze, pos)
    if pos == end:
        print('pos:', pos, end=' ')
        return True
    for dx, dy in dirs:
        step = pos[0] + dx, pos[1] + dy
        if passable(maze, step) and find_path(maze, step, end):
            print(pos, end=' ')
            return True
    return False
# 迷宫的回溯求解
def print_path(end, pos, st):
    """Print the found position followed by the indices 0..stack depth.

    Bug fix: the original iterated the two-element tuple (0, depth + 1)
    instead of range(0, depth + 1).
    """
    print(pos)
    for i in range(0, st.depth() + 1):
        print(i, end=' ')
from stack.char1 import SStack
def maze_solve(maze, start, end):
    """Backtracking (stack-based) maze solver.

    Each stack entry is (position, next direction index to try), so a
    popped position resumes scanning where it left off.

    Bug fixes vs. the original:
    - the inner loop now starts at the saved index (was range(0, 4),
      which re-tried directions already examined for that position);
    - the cell about to be visited is marked (was mark(maze, pos), which
      re-marked the current cell and let branches re-push nexp);
    - the function returns once a path is printed (was falling through
      and printing 'no path' after success).
    """
    if start == end:
        print(start)
        return
    st = SStack()
    mark(maze, start)
    st.push((start, 0))
    while not st.is_empty():
        pos, nxt = st.pop()
        for i in range(nxt, 4):
            nexp = pos[0] + dirs[i][0], pos[1] + dirs[i][1]
            if nexp == end:
                print_path(end, pos, st)
                return
            if passable(maze, nexp):
                # Remember where to resume for pos, then descend into nexp.
                st.push((pos, i + 1))
                mark(maze, nexp)
                st.push((nexp, 0))
                break
    print('no path')
# 基于队列的迷宫求解
from stack.char5 import SQueue
def maze_solve_queue(maze, start, end):
    """Breadth-first (queue-based) maze solver.

    Bug fix: the passable() guard referenced the undefined name "nextp"
    (NameError on the first neighbour test); it now uses nexp.
    """
    if start == end:
        print('path find')
        return
    qu = SQueue()
    mark(maze, start)
    qu.enqueue(start)
    while not qu.is_empty():
        pos = qu.dequeue()
        for i in range(0, 4):
            nexp = pos[0] + dirs[i][0], pos[1] + dirs[i][1]
            if passable(maze, nexp):
                if nexp == end:
                    print('find the path')
                    return
                mark(maze, nexp)
                qu.enqueue(nexp)
    print('no path')
|
# Thesaurus App
import random

thesaurus = {
    "hot": ['balmy', 'summery', 'boiling'],
    "cold": ['chilly', 'cool', 'frigid'],
    "happy": ['content', 'merry', 'cheery'],
    "sad": ['unhappy', 'glum', 'miserable'],
}
print("Welcome to the Thesaurus\n\n Choose a word from below and I will give you a synonym.\n\nHere are the words in the thesaurus: ")
for k in thesaurus.keys():
    print("\t-" + k)
choice = input("\nWhat word would you like a synonym for? ").lower().strip()
if choice in thesaurus:
    # Bug fix: random.choice handles any number of synonyms, unlike the
    # previous randint(0, 2) which assumed exactly three entries per word.
    print("\nA synonym for " + choice + " is " + random.choice(thesaurus[choice]))
else:
    print("\nI'm sorry that word is not currently in the thesaurus")
choice = input("\nWould you like to see the whole thesaurus? ").lower().strip()
if choice.startswith('y'):
    for k, v in thesaurus.items():
        print("\n" + k.title())
        for value in v:
            print("\t-" + value)
else:
    print("Have a good day")
|
#!/usr/bin/python
## @package bmp183
# Module for handling Bosch BMP183 pressure sensor
import logging
import time
import numpy
import threading
NaN = float('nan')  # module-level alias for IEEE not-a-number
## Class for Bosch BMP183 pressure and temperature sensor with SPI interface as sold by Adafruit
class bmp183(threading.Thread):
## @var BMP183_REG
# BMP183 registers
BMP183_REG = {
    #@ Calibration data
    'CAL_AC1': 0xAA,
    'CAL_AC2': 0xAC,
    'CAL_AC3': 0xAE,
    'CAL_AC4': 0xB0,
    'CAL_AC5': 0xB2,
    'CAL_AC6': 0xB4,
    'CAL_B1': 0xB6,
    'CAL_B2': 0xB8,
    'CAL_MB': 0xBA,
    'CAL_MC': 0xBC,
    'CAL_MD': 0xBE,
    #@ Chip ID. Value fixed to 0x55. Useful to check if communication works
    'ID': 0xD0,
    'ID_VALUE': 0x55,
    #@ VER Undocumented
    'VER': 0xD1,
    #@ SOFT_RESET Write only. If set to 0xB6, will perform the same sequence as power on reset.
    'SOFT_RESET': 0xE0,
    #@ CTRL_MEAS Controls measurements
    'CTRL_MEAS': 0xF4,
    #@ DATA
    'DATA': 0xF6,
}

## @var BMP183_CMD
# BMP183 commands
BMP183_CMD = {
    #@ Chip ID Value fixed to 0x55. Useful to check if communication works
    'ID_VALUE': 0x55,
    # SPI bit to indicate READ or WRITE operation
    'READWRITE': 0x80,
    # Read TEMPERATURE, Wait time 4.5 ms
    'TEMP': 0x2E,
    'TEMP_WAIT': 0.0045,
    # Read PRESSURE
    'PRESS': 0x34,  # 001
    # PRESSURE reading modes
    # Example usage: (PRESS | (OVERSAMPLE_2 << 4))
    'OVERSAMPLE_0': 0x0,  # ultra low power, no oversampling, wait time 4.5 ms
    'OVERSAMPLE_0_WAIT': 0.0045,
    'OVERSAMPLE_1': 0x1,  # standard, 2 internal samples, wait time 7.5 ms
    'OVERSAMPLE_1_WAIT': 0.0075,
    'OVERSAMPLE_2': 0x2,  # high resolution, 4 internal samples, wait time 13.5 ms
    'OVERSAMPLE_2_WAIT': 0.0135,
    'OVERSAMPLE_3': 0x3,  # ultra high resolution, 8 internal samples, Wait time 25.5 ms
    'OVERSAMPLE_3_WAIT': 0.0255,
}
## The constructor
#  @param self The python object self
#  @param simulate Decides if bmp183 runs in simulation mode or real device mode.
def __init__(self, simulate=False):
    # Run init for super class
    super(bmp183, self).__init__()
    # self.rp = occ.rp
    ## @var l
    # Handle to system logger
    self.l = logging.getLogger('system')
    self.l.debug("[BMP] Initialising..")
    ## @var simulate
    # Stores simulate parameter from constructor call
    self.simulate = simulate
    ## @var sensor_ready
    # The constructor sets it to True after a successful handshake with the sensor. False otherwise.
    self.sensor_ready = False
    ## @var running
    # Variable controlling the main sensor handling loop. Setting it to False stops the loop.
    self.running = False
    self.first_run = True
    ## @var measurement_delay
    # Time between measurements in [s]
    self.measurement_delay = 0.45
    ## @var temperature
    # Measured temperature
    self.temperature = 0
    ## @var temperature_max_delta
    # Maximum allowed temperature difference between measurements. Normally temperature doesn't change too quickly,
    # so a sudden change means the measurement is invalid. If a new temperature value differs from the previous value by more than
    # temperature_max_delta the measurement is ignored.
    self.temperature_max_delta = 10
    ## @var pressure
    # Measured pressure
    self.pressure = 0
    self.pressure_unfiltered = 0
    # Setup Raspberry PINS, as numbered on BOARD
    self.SCK = 15  # GPIO for SCK, other name SCLK
    self.SDO = 13  # GPIO for SDO, other name MISO
    self.SDI = 11  # GPIO for SDI, other name MOSI
    self.CS = 7  # GPIO for CS, other name CE
    # SCK frequency 1 MHz
    self.delay = 1 / 1000000.0
    if not self.simulate:
        self.set_up_gpio()
        # Check communication / read ID
        ret = self.read_byte(self.BMP183_REG['ID'])
        if ret != self.BMP183_CMD['ID_VALUE']:
            self.l.error("[BMP] Communication with bmp183 failed")
            self.sensor_ready = False
            raise IOError("Communication with bmp183 failed")
        else:
            self.sensor_ready = True
            self.read_calibration_data()
            # Proceed with initial pressure/temperature measurement
            self.measure_pressure()
    # NOTE(review): indentation was lost in this source; kalman_setup() is
    # assumed unconditional (also run in simulation mode) -- confirm.
    self.kalman_setup()
    self.l.debug("[BMP] Initialised.")
def get_data(self):
    """Return the latest readings as {"pressure": ..., "temperature": ...}."""
    return {"pressure": self.pressure, "temperature": self.temperature}
def stop(self):
    """Stop the measurement loop and release GPIO on real hardware."""
    self.l.debug("[BMP] Stopping {}".format(__name__))
    self.running = False
    # Give the run loop a moment to observe running == False before
    # the GPIO pins it may be using are released.
    time.sleep(1)
    if not self.simulate:
        self.cleanup_gpio()
    self.l.debug("[BMP] Stopped {}".format(__name__))
def __del__(self):
    """Destructor: ensure the sensor loop and GPIO are shut down."""
    self.l.debug("[BMP] __del__")
    self.stop()
def set_up_gpio(self):
    """Configure the Raspberry Pi pins (BOARD numbering) for bit-banged
    SPI: SCK and CS as outputs idling high, SDI as output, SDO as input."""
    import RPi.GPIO as GPIO
    self.l.debug("[BMP] set_up_gpio")
    # GPIO initialisation
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(self.SCK, GPIO.OUT, initial=GPIO.HIGH)
    GPIO.setup(self.CS, GPIO.OUT, initial=GPIO.HIGH)
    GPIO.setup(self.SDI, GPIO.OUT)
    GPIO.setup(self.SDO, GPIO.IN)
def cleanup_gpio(self):
    """Release every GPIO pin claimed by set_up_gpio()."""
    import RPi.GPIO as GPIO
    self.l.debug("[BMP] cleanup_gpio")
    for pin in (self.SCK, self.CS, self.SDI, self.SDO):
        GPIO.cleanup(pin)
def read_byte(self, addr):
    """Fetch a single byte from the register at `addr` over bit-banged SPI."""
    return self.spi_transfer(addr, 0, 1, 8)
def read_word(self, addr, extra_bits=0):
    """Fetch a 16-bit register value from `addr`.

    `extra_bits` (up to 3) extends the read, as needed for oversampled
    pressure data.
    """
    bits_to_read = 16 + extra_bits
    return self.spi_transfer(addr, 0, 1, bits_to_read)
def write_byte(self, addr, value):
    """Store one byte `value` into the register at `addr` over SPI."""
    # Write transfers (rw=0) return nothing useful, so the result is dropped.
    self.spi_transfer(addr, value, 0, 8)
def spi_transfer(self, addr, value, rw, length):
    """Bit-banged SPI transfer at register `addr`.

    `rw` indicates READ (1) or WRITE (0); `length` is the number of data
    bits to shift. Returns the bits read (0 for a write).
    Timing between edges is controlled by self.delay.
    """
    import RPi.GPIO as GPIO
    ret_value = 0
    # Bit 7 of the address selects read/write on the BMP183.
    if (rw == 0):
        spi_addr = addr & (~self.BMP183_CMD['READWRITE'])
    else:
        spi_addr = addr | self.BMP183_CMD['READWRITE']
    # Assert chip-select (active low) to start the transaction.
    GPIO.output(self.CS, 0)
    time.sleep(self.delay)
    # Phase 1: clock out the 8 address bits, MSB first.
    for i in range(8):
        bit = spi_addr & (0x01 << (7 - i))
        if (bit):
            GPIO.output(self.SDI, 1)
        else:
            GPIO.output(self.SDI, 0)
        GPIO.output(self.SCK, 0)
        time.sleep(self.delay)
        GPIO.output(self.SCK, 1)
        time.sleep(self.delay)
    # Phase 2a (read): sample SDO on each rising clock edge, MSB first.
    if (rw == 1):
        for i in range(length):
            GPIO.output(self.SCK, 0)
            time.sleep(self.delay)
            bit = GPIO.input(self.SDO)
            GPIO.output(self.SCK, 1)
            ret_value = (ret_value << 1) | bit
            time.sleep(self.delay)
    # Phase 2b (write): clock out the data bits, MSB first.
    if (rw == 0):
        for i in range(length):
            bit = value & (0x01 << (length - 1 - i))
            if (bit):
                GPIO.output(self.SDI, 1)
            else:
                GPIO.output(self.SDI, 0)
            GPIO.output(self.SCK, 0)
            time.sleep(self.delay)
            GPIO.output(self.SCK, 1)
            time.sleep(self.delay)
    # Release chip-select to end the transaction.
    GPIO.output(self.CS, 1)
    return ret_value
def read_calibration_data(self):
    """Read the factory calibration coefficients from the sensor EEPROM.

    AC1-AC3, B1, B2, MB, MC and MD are signed 16-bit values; AC4-AC6 are
    unsigned, hence the different numpy casts.
    """
    self.l.debug("[BMP] read_calibration_data")
    # Read calibration data
    self.AC1 = numpy.int16(self.read_word(self.BMP183_REG['CAL_AC1']))
    self.AC2 = numpy.int16(self.read_word(self.BMP183_REG['CAL_AC2']))
    self.AC3 = numpy.int16(self.read_word(self.BMP183_REG['CAL_AC3']))
    self.AC4 = numpy.uint16(self.read_word(self.BMP183_REG['CAL_AC4']))
    self.AC5 = numpy.uint16(self.read_word(self.BMP183_REG['CAL_AC5']))
    self.AC6 = numpy.uint16(self.read_word(self.BMP183_REG['CAL_AC6']))
    self.B1 = numpy.int16(self.read_word(self.BMP183_REG['CAL_B1']))
    self.B2 = numpy.int16(self.read_word(self.BMP183_REG['CAL_B2']))
    # MB is not used
    self.MB = numpy.int16(self.read_word(self.BMP183_REG['CAL_MB']))
    self.MC = numpy.int16(self.read_word(self.BMP183_REG['CAL_MC']))
    self.MD = numpy.int16(self.read_word(self.BMP183_REG['CAL_MD']))
def measure_temperature(self):
    """Trigger a temperature conversion and update self.temperature."""
    # Start temperature measurement
    self.write_byte(self.BMP183_REG['CTRL_MEAS'], self.BMP183_CMD['TEMP'])
    # Wait for the conversion to finish
    time.sleep(self.BMP183_CMD['TEMP_WAIT'])
    # Read uncompensated temperature (raw ADC value)
    self.UT = numpy.int32(self.read_word(self.BMP183_REG['DATA']))
    self.calculate_temperature()
def measure_pressure(self):
    """Run one full measurement cycle, updating pressure_unfiltered."""
    if self.simulate:
        # Fixed plausible values so the pipeline runs without hardware.
        self.pressure_unfiltered = 101300
        self.temperature = 19.8
    elif self.sensor_ready:
        # Measure temperature - required for calculations (sets self.B5)
        self.measure_temperature()
        # Start a pressure conversion with maximum oversampling.
        self.write_byte(self.BMP183_REG['CTRL_MEAS'], self.BMP183_CMD[
            'PRESS'] | (self.BMP183_CMD['OVERSAMPLE_3'] << 4))
        # Wait for conversion
        time.sleep(self.BMP183_CMD['OVERSAMPLE_3_WAIT'])
        # 16 bits + 3 extra bits for the oversampled reading.
        self.UP = numpy.int32(self.read_word(self.BMP183_REG['DATA'], 3))
        self.calculate_pressure()
def calculate_pressure(self):
    # Calculate atmospheric pressure in [Pa].
    # Compensation algorithm operating on the raw reading self.UP and the
    # calibration coefficients; relies on self.B5 being set by
    # calculate_temperature() immediately before this call.
    # NOTE(review): '/' is true division on Python 3, while this style of
    # compensation is normally specified in integer arithmetic (and '<<'
    # below requires an integer operand) - confirm the intended Python
    # version / precision.
    self.B6 = self.B5 - 4000
    X1 = (self.B2 * (self.B6 * self.B6 / 2 ** 12)) / 2 ** 11
    X2 = self.AC2 * self.B6 / 2 ** 11
    X3 = X1 + X2
    self.B3 = (
        ((self.AC1 * 4 + X3) << self.BMP183_CMD['OVERSAMPLE_3']) + 2) / 4
    X1 = self.AC3 * self.B6 / 2 ** 13
    X2 = (self.B1 * (self.B6 * self.B6 / 2 ** 12)) / 2 ** 16
    X3 = ((X1 + X2) + 2) / 2 ** 2
    self.B4 = numpy.uint32(self.AC4 * (X3 + 32768) / 2 ** 15)
    self.B7 = (numpy.uint32(self.UP) - self.B3) * \
        (50000 >> self.BMP183_CMD['OVERSAMPLE_3'])
    p = numpy.uint32((self.B7 * 2) / self.B4)
    X1 = (p / 2 ** 8) * (p / 2 ** 8)
    X1 = int(X1 * 3038) / 2 ** 16
    X2 = int(-7357 * p) / 2 ** 16
    self.pressure_unfiltered = p + (X1 + X2 + 3791) / 2 ** 4
def calculate_temperature(self):
    """Convert the raw reading self.UT into degrees Celsius.

    Also stores the intermediate B5, which the pressure compensation
    reuses. A reading that jumps more than temperature_max_delta from
    the previous value is treated as invalid and discarded.
    """
    x1 = (self.UT - self.AC6) * self.AC5 / 2 ** 15
    x2 = self.MC * 2 ** 11 / (x1 + self.MD)
    self.B5 = x1 + x2
    self.T = (self.B5 + 8) / 2 ** 4  # temperature in units of 0.1 degC
    candidate = self.T / 10.0
    # On the very first sample there is nothing to compare against.
    delta = 0 if self.first_run else abs(candidate - self.temperature)
    self.first_run = False
    if delta < self.temperature_max_delta:
        self.temperature = candidate
def run(self):
    """Measurement loop: sample, filter, log, then sleep until next cycle.

    Runs until stop() clears self.running.
    """
    self.l.debug("[BMP] run")
    self.running = True
    while self.running:
        self.measure_pressure()
        # Smooth the raw reading before publishing it as self.pressure.
        self.kalman_update()
        self.l.debug(
            "[BMP] pressure = {} Pa, temperature = {} degC".format(self.pressure, self.temperature))
        time.sleep(self.measurement_delay)
def kalman_setup(self):
    """Initialise the 1-D Kalman filter state from the first raw sample.

    Only the R/Q ratio affects smoothing strength; P and K are
    self-tuning as updates are applied.
    """
    # Process noise.
    self.Q = 0.02
    # Estimate of measurement variance (sensor noise).
    self.R = 1.0
    # Seed both the current and previous estimates with the raw reading.
    self.pressure_estimate = self.pressure_unfiltered
    self.pressure_estimate_previous = self.pressure_unfiltered
    # Initial error covariances and gain.
    self.P = 0.245657137142
    self.P_previous = 0.325657137142
    self.K = 0.245657137142
def kalman_update(self):
    """Run one Kalman predict/update step and publish the filtered pressure."""
    measurement = self.pressure_unfiltered
    # Shift the current estimate/error into the "previous" slots.
    self.pressure_estimate_previous = self.pressure_estimate
    self.P_previous = self.P + self.Q
    # Gain balances trust between prediction and new measurement.
    gain = self.P_previous / (self.P_previous + self.R)
    self.K = gain
    innovation = measurement - self.pressure_estimate_previous
    self.pressure_estimate = self.pressure_estimate_previous + gain * innovation
    self.P = (1 - gain) * self.P_previous
    self.pressure = self.pressure_estimate
|
# -*- coding:utf-8 -*-
# __author__ = 'gupan'
import pickle
def sayhi(name):
    """Print a short greeting introducing `name`."""
    greeting = "my name is,"
    print(greeting, name)
# SECURITY: pickle.load can execute arbitrary code during deserialisation.
# Only load test.txt if it comes from a fully trusted source.
with open("test.txt", "rb") as f_r:
    data = pickle.load(f_r)
    # The pickled payload is expected to carry a callable under "func" and
    # its argument under "name".
    data["func"](data["name"])
#!/usr/bin/python3
"""returns the number of subscribers"""
import json
import requests
def number_of_subscribers(subreddit):
    """Return the subscriber count of `subreddit`, or 0 on failure.

    Redirects are disabled because reddit answers an unknown subreddit
    with a redirect to a search page; that case must report 0 rather
    than follow the redirect.
    """
    url = 'https://www.reddit.com/r/{}/about.json'.format(subreddit)
    headers = {
        'User-Agent': 'My User Agent 1.0'
    }
    response = requests.get(url, headers=headers, allow_redirects=False)
    # Check the status BEFORE parsing: non-200 bodies may not be JSON
    # (the original called .json() unconditionally, which can raise).
    if response.status_code != 200:
        return 0
    payload = response.json()
    return payload.get("data", {}).get("subscribers", 0)
|
# Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models from Locked-image text Tuning.
See paper https://arxiv.org/abs/2111.07991
"""
import dataclasses
import os
from typing import Optional, Tuple
import flax.linen as nn
import jax.numpy as jnp
import ml_collections
from vit_jax import checkpoint
from vit_jax import models_vit
from vit_jax import preprocess
from flaxformer.architectures.bert import bert
from flaxformer.architectures.bert import configs
BASE_PATH = 'gs://vit_models/lit'
class BertModel(nn.Module):
  """BERT encoder with linear projection on last layer CLS token."""

  # Selects the flaxformer configuration: 'base' or 'large'.
  config: str
  # If set, a Dense 'head' maps the CLS features to this many outputs.
  num_classes: Optional[int] = None

  @nn.compact
  def __call__(self, tokens):
    """Encodes `tokens`; returns (features, dict of intermediates)."""
    out = {}
    batch_size, max_len = tokens.shape
    bert_model = bert.BertEncoder(**dataclasses.asdict({
        'base': configs.BertBaseConfig(),
        'large': configs.BertLargeConfig(),
    }[self.config]))
    x = out['transformed'] = bert_model(
        token_ids=tokens,
        position_ids=jnp.tile(
            jnp.arange(0, max_len, dtype=jnp.int32), [batch_size, 1]),
        segment_ids=jnp.zeros([batch_size, max_len], dtype=jnp.int32),
        # Non-zero token ids are treated as real input, zeros as padding.
        input_mask=tokens.astype(jnp.bool_).astype(jnp.int32),
        enable_dropout=False,
    )
    x = out['pre_logits'] = x[:, 0]  # CLS token
    if self.num_classes:
      x = out['logits'] = nn.Dense(self.num_classes, name='head')(x)
    return x, out
class TextTransformer(nn.Module):
  """Simple text transformer."""

  # Output dimension of the final Dense 'head'.
  num_classes: int
  width: int = 512
  num_layers: int = 12
  mlp_dim: int = 2048
  num_heads: int = 8
  dropout_rate: float = 0.0
  vocab_size: int = 32_000

  @nn.compact
  def __call__(self, x):
    """Embeds token ids `x`; returns (features, dict of intermediates)."""
    out = {}
    embedding = nn.Embed(num_embeddings=self.vocab_size, features=self.width)
    x = out['embedded'] = embedding(x)
    # Add posemb
    n, l, d = x.shape  # pylint: disable=unused-variable
    x = x + self.param('pos_embedding',
                       nn.initializers.normal(stddev=1 / jnp.sqrt(d)),
                       (1, l, d), x.dtype)
    # Reuse the ViT encoder stack; position information is already added.
    x = models_vit.Encoder(
        num_layers=self.num_layers,
        mlp_dim=self.mlp_dim,
        num_heads=self.num_heads,
        dropout_rate=self.dropout_rate,
        attention_dropout_rate=0,
        add_position_embedding=False)(
            x, train=False)
    x = out['pre_logits'] = x[:, -1, :]  # note that we take *last* token
    x = out['logits'] = nn.Dense(self.num_classes, name='head')(x)
    return x, out
class LitModel(nn.Module):
  """Locked-image text Tuning model.

  See paper https://arxiv.org/abs/2111.07991

  For examples, refer to Colab
  https://colab.research.google.com/github/google-research/vision_transformer/blob/main/lit.ipynb

  Attributes:
    image: Configuration for ViT image tower.
    text: Configuration for text tower.
    pp: Preprocessing configuration.
    out_dim: Size of optional image/text heads that are added to the towers.
    model_name: Refers to the key in `model_configs.MODEL_CONFIGS`.
  """

  image: ml_collections.ConfigDict
  # Selects the text tower implementation: 'bert' or 'text_transformer'.
  text_model: str
  text: ml_collections.ConfigDict
  pp: ml_collections.ConfigDict
  out_dim: Tuple[Optional[int], Optional[int]]
  model_name: str

  def load_variables(self, path=None, cache=True):
    """Loads variables.

    Args:
      path: Path to load params from. If not specified, then the parms will be
        loaded from the default public Cloud storage path, unless they exist in
        the current working directory.
      cache: If set to `True` and `path` is not specified (the default), then
        the files will be copied from Cloud and stored in the current working
        directory.

    Returns:
      The module variables, to be used with `model.apply()`
    """
    if path is None:
      local_path = f'{self.model_name}.npz'
      if not os.path.exists(local_path):
        path = f'{BASE_PATH}/{self.model_name}.npz'
        print('Loading params from cloud:', path)
        if cache:
          checkpoint.copy(path, local_path)
      # A local copy (pre-existing or just cached) takes precedence.
      if os.path.exists(local_path):
        print('\n⚠️ Reusing local copy:', local_path)
        path = local_path
    return {'params': checkpoint.load(path)}

  @property
  def vocab_path(self):
    # Vocabulary file extension depends on the tokenizer implementation.
    ext = {
        'bert': 'txt',
        'sentencepiece': 'model',
    }[self.pp.tokenizer_name]
    return f'{BASE_PATH}/{self.model_name}.{ext}'

  def get_pp(self, crop=False):
    """Returns a preprocessing function suitable for `tf.data.Dataset.map()`."""
    return preprocess.get_pp(
        tokenizer_name=self.pp.tokenizer_name,
        vocab_path=self.vocab_path,
        max_len=self.pp.max_len,
        size=self.pp.size,
        crop=crop)

  def get_tokenizer(self):
    """Returns a tokenizer."""
    return preprocess.get_tokenizer(self.pp.tokenizer_name)(
        vocab_path=self.vocab_path,
        max_len=self.pp.max_len)

  def get_image_preprocessing(self, crop=False):
    """Returns a function to pre-process images (resize, value range)."""
    return preprocess.PreprocessImages(size=self.pp.size, crop=crop)

  @nn.compact
  def __call__(self, *, images=None, tokens=None):
    """Embeds images and/or tokens.

    Args:
      images: Batch of images, prepared with the function returned by
        `get_image_preprocessing()` or `get_pp()`.
      tokens: Batch of tokens, prepared with the function returned by
        `get_tokenizer()` or `get_pp()`.

    Returns:
      A tuple of `(zimg, ztxt, out)`, where `zimg` is a batch of embeddings for
      the images (or `None`, if images were not specified), `ztxt` is a batch
      of embeddings for the tokens (or `None`, if tokens were not specified),
      and `out` is a dictionary of additional values, such as `out['t']` that
      is the temperature multiplied with the vector dot products before the
      softmax is applied.
    """
    # Support calling without text or without images, for example for few-shot.
    ztxt, zimg = None, None
    out = {}
    out_dims = self.out_dim
    if isinstance(out_dims, int):
      out_dims = (out_dims, out_dims)
    if tokens is not None:
      # Embed the text:
      model_class = {
          'bert': BertModel,
          'text_transformer': TextTransformer,
      }[self.text_model]
      text_model = model_class(
          **{
              'num_classes': out_dims[1],
              **(self.text or {})
          }, name='txt')
      ztxt, out_txt = text_model(tokens)
      for k, v in out_txt.items():
        out[f'txt/{k}'] = v
      # Normalize the embeddings the models give us.
      out['txt/norm'] = jnp.linalg.norm(ztxt, axis=1, keepdims=True)
      out['txt/normalized'] = ztxt = ztxt / (out['txt/norm'] + 1e-8)
    if images is not None:
      image_model = models_vit.VisionTransformer(
          **{
              **self.image,
              'num_classes': out_dims[0],
          }, name='img')  # pylint: disable=not-a-mapping
      zimg = image_model(images, train=False)
      # Normalize the embeddings the models give us.
      out['img/norm'] = jnp.linalg.norm(zimg, axis=1, keepdims=True)
      out['img/normalized'] = zimg = zimg / (out['img/norm'] + 1e-8)
    # Learnable (log-)temperature applied to similarity scores downstream.
    t = self.param('t', nn.initializers.zeros, (1,), jnp.float32)
    out['t'] = jnp.exp(t)
    return zimg, ztxt, out
|
from django import forms
class Showdata(forms.Form):
    """Form capturing car details (prices, cost, mileage, engine, seats)."""
    # NOTE(review): field semantics (e.g. price1 vs price2) are not visible
    # here - confirm against the template/view that renders this form.
    price1 = forms.IntegerField()
    price2= forms.IntegerField()
    mcost = forms.CharField()
    Mileage = forms.CharField()
    engine= forms.CharField()
    seat_capacity = forms.IntegerField()
# class update_data(forms.Form):
# mname = forms.CharField()
# mnumber = forms.IntegerField()
# mcost = forms.IntegerField()
#<div class="jumbotron text-center"> <h1>{{data}}</h1></div> |
import sys

# Read the test input from a file instead of the console.
sys.stdin = open("D4_8822_input.txt", "r")

T = int(input())
for test_case in range(T):
    N, X = map(int, input().split())
    # BUG FIX: the original condition `X == (1 or 2 * N - 1)` always
    # compared X against 1, because `(1 or 2 * N - 1)` evaluates to 1.
    # The intent is to test X against both endpoints.
    answer = 0 if X == 1 or X == 2 * N - 1 else 1
    print("#{} {}".format(test_case + 1, answer))
#If there are multiple pages on the website we will scrape through all of them and grab the data that we need. We will do this in the script below. In the previous script we grabbed the base page an grabbed the data out of it in this script we ill build intelligence to get the page range dynamically and crawl through them.
#The trick here is to understand how the url changes as we browse through the pages. So what we did was find out the baseurl for the first page and then loop through the rest of them.
#By inspecting the web page it was gathered that page 1 had a url of 'http://pyclass.com/real-estate/rock-springs-wy/LCWYROCKSPRINGS/t=0&s=0.html', with the 0.html incrementing by 10. So as a rudementary first step we created a foor loop with a base url of 'http://pyclass.com/real-estate/rock-springs-wy/LCWYROCKSPRINGS/t=0&s=' with the 0.html increasing as 10.html, 20.html etc.
import re

import pandas
import requests
from bs4 import BeautifulSoup

# One dict of property details per listing, accumulated across all pages.
records = []
base_url = 'http://pyclass.com/real-estate/rock-springs-wy/LCWYROCKSPRINGS/t=0&s='

# Page N of the results lives at <base_url><N*10>.html, so step by 10.
for page_offset in range(0, 30, 10):
    r = requests.get(base_url + str(page_offset) + '.html', headers={'User-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'})
    soup = BeautifulSoup(r.content, 'html.parser')
    # FIX: was `all = soup.find_all(...)`, shadowing the builtin all();
    # also the loop variable `item` was reused for both loops.
    listings = soup.find_all("div", {"class": "propertyRow"})
    for listing in listings:
        d = {}
        # Strip the surrounding markup from the address spans.
        name = str(listing.find_all("span", {"class", "propAddressCollapse"})).replace('[<span class="propAddressCollapse" title="', '').replace('</span>, <span class="propAddressCollapse">', '').replace('</span>]', '')
        name = (re.sub(r'.*>', '>', name))
        d['Name'] = name.replace('>', '')
        d['Price'] = str(listing.find("h4", {"class": "propPrice"})).replace('\n', '').replace('<h4 class="propPrice">', '').replace(' ', '').replace('<spanclass="IconPropertyFavorite16"></span></h4>', '')
        # The remaining fields are optional; keep the best-effort behaviour
        # but never with a bare `except:`.
        try:
            d['Beds'] = str(listing.find('span', {'class', 'infoBed'})).replace('<span class="infoBed"><b>', '').replace('</b> Beds', '').replace('</span>', '')
        except Exception:
            d['Beds'] = None
        try:
            d['Sq Feet'] = str(listing.find('span', {'class', 'infoSqFt'})).replace('<span class="infoSqFt"><b>', '').replace('</b> Sq. Ft', '').replace('</span>', '')
        except Exception:
            d['Sq Feet'] = None
        try:
            d['Full Baths'] = str(listing.find('span', {'class', 'infoValueFullBath'})).replace('<span class="infoValueFullBath"><b>', '').replace('</b> Full Baths', '').replace('</span>', '')
        except Exception:
            d['Full Baths'] = None
        try:
            d['Half Baths'] = str(listing.find('span', {'class', 'infoValueHalfBath'})).replace('<span class="infoValueHalfBath"><b>', '').replace('</b> Half Bath', '').replace('</span>', '')
        except Exception:
            d['Half Baths'] = None
        # Lot size is hidden inside feature-group/feature-name span pairs.
        for column_group in listing.find_all('div', {'class': 'columnGroup'}):
            for feature_group, feature_name in zip(column_group.find_all('span', {'class': 'featureGroup'}), column_group.find_all('span', {'class': 'featureName'})):
                if 'Lot Size' in feature_group.text:
                    d['Lot Size'] = feature_name.text
        records.append(d)

df = pandas.DataFrame(records)
df.to_csv('output.csv')
|
#!/usr/bin/python
import argparse
import glob
import os
import re
import shutil
import sys
import MySQLdb
import MySQLdb.cursors
# Make the sibling orb_api project importable.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_PATH = os.path.normpath(os.path.join(BASE_DIR, '..', 'orb_api'))
if PROJECT_PATH not in sys.path:
    sys.path.append(PROJECT_PATH)

# Source directory of Moodle course backups and destination directories
# for published backups and course icons.
MOODLE_BACKUP_DIR = "/home/alex/data/backup/aws/dc/courses/"
OUTPUT_DIR_BASE = "/home/alex/data/Digital-Campus/content-dev/share/"
IMAGE_DIR_BASE = "/home/alex/data/Digital-Campus/content-dev/images/"

# Tags attached to every published resource, in addition to any
# course-specific tags stored in the database.
MPOWERING_DEFAULT_TAGS = ["Digital Campus",
                          "Course",
                          "Community Health Worker",
                          "Midwife",
                          "Nurse",
                          "Africa",
                          "Global",
                          "Ethiopia",
                          "English",
                          "Laptop",
                          "Smartphone",
                          "Tablet",
                          "Creative Commons 2.0 (CC-BY-NC-SA)"]

# Enables verbose console output throughout this script.
DEBUG = True

from api import orb_api, orb_resource, orb_resource_file, orb_resource_url, ORBAPIResourceExistsException
def run(orb_url, orb_username, orb_key, db_name, db_user, db_passwd, update_files):
    """Publishes Digital Campus courses and their files/urls to an ORB server.

    When update_files == 'True', the newest Moodle backup of each course is
    first copied into the share directory and the dc_file table updated.
    (Python 2 script: uses print statements and MySQLdb.)
    """
    api = orb_api()
    api.base_url = orb_url
    api.user_name = orb_username
    api.api_key = orb_key
    api.verbose_output = DEBUG
    db = MySQLdb.connect(host="localhost",
                         user=db_user,
                         passwd=db_passwd,
                         db=db_name)
    db.autocommit(True)
    # you must create a Cursor object. It will let
    # you execute all the queries you need
    cur = db.cursor(MySQLdb.cursors.DictCursor)

    if update_files == 'True':
        '''
        Update with the most recent Moodle backup versions
        '''
        cur.execute("SELECT f.id, course_id, f.description, file, moodle_course_id, type, short_name, c.location_code FROM dc_file f INNER JOIN dc_course c ON c.id = f.course_id")
        for row in cur.fetchall():
            # Backups sort lexicographically by date, so the last one is newest.
            course_backups = glob.glob(MOODLE_BACKUP_DIR+'backup-moodle2-course-'+str(row['moodle_course_id'])+'-*.mbz')
            course_backups.sort()
            file_name = course_backups[len(course_backups)-1]
            # Extract the YYYYMMDD date embedded in the backup file name.
            file_date = re.findall(r'[0-9]{8}', file_name)
            # Remove any previously published backups for this course/type.
            old_course_backups = glob.glob(os.path.join(OUTPUT_DIR_BASE, row['location_code'], 'moodle-backups', row['short_name'] + '-' + row['type'] + '*.mbz'))
            for old_file in old_course_backups:
                if DEBUG:
                    print "Removing: " + old_file
                os.remove(old_file)
            new_file_name = row['short_name'] + "-" + row['type'] + "-" + file_date[0] +".mbz"
            if DEBUG:
                print "Copying over: " + new_file_name
            shutil.copy2(file_name, os.path.join(OUTPUT_DIR_BASE, row['location_code'], 'moodle-backups', new_file_name))
            #cur2 = db.cursor(MySQLdb.cursors.DictCursor)
            # NOTE(review): SQL built via % interpolation - values come from
            # the local database/filesystem, but parameterized queries would
            # be safer.
            cur.execute("""UPDATE dc_file SET file = '%s' WHERE id = %s """ % (new_file_name, int(row['id'])))

    '''
    Publish updates to the mPowering
    '''
    cur.execute("""SELECT id, title, description, icon, tags, location_code, study_hours FROM dc_course WHERE mpowering = 1 """)
    additional_desc = "<p>This course is part of the Ethiopia Federal Ministry of Health approved upgrade training program for Health Extension Workers.</p>"
    # print all the first cell of all the rows
    for row in cur.fetchall() :
        resource = orb_resource()
        resource.title = row['title'].decode('utf-8')
        resource.description = row['description'].decode('utf-8') + additional_desc
        if row['study_hours'] != None:
            resource.study_time_number = row['study_hours']
            resource.study_time_unit = 'hours'
        # Create the resource, or fall back to updating it if it exists.
        try:
            resource.id = api.add_resource(resource)
        except ORBAPIResourceExistsException, e:
            if DEBUG:
                print e.message + ", id no:" + str(e.pk)
            resource.id = e.pk
            # upate the resource
            api.update_resource(resource)
        if row['icon']:
            api.add_or_update_resource_image(resource.id, os.path.join(IMAGE_DIR_BASE, row['icon']))
        # get the resource id
        resource_from_api = api.get_resource(resource)
        # Wipe existing attachments/tags so the publish below is a clean sync.
        # remove all ResourceFiles
        api.delete_resource_files(resource_from_api['files'])
        # remove all ResourceURLs
        api.delete_resource_urls(resource_from_api['urls'])
        # remove all tags for resource
        api.delete_resource_tags(resource_from_api['tags'])
        # add all the default tags
        for tag in MPOWERING_DEFAULT_TAGS:
            api.add_resource_tag(resource.id, tag.strip())
        # add resource specific tags
        if row['tags']:
            tag_list = [x.strip() for x in row['tags'].split(',')]
            for tag in tag_list:
                api.add_resource_tag(resource.id, tag.strip())
        # add the files
        cur.execute("""SELECT * FROM dc_file WHERE course_id = %s """ % (row['id']))
        for file in cur.fetchall():
            resource_file = orb_resource_file()
            resource_file.title = file['title']
            resource_file.description = file['description']
            resource_file.order_by = file['orderby']
            resource_file.file = os.path.join(OUTPUT_DIR_BASE, row['location_code'], 'moodle-backups', file['file'])
            api.add_resource_file(resource.id,resource_file)
        # add the urls
        cur.execute("""SELECT * FROM dc_url WHERE course_id = %s """ % (row['id']))
        for url in cur.fetchall():
            resource_url = orb_resource_url()
            resource_url.title = url['title']
            resource_url.description = url['description']
            resource_url.order_by = url['orderby']
            resource_url.url = url['url']
            api.add_resource_url(resource.id,resource_url)
    cur.close()
if __name__ == "__main__":
    # Command-line entry point: all parameters are positional arguments.
    arg_parser = argparse.ArgumentParser()
    for arg_name, arg_help in (
            ("orb_url", "ORB url"),
            ("orb_username", "ORB User Name"),
            ("orb_key", "ORB API Key"),
            ("db_name", "Database name"),
            ("db_user", "Database user name"),
            ("db_passwd", "Database password"),
            ("update_files", "Update files True/False")):
        arg_parser.add_argument(arg_name, help=arg_help)
    opts = arg_parser.parse_args()
    run(opts.orb_url, opts.orb_username, opts.orb_key, opts.db_name,
        opts.db_user, opts.db_passwd, opts.update_files)
#! /usr/bin/env python
import os
import sys
import json
import time
import argparse
import re
from oslo_utils import uuidutils
from subprocess import Popen, PIPE, check_output
from distutils.spawn import find_executable
def parse_list_output(output):
    """Parse the output of list commands (like `openstack project list`).

    Returns a list of dicts, one per data row, keyed by the lower-cased
    header names. (Uses list comprehensions instead of bare `filter` so
    the code works on both Python 2 and Python 3.)
    """
    lines = output.splitlines()
    # The second line holds the column headers: "| ID | Name | ...".
    keys = [x.lower().strip() for x in lines[1].split('|') if x]
    r = []
    # Data rows sit between the header separator and the bottom border.
    for line in lines[3:-1]:
        # Skip border rows such as "+----+----+".
        if len(line.split()) <= 1:
            continue
        values = [x.strip() for x in line.split('|') if x]
        assert len(keys) == len(values)
        r.append(dict(zip(keys, values)))
    return r
def parse_output(output):
    """
    Parse the output of commands (like `nova boot`) that print in tabular form.

    Returns a {field: value} dict. (Uses a list comprehension instead of
    bare `filter` so it works on both Python 2 and Python 3.)
    """
    # Skip the three header lines and the trailing border line.
    lines = output.splitlines()[3:-1]
    r = {}
    for line in lines:
        kv = [x.strip() for x in line.split('|') if x]
        r.update({kv[0]: kv[1]})
    return r
def get(list_of_dict, key, value):
    """
    Returns the dictionaries in a list of dictionaries that have the value of
    'key' as 'value', as a list. (On Python 3 the original `filter` call
    returned a lazy iterator instead of a list; a comprehension behaves the
    same on both versions.)
    """
    return [d for d in list_of_dict if d[key] == value]
def main():
    """Appends dnsmasq domain entries for staging Neutron networks."""
    command="neutron net-list".split()
    output=check_output(command)
    list_of_networks=parse_list_output(output)
    for network in list_of_networks:
        # Names look like '<prefix>_<name>_...'; keep the second field.
        # NOTE(review): assumes at least two '_'-separated parts - confirm.
        network_name=network['name'].split("_", 3)[1]
        # NOTE(review): second whitespace-separated token of the 'subnets'
        # column (likely the CIDR, despite the variable name) - confirm
        # against actual `neutron net-list` output.
        network_id=network['subnets'].split()[1]
        #sample_entry: domain=<domain>
        entry="domain=%s.<domain>,%s\n" % (network_name, network_id)
        # Only staging networks get a dnsmasq entry.
        if ".stg" in network_name:
            with open("/etc/neutron/dnsmasq-neutron.conf", "a") as myfile:
                myfile.write(entry)
    return 0
if __name__ == "__main__":
    # Exit status mirrors main()'s return value.
    sys.exit(main())
|
__author__ = 'Sean'
from TicTacToe import *
from time import time
from tkinter import *
class TTTgui:
    """Tkinter front-end: the human plays X, a minimax agent answers as O."""
    def __init__(self, depth = 9):
        # depth: search depth handed to the AdversarialSearch opponent.
        self.temp = 0
        self.game = None
        self.player2 = None
        # 3x3 grid of Tk buttons, populated in play().
        self.buttons = [[None for i in range(0,3)] for j in range(0,3)]
        self.depth = depth
    def play(self):
        """Builds the board window and enters the Tk event loop (blocks)."""
        print("Setting up game...\n")
        alpha = -1
        beta = 1
        self.game = TicTacToe()
        self.player2 = AdversarialSearch(self.game.copy(),"O", self.depth)
        window = Tk()
        window.rowconfigure((0,3), weight=1)
        window.columnconfigure((0,2), weight=1)
        for i in range(0,3):
            for j in range(0,3):
                print(str(i))
                # Default args bind the current i/j (avoids the late-binding
                # closure pitfall).
                b = Button(window, text = "", pady = 2, width = 5, height = 5, command = lambda a=i,b=j,: self.takeTurn(a,b))
                b.grid(row = i, column = j)
                print(self.buttons)
                self.buttons[i][j] = b
        buttonR = Button(window, text = "RESET", width = 15, height = 5, command =self.reset)
        buttonR.grid(row = 3, column = 0, columnspan = 3)
        window.mainloop()
    def reset(self):
        """Starts a fresh game and clears the board labels."""
        self.game = TicTacToe()
        alpha = -1
        beta = 1
        self.player2 = AdversarialSearch(self.game.copy(),"O", self.depth)
        for i in range(0,3):
            for j in range(0,3):
                self.buttons[i][j]["text"] = ""
    def takeTurn(self, y, x):
        """Applies the human's move at (row y, col x), then the AI replies."""
        # setPiece returning -1 means the cell is occupied; ignore the click.
        if self.game.setPiece(x,y,"X") == -1:
            return
        self.buttons[y][x]["text"] = "X"
        print(self.game)
        if self.game.isGameOver() != " ":
            status = self.game.isGameOver()
            if status == "TIE":
                print("TIE GAME")
            else:
                print(status + " WINS")
            return
        # AI (O) picks the minimizing move from the current position.
        self.player2.state = self.game.copy()
        (t,(x2,y2)) = self.player2.minValue(self.player2.state)
        self.game.setPiece(x2,y2,"O")
        self.buttons[y2][x2]["text"] = "O"
        print(self.game)
        if self.game.isGameOver() != " ":
            status = self.game.isGameOver()
            if status == "TIE":
                print("TIE GAME")
            else:
                print(status + " WINS")
            return
class TTTgame:
    """Console game: two plain minimax agents (X and O) play each other."""
    def __init__(self, none = 0):
        self.temp = 0
    def play(self):
        """Runs one full self-play game, printing the board after each move."""
        print("Setting up game...\n")
        game = TicTacToe()
        player1 = AdversarialSearch(game.copy(),"X")
        player2 = AdversarialSearch(game.copy(),"O")
        # isGameOver() returns " " while the game is still in progress.
        while game.isGameOver() == " ":
            print("Player 1 is deciding\n")
            player1.state = game.copy()
            (t,(x1,y1)) = player1.maxValue(player1.state)
            game.setPiece(x1,y1,"X")
            print(game)
            if game.isGameOver() != " ":
                break
            print("Player 2 is deciding\n")
            player2.state = game.copy()
            (t,(x2,y2)) = player2.minValue(player2.state)
            game.setPiece(x2,y2,"O")
            print(game)
        status = game.isGameOver()
        if status == "TIE":
            print("TIE GAME")
        else:
            print(status + " WINS")
class TTTABgame:
    """Console game: two alpha-beta-pruned agents play each other."""
    def __init__(self, none = 0):
        self.temp = 0
    def play(self):
        """Runs one full self-play game using alpha-beta search."""
        print("Setting up game...\n")
        game = TicTacToe()
        # Initial alpha/beta window for the pruned search.
        alpha = -1
        beta = 1
        player1 = AlphaBetaSearch(game.copy(),"X",alpha,beta)
        player2 = AlphaBetaSearch(game.copy(),"O",alpha,beta)
        while game.isGameOver() == " ":
            print("Player 1 is deciding\n")
            player1.state = game.copy()
            (t,(x1,y1)) = player1.maxValueAB(player1.state,alpha,beta)
            game.setPiece(x1,y1,"X")
            print(game)
            if game.isGameOver() != " ":
                break
            print("Player 2 is deciding\n")
            player2.state = game.copy()
            (t,(x2,y2)) = player2.minValueAB(player2.state,alpha,beta)
            game.setPiece(x2,y2,"O")
            print(game)
        status = game.isGameOver()
        if status == "TIE":
            print("TIE GAME")
        else:
            print(status + " WINS")
class TTTDLgame:
    """Console game: two depth-limited minimax agents play each other."""
    def __init__(self, depth = 9):
        # depth: maximum search depth for both agents.
        self.depth = depth
    def play(self):
        """Runs one full self-play game with depth-limited search."""
        print("Setting up game...\n")
        game = TicTacToe()
        player1 = AdversarialSearch(game.copy(),"X", self.depth)
        player2 = AdversarialSearch(game.copy(),"O", self.depth)
        while game.isGameOver() == " ":
            print("Player 1 is deciding\n")
            player1.state = game.copy()
            (t,(x1,y1)) = player1.maxValue(player1.state)
            game.setPiece(x1,y1,"X")
            print(game)
            if game.isGameOver() != " ":
                break
            print("Player 2 is deciding\n")
            player2.state = game.copy()
            (t,(x2,y2)) = player2.minValue(player2.state)
            game.setPiece(x2,y2,"O")
            print(game)
        status = game.isGameOver()
        if status == "TIE":
            print("TIE GAME")
        else:
            print(status + " WINS")
# Instantiate one runner of each flavour; only the GUI game is launched.
g = TTTgame()
h = TTTABgame()
i = TTTDLgame(2)
p = TTTgui()
# Blocks inside the Tk mainloop until the window is closed.
p.play()
# Timing scaffolding for comparing the three console variants
# (the actual runs are currently commented out).
t0 = time()
#g.play()
t1 = time()
t2 = time()
#h.play()
t3 = time()
t4 = time()
#i.play()
t5 = time()
print("reg elapsed: ",t1-t0)
print("AB elapsed: ",t3-t2)
print("DL elapsed: ",t5-t4)
"""Logging model and view defined"""
from flask import redirect, url_for, request
from flask_admin.contrib import sqla
from flask_sqlalchemy import SQLAlchemy
from flask_login import current_user
db = SQLAlchemy()
class Logging(db.Model):
    """defines the Logging model with its attributes to log app requests"""
    id = db.Column(db.Integer, primary_key=True)
    # Textual description of the API request being logged.
    request = db.Column(db.String(200), nullable=False)
    # NOTE(review): semantics of null_api are not visible here - presumably
    # flags requests whose API response was empty; confirm with callers.
    null_api = db.Column(db.String(200), nullable=False)
    def __init__(self, api_request, null_api):
        self.request = api_request
        self.null_api = null_api
class LoggingView(sqla.ModelView):
    """Read-only admin view over the Logging model (admins only)."""
    # Log entries are immutable from the admin UI.
    can_delete = False  # disable model deletion
    can_create = False
    can_edit = False
    column_searchable_list = ['request']
    column_filters = ['null_api']

    def is_accessible(self):
        """returns if user should have access or not to Logging View
        according to his role and if he is authenticated"""
        # The condition is already a boolean; return it directly instead of
        # the redundant `if ...: return True / return False` pattern.
        return current_user.is_authenticated and current_user.role == "admin"

    def inaccessible_callback(self, name, **kwargs):
        """defines what should be done if user doesn't have access to Logging View"""
        # redirect to login page if user doesn't have access
        return redirect(url_for('admin.index', next=request.url))
|
# set of nodes with operations
from LinkedListPKG.Node import Node;
class LinkedList(object):
    """Singly linked list of Node objects with O(1) head insertion."""

    def __init__(self):  # method to create a linked list
        self.head = None  # make an empty linked list
        self.counter = 0  # therefore the counter is set to 0

    def remove(self, data):
        """Remove the first node holding `data`; no-op if absent.

        BUG FIX: the original stub decremented the counter without
        removing anything (and even on a miss).
        """
        previous = None
        current = self.head
        while current is not None:
            if current.data == data:
                if previous is None:
                    self.head = current.nextNode
                else:
                    previous.nextNode = current.nextNode
                self.counter -= 1
                return
            previous = current
            current = current.nextNode

    def insertStart(self, data):
        """Insert `data` at the head of the list (O(1))."""
        self.counter += 1
        newNode = Node(data)
        if not self.head:  # if this is the first item on the linked list
            self.head = newNode
        else:
            newNode.nextNode = self.head
            self.head = newNode

    def insertEnd(self, data):
        """Append `data` at the tail of the list (O(n)).

        Implemented; the original stub only incremented the counter.
        """
        self.counter += 1
        newNode = Node(data)
        if not self.head:
            self.head = newNode
            return
        actualNode = self.head
        while actualNode.nextNode is not None:
            actualNode = actualNode.nextNode
        actualNode.nextNode = newNode

    def traverse(self):
        """Print every element from head to tail.

        BUG FIX: the original signature was `traverse(list)` while the body
        used `self`, so every call raised NameError.
        """
        actualNode = self.head
        while actualNode is not None:
            print("%d " % actualNode.data)
            actualNode = actualNode.nextNode

    def size(self):
        """Return the number of stored elements (O(1))."""
        return self.counter
import itertools
# Card ranks from low to high; "T" stands for ten.
RANKS = "23456789TJQKA"
# Suit codes: hearts, spades, diamonds, clubs.
SUITS = "hsdc"
# All 6 unordered pairings of the four suits.
SUIT_PAIRS = list(itertools.combinations(SUITS, 2))
import logging
import pandas as pd
from tigeropen.common.consts import BarPeriod
from lib.date import date_delta, get_today
from lib.quant import alpha_beta
from tiger.config import get_bars_from_cache, get_quote_client
"""
波动率计算
https://blog.csdn.net/CoderPai/article/details/82868280
beta 计算
https://blog.csdn.net/thfyshz/article/details/83443783
贝塔系数衡量了个股或基金相对于整个股市的波动情况。
β范围 含义
β=1 股票或基金的风险收益率与市场平均风险收益率相同
β>1 股票或基金的风险相较于市场平均更大
β<1 股票或基金的风险相较于市场平均更小
"""
logging.basicConfig(format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', level=logging.INFO)
def alpha_beta_plot(data: pd.DataFrame, stocks: list):
    """
    Log alpha/beta of each stock against the SPY and QQQ benchmarks.
    :param data: bar data containing 'symbol' and 'close' columns for all
        requested symbols (benchmarks included)
    :param stocks: list of stock symbols to evaluate
    :return: None (results are written to the log)
    """
    # `stocks: []` was an invalid annotation (a list instance, not a type).
    spy_data = data.loc[(data["symbol"] == 'SPY')]
    qqq_data = data.loc[(data["symbol"] == 'QQQ')]
    # Daily simple returns of the two benchmark ETFs.
    return_qqq = list(qqq_data['close'].pct_change().dropna())
    return_spy = list(spy_data['close'].pct_change().dropna())
    for stock in stocks:
        stock_data = data.loc[(data["symbol"] == stock)]
        return_stock = list(stock_data['close'].pct_change().dropna())
        # Alpha/beta with SPY, then QQQ, as the market baseline.
        alpha_spy, beta_spy = alpha_beta(return_spy, return_stock)
        alpha_qqq, beta_qqq = alpha_beta(return_qqq, return_stock)
        logging.info('SPY basics %s alpha: %s, beta: %s', stock, str(alpha_spy), str(beta_spy))
        logging.info('QQQ basics %s alpha: %s, beta: %s', stock, str(alpha_qqq), str(beta_qqq))
if __name__ == '__main__':
    quote_client = get_quote_client()
    # Benchmarks (QQQ/SPY) must be in the list so their returns are available.
    stocks = ['QQQ', 'SPY', 'TLT', 'USO', 'IAU']
    # Daily bars from 416 days ago (52 * 8) up to today.
    data = get_bars_from_cache(quote_client, symbols=stocks, period=BarPeriod.DAY,
                               begin_time=date_delta(-52 * 8), end_time=get_today())
    alpha_beta_plot(data, stocks)
|
from contextlib import contextmanager
from typing import Generator, List
from unittest import mock
from prefect.utilities.dockerutils import ImageBuilder
@contextmanager
def capture_builders() -> Generator[List[ImageBuilder], None, None]:
    """Yield a list that collects every ImageBuilder constructed in scope."""
    captured: List[ImageBuilder] = []
    wrapped_init = ImageBuilder.__init__

    def _recording_init(self, *args, **kwargs):
        # Record the instance, then defer to the real constructor.
        captured.append(self)
        wrapped_init(self, *args, **kwargs)

    with mock.patch.object(ImageBuilder, "__init__", _recording_init):
        yield captured
|
"""
Apply tagging process
@author: Praveen Chandar
"""
from ctgov.load_data import load_data
from ctgov.utility.log import strd_logger
from multiprocessing import Process
import ctgov.index.es_index as es_index
from ctgov.concept_mapping.tagger import Tagger
import argparse
import sys
import math
log = strd_logger('tag-miner')
def nct_tagging(index_name, host, port_no, process_ids,
                stopwords, umls, pos, nprocs=1):
    """Tag the eligibility-criteria text of clinical trials in an Elastic
    Search index, splitting the id list across ``nprocs`` worker processes.

    NOTE: this module is Python 2 (xrange, print statements elsewhere).
    """
    # open the clinical trail ids file to process
    nct_ids = []
    for line in open(process_ids, 'rb'):
        nct_ids.append(line.strip())
    # Check if index exists
    index = es_index.ElasticSearch_Index(index_name, host=host, port=port_no)
    index.add_field('ec_tags_umls', term_vector=True)
    # Get clinical
    # process each clinical trial and store to XML file
    log.info('processing clinical trials')
    procs = []
    # One contiguous chunk of ids per worker process.
    chunksize = int(math.ceil(len(nct_ids) / float(nprocs)))
    for i in xrange(nprocs):
        p = Process(target=_worker, args=(nct_ids[chunksize * i:chunksize * (i + 1)],
                                          index_name, host, port_no,
                                          stopwords, umls, pos, (i + 1)))
        procs.append(p)
        p.start()
    # Wait for all workers to finish before returning.
    for p in procs:
        p.join()
# private functions
# processer worker
# indexer function
def _worker(nct, index_name, host, port_no, stopwords, umls, pos, npr):
    """Worker process: tag the 'ec_raw_text' of each trial id in ``nct`` and
    write the resulting UMLS tags back to the index (Python 2 code)."""
    index = es_index.ElasticSearch_Index(index_name, host=host, port=port_no)
    tagger = Tagger(5, stopwords, umls, pos)
    # Iterate over NCT trials
    for i in xrange(1, len(nct) + 1):
        nctid = nct[i - 1]
        # if i == 3:
        # break
        # if nctid != 'NCT00000331':
        # continue
        if i % 500 == 0:
            log.info(' --- core %d: processed %d documents' % (npr, i))
        # Get document from the Elastic Search Index
        doc = index.get_trail(nctid)
        if not doc.has_key('ec_raw_text'):
            continue
        ec = doc['ec_raw_text']
        if ec is None:
            continue
        (pec, jpec) = tagger.process(ec)
        # Expand each tag to `value` repetitions so term frequency survives.
        # NOTE(review): the inner loop reuses the outer index name ``i``;
        # harmless here (``i`` is reassigned by the outer for) but fragile.
        dictlist = []
        for key, value in jpec.iteritems():
            for i in range(value):
                dictlist.append(key)
        doc['ec_tags_umls'] = dictlist
        print nctid, dictlist
        # Index the new document
        index.index_trial(nctid, doc)
# Main Function
# processing the command line options
def _process_args():
parser = argparse.ArgumentParser(description='Download and Process Clinical Trials')
# index name
parser.add_argument('-index_name', default='ctgov', help='name of the elastic search index')
# host name
parser.add_argument('-host', default='localhost', help='Elastic search hostname')
# port number
parser.add_argument('-port', default='9200', help='Elastic search port number')
# ids file
parser.add_argument('-process_ids', help='file containing clinical ids to process')
# stop word file
parser.add_argument('-w', default=None, help='stop word directory (default: None)')
# umls directory
parser.add_argument('-u', default=None, help='umls directory (default: None)')
# pos tags
parser.add_argument('-p', default=None, help='part-of-speech admitted tag file (default: None)')
# number of processers to use
parser.add_argument('-c', default=1, type=int, help='number of processors (default: 1)')
return parser.parse_args(sys.argv[1:])
if __name__ == '__main__':
    args = _process_args()
    # edata = (stop words, UMLS data, POS tags) loaded from the given paths.
    edata = load_data(args.w, args.u, args.p)
    nct_tagging(args.index_name, args.host, int(args.port),
                args.process_ids, edata[0], edata[1], edata[2], args.c)
    log.info('task completed\n')
#!/usr/bin/env python
import game
import gameio
# Demo: build a 4x4 minesweeper board with two mines and uncover one cell.
r = 4 ;
l = 4;
boardsList = game.allocBoard(r, l);  # returns [mineBoard, statusBoard]
mineBoard = boardsList[0];
statusBoard = boardsList[1];
# -1 marks a mine cell (presumably — confirm against game.allocBoard).
mineBoard[0][0] = -1;
mineBoard[0][3] = -1;
print("initial board: ")
gameio.displayBoard(mineBoard, statusBoard);
game.uncoverLoc(mineBoard, statusBoard, 2, 2);
print("uncovering Loction(2,2):")
gameio.displayBoard(mineBoard, statusBoard);
|
import time
import datetime
import copy
import numpy as np
class Station:
    """Book-keeps how long a production station spends in each of four
    states: Alarm, Producing, Waiting and Off."""

    def __init__(self, name, state):
        # ``state`` is a mapping with boolean flags 'Alarm', 'Producing'
        # and 'Alive'; the highest-priority true flag wins.
        self.name = name
        # 'booked'  : seconds accumulated in already-finished state periods.
        # 'current' : seconds of the still-open period (filled by get_portion).
        self.state = {'state': '', 'since': datetime.datetime.now(),
                      'booked': {
                          'Alarm': 0.0,
                          'Producing': 0.0,
                          'Waiting': 0.0,
                          'Off': 0.0
                      },
                      'current': {
                          'Alarm': 0.0,
                          'Producing': 0.0,
                          'Waiting': 0.0,
                          'Off': 0.0
                      }
                      }
        # Priority: Alarm > Producing > Waiting (alive) > Off.
        if state['Alarm']:
            self.state['state'] = 'Alarm'
        elif state['Producing']:
            self.state['state'] = 'Producing'
        elif state['Alive']:
            self.state['state'] = 'Waiting'
        else:
            self.state['state'] = 'Off'

    def new_status(self, state):
        # NOTE(review): unlike __init__, ``state`` is indexed positionally
        # here (state[2]≈alive, state[3]≈producing, state[4]≈alarm,
        # state[5]=timestamp) — presumably a DB/log row; confirm callers.
        elapsed_time = (state[5] - self.state['since']).total_seconds()
        # Book the time spent in the state that is now ending.
        self.book(self.state, elapsed_time)
        if state[4]:
            self.state['state'] = 'Alarm'
        elif state[3]:
            self.state['state'] = 'Producing'
        elif state[2]:
            self.state['state'] = 'Waiting'
        else:
            self.state['state'] = 'Off'
        self.state['since'] = state[5]

    def book(self, state, elapsed_time):
        # Credit elapsed seconds to the currently recorded state.
        # NOTE(review): the ``state`` parameter is ignored; self.state is used.
        self.state['booked'][self.state['state']] += elapsed_time

    def get_current_duration(self):
        # Seconds spent so far in the still-open state period.
        return (datetime.datetime.now() - self.state['since']).total_seconds()

    def get_portion(self):
        # Rebuild 'current' so only the active state carries the open period.
        self.state['current']['Alarm'] = 0.0
        self.state['current']['Producing'] = 0.0
        self.state['current']['Waiting'] = 0.0
        self.state['current']['Off'] = 0.0
        self.state['current'][self.state['state']] = self.get_current_duration()
        # Totals per state: booked (closed periods) + current (open period),
        # ordered [Alarm, Producing, Waiting, Off].
        portion = [self.state['booked']['Alarm']+self.state['current']['Alarm'],
                   self.state['booked']['Producing']+self.state['current']['Producing'],
                   self.state['booked']['Waiting']+self.state['current']['Waiting'],
                   self.state['booked']['Off']+self.state['current']['Off']
                   ]
        print(portion)
        return portion
class Conv_2(Station):
    """Station subclass for conveyor 'Bansystem DD02' with its own
    signal-lamp tag mapping."""

    def __init__(self, d):
        self.name = 'Bansystem DD02'
        # Work on a private copy so the shared template dict is not mutated.
        self.d = copy.deepcopy(d)
        self.d['blue'][2] = 'LjusfyrBla'
        self.d['green'][2] = 'LjusfyrGron'
        self.d['yellow'][2] = 'LjusfyrGul'
        self.d['white'][2] = 'LjusfyrVit'
        self.d['stop'][2] = ''
        self.d['do_blue'][2] = ''
        self.d['do_green'][2] = 'LjusfyrGronBlink'
        self.d['do_yellow'][2] = 'LjusfyrGulBlink'
        self.d['do_white'][2] = ''
        self.d['do_stop'][2] = ''
        # Fix: Station.__init__ takes (name, state); the arguments were
        # previously passed swapped as (self.d, self.name), which made
        # Station index into the name string and fail.
        super(Conv_2, self).__init__(self.name, self.d)
|
#!/usr/bin/python
import rospy
import sys
import struct
import argparse
import time
import numpy
import traceback
from sensor_msgs.msg import Image
from snark.imaging import cv_image
import cv2
import rosbag
help_description='listen to a ros topic for raw image and write it in cv format to stdout'
help_example="""
example
read from published ros topic and output image
ros-to-image /camera/rgb/image_color | cv-cat "timestamp;view;null"
read from rosbags and output image
ros-to-image /camera/rgb/image_color /log/*.bag | cv-cat "timestamp;view;null"
"""
parser=argparse.ArgumentParser(description=help_description,epilog=help_example,formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('topic',help='topic to listen to')
parser.add_argument('bags', metavar='BAGFILE', nargs='*', help='read messages from bag files')
parser.add_argument('--flush',help='flush stdout after each image frame write',action='store_true')
args=parser.parse_args()
encoding_map={
'rgb8': cv2.CV_8UC3,
'rgba8': cv2.CV_8UC4,
"rgb16": cv2.CV_16UC3,
"rgba16": cv2.CV_16UC4,
"bgr8": cv2.CV_8UC3,
"bgra8": cv2.CV_8UC4,
"bgr16": cv2.CV_16UC3,
"bgra16": cv2.CV_16UC4,
"mono8": cv2.CV_8UC1,
"mono16": cv2.CV_16UC1,
"8UC1": cv2.CV_8UC1,
"8UC2": cv2.CV_8UC2,
"8UC3": cv2.CV_8UC3,
"8UC4": cv2.CV_8UC4,
"8SC1": cv2.CV_8SC1,
"8SC2": cv2.CV_8SC2,
"8SC3": cv2.CV_8SC3,
"8SC4": cv2.CV_8SC4,
"16UC1": cv2.CV_16UC1,
"16UC2": cv2.CV_16UC2,
"16UC3": cv2.CV_16UC3,
"16UC4": cv2.CV_16UC4,
"16SC1": cv2.CV_16SC1,
"16SC2": cv2.CV_16SC2,
"16SC3": cv2.CV_16SC3,
"16SC4": cv2.CV_16SC4,
"32SC1": cv2.CV_32SC1,
"32SC2": cv2.CV_32SC2,
"32SC3": cv2.CV_32SC3,
"32SC4": cv2.CV_32SC4,
"32FC1": cv2.CV_32FC1,
"32FC2": cv2.CV_32FC2,
"32FC3": cv2.CV_32FC3,
"32FC4": cv2.CV_32FC4,
"64FC1": cv2.CV_64FC1,
"64FC2": cv2.CV_64FC2,
"64FC3": cv2.CV_64FC3,
"64FC4": cv2.CV_64FC4,
"bayer_rggb8": cv2.CV_8UC4,
"bayer_bggr8": cv2.CV_8UC4,
"bayer_gbrg8": cv2.CV_8UC4,
"bayer_grbg8": cv2.CV_8UC4,
"bayer_rggb16": cv2.CV_16UC4,
"bayer_bggr16": cv2.CV_16UC4,
"bayer_gbrg16": cv2.CV_16UC4,
"bayer_grbg16": cv2.CV_16UC4
}
shutdown=False
def message_to_image(msg):
    """Write one sensor_msgs/Image to stdout in cv-cat binary form:
    a header record (time, rows, cols, cv type) followed by raw pixel data.

    Sets the module-level ``shutdown`` flag on any failure so the main loop
    stops. NOTE: Python 2 — sys.stdout.write() of raw bytes.
    """
    global shutdown
    global args
    try:
        header=numpy.zeros((),dtype=cv_image.image.header_dtype)
        t=msg.header.stamp
        # Timestamp in microseconds since epoch.
        header['time']=t.secs*1000000+t.nsecs/1000
        header['rows']=msg.height
        header['cols']=msg.width
        header['type']=encoding_map[msg.encoding]
        header.tofile(sys.stdout)
        #t=msg.header.stamp
        #ts=struct.pack('q',t.secs*1000000+t.nsecs/1000)
        #sys.stdout.write(ts)
        #sys.stdout.write(struct.pack('I',msg.height))
        #sys.stdout.write(struct.pack('I',msg.width))
        #sys.stdout.write(struct.pack('I',encoding_map[msg.encoding]))
        sys.stdout.write(msg.data)
        if args.flush:
            sys.stdout.flush()
    except SystemExit:
        # Raised e.g. when the downstream pipe closes: stop quietly.
        shutdown=True
    except:
        # Deliberately broad: any other failure logs a traceback and stops.
        shutdown=True
        sys.stderr.write("exception!\n")
        traceback.print_exc()
# Replay from bag files when given, otherwise subscribe to the live topic.
if len(args.bags):
    for bagfile in args.bags:
        with rosbag.Bag(bagfile) as bag:
            for topic, msg, t in bag.read_messages(topics=args.topic):
                if shutdown: exit(0)
                message_to_image(msg)
else:
    rospy.init_node('listener',anonymous=True,disable_signals=True)
    subscription = rospy.Subscriber(args.topic, Image, message_to_image)
    # Spin until the callback flags shutdown (e.g. on a broken stdout pipe).
    while not shutdown:
        time.sleep(0.001)
    rospy.signal_shutdown('shutdown')
|
# coding:utf8
"""
传统机器视觉方式寻找文档中的文本区域
"""
import cv2
import numpy as np
def preprocess(gray):
    """Turn a grayscale page image into a binary mask of candidate
    text areas via gradient + threshold + morphology."""
    # 1. Horizontal gradient (Sobel in x) highlights character strokes.
    grad_x = cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize=3)
    # 2. Otsu binarization of the gradient image.
    _, binary = cv2.threshold(grad_x, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    # 3. Structuring elements: a wide one for erosion, a smaller one for dilation.
    kernel_wide = cv2.getStructuringElement(cv2.MORPH_RECT, (30, 9))
    kernel_small = cv2.getStructuringElement(cv2.MORPH_RECT, (24, 6))
    # 4. Dilate once so outlines stand out.
    dilated = cv2.dilate(binary, kernel_small, iterations=1)
    # 5. Erode once to drop thin details such as vertical table lines.
    eroded = cv2.erode(dilated, kernel_wide, iterations=1)
    # 6. Dilate again to make the remaining regions solid.
    result = cv2.dilate(eroded, kernel_small, iterations=3)
    # 7. (debug) intermediate images:
    # cv2.imwrite("binary.png", binary)
    # cv2.imwrite("dilation.png", dilated)
    # cv2.imwrite("erosion.png", eroded)
    # cv2.imwrite("dilation2.png", result)
    return result
def findTextRegion(img):
    """Return the boxes (4-point int arrays) of likely text regions found
    in a binary mask image."""
    region = []
    # 1. find contours
    contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # 2. drop small-area contours
    for i in range(len(contours)):
        cnt = contours[i]
        area = cv2.contourArea(cnt)
        if (area < 1000):
            continue
        # Polygon approximation (kept from the original; result unused).
        epsilon = 0.001 * cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, epsilon, True)
        # Minimum-area (possibly rotated) bounding rectangle.
        rect = cv2.minAreaRect(cnt)
        # Fix: these were two bare ``print`` expressions left over from a
        # Python 2 to 3 half-conversion and printed nothing at all.
        print("rect is: ")
        print(rect)
        # box holds the four corner coordinates.
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        # Height/width of the box from opposite corners.
        height = abs(box[0][1] - box[2][1])
        width = abs(box[0][0] - box[2][0])
        # Keep wide (text-like) boxes; skip tall, narrow ones.
        if (height > width * 1.2):
            continue
        region.append(box)
    return region
def detect(img):
    """Find text regions in a BGR image, outline them in green, display the
    result and save it to contours.png (blocks on a GUI key press)."""
    # 1. convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # 2. morphological preprocessing to get a rectangle-friendly mask
    dilation = preprocess(gray)
    # 3. find and filter the text regions
    region = findTextRegion(dilation)
    # 4. draw the found boxes in green
    for box in region:
        cv2.drawContours(img, [box], 0, (0, 255, 0), 2)
    cv2.namedWindow("img", cv2.WINDOW_NORMAL)
    cv2.imshow("img", img)
    # save the annotated image
    cv2.imwrite("contours.png", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def find_line(img_path):
    """Detect long straight lines in an image via iterated morphological
    opening + Hough transforms, showing each intermediate step in a window
    (blocks on key presses)."""
    img_o = cv2.imread(img_path)
    (bg_h, bg_w, _) = img_o.shape
    # img_l collects HoughLines output; img_p collects HoughLinesP output.
    img_l = np.zeros((bg_h, bg_w, 1), np.uint8)
    img_p = np.zeros((bg_h, bg_w, 1), np.uint8)
    img = cv2.imread(img_path, 0)
    minLineLength = 100
    maxLineGap = 50
    # 1: open the grayscale image, then probabilistic Hough transform.
    kernel = np.ones((3, 3), np.uint8)
    t_img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=5)
    lines = cv2.HoughLinesP(t_img, 1, np.pi / 180, 200, minLineLength, maxLineGap)
    lines1 = lines[:, 0, :]
    for x1, y1, x2, y2 in lines1[:]:
        # cv2.line(img_o, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.line(img_p, (x1, y1), (x2, y2), 255, 2)
    cv2.imshow("est", img_p)
    cv2.waitKey(0)
    # 2: repeat the open + Hough pass on the line image to keep strong lines.
    kernel = np.ones((3, 3), np.uint8)
    t_img = cv2.morphologyEx(img_p, cv2.MORPH_OPEN, kernel, iterations=5)
    lines = cv2.HoughLinesP(t_img, 1, np.pi / 180, 200, minLineLength, maxLineGap)
    img_p = np.zeros((bg_h, bg_w, 1), np.uint8)
    lines1 = lines[:, 0, :]
    for x1, y1, x2, y2 in lines1[:]:
        # cv2.line(img_o, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.line(img_p, (x1, y1), (x2, y2), 255, 2)
    cv2.imshow("est", img_p)
    cv2.waitKey(0)
    # Blur + Canny edges, then the standard (infinite-line) Hough transform.
    t_img = cv2.GaussianBlur(img_p, (3, 3), 0)
    edges = cv2.Canny(t_img, 50, 200)
    cv2.imshow("est", edges)
    cv2.waitKey(0)
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 150)
    lines1 = lines[:, 0, :]
    for rho, theta in lines1[:]:
        # Convert (rho, theta) polar form to two far-apart points.
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 10000 * (-b))
        y1 = int(y0 + 10000 * (a))
        x2 = int(x0 - 10000 * (-b))
        y2 = int(y0 - 10000 * (a))
        cv2.line(img_o, (x1, y1), (x2, y2), (255, 0, 0), 2)
        cv2.line(img_l, (x1, y1), (x2, y2), 255, 2)
    cv2.imshow("est", img_l)
    cv2.waitKey(0)
    # Close small gaps between the drawn lines.
    kernel = np.ones((5, 5), np.uint8)
    img_l = cv2.morphologyEx(img_l, cv2.MORPH_CLOSE, kernel, iterations=5)
    cv2.imshow("est", img_l)
    cv2.waitKey(0)
    # contours, hier = cv2.findContours(img_p, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # for c in contours:
    # # find minimum area
    # rect = cv2.minAreaRect(c)
    # # calculate coordinates of the minimum area rectangle
    # box = cv2.boxPoints(rect)
    # # normalize coordinates to integers
    # box = np.int0(box)
    # # draw contours
    # draw_img3 = cv2.drawContours(img_o.copy(), [box], 0, (0, 0, 255), 3)
    # cv2.imshow("est", draw_img3)
    # cv2.waitKey(0)
    # draw_img0 = cv2.drawContours(img_o.copy(), contours, 0, (0, 255, 255), 1)
    # cv2.imshow("est", draw_img0)
    # cv2.waitKey(0)
    # draw_img1 = cv2.drawContours(img_o.copy(), contours, 1, (255, 0, 255), 1)
    # cv2.imshow("est", draw_img1)
    # cv2.waitKey(0)
    # draw_img2 = cv2.drawContours(img_o.copy(), contours, 2, (255, 255, 0), 3)
    # cv2.imshow("est", draw_img2)
    # cv2.waitKey(0)
    # draw_img3 = cv2.drawContours(img_o.copy(), contours, -1, (0, 0, 255), 1)
    # cv2.imshow("est", draw_img3)
    cv2.waitKey(0)
    # cv2.imwrite('111.jpg', draw_img3)
if __name__ == '__main__':
    # find_line('./hed/out/bJW2weiuLJ_ORIGINAL.jpg')
    # read the input image and run text-region detection
    img = cv2.imread('3BA9E58C76.jpg')
    detect(img)
|
# from __future__ import unicode_literals
# from django.db import models
# from django.conf import settings
# from django.db.models import ForeignKey
# from django.db.models.fields import (
# AutoField,
# CharField,
# IntegerField,
# DecimalField,
# BinaryField,
# DateTimeField
# )
#
#
#
# class Property(models.Model):
# class Meta:
# app_label = 'rpg'
# db_table = 'Properties'
#
# id = AutoField(primary_key=True)
# label = CharField(null=True, blank=True, max_length=1000)
# date_created = DateTimeField(auto_now=True) |
#!/usr/bin/env python
from distutils.core import setup
# Package metadata for the gitli distribution.
# NOTE(review): distutils is deprecated (removed in Python 3.12); consider
# migrating to setuptools.
setup(name='gitli',
      version='0.4',
      description='Simple issue management for git',
      long_description=
      '''
gitli is a simple git extension to manage issues in single-developer projects.
The issues are stored in the current branch of the git repository. gitli is
**not** a distributed issue tracker so merges need to be done by hand for now.
To use gitli, simply type ``git li init``, then ``git li new 'issue title'``,
and ``git li list``.
Go to the `gitli homepage <https://github.com/bartdag/gitli>`_ to read the
documentation.
The script does not attempt to prevent goofs and error messages can make
children cry.
''',
      author='Barthelemy Dagenais',
      author_email='barthe@users.sourceforge.net',
      license='BSD License',
      url='https://github.com/bartdag/gitli',
      py_modules=['gitli'],
      scripts=['git-li'],
      classifiers=[
          'Intended Audience :: Developers',
          'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.1',
          'Programming Language :: Python :: 3.2',
          'Topic :: Software Development :: Bug Tracking',
      ],
      )
|
from helper_code import find_challenge_files,load_header,load_recording,get_leads
import os
import sys
import numpy as np
import scipy as sp
import scipy.io
from shutil import rmtree
from multiprocessing import Pool
def extract_leads_wfdb_custom(src_path, dst_path, leads):
    """Re-save every WFDB record under src_path into dst_path, keeping only
    the given leads, using a process pool."""
    reduced_leads = leads
    num_reduced_leads = len(reduced_leads)
    full_header_files, full_recording_files = find_challenge_files(src_path)
    # Start from a clean destination directory.
    if os.path.isdir(dst_path):
        rmtree(dst_path)
    if not os.path.isdir(dst_path):
        os.mkdir(dst_path)
    # Broadcast the per-call constants so starmap receives one argument
    # tuple per record.
    record_count = len(full_header_files)
    job_args = zip(full_header_files,
                   full_recording_files,
                   [src_path] * record_count,
                   [dst_path] * record_count,
                   [reduced_leads] * record_count,
                   [num_reduced_leads] * record_count)
    with Pool() as pool:
        pool.starmap(resave_one, job_args)
def resave_one(full_header_file,full_recording_file,src_path,dst_path,reduced_leads,num_reduced_leads):
    """Rewrite one WFDB header/recording pair keeping only ``reduced_leads``.

    Exits the process (sys.exit) on a malformed signal file or a missing
    lead, matching the original batch-job behaviour.
    """
    # Load full header and recording.
    full_header = load_header(full_header_file)
    full_recording = load_recording(full_recording_file, 'val')
    # Get full lead names from header and check that are reduced leads are available.
    full_leads = get_leads(full_header)
    num_full_leads = len(full_leads)
    if np.shape(full_recording)[0] != num_full_leads:
        print('The signal file {} is malformed: the dimensions of the signal file are inconsistent with the header file {}.'.format(full_recording_file, full_header_file))
        sys.exit()
    unavailable_leads = [lead for lead in reduced_leads if lead not in full_leads]
    if unavailable_leads:
        print('The lead(s) {} are not available in the header file {}.'.format(', '.join(unavailable_leads), full_header_file))
        sys.exit()
    # Create the reduced lead header and recording files.
    head, tail = os.path.split(full_header_file)
    reduced_header_file = os.path.join(dst_path, tail)
    head, tail = os.path.split(full_recording_file)
    reduced_recording_file = os.path.join(dst_path, tail)
    root, extension = os.path.splitext(tail)
    recording_id = root
    # Initialize outputs.
    full_lines = full_header.split('\n')
    reduced_lines = list()
    # For the first line, we need to update the recording number and the number of leads.
    entries = full_lines[0].split()
    entries[0] = recording_id
    entries[1] = str(num_reduced_leads)
    reduced_lines.append(' '.join(entries))
    # For the next lines that describe the leads, we need to update the signal filename.
    reduced_indices = list()
    for lead in reduced_leads:
        i = full_leads.index(lead)
        reduced_indices.append(i)
        entries = full_lines[i+1].split()
        entries[0] = recording_id
        reduced_lines.append(' '.join(entries))
    # For the remaining lines that describe the other data, we do not need to update anything.
    for i in range(num_full_leads+1, len(full_lines)):
        entries = full_lines[i].split()
        reduced_lines.append(' '.join(entries))
    # Save the reduced lead header and recording files.
    with open(reduced_header_file, 'w') as f:
        f.write('\n'.join(reduced_lines))
    # Keep only the rows of the selected leads, in the requested order.
    reduced_recording = full_recording[reduced_indices, :]
    d = {'val': reduced_recording}
    sp.io.savemat(reduced_recording_file, d)
|
##############################################################################
# (c) Crown copyright Met Office. All rights reserved.
# For further details please refer to the file COPYRIGHT
# which you should have received as part of this distribution
##############################################################################
"""
Exercise the 'repository' module.
"""
import filecmp
from pathlib import Path
from subprocess import run, Popen
import shutil
import signal
import time
from typing import List, Tuple
from pytest import fixture, mark, raises # type: ignore
from _pytest.tmpdir import TempPathFactory # type: ignore
from fab import FabException
from fab.repository import repository_from_url, GitRepo, SubversionRepo
def _tree_compare(first: Path, second: Path) -> None:
"""
Compare two file trees to ensure they are identical.
"""
tree_comparison = filecmp.dircmp(str(first), str(second))
assert len(tree_comparison.left_only) == 0 \
and len(tree_comparison.right_only) == 0
_, mismatch, errors = filecmp.cmpfiles(str(first), str(second),
tree_comparison.common_files,
shallow=False)
assert len(mismatch) == 0 and len(errors) == 0
class TestSubversion:
    """
    Tests of the Subversion repository interface.
    """
    @fixture(scope='class')
    def repo(self, tmp_path_factory: TempPathFactory) -> Tuple[Path, Path]:
        """
        Set up a repository and return its path along with the path of the
        original file tree.
        """
        repo_path = tmp_path_factory.mktemp('repo', numbered=True)
        command = ['svnadmin', 'create', str(repo_path)]
        assert run(command).returncode == 0
        tree_path = tmp_path_factory.mktemp('tree', numbered=True)
        (tree_path / 'alpha').write_text("First file")
        (tree_path / 'beta').mkdir()
        (tree_path / 'beta' / 'gamma').write_text("Second file")
        command = ['svn', 'import', '-m', "Initial import",
                   str(tree_path), f'file://{repo_path}/trunk']
        assert run(command).returncode == 0
        return repo_path, tree_path

    def test_extract_from_file(self, repo: Tuple[Path, Path], tmp_path: Path):
        """
        Checks that a source tree can be extracted from a Subversion
        repository stored on disc.
        """
        test_unit = SubversionRepo(f'file://{repo[0]}/trunk')
        test_unit.extract(tmp_path)
        _tree_compare(repo[1], tmp_path)
        assert not (tmp_path / '.svn').exists()

    def test_extract_from_svn(self, repo: Tuple[Path, Path], tmp_path: Path):
        """
        Checks that a source tree can be extracted from a Subversion
        repository accessed through its own protocol.
        """
        command: List[str] = ['svnserve', '-r', str(repo[0]), '-X']
        process = Popen(command)
        test_unit = SubversionRepo('svn://localhost/trunk')
        #
        # It seems there can be a delay between the server starting and the
        # listen socket opening. Thus we have a number of retries.
        #
        # TODO: Is there a better solution such that we don't try to connect
        #       until the socket is open?
        #
        for retry in range(3, 0, -1):
            try:
                test_unit.extract(tmp_path)
            except FabException as ex:
                # Fix: this previously compared the builtin ``range`` to 0
                # (always False), so the final failure was swallowed and the
                # test went on to compare a tree it never extracted. Re-raise
                # on the last attempt (retry counts down to 1).
                if retry == 1:
                    raise ex
                time.sleep(1.0)
            else:
                break
        _tree_compare(repo[1], tmp_path)
        assert not (tmp_path / '.svn').exists()
        process.wait(timeout=1)
        assert process.returncode == 0

    @mark.skip(reason="Too hard to test at the moment.")
    def test_extract_from_http(self, repo: Tuple[Path, Path], tmp_path: Path):
        """
        Checks that a source tree can be extracted from a Subversion
        repository accessed through HTTP.
        TODO: This is hard to test without a full Apache installation. For the
              moment we forgo the test on the basis that it's too hard.
        """
        pass
class TestGit:
    """
    Tests of the Git repository interface.
    """
    @fixture(scope='class')
    def repo(self, tmp_path_factory: TempPathFactory) -> Tuple[Path, Path]:
        """
        Set up a repository and return its path along with the path of the
        original file tree.
        """
        tree_path = tmp_path_factory.mktemp('tree', numbered=True)
        (tree_path / 'alpha').write_text("First file")
        (tree_path / 'beta').mkdir()
        (tree_path / 'beta' / 'gamma').write_text("Second file")
        repo_path = tmp_path_factory.mktemp('repo', numbered=True)
        command = ['git', 'init', str(repo_path)]
        assert run(command).returncode == 0
        #
        # We have to configure this information or the forthcoming commands
        # will fail.
        #
        command = ['git', 'config', 'user.name', 'Testing Tester Tests']
        assert run(command, cwd=str(repo_path)).returncode == 0
        command = ['git', 'config', 'user.email', 'tester@example.com']
        assert run(command, cwd=str(repo_path)).returncode == 0
        # Copy the prepared tree into the working copy, then commit it.
        for file_object in tree_path.glob('*'):
            if file_object.is_dir():
                shutil.copytree(str(file_object),
                                str(repo_path / file_object.name))
            else:
                shutil.copy(str(file_object),
                            str(repo_path / file_object.name))
        command = ['git', 'add', '-A']
        assert run(command, cwd=str(repo_path)).returncode == 0
        command = ['git', 'commit', '-m', "Initial import"]
        assert run(command, cwd=str(repo_path)).returncode == 0
        return repo_path.absolute(), tree_path.absolute()

    def test_extract_from_file(self, repo: Tuple[Path, Path], tmp_path: Path):
        """
        Tests that a source tree can be extracted from a local repository.
        """
        test_unit = GitRepo(f'file://{repo[0]}')
        test_unit.extract(tmp_path)
        _tree_compare(repo[1], tmp_path)
        # The extraction must be an export, not a clone.
        assert not (tmp_path / '.git').exists()

    def test_missing_repo(self, tmp_path: Path):
        """
        Tests that an error is returned if the repository is not there.
        """
        fake_repo = tmp_path / "nonsuch.repo"
        fake_repo.mkdir()
        test_unit = GitRepo(f'file://{fake_repo}')
        with raises(FabException) as ex:
            test_unit.extract(tmp_path / 'working')
        expected = "Fault exporting tree from Git repository:"
        assert str(ex.value).startswith(expected)

    @mark.skip(reason="The daemon doesn't seem to be installed.")
    def test_extract_from_git(self, repo: Tuple[Path, Path], tmp_path: Path):
        """
        Checks that a source tree can be extracted from a Git repository
        accessed through its own protocol.
        """
        command: List[str] = ['git', 'daemon', '--reuseaddr',
                              '--base-path='+str(repo[0].parent),
                              str(repo[0])]
        process = Popen(command)
        test_unit = GitRepo('git://localhost/'+repo[0].name)
        test_unit.extract(tmp_path)
        _tree_compare(repo[1], tmp_path)
        assert not (tmp_path / '.git').exists()
        # SIGTERM the daemon and expect the matching negative return code.
        process.send_signal(signal.SIGTERM)
        process.wait(timeout=2)
        assert process.returncode == -15

    @mark.skip(reason="Too hard to test at the moment.")
    def test_extract_from_http(self, repo: Tuple[Path, Path], tmp_path: Path):
        """
        Checks that a source tree can be extracted from a Git repository
        accessed through HTTP.
        TODO: This is hard to test without a full Apache installation. For the
              moment we forgo the test on the basis that it's too hard.
        """
        pass
class TestRepoFromURL:
    """
    Tests that a URL can be converted into the correct Repository object.
    """
    # Each case maps an access URL to the expected repository class and the
    # URL the repository object should report, or to the expected exception.
    @fixture(scope='class',
             params=[
                 {'access_url': 'git://example.com/git',
                  'repo_class': GitRepo,
                  'repo_url': 'git://example.com/git'},
                 {'access_url': 'git+file:///tmp/git',
                  'repo_class': GitRepo,
                  'repo_url': 'file:///tmp/git'},
                 {'access_url': 'git+git://example.com/git',
                  'repo_class': GitRepo,
                  'repo_url': 'git://example.com/git'},
                 {'access_url': 'git+http://example.com/git',
                  'repo_class': GitRepo,
                  'repo_url': 'http://example.com/git'},
                 {'access_url': 'svn://example.com/svn',
                  'repo_class': SubversionRepo,
                  'repo_url': 'svn://example.com/svn'},
                 {'access_url': 'svn+file:///tmp/svn',
                  'repo_class': SubversionRepo,
                  'repo_url': 'file:///tmp/svn'},
                 {'access_url': 'svn+http://example.com/svn',
                  'repo_class': SubversionRepo,
                  'repo_url': 'http://example.com/svn'},
                 {'access_url': 'svn+svn://example.com/svn',
                  'repo_class': SubversionRepo,
                  'repo_url': 'svn://example.com/svn'},
                 {'access_url': 'file:///tmp/repo',
                  'repo_class': FabException,
                  'exception': "Unrecognised repository scheme: file+file"},
                 {'access_url': 'http://example.com/repo',
                  'repo_class': FabException,
                  'exception': "Unrecognised repository scheme: http+http"},
                 {'access_url': 'foo+file:///tmp/foo',
                  'repo_class': FabException,
                  'exception': "Unrecognised repository scheme: foo+file"}
             ])
    def cases(self, request):
        """
        Generates a set of test cases.
        """
        yield request.param

    def test_action(self, cases):
        """
        Checks that each URL creates an appropriate Repository object.
        """
        if issubclass(cases['repo_class'], Exception):
            with raises(cases['repo_class']) as ex:
                _ = repository_from_url(cases['access_url'])
            assert ex.value.args[0] == cases['exception']
        else:
            repo = repository_from_url(cases['access_url'])
            assert isinstance(repo, cases['repo_class'])
            assert repo.url == cases['repo_url']
|
import os
import json
from slack_sdk.webhook import WebhookClient
from flask import make_response
def index(request):
    """Respond to a Backlog webhook by posting a notification to Slack.

    Args:
        request (flask.Request): HTTP request carrying the Backlog webhook
            JSON payload.
    Returns:
        An empty 200 response.
    """
    request_json = request.get_json()
    request_notifications = (
        request_json["notifications"] if "notifications" in request_json else None
    )
    project_name = request_json["project"]["name"]  # NOTE(review): unused
    project_key = request_json["project"]["projectKey"]
    content_key_id = request_json["content"]["key_id"]
    # Fix: the f-string used double quotes inside double quotes
    # (os.environ["BACKLOG_ORG_URL"]), which is a SyntaxError on every
    # Python before 3.12 (PEP 701). Single quotes work everywhere.
    url = f"{os.environ['BACKLOG_ORG_URL']}/view/{project_key}-{content_key_id}"
    mentionList = getMentionList(request_notifications)
    message = " ".join(mentionList) + " Notification from " + url
    webhook_url = os.environ["SLACK_WEBHOOK_URL"]
    webhook = WebhookClient(webhook_url)
    webhook.send(text=message)
    return make_response("", 200)
def getMentionList(request_notifications):
    """Map notified Backlog users to Slack mention strings.

    Args:
        request_notifications: list of notification dicts from the webhook
            payload, or None when the payload had no "notifications" key.
    Returns:
        List of "<@SLACK_ID>" mention strings for users found in
        mention_list.json.
    """
    # Fix: the mapping file was opened without ever being closed — use a
    # context manager. Also tolerate a missing notification list (None is
    # exactly what the caller passes when the key is absent) instead of
    # raising TypeError when iterating.
    with open("mention_list.json", "r") as json_open:
        json_load = json.load(json_open)
    mentionList = list()
    for notification in request_notifications or []:
        if notification["user"]["nulabAccount"]["uniqueId"]:
            uniqueId = notification["user"]["nulabAccount"]["uniqueId"]
            if uniqueId in json_load:
                mentionList.append(f"<@{json_load[uniqueId]}>")
    return mentionList
|
def Drawsquare(sidelength):
    """Draw a square of the given side length with the global turtle."""
    # Four identical side-and-turn steps instead of the unrolled calls.
    for _ in range(4):
        turtle.forward(sidelength)
        turtle.left(90)
|
# https://docs.python.org/2.7/library/stdtypes.html#bltin-file-objects - high-level file object returned by open() built-in
# https://docs.python.org/2.7/c-api/file.html - this describes the underlying C API that is used by Python. Not what I want
import os, subprocess
filepath = os.path.join(os.path.dirname(__file__), 'test.txt')
def get_a_file_object():
    '''
    High-level file objects (which will henceforth be called simply file objects) are returned by several Python functions
    - subprocess.Popen().stdin (or sdout, or stderr)
    - built-in open()
    - os.fdopen(), NOT os.open()
    - Build-in open() could be implemented with os.open() + os.fdopen()
    Using the low-level file descriptor operations (e.g. os.open(), os.read(), etc.) instead of the high-level file interface will ignore aspects like
    internal bufferring of data
    '''
    # Demonstrate the type returned by the built-in open() (Python 2: 'file').
    with open(filepath) as f:
        print(type(f)) # <type 'file'>
    # The commented-out code below is exploratory: alternative ways to
    # obtain file objects (subprocess pipes, os.fdopen on a raw descriptor).
    # This function is obsolete. Use the subprocess module instead
    #os.popen()
    #pipe = subprocess.Popen('ls', stdout=subprocess.PIPE).stdout
    #print(pipe.read()) # prints the contents of /Users/austinchang/tutorials/python/language
    #pipe.close()
    #print(type(pipe)) # <type 'file'>
    #fd = os.open(filepath, os.O_RDONLY)
    #print(fd) #3
    #some_file = os.fdopen(fd)
    #print(some_file.readline()) # This is line number 0
    #some_file.close()
    #print(type(some_file)) # <type 'file'>
if __name__ == '__main__':
    get_a_file_object()
# Made By DM
# This script downloads chrome extension crx packages from the store
# Serial Download
from tqdm import tqdm
import requests
from pymongo import MongoClient
from bson import json_util
import time
import json
import zipfile
import os
# Python 2 script: download .crx packages for extensions stored in MongoDB,
# extract each manifest.json, and write it back to the document.
client = MongoClient('localhost', 27017)
db = client['Chrome-Webstore-Extensions'];
posts = db.posts
tqdm.monitor_interval = 0
# Only process extensions that do not yet have a Manifest stored.
for post in tqdm(posts.find({ 'Manifest': { "$exists" : False } }).batch_size(10)):
    try :
        ExtId=post.get('UID')
        # Chrome update-service URL that redirects to the .crx package.
        url = "https://clients2.google.com/service/update2/crx?response=redirect&prodversion=62.0.3202.94&x=id%3D"+ExtId+"%26uc"
        # NOTE(review): the duplicated assignment ('response = response = ...')
        # is harmless but looks like a paste error.
        response = response = requests.get(url, stream=True)
        # Stream the package to disk chunk by chunk.
        with open("Packages/"+ExtId+".crx", "wb") as handle:
            for data in tqdm(response.iter_content()):
                handle.write(data)
        handle.close();
        response.close();
        d = None
        data = None
        filename= "manifest.json"
        # A .crx is zip-compatible; read manifest.json straight out of it.
        with zipfile.ZipFile("Packages/"+ExtId+".crx", "r") as z:
            with z.open(filename) as f:
                data = f.read()
                d = json.loads(data)
        C= {'Manifest' : d }
        posts.update({ 'UID' : ExtId.replace(".crx" , "") } , { "$set": C })
        os.remove("Packages/"+ExtId+".crx"); # Comment this line if packages need to be stored.
    except Exception, e:  # Python 2 except syntax — this script targets Python 2.
        # Best-effort: log the failing extension id and keep going.
        f = open('log_Download.txt', 'a')
        f.write("\n" + post.get('UID'))
        f.write('-An exceptional thing happed - %s' % e)
        f.close()
        pass
|
#!/usr/bin/python
import sys
import os
import time
import logging
from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer
from clint.textui import colored
from datetime import datetime
# get the current time
now = datetime.now()
# the template for initial code
template = """/*
author: @ankingcodes
created: %s
*/
#include<bits/stdc++.h>
#include<algorithm>
using namespace std;
#define ll long long
#define MOD 1000000000
int main(){
ios_base::sync_with_stdio(false);
cin.tie(NULL);
ll t;
return 0;
}
""" % (now)
fileNames = ['A.cpp','B.cpp','C.cpp','D.cpp','E.cpp','F.cpp']
practiceFiles = ['A.cpp','B.cpp','C.cpp']
def showHelp():
    """Print the codemon banner and the list of supported commands."""
    print(" ---CODEMON--- ")
    print(colored.cyan(" a CLI tool for competitive coders"))
    print("\nCOMMANDS: \n")
    command_lines = [
        "codemon - - - - - - - - - - - - - - - shows help",
        "codemon init <contestName> - - - - - initialises a contest",
        "codemon init -n <file> - - - - - - - creates file with given name",
        "codemon listen - - - - - - - - - - - compiles and gives output",
        "codemon practice <dirName> - - - - - starts a practice session",
    ]
    for line in command_lines:
        print(line)
def init(contestName, fileNames):
    """Create a contest directory pre-populated with template source files.

    contestName -- name of the directory to create under the current directory
    fileNames   -- list of .cpp file names to create inside it
    """
    print(colored.green('Make some files and folders for ' + colored.magenta(contestName)))
    contest_dir = os.path.join(os.getcwd(), contestName)
    try:
        os.mkdir(contest_dir)
    except OSError:
        # mkdir raises OSError when the directory already exists.
        print("Failed! This directory already exists.")
    else:
        print(colored.yellow('Directory is made'))
        # Seed each problem file with the boilerplate template.
        for name in fileNames:
            with open(os.path.join(contest_dir, name), "w+") as f:
                f.write(template)
        # Empty input file consumed later by `codemon listen`.
        with open(os.path.join(contest_dir, "input.txt"), "w+"):
            pass
        print(colored.cyan('Files have been created'))
def isModified(event):
    """watchdog on_modified callback: recompile and rerun the changed file.

    Ignores events for the working directory itself, the compiled binary
    ("prog") and the input file; otherwise compiles the touched file with
    g++ and runs the binary with stdin redirected from input.txt.
    """
    filename = os.path.basename(event.src_path)
    foldername = os.path.basename(os.getcwd())
    # Skip self-triggering paths so compiling/running does not loop forever.
    if filename != foldername and filename != "prog" and filename != "input.txt":
        print(colored.yellow('\nChange made at '+ filename))
        print(colored.cyan('\nCompiling '+ filename))
        os.system('g++ ' + filename + ' -o ' + 'prog')
        print('Running')
        print(colored.yellow('Taking inputs from input.txt'))
        os.system('./prog < input.txt')
def listen():
    """Watch the current directory and recompile/rerun on every change.

    Installs a watchdog observer whose on_modified handler is isModified();
    blocks until interrupted with Ctrl-C.
    """
    print(colored.yellow("Getting files in directory"))
    path = os.getcwd()
    dircontents = os.listdir(path)
    if len(dircontents) != 0:
        print(colored.magenta("Currently listening for file changes"))
        # Match every file; no ignore patterns.
        patterns = "*"
        ignore_patterns = ""
        ignore_directories = False
        case_sensitive = True
        event_handler = PatternMatchingEventHandler(patterns, ignore_patterns, ignore_directories, case_sensitive)
        event_handler.on_modified = isModified
        observer = Observer()
        observer.schedule(event_handler,path,recursive=True)
        observer.start()
        try:
            # Idle loop: the observer thread does the work.
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            observer.stop()
        observer.join()
    else:
        print(colored.red("No files exist, check filename/path"))
def main():
    """Dispatch the codemon CLI sub-commands (init / listen / practice).

    NOTE(review): `sys.argv[countArg]` / `sys.argv[countArg+1]` raise
    IndexError when a sub-command is given without its argument — confirm
    whether a usage message should be printed instead.
    """
    if len(sys.argv) < 2:
        showHelp()
    else:
        # countArg ends up pointing at the argument AFTER `arg`.
        countArg=0;
        for arg in sys.argv:
            countArg+=1;
            if arg == "init":
                if sys.argv[countArg] == '-n':
                    # `codemon init -n <file>`: single templated file in cwd.
                    file = sys.argv[countArg+1]
                    path = '.'
                    f = open(path + '/' + file + '.cpp',"w+")
                    f.write(template)
                    f.close()
                    print(colored.yellow("Created "+file+'.cpp'))
                    break;
                else:
                    # `codemon init <contestName>`: full 6-file contest dir.
                    contestName = sys.argv[countArg]
                    init(contestName, fileNames)
            elif arg == "listen":
                listen()
            elif arg == "practice":
                # Practice sessions get the reduced 3-file set.
                contestName = sys.argv[countArg+1]
                init(contestName, practiceFiles)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 13:43:57 2020
@author: Souwi
"""
from random import *
from conversion import *
from petalDec import *
from pruning import *
from randomGraphs import *
import pickle
import time
import timeit
def bfs(g, s, d):
    """Breadth-first search from s; return the hop distance at which d is
    dequeued.

    g is an adjacency dict-of-dicts ({node: {neighbor: ...}}); neighbor
    order is shuffled through mixlist() so ties are broken randomly.

    NOTE(review): if d is unreachable from s this returns the distance of
    the last node dequeued rather than raising — confirm callers only pass
    connected pairs.
    """
    from collections import deque  # O(1) left pops; the original `del list[0]` was O(n)
    dist = {s: 0}
    seen = {s: 1}
    to_visit = deque([s])
    while to_visit:
        current = to_visit.popleft()
        neighbors = mixlist(list(g[current].keys()))
        for neigh in neighbors:
            if neigh not in seen:
                dist[neigh] = dist[current] + 1
                to_visit.append(neigh)
                seen[neigh] = 1
        # Stop as soon as the destination has been expanded (same point at
        # which the original broke out of its loop).
        if current == d:
            break
    return dist[current]
# Load the pickled graph and the sampled edge list produced earlier.
g = pickle.load( open( "ia-reality.p", "rb" ) )
outedges = pickle.load( open( "Result2/Random/Randomia-reality.p", "rb" ) )
#Distance Graph
# For every sampled (source, destination) pair, record its BFS hop distance
# in the full graph, timing the whole pass.
Dg1=[]
startG=timeit.default_timer()
for e in outedges:
    Dg1.append(bfs(g, e[0],e[1]))
finG=timeit.default_timer()
print("finG", finG-startG)
pickle.dump(Dg1, open( "Distance Graph/RandomREia-reality.p", "wb" ) ) |
import os
from urllib.request import urlretrieve
import boto3
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
# Skip all RDS connectivity setup under unit tests (no AWS access there).
if not os.environ.get('UNIT_TEST', False):
    # RDS_ENDPOINT is "host:port".
    db_host, port = os.environ.get('RDS_ENDPOINT').split(":")
    db_user = os.environ.get('DB_USER')
    region = os.environ.get('AWS_REGION')
    database_name = os.environ.get('DATABASE_NAME')
    rds = boto3.client('rds')
    # Short-lived IAM auth token used in place of a static DB password.
    db_password_token = rds.generate_db_auth_token(db_host, port, db_user, Region=region)
    """
    In the real world we'd store certs in layer to ensure consistency across services and mitigate avoidable container
    startup penalty times, s3 connectivity issues, etc.
    """
    # Fetch the RDS CA bundle so the TLS connection can be verified.
    cert_url = 'https://s3.amazonaws.com/rds-downloads/rds-combined-ca-bundle.pem'
    cert_path = '/tmp/rds-combined-ca-bundle.pem'
    urlretrieve(cert_url, cert_path)
    connect_args = {
        'user': db_user,
        'password': db_password_token,
        'host': db_host,
        'database': database_name,
        'charset': 'utf8',
        'ssl_ca': cert_path
    }
    # NOTE(review): the URI placeholders ({user} etc.) are never formatted;
    # mysql-connector appears to rely on connect_args instead — confirm.
    rds_uri = "mysql+mysqlconnector://{user}:{password}@{db_host}/{database}"
    engine = create_engine(rds_uri, connect_args=connect_args)
    rds_session = Session(bind=engine)
|
from scipy.optimize import minimize
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
V = np.matrix('123 37.5 70 30; 37.5 122 72 13.5; 70 72 321 -32; 30 13.5 -32 52')/100 # covariance
R = np.matrix('14; 12; 15; 7')/100 # return
# 风险预算优化
def calculate_portfolio_var(w, V):
    """Return the portfolio variance w V w' for weights w and covariance V."""
    weights = np.matrix(w)
    variance = weights * V * weights.T
    return variance[0, 0]
def calculate_risk_contribution(w, V):
    """Return each asset's contribution to total portfolio risk.

    The marginal risk contribution is V w'; scaling element-wise by the
    weights and dividing by the portfolio volatility gives the per-asset
    risk contribution as a column vector.
    """
    weights = np.matrix(w)
    # Portfolio volatility (the helper call is inlined here).
    sigma = np.sqrt((weights * V * weights.T)[0, 0])
    marginal = V * weights.T
    return np.multiply(marginal, weights.T) / sigma
def risk_budget_objective(x, pars):
    """Objective for risk-budget optimisation.

    pars[0] -- covariance matrix
    pars[1] -- targeted fraction of total risk per asset

    Returns the sum of squared errors between realised and targeted risk
    contributions for weight vector x.
    """
    V, x_t = pars[0], pars[1]
    w = np.matrix(x)
    sig_p = np.sqrt((w * V * w.T)[0, 0])  # portfolio sigma (helper inlined)
    risk_target = np.asmatrix(np.multiply(sig_p, x_t))
    # Realised per-asset risk contributions (helper inlined).
    asset_RC = np.multiply(V * w.T, w.T) / sig_p
    return sum(np.square(asset_RC - risk_target.T))[0, 0]
def total_weight_constraint(x):
    """Equality constraint: the weights must sum to one (returns sum-1)."""
    total = np.sum(x)
    return total - 1.0
def long_only_constraint(x):
    # Inequality constraint: every weight must be >= 0 (long-only portfolio).
    return x
# Compute each asset's weight from its targeted risk-contribution share.
def calcu_w(x):
    """Solve for the weight vector whose per-asset risk contributions match
    the target fractions x, using SLSQP with full-investment (eq) and
    long-only (ineq) constraints.

    NOTE(review): the starting guess w0 sums to 1.2; the equality
    constraint corrects this during optimisation, but a feasible start may
    converge faster — confirm whether this is intentional.
    """
    w0 = [0.2, 0.2, 0.2, 0.6]
    # x_t = [0.25, 0.25, 0.25, 0.25] # target: all four assets contribute 25% of risk each
    x_t = x
    cons = ({'type': 'eq', 'fun': total_weight_constraint},
            {'type': 'ineq', 'fun': long_only_constraint})
    res= minimize(risk_budget_objective, w0, args=[V,x_t], method='SLSQP',constraints=cons, options={'disp': True})
    w_rb = np.asmatrix(res.x)
    return w_rb
# Plot each asset's risk contribution.
def plot_rc(w):
    """Plot the per-asset risk contributions of weight vector w (uses the
    module-level covariance matrix V)."""
    rc = calculate_risk_contribution(w, V)
    rc = rc.tolist()
    # Flatten the (n,1) column matrix into a plain list.
    rc = [i[0] for i in rc]
    # NOTE(review): column label 'rick contribution' is a typo for
    # 'risk contribution'; left unchanged because it is a runtime string.
    rc = pd.DataFrame(rc,columns=['rick contribution'],index=[1,2,3,4])
    #T.plot(rc, chart_type='column', title = 'Contribution to risk')
    rc.plot()
# 假设四个资产的风险贡献度相等
w_rb = calcu_w([0.25, 0.25, 0.25, 0.25])
print('各资产权重:', w_rb)
plot_rc(w_rb)
plt.show()
name2 = input("Please intput your name:") |
'''
Created on Jun 20, 2021
@author: Rand
'''
from tensorflow.keras import optimizers
from tensorflow.keras import models
import random
import copy
import game
import model
# Assigns names for integers to avoid magic numbers
GAME_STATE_X = -1
GAME_STATE_O = 1
GAME_STATE_DRAW = 0
GAME_STATE_NOT_ENDED = 2
PLAYER_X_VAL = -1
PLAYER_O_VAL = 1
class Simulation:
    """Runs random and neural-network-guided games to build training data
    and to measure a trained model's win rate."""
    def __init__(self):
        # List of (final_result, board_state) pairs accumulated by simulate().
        self.trainingHistory=[]
    # Simulate random game
    def simulate(self, game):
        """Play one game with uniformly random moves; append every visited
        board state, labelled with the final result, to trainingHistory.
        Returns the game result code."""
        boardHistory=[]
        # Plays through random game while appending game state to training history
        while (game.getGameResult()[0] == GAME_STATE_NOT_ENDED):
            available=game.getAvailableMoves()
            move=available[random.randrange(0, len(available))]
            game.move(move)
            boardHistory.append(game.getBigBoard())
        # Get the history and build the training set
        result=game.getGameResult()[0]
        for boardState in boardHistory:
            self.trainingHistory.append((result, copy.deepcopy(boardState)))
        return result
    # Simulates multiple random games
    def simulateManyGames(self, numberOfGames):
        """Run numberOfGames random games and return the accumulated
        training history."""
        for i in range(numberOfGames):
            self.simulate(game.Game())
        return self.trainingHistory
    # Simulates a NeuralNetwork game
    def simulateNeuralNetwork(self, nnPlayer, game, model):
        """Play one game where nnPlayer moves greedily by model prediction
        and the opponent moves randomly; returns the game result tuple."""
        playerToMove = PLAYER_X_VAL
        while (game.getGameResult()[0] == GAME_STATE_NOT_ENDED):
            availableMoves = game.getAvailableMoves()
            # Checks if move should be random or based on neural network
            if playerToMove == nnPlayer:
                # Looks for best move
                # NOTE(review): maxValue starts at 0, so if every prediction
                # is <= 0 the first available move is chosen — confirm the
                # model's outputs are positive.
                maxValue = 0
                bestMove = availableMoves[0]
                for availableMove in availableMoves:
                    # Get a copy of a board and prepares it for neural network
                    boardCopy = copy.deepcopy(game.getBigBoard())
                    boardCopy[availableMove[0]*27+availableMove[1]*9+availableMove[2]*3+availableMove[3]] = nnPlayer
                    if nnPlayer == PLAYER_X_VAL:
                        value = model.predict(boardCopy, 0)
                    else:
                        value = model.predict(boardCopy, 2)
                    if value > maxValue:
                        maxValue = value
                        bestMove = availableMove
                selectedMove = bestMove
            else:
                # Random Move
                selectedMove = availableMoves[random.randrange(0, len(availableMoves))]
            game.move(selectedMove)
            # Switches player
            if playerToMove == PLAYER_X_VAL:
                playerToMove = PLAYER_O_VAL
            else:
                playerToMove = PLAYER_X_VAL
        return game.getGameResult()
    # Simulates many neural network games
    def simulateManyNeuralNetworkGames(self, nnPlayer, numberOfGames, model):
        """Play numberOfGames NN-vs-random games, print the win/draw
        percentages, and return (nnWins - randomWins) / totalGames."""
        nnPlayerWins = 0
        randomPlayerWins = 0
        draws = 0
        print ("NN player")
        print (nnPlayer)
        for i in range(numberOfGames):
            # Gets result from individual game and changes broader results accordingly
            result=self.simulateNeuralNetwork(nnPlayer, game.Game(), model)
            if result[0] == nnPlayer:
                nnPlayerWins = nnPlayerWins + 1
            elif result[0] == GAME_STATE_DRAW:
                draws = draws + 1
            else: randomPlayerWins = randomPlayerWins + 1
        totalWins = nnPlayerWins + randomPlayerWins + draws
        print ('X Wins: ' + str(int(nnPlayerWins * 100/totalWins)) + '%')
        print('O Wins: ' + str(int(randomPlayerWins * 100 / totalWins)) + '%')
        print('Draws: ' + str(int(draws * 100 / totalWins)) + '%')
        # returns overall accuracy of neural network
        return (nnPlayerWins-randomPlayerWins)/totalWins
if __name__ == '__main__':
sim=Simulation()
# Sets variables based on optimizer results
learningRate=10**(-10.5)
opt=optimizers.Adam(learning_rate=learningRate)
batchSize=800
epochs=10
neuronCount=128
layerCount=8
initializer="random_normal"
# Trains neural network
trainingHistory=sim.simulateManyGames(100000)
ticTacToeModel = model.TicTacToeModel(neuronCount, layerCount, opt, initializer, epochs, batchSize)
ticTacToeModel.train(trainingHistory)
# Saves model so neural network doesn't need to be trained every time
models.save_model(
ticTacToeModel.model, "Models/model1", overwrite=True, include_optimizer=True, save_format=None,
signatures=None, options=None, save_traces=True
) |
# AWS Lambda Handler
#
# Use Dockerfile.lambda to package/upload (see the instructions in the Dockerfile)
#
from strategies.base import CEPDistrict,CEPSchool
from cep_estimatory import add_strategies
import os,datetime,json,time
import zipfile
from io import BytesIO
import base64
import sys
import boto3,botocore
#print("## ENV ## ",os.environ)
if "SENTRY_DSN" in os.environ:
import sentry_sdk
from sentry_sdk.integrations.aws_lambda import AwsLambdaIntegration
#print("initializing sentry dsn with ",os.environ["SENTRY_DSN"])
sentry_sdk.init(
dsn=os.environ["SENTRY_DSN"],
integrations=[AwsLambdaIntegration(timeout_warning=True)]
)
def lambda_handler(event, context, local_output=False):
    """AWS Lambda entry point: run CEP grouping strategies for a district.

    event        -- district JSON (optionally base64+zip compressed under
                    the "zipped" key)
    context      -- Lambda context (unused beyond the standard signature)
    local_output -- when True, print the result instead of writing to S3
    """
    print(boto3.__version__)
    print(botocore.__version__)
    # Receives JSON district as input (same as server.py api endpoint)
    d_obj = event
    if "zipped" in event:
        print("Decompressing", len(event["zipped"]))
        # Payload was shipped as a base64-encoded zip holding data.json.
        with BytesIO(base64.b64decode(event["zipped"])) as df:
            with zipfile.ZipFile(df) as eventzip:
                d_obj = json.loads(eventzip.open("data.json").read())
    key = d_obj["key"]
    max_groups = d_obj.get("max_groups",10)
    if max_groups != None: max_groups = int(max_groups)
    evaluate_by = d_obj.get("evaluate_by","reimbursement")
    schools = d_obj["schools"]
    district = CEPDistrict(d_obj["name"],d_obj["code"],state=d_obj["state_code"])
    state = d_obj["state_code"]
    i = 1
    for row in schools:
        # Expecting { school_code: {active, daily_breakfast_served,daily_lunch_served,total_eligible,total_enrolled }}
        if not row.get("school_code",None) or not row.get("total_enrolled",None):
            continue
        # Fill in the column names CEPSchool expects, with fallbacks.
        row["School Name"] = row.get("school_name","School %i"%i)
        row["School Code"] = row.get("school_code","school-%i"%i)
        row["School Type"] = row.get("school_type","")
        row['include_in_mealscount'] = row.get('active','true') and 'true' or 'false'
        i += 1
        district.add_school(CEPSchool(row))
    strategies = d_obj.get("strategies_to_run",["Pairs","OneToOne","Exhaustive?evaluate_by=%s"% evaluate_by,"OneGroup","Spread","Binning","NYCMODA?fresh_starts=50&iterations=1000&ngroups=%s&evaluate_by=%s" % (max_groups,evaluate_by),"GreedyLP"])
    add_strategies(
        district,
        *strategies
    )
    # Time the full strategy run for the optimization_info block below.
    t0 = time.time()
    district.run_strategies()
    district.evaluate_strategies(max_groups=max_groups,evaluate_by=evaluate_by)
    result = district.as_dict()
    result["state_code"] = state
    result["max_groups"] = max_groups
    result["evaluate_by"] = evaluate_by
    result["optimization_info"] = {
        "timestamp":str(datetime.datetime.now()),
        "time": time.time() - t0
    }
    # Posts resulting data to S3
    # with "key"
    n = datetime.datetime.now()
    result_key = key
    if local_output:
        print( "Would output to s3bucket:%s" % result_key )
        print( json.dumps(result,indent=1) )
    else:
        s3_client = boto3.client('s3')
        s3_client.put_object(
            Body = json.dumps(result),
            Bucket=os.environ.get("S3_RESULTS_BUCKET","mealscount-results"),
            Key=result_key,
            ContentType="application/json",
            ACL='public-read',
        )
def test_run(event,n):
    """Local test driver: stamp the event with a timestamped test key and
    invoke lambda_handler, optionally round-tripping through the zipped
    payload format when --zip_test is on the command line."""
    event["key"] = "test/%i/%02i/%02i/%02i%02i%02i-%s.json" % (
        n.year,n.month,n.day,n.hour,n.minute,n.second,event["code"]
    )
    event["state_code"] = "test"
    # Minimal stand-in for the Lambda context object.
    class TestContext(object):
        env = {}
    if "--zip_test" in sys.argv:
        # Exercise the compressed-payload path end to end.
        with BytesIO() as mf:
            with zipfile.ZipFile(mf,mode='w',compression=zipfile.ZIP_BZIP2) as zf:
                zf.writestr('data.json', json.dumps(event))
            event = {"zipped": base64.b64encode(mf.getvalue()) }
    return lambda_handler(event,TestContext(),local_output="AWS_ACCESS_KEY_ID" not in os.environ)
if __name__ == "__main__":
# For Local Testing
n = datetime.datetime.now()
with open(sys.argv[1]) as f:
event = json.loads(f.read())
test_run(event,n)
|
# -*- coding: utf-8 -*-
# Created by Hoanglv on 10/25/2019
from odoo import fields, models, api
from datetime import datetime
from dateutil.relativedelta import relativedelta
STATE_SELECTOR = [('active', 'Active'), ('deactive', 'Deactive')]
TYPE_SELECTOR = [('extend', 'Extend'), ('up', 'Up rank'), ('keep', 'Keep rank')]
DF = '%Y-%m-%d'
class CrmCustomerRankRule(models.Model):
    """Rule describing how a customer can reach/extend/keep a rank for a
    brand, based on revenue targets over a rolling window."""
    _name = 'crm.customer.rank.rule'
    _description = 'Customer rank rule'
    name = fields.Char(string='Name')
    rank_id = fields.Many2one('crm.customer.rank', string='Rank', required=True)
    type = fields.Selection(TYPE_SELECTOR, string='Type', default='up', required=True)
    duration_year = fields.Integer(string='Duration year')
    target_revenue = fields.Integer(string='Target revenue', required=True)
    description = fields.Text(string='Description')
    ranks_allowed = fields.Many2many('crm.customer.rank', string='Rank allowed', required=True)
    brand_id = fields.Many2one('res.brand', string='Brand', required=True)
    state = fields.Selection(STATE_SELECTOR, string='State', default='active', required=True)
    @api.multi
    def action_active(self):
        # Button handler: mark the rule active.
        self.state = 'active'
    @api.multi
    def action_deactive(self):
        # Button handler: mark the rule inactive.
        self.state = 'deactive'
    def get_year_extend_and_revenue(self, partner_id, date_extend):
        """Compute how many rule periods the partner's recent revenue covers.

        Returns {'year': periods, 'revenue': total} based on revenue between
        a reference start date (last extension / registration / up-rank date,
        whichever is within the past year) and date_extend.  Returns zeros
        when the partner's latest history does not match this rule's rank or
        the extension date is past the rank's expiry.
        """
        PartnerRankConfirm = self.env['partner.rank.confirm']
        PartnerRankHistory = self.env['partner.rank.history']
        last_history = PartnerRankHistory.get_last_history(partner_id=partner_id)
        if not last_history or (last_history and (not last_history.to_rank or last_history.to_rank != self.rank_id)
                               or date_extend > last_history.up_rank_expired_date):
            # NOTE(review): the 'loyal_point' key here differs from the
            # 'revenue' key returned below — confirm callers handle both.
            return {
                'year': 0,
                'loyal_point': 0,
            }
        today = date_extend.strftime(DF) if date_extend else datetime.today().strftime(DF)
        # Default window: first day of the month, 12 months back.
        from_date = (datetime.strptime(today, DF) - relativedelta(months=12)).strftime('%Y-%m-01')
        if last_history.extend_date:
            if datetime.strptime(last_history.extend_date, DF) > datetime.strptime(today, DF) - relativedelta(years=1):
                from_date = last_history.extend_date.strftime(DF)
        else:
            # No prior extension: anchor on the registration or up-rank date.
            confirm = PartnerRankConfirm.search([('history_id', '=', last_history.id),
                                                 ('type', 'not in', ['extend', 'auto_extend', 'extend_exception'])],
                                                limit=1)
            if confirm and datetime.strptime(confirm.register_date, DF) > datetime.strptime(today, DF) - relativedelta(years=1):
                from_date = confirm.register_date.strftime(DF)
            elif datetime.strptime(last_history.up_rank_date, DF) > datetime.strptime(today, DF) - relativedelta(years=1):
                from_date = last_history.up_rank_date.strftime(DF)
        get_customer_revenue = "SELECT SUM(revenue) AS total_revenue " \
                               "FROM res_partner_revenue " \
                               "WHERE revenue_date BETWEEN %s AND %s AND partner_id = %s"
        self._cr.execute(get_customer_revenue, (from_date, today, partner_id,))
        res = self._cr.dictfetchone()
        return {
            'year': res.get('total_revenue', 0) // self.target_revenue,
            'revenue': res.get('total_revenue', 0),
        }
|
import sys
import datetime
import math
def prepareResults(outputPath):
    """Aggregate approximation ratios from one permutation output file.

    Reads up to nperms "lower_bound;weight" lines and returns a single
    "alg;n;avg;max;min" result line.

    NOTE(review): `alg` and `n` are read from module-level globals set by
    the __main__ loop — this function cannot be used standalone.  Also,
    an empty/invalid file makes count == 0 and the final average raises
    ZeroDivisionError — confirm files are always non-empty.
    """
    count = 0
    nperms = 1000
    result = ""
    with open(outputPath) as infile:
        approx_sum = 0
        approx_max = 0
        approx_min = 100000000 # good value for min approx
        for line in infile:
            if(count == nperms):
                break
            count += 1
            data = line.split(';')
            if(len(data) == 2):
                lower_bound = float(data[0])
                weight = float(data[1])
                # Ratio of achieved weight to the (ceiled) lower bound;
                # 1 when the lower bound is non-positive.
                approx = 1
                if(lower_bound > 0):
                    approx = weight/math.ceil(lower_bound)
                approx_sum += approx
                if (approx > approx_max):
                    approx_max = approx
                if (approx < approx_min):
                    approx_min = approx
            else:
                print("ERROR", outputPath)
    if count < nperms:
        print("ERROR", outputPath);
    result += "%s;%s;%s;%s;%s" % (alg, n, approx_sum/(1.0*count), approx_max, approx_min)
    return result
if __name__ == '__main__':
now = datetime.datetime.now()
algs = ["r", "t", "rt", "sr", "srt", "r_g", "t_g", "rt_g", "sr_g", "srt_g"]
for alg in algs:
prefix_dir = ["20%_"+alg+"_"]
isSigned = 0
if(alg[0] == 's'):
prefix_dir.append("sig_perm_" + alg + "_")
isSigned = 1
else:
prefix_dir.append("perm_" + alg + "_")
for prefix in prefix_dir:
outputFile = open("results/"+prefix+".result", "w")
for n in range(10,501,5):
outputPath = "../../output/"+prefix+str(n)+".out"
# print(outputPath)
result = prepareResults(outputPath)+"\n"
outputFile.write(result)
outputFile.close()
|
# -*- coding=utf-8 -*-
# Author: MrGuan
# CreatData: 2019-08-12 02:23:38
# Make your life a story worth telling
import json
from datetime import datetime
class Block():
    """A block holding a list of transactions and its parent block's hash."""
    def __init__(self, transactions, prev_hash):
        """
        :param transactions: list of transactions stored in this block
        :param prev_hash: hash of the parent block
        """
        # Mining fills these two in later.
        self.hash = None   # block hash
        self.nonce = None  # proof-of-work nonce
        self.transactions = transactions
        self.prev_hash = prev_hash
        # Human-readable creation timestamp.
        self.timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    def __repr__(self):
        """Render the block's transaction list and hash for printing."""
        return "交易列表为:%s 区块哈希值: %s" % (self.transactions, self.hash)
class Transaction():
    """A transfer of `amount` coins from `sender` to `recipient`."""
    def __init__(self, sender, recipient, amount):
        """Initialise sender, recipient and transferred amount.

        bytes values are decoded to UTF-8 strings.  BUG FIX: the original
        decoded and then unconditionally overwrote the result with the raw
        bytes value, so the decode never took effect.
        """
        if isinstance(sender, bytes):
            sender = sender.decode('utf-8')
        self.sender = sender
        if isinstance(recipient, bytes):
            recipient = recipient.decode('utf-8')
        self.recipient = recipient
        self.amount = amount
    def set_sign(self, pubkey, signature):
        """Attach the sender's public key and signature so the transaction
        can be verified.

        :param pubkey: sender's public key
        :param signature: signature over the transaction
        """
        self.pubkey = pubkey
        self.signature = signature
    def __repr__(self):
        """A transaction is either a normal transfer (sender set) or a
        mining reward (sender empty)."""
        if self.sender:
            result = "从%s转至%s %d个加密货币" % (self.sender, self.recipient, self.amount)
        else:
            result = "%s挖矿获得%d个加密货币奖励" %(self.recipient, self.amount)
        return result
class TransactionEncoder(json.JSONEncoder):
    """JSON encoder that serialises Transaction objects via their __dict__.

    (Also removes a stray '|' artifact that trailed the final return line
    and broke parsing.)
    """
    def default(self, obj):
        if isinstance(obj, Transaction):
            return obj.__dict__
        # Fall back to the base encoder, which raises TypeError for
        # unsupported types.
        return json.JSONEncoder.default(self, obj)
#!/usr/bin/env python
# -*- coding:utf-8
"""
Author: Hao Li
Email: howardleeh@gmail.com
Github: https://github.com/SAmmer0
Created: 2018/4/9
"""
from datasource.sqlserver.zyyx.dbengine import zyyx
|
#!/usr/bin/python
import time
import os
from CheckForUSB import CheckForUSB
from DownloadCSVAndMoveIntoPosition import DownloadCSVAndMoveIntoPosition
from UploadData import UploadData
import datetime

# configuration
hibernate_hours = 1
heartbeat_seconds = 5


def main():
    """Poll every heartbeat_seconds; once per hibernate_hours upload data
    from the USB device, download this week's CSV and rerun the analytics.

    (The loop was previously at module level with Python-2-only print
    statements; it is now a guarded main() with function-call prints.)
    """
    last_upload = 0
    usb_notified = False
    while True:
        time.sleep(heartbeat_seconds)
        if time.time() - last_upload > float(60 * 60 * hibernate_hours):
            # Make sure that the USB is plugged in.
            if not CheckForUSB():
                if not usb_notified:
                    os.system('''/usr/bin/osascript -e 'display notification "Connect your USB device."' ''')
                    usb_notified = True
                # continue without resetting the last_upload time.
                continue
            # Run the upload; a falsy return means the user cancelled.
            if not UploadData():
                print('User canceled.')
                last_upload = time.time()
                usb_notified = False
                continue
            # Download the CSV covering Monday..today of the current week.
            today = datetime.date.today()
            today_str = today.strftime("%m/%d/%Y")
            monday = today - datetime.timedelta(days=today.weekday())
            monday_str = monday.strftime("%m/%d/%Y")
            DownloadCSVAndMoveIntoPosition(monday_str, today_str)
            # Run the analytics
            os.system('cd ../; python txtToRoot.py; python Improve.py --detailed --today --save --autogenerated; cd -;')
            os.system('''/usr/bin/osascript -e 'display notification "New Prediction is available."' ''')
            # at the end of everything...
            last_upload = time.time()
            usb_notified = False


if __name__ == '__main__':
    main()
    print('Exiting.')
|
"""
ETH DAO Arbitrage
Jon V (May 30th 2016)
"""
import time
from kraken import Kraken
from twilio.rest import TwilioRestClient
PERCENTAGE_RETURN = 1.10 # 10%
DAO_KRAKEN_DEPOSIT_ADDRESS = 'your_address'
ETH_KRAKEN_RETURN_ADDRESS = 'eth_address'
TWILIO_ACCOUNT_SID = "sid"
TWILIO_AUTH_TOKEN = "token"
def calc_DAO2ETH_price(number_of_start_ethereums, number_of_DAO):
    """DAO/ETH limit price that returns the starting ETH plus the target
    profit, net of the exchange fee (the 0.99849 divisor)."""
    target_eth = number_of_start_ethereums * PERCENTAGE_RETURN
    return target_eth / 0.99849 / number_of_DAO
def load_last_eth_balance():
    """Read back the balance written by save_last_eth_balance (as a string,
    the first line of eth_balance.txt)."""
    with open("eth_balance.txt", "r") as balance_file:
        first_line = balance_file.readline()
    return first_line
def save_last_eth_balance(eth_balance):
    """Persist the ETH balance to eth_balance.txt (overwrites the file)."""
    text = str(eth_balance)
    with open("eth_balance.txt", "w+") as balance_file:
        balance_file.write(text)
def run():
    """One arbitrage cycle: sell the DAO balance for ETH at (at least) the
    break-even-plus-profit price, record the new ETH balance, notify by
    SMS and route the proceeds back through Shapeshift.

    Raises Exception when the trade cannot be placed or the profit did not
    materialise.  (Python-2-only print statements converted to print().)
    """
    print("Starting Arb")
    k = Kraken()
    last_eth_balance = float(load_last_eth_balance())
    dao_balance = k.dao_balance()
    eth_trade_price = round(calc_DAO2ETH_price(float(last_eth_balance), dao_balance), 5)
    # Never undercut the current market price.
    if k.latest_price() > eth_trade_price:
        eth_trade_price = k.latest_price()
    trade_id = k.trade(price=eth_trade_price, lots=dao_balance)
    if trade_id:
        while k.order_status(trade_id) != 'closed':
            print("Waiting for trade to execute")
            time.sleep(5)
        # order was executed, check how much we made
        if k.order_status(trade_id) == 'closed':
            eth_balance = k.eth_balance()
            if eth_balance > last_eth_balance:
                print("Success! We made", eth_balance - last_eth_balance, "ethereum")
                save_last_eth_balance(eth_balance)
                send_sms(eth_balance)
                # now, send to shapeshift
                print("Shapeshift refid", k.withdrawn(eth_balance))
                # check account for DAO and if > 1 that means money are here
                while k.dao_balance() < 1:
                    time.sleep(5)
                # check if all ok and go again
                if k.dao_balance() > dao_balance:
                    print("We are going again!")
                    print("DAO Balance is", k.dao_balance())
            else:
                raise Exception
    else:
        raise Exception
def send_sms(eth_price):
    """Text the new ETH balance via Twilio.

    NOTE(review): TwilioRestClient / client.sms.messages is the legacy
    twilio<6 API — confirm the installed twilio version before reuse.
    """
    client = TwilioRestClient(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)
    client.sms.messages.create(
        to="yournumber",
        from_="from_number",
        body="1353-2236 ETH:" + str(eth_price))
if __name__ == '__main__':
while True:
run()
|
from tkinter import filedialog
from tkinter import *
import pymysql
import mysql.connector
from tkinter import messagebox
from mysql.connector import Error
from mysql.connector import errorcode
from caresys import *
connection = pymysql.connect(host='localhost',database='vehicle',user='root',password='')
cursor = connection.cursor()
def detec(name):
    # Run plate detection on the given image through caresysfunc.
    # NOTE(review): the argument semantics of (0, name, 0, 'us') are
    # defined in the caresys module — confirm 'us' selects US plate mode.
    res = caresysfunc(0,name,0,'us')
    return res
def process(x):
    """Insert a detection result tuple into the crime_record blacklist and
    show a confirmation dialog.

    NOTE(review): the index mapping (0=plate, 1=color, 3=body type,
    4=model, 5=make) mirrors caresysfunc's output ordering — confirm
    against the caresys module.
    """
    plate_no = x[0]
    color = x[1]
    make = x[5]
    b_type = x[3]
    model = x[4]
    x=(plate_no,color,make,b_type,model)
    # Parameterized query: values are bound, not string-interpolated.
    query = """INSERT INTO crime_record(plate_no, color, make,body_type,model)
           VALUES (%s,%s,%s,%s,%s) """
    cursor.execute(query,x)
    connection.commit()
    messagebox.showinfo("Success!","Inserted "+ str(x) +" into blacklist")
def fileDialog():
    # Open a native file picker and return the chosen path ('' if cancelled).
    filename = filedialog.askopenfilename(title = "Select A File")
    return filename
def fetch():
    # Return every row of the crime_record blacklist table.
    cursor.execute("SELECT * FROM crime_record")
    rows = cursor.fetchall()
    return (rows)
|
#!/usr/bin/env python
from __future__ import print_function
import chainer
import chainer.functions as F
import chainer.links as L
class VGG(chainer.Chain):
    """VGG-16-style CNN: five conv blocks (64/128/256/512/512 channels,
    each followed by 2x2 max pooling) and a two-layer classifier head.

    The penultimate activation is kept in self.fc1_out after each forward
    pass so callers can read the 128-dim feature vector.
    """
    def __init__(self, class_labels=10):
        super(VGG, self).__init__()
        with self.init_scope():
            # Block 1: 64 channels.
            self.l1_1 = L.Convolution2D(None, 64, 3, pad=1,nobias=True)
            self.b1_1 = L.BatchNormalization(64)
            self.l1_2 = L.Convolution2D(None, 64, 3, pad=1,nobias=True)
            self.b1_2 = L.BatchNormalization(64)
            # Block 2: 128 channels.
            self.l2_1 = L.Convolution2D(None, 128, 3, pad=1,nobias=True)
            self.b2_1 = L.BatchNormalization(128)
            self.l2_2 = L.Convolution2D(None, 128, 3, pad=1,nobias=True)
            self.b2_2 = L.BatchNormalization(128)
            # Block 3: 256 channels (three conv layers).
            self.l3_1 = L.Convolution2D(None, 256, 3, pad=1,nobias=True)
            self.b3_1 = L.BatchNormalization(256)
            self.l3_2 = L.Convolution2D(None, 256, 3, pad=1,nobias=True)
            self.b3_2 = L.BatchNormalization(256)
            self.l3_3 = L.Convolution2D(None, 256, 3, pad=1,nobias=True)
            self.b3_3 = L.BatchNormalization(256)
            # Block 4: 512 channels.
            self.l4_1 = L.Convolution2D(None, 512, 3, pad=1,nobias=True)
            self.b4_1 = L.BatchNormalization(512)
            self.l4_2 = L.Convolution2D(None, 512, 3, pad=1,nobias=True)
            self.b4_2 = L.BatchNormalization(512)
            self.l4_3 = L.Convolution2D(None, 512, 3, pad=1,nobias=True)
            self.b4_3 = L.BatchNormalization(512)
            # Block 5: 512 channels.
            self.l5_1 = L.Convolution2D(None, 512, 3, pad=1,nobias=True)
            self.b5_1 = L.BatchNormalization(512)
            self.l5_2 = L.Convolution2D(None, 512, 3, pad=1,nobias=True)
            self.b5_2 = L.BatchNormalization(512)
            self.l5_3 = L.Convolution2D(None, 512, 3, pad=1,nobias=True)
            self.b5_3 = L.BatchNormalization(512)
            # Classifier head; 128-wide variant of the original 512 width.
            # self.fc1 = L.Linear(None, 512, nobias=True)
            self.fc1 = L.Linear(None, 128, nobias=True)
            # self.bn_fc1 = L.BatchNormalization(512)
            self.bn_fc1 = L.BatchNormalization(128)
            self.fc2 = L.Linear(None, class_labels, nobias=True)
        # Stashes the fc1 activation of the most recent forward pass
        # (initialised to '' before the first call).
        self.fc1_out = ''
    def __call__(self, x):
        """Forward pass; returns class logits of shape (batch, class_labels)."""
        # 64 channel blocks:
        h = self.l1_1(x)
        h = self.b1_1(h)
        h = F.relu(h)
        h = F.dropout(h, ratio=0.3)
        h = self.l1_2(h)
        h = self.b1_2(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=2, stride=2)
        # 128 channel blocks:
        h = self.l2_1(h)
        h = self.b2_1(h)
        h = F.relu(h)
        h = F.dropout(h, ratio=0.4)
        h = self.l2_2(h)
        h = self.b2_2(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=2, stride=2)
        # 256 channel blocks:
        h = self.l3_1(h)
        h = self.b3_1(h)
        h = F.relu(h)
        h = F.dropout(h, ratio=0.4)
        h = self.l3_2(h)
        h = self.b3_2(h)
        h = F.relu(h)
        h = F.dropout(h, ratio=0.4)
        h = self.l3_3(h)
        h = self.b3_3(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=2, stride=2)
        # 512 channel blocks:
        h = self.l4_1(h)
        h = self.b4_1(h)
        h = F.relu(h)
        h = F.dropout(h, ratio=0.4)
        h = self.l4_2(h)
        h = self.b4_2(h)
        h = F.relu(h)
        h = F.dropout(h, ratio=0.4)
        h = self.l4_3(h)
        h = self.b4_3(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=2, stride=2)
        # 512 channel blocks:
        h = self.l5_1(h)
        h = self.b5_1(h)
        h = F.relu(h)
        h = F.dropout(h, ratio=0.4)
        h = self.l5_2(h)
        h = self.b5_2(h)
        h = F.relu(h)
        h = F.dropout(h, ratio=0.4)
        h = self.l5_3(h)
        h = self.b5_3(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=2, stride=2)
        h = F.dropout(h, ratio=0.5)
        h = self.fc1(h)
        # Expose the feature vector before batch-norm/activation.
        self.fc1_out = h
        h = self.bn_fc1(h)
        h = F.relu(h)
        h = F.dropout(h, ratio=0.5)
        return self.fc2(h)
|
from bs4 import BeautifulSoup
from datetime import date, datetime
import requests
r =requests.get('https://zerocater.com/m/BYFJ/')
soup = BeautifulSoup(r.text)
def menu_scrape():
    """Find today's menu blocks on the zerocater page and pass each day's
    markup to menu_info().

    Builds the "YYYY-WW-D" ISO-calendar string used in the data-date
    attribute.
    """
    d = datetime.isocalendar(date.today())
    # this matches the format of zerocater: "(2020, 42, 3)" -> "2020-42-3"
    today_match = str(d).replace(', ','-').replace('(','').replace(')','')
    day_divs = soup.find_all("div", {"data-date": today_match})
    for day_div in day_divs:
        # BUG FIX: the original parsed the whole ResultSet `f` on every
        # iteration; parse just this element's markup instead.
        today_soup = BeautifulSoup(str(day_div))
        menu_info(today_soup)
def menu_info(days_soup):
    """Extract the vendor name element and the menu item names for one day.

    BUG FIX: the original had an unclosed paren on the vendor lookup
    (a syntax error) and discarded both the vendor and the collected
    items; the pair is now returned (existing callers ignore the return).
    """
    vendor = days_soup.find("h1", { "class" : "vendor-name"})
    food_items = []
    food_items_soup = BeautifulSoup(str(days_soup.find_all("h4", { "class" : "item-name" })))
    for yummy in food_items_soup.stripped_strings:
        # stripped_strings yields the list-repr punctuation too; skip it.
        if yummy != "," and yummy != "[" and yummy != "]":
            food_items.append(yummy)
    return vendor, food_items
|
import sys
import xlwt
from xlwt import Workbook
#wb = Workbook()
#outfile = "19122007output"
#sheet = wb.add_sheet(outfile)
print('Python: {}'.format(sys.version))
import pyabf
import numpy as np
import matplotlib.pyplot as plt
filename = r'C:\Users\Elijah\Documents\NanoporeData\abfRaw\filtered_bessel8pole_500hz_19128004.abf'
abf = pyabf.ABF(filename)
print(abf)
# abf.headerLaunch() # display header information in a web browser
abf.setSweep(0)
print("sweep data (ADC):", abf.sweepY)
print("sweep command (DAC):", abf.sweepC)
print("sweep times (seconds):", abf.sweepX)
counter = 0
def dataPoints(begin, end):
    """Print (time, current) sample pairs for sweep indices [begin, end),
    incrementing the module-level `counter` once per printed point.

    (The `global counter` declaration was previously buried inside the
    loop body; it now sits at the top of the function.)
    """
    global counter
    for n in range(begin, end):
        print(abf.sweepX[n], abf.sweepY[n])
        #sheet.write(counter, 0, str(abf.sweepX[n])), sheet.write(counter, 1, str(abf.sweepY[n]))
        counter += 1
dataPoints(8264, 14826)
dataPoints(14858, 27423)
dataPoints(27511, 27528)
dataPoints(49191, 49244)
dataPoints(49350, 49519)
dataPoints(49916, 50694)
dataPoints(50993, 51005)
dataPoints(51043, 51084)
dataPoints(51130, 63453)
dataPoints(63535, 66278)
dataPoints(87787, 87791)
dataPoints(110141, 112116)
dataPoints(112548, 113251)
dataPoints(155677, 155704)
dataPoints(155751, 155759)
dataPoints(175947, 176284)
dataPoints(176516, 176969)
dataPoints(177087, 177096)
dataPoints(177167, 177172)
dataPoints(177353, 182850)
dataPoints(182875, 182880)
dataPoints(183031, 183072)
dataPoints(183087, 183145)
dataPoints(183373, 183381)
dataPoints(224253, 224731)
dataPoints(224764, 233137)
dataPoints(233277, 233280)
dataPoints(233323, 235526)
dataPoints(235854, 235866)
dataPoints(235950, 236010)
dataPoints(236171, 236189)
dataPoints(236266, 239359)
dataPoints(239464, 240060)
dataPoints(240107, 254651)
dataPoints(280254, 281395)
dataPoints(281434, 290216)
dataPoints(290354, 291749)
dataPoints(292001, 292012)
dataPoints(292177, 293837)
dataPoints(293888, 293908)
dataPoints(293912, 293923)
dataPoints(294807, 294811)
dataPoints(294899, 295203)
dataPoints(295309, 295322)
dataPoints(295351, 295358)
dataPoints(295361, 295373)
print(counter) |
# Accepted
# Python 3
import numpy
# Read two space-separated integer vectors from stdin.
ar = numpy.array([int(p) for p in input().split()])
arr = numpy.array([int(p) for p in input().split()])
# Print the inner product (a scalar) and the outer product (a matrix).
print(numpy.inner(ar, arr))
print(numpy.outer(ar, arr))
|
# -*- coding: UTF-8 -*-
# @Time : 22/03/2019 17:43
# @Author : QYD
from utils import cal_r_max, median_draw_circle, convolution_image
import cv2 as cv
mode = ["wiener", "dia_conv"]
def rotational_deblur(img, theta, center=None, mode=mode[1]):
    """Deblur rotational motion blur of angle `theta` around `center`.

    mode defaults to "dia_conv" (mode[1] at definition time); pass
    mode[0] for "wiener".  NOTE(review): the actual algorithm lives in
    utils.convolution_image — behavior depends on that implementation.
    """
    r_max = cal_r_max(img, center=center)
    circle_list = median_draw_circle(r_max)
    deblur_img = convolution_image(img, center=center, circle_list=circle_list, theta=theta, mode=mode)
    return deblur_img
if __name__ == '__main__':
img_blur = cv.imread("../samples/origin_blur.jpg")
img_deblur = rotational_deblur(img=img_blur, theta=0.05)
cv.imwrite("../samples/origin_traditional_deblur.jpg", img_deblur)
|
"""
|正则表达式是用来操作字符串的一种逻辑公式
"""
import re
# # eg.1
# s = "webset: http://www.baidu.com"
# reg = "http://[w]{3}\.[a-z0-9]*\.com"
#
# result = re.findall(reg,s)
# print(result)
#
# # eg.2
# s = "hello world hello"
# reg = "hello"
#
# print(re.findall(reg,s))
# print(re.findall(reg,s)[0])
# 元字符
"""
. 代表换行符以外的任意字符 \n
\w 匹配字母/number/_/汉字
\s 匹配任意空白符
\d 匹配任意的数字 0-9
^ 匹配字符串的开始
$ 匹配字符串的结束
"""
# s = "23sgrdg工人 房356#¥%__"
# print(re.findall("\w",s))
# print(re.findall("\d",s))
# print(re.findall("\s",s))
# print(re.findall("^\d",s))
# 反义代码
"""
\W : not \w
\S : not \s
\D : not \d
"""
# 限定符
"""
# s = "webset: http://www.baidu.com"
# reg = "http://[w]{3}\.[a-z0-9]*\.com"
[]* : 代表它前面的正则表达式重复0次或多次
+ : 重复一次或多次
? : 重复0次或1次
{n} : 重复n次
{n,}: 重复n次或多次,至少重复n次
{n,m}: 重复n次到m次
"""
# s = "hhhhhh sd123父官5555....sjjdo"
# print(re.findall("\d{3}",s))
# print(re.findall("[\da-z]*",s))
# print(re.findall("[\da-z]+",s))
# print(re.findall("[\da-z]?",s))
# s1 = "my qq is 3465513"
# reg = "\d{5,12}"
# print(re.findall(reg,s1))
# Group matching: capture the 7-digit QQ number and the 5-digit mail id.
# NOTE(review): prefer a raw string r"(\d{7}).*(\d{5})" to avoid invalid
# escape-sequence warnings; left unchanged here.
s = "my qq is 3465513, mail:10000"
reg = "(\d{7}).*(\d{5})"
print(re.findall(reg,s))
print(re.search(reg,s))
print(re.search(reg,s).group())
print(re.search(reg,s).group(2)) # contents of group 2
print(re.search(reg,s).group(1)) # contents of group 1
print(re.search(reg,s).group(0)) # the entire match
|
from sqlalchemy import join
from datetime import datetime
import re
import time
import buildapi.model.meta as meta
from buildapi.model.reports import Report, IntervalsReport
from buildapi.model.util import get_time_interval, get_silos
from buildapi.model.util import NO_RESULT, SUCCESS, WARNINGS, FAILURE, \
SKIPPED, EXCEPTION, RETRY, SLAVE_SILOS, BUSY, IDLE
# Handles to the statusdb tables used by the queries below.
b = meta.status_db_meta.tables['builds']    # builds
s = meta.status_db_meta.tables['slaves']    # slaves
bd = meta.status_db_meta.tables['builders']  # builders
def BuildsQuery(starttime=None, endtime=None, slave_id=None, builder_name=None,
        get_builder_name=False):
    """Constructs the sqlalchemy query for fetching all builds from statusdb
    in the specified time interval, satisfying some contraints.

    Input: starttime - start time, UNIX timestamp (in seconds)
           endtime - end time, UNIX timestamp (in seconds)
           slave_id - slave id, if specified returns only the builds on this
                slave
           builder_name - builder name, if specified returns only the builds on
                this builder
           get_builder_name - boolean specifying whether or not to get the
                builder name for each build
    Output: query
    """
    # builds joined with slaves; builders are joined in only when needed
    q = join(b, s, b.c.slave_id == s.c.id)
    with_columns = [b.c.slave_id, s.c.name.label('slave_name'), b.c.result,
        b.c.builder_id, b.c.starttime, b.c.endtime]
    if get_builder_name or builder_name:
        q = q.join(bd, bd.c.id == b.c.builder_id)
        with_columns.append(bd.c.name.label('builder_name'))
    q = q.select().with_only_columns(with_columns)
    # `is not None` (not `!= None`): 0 would be a legitimate slave id and
    # identity comparison is the idiomatic None test (PEP 8)
    if slave_id is not None:
        q = q.where(b.c.slave_id == slave_id)
    if builder_name is not None:
        q = q.where(bd.c.name == builder_name)
    if starttime:
        q = q.where(b.c.starttime >= starttime)
    # note: the endtime bound filters on the build's *start* time, i.e.
    # "builds started no later than endtime"
    if endtime:
        q = q.where(b.c.starttime <= endtime)
    return q
def GetSlavesReport(starttime=None, endtime=None, int_size=0, last_int_size=0):
    """Build the all-slaves report for the specified time interval.

    Input: starttime - start time (UNIX timestamp in seconds), if not
                specified, endtime minus 24 hours
           endtime - end time (UNIX timestamp in seconds), if not specified,
                starttime plus 24 hours or current time
           int_size - break down results per interval (in seconds)
           last_int_size - length in seconds of the trailing interval for
                which fail and busy/idle percentages are computed
    Output: SlavesReport
    """
    starttime, endtime = get_time_interval(starttime, endtime)
    report = SlavesReport(starttime, endtime, int_size=int_size,
        last_int_size=last_int_size)
    query = BuildsQuery(starttime=datetime.fromtimestamp(starttime),
        endtime=datetime.fromtimestamp(endtime))
    for row in query.execute():
        kwargs = dict((str(key), value) for (key, value) in dict(row).items())
        report.add(Build(**kwargs))
    return report
def GetSlaveDetailsReport(slave_id=None, starttime=None, endtime=None,
        int_size=0, last_int_size=0):
    """Build the details report for one slave in the specified time interval.

    Input: slave_id - slave id
           starttime - start time (UNIX timestamp in seconds), if not
                specified, endtime minus 24 hours
           endtime - end time (UNIX timestamp in seconds), if not specified,
                starttime plus 24 hours or current time
           int_size - break down results per interval (in seconds)
           last_int_size - length in seconds of the trailing interval for
                which fail and busy/idle percentages are computed
    Output: SlaveDetailsReport
    """
    starttime, endtime = get_time_interval(starttime, endtime)
    report = SlaveDetailsReport(starttime, endtime, slave_id,
        int_size=int_size, last_int_size=last_int_size)
    query = BuildsQuery(slave_id=slave_id, get_builder_name=True,
        starttime=datetime.fromtimestamp(starttime),
        endtime=datetime.fromtimestamp(endtime))
    for row in query.execute():
        kwargs = dict((str(key), value) for (key, value) in dict(row).items())
        build = Build(**kwargs)
        report.add(build)
        # lazily pick up the slave's name from the first build seen
        if not report.name:
            report.name = build.slave_name
    return report
def GetStatusBuildersReport(starttime=None, endtime=None):
    """Build the builders report (based on statusdb) for the specified
    time interval.

    Input: starttime - start time (UNIX timestamp in seconds), if not
                specified, endtime minus 24 hours
           endtime - end time (UNIX timestamp in seconds), if not specified,
                starttime plus 24 hours or current time
    Output: BuildersReport
    """
    starttime, endtime = get_time_interval(starttime, endtime)
    report = BuildersReport(starttime, endtime)
    query = BuildsQuery(starttime=datetime.fromtimestamp(starttime),
        endtime=datetime.fromtimestamp(endtime),
        get_builder_name=True)
    for row in query.execute():
        kwargs = dict((str(key), value) for (key, value) in dict(row).items())
        report.add(Build(**kwargs))
    return report
def GetBuilderDetailsReport(builder_name=None, starttime=None, endtime=None):
    """Build the details report (based on statusdb) for one builder in the
    specified time interval.

    Input: builder_name - builder name
           starttime - start time (UNIX timestamp in seconds), if not
                specified, endtime minus 24 hours
           endtime - end time (UNIX timestamp in seconds), if not specified,
                starttime plus 24 hours or current time
    Output: BuilderDetailsReport
    """
    starttime, endtime = get_time_interval(starttime, endtime)
    report = BuilderDetailsReport(starttime, endtime, name=builder_name)
    query = BuildsQuery(builder_name=builder_name, get_builder_name=True,
        starttime=datetime.fromtimestamp(starttime),
        endtime=datetime.fromtimestamp(endtime))
    for row in query.execute():
        kwargs = dict((str(key), value) for (key, value) in dict(row).items())
        report.add(Build(**kwargs))
    return report
class SlavesReport(IntervalsReport):
    """Contains a summary Slave Report for each slave that had at least one
    build within the specified timeframe.

    Per-slave data is held in SlaveDetailsReport objects (summary mode),
    keyed by slave id.  Expensive aggregates are cached and flagged stale
    with the `outdated` sentinel whenever a build is added.
    """
    # Sentinel value marking a cached aggregate as stale.
    outdated = -1
    def __init__(self, starttime, endtime, int_size=0, last_int_size=0):
        IntervalsReport.__init__(self, starttime, endtime, int_size=int_size)
        self.last_int_size = last_int_size
        self.slaves = {}  # slave_id -> SlaveDetailsReport (summary mode)
        self._busy = 0
        self._avg_busy = 0
        # outdated flags
        self._num_busy = SlavesReport.outdated
        self._avg_busy_time = SlavesReport.outdated
        self.silos = sorted(SLAVE_SILOS.keys())
    def add(self, build):
        """Update the report by adding a build to the corresponding Slave
        Report.

        Creates the per-slave report lazily and invalidates the cached
        aggregates.
        """
        slave_id = build.slave_id
        if slave_id not in self.slaves:
            self.slaves[slave_id] = SlaveDetailsReport(self.starttime,
                self.endtime, slave_id, name=build.slave_name,
                last_int_size=self.last_int_size, summary=True)
        self.slaves[slave_id].add(build)
        self._num_busy = SlavesReport.outdated
        self._avg_busy_time = SlavesReport.outdated
    def total_slaves(self):
        """Total number of slaves."""
        return len(self.slaves.keys())
    def endtime_total_busy(self):
        """Total number of busy slaves at endtime (lazily computed, cached)."""
        if self._num_busy == SlavesReport.outdated:
            self._num_busy = 0
            for slave in self.slaves.values():
                if slave.endtime_status() == BUSY:
                    self._num_busy += 1
        return self._num_busy
    def endtime_total_idle(self):
        """Total number of idle slaves at endtime."""
        return self.total_slaves() - self.endtime_total_busy()
    def get_int_busy(self):
        """Number of busy machines per each interval.

        Each slave's busy intervals are discretized onto the report's
        interval grid; every grid slot a slave overlaps counts that slave
        as busy once.  (Python 2 code: uses xrange.)
        """
        int_busy = [ 0 ] * self.int_no
        for slave_id in self.slaves:
            slave = self.slaves[slave_id]
            disc_intervals = set()
            intervals = sorted(slave.busy)
            intervals.append((self.endtime, None, None)) # append fake interval
            for inter in xrange(len(intervals) - 1):
                start, end, _ = intervals[inter]
                next_inter_start = intervals[inter + 1][0]
                # open-ended builds are treated as busy until just before the
                # next build starts, clamped to the report window
                end = min(end or (next_inter_start - 1), self.endtime - 1)
                start_idx = self.get_interval_index(start)
                end_idx = self.get_interval_index(end)
                disc_intervals.update(xrange(start_idx, end_idx))
            for int_idx in disc_intervals:
                int_busy[int_idx] += 1
        return int_busy
    def get_int_busy_silos(self):
        """Number of busy machines per each interval and per silos.

        Same discretization as get_int_busy(), but counts are additionally
        bucketed by the silos the slave's name maps to, plus a 'Totals' row.
        Returns (int_busy dict, totals dict of slave counts per silos).
        """
        total_slaves = {}
        int_busy = { 'Totals': [ 0 ] * self.int_no }
        for silos_name in SLAVE_SILOS:
            int_busy[silos_name] = [ 0 ] * self.int_no
            total_slaves[silos_name] = set()
        for slave_id in self.slaves:
            slave = self.slaves[slave_id]
            silos_name = get_silos(slave.name)
            disc_intervals = set()
            intervals = sorted(slave.busy)
            intervals.append((self.endtime, None, None)) # append fake interval
            for inter in xrange(len(intervals) - 1):
                start, end, _ = intervals[inter]
                next_inter_start = intervals[inter + 1][0]
                end = min(end or (next_inter_start - 1), self.endtime - 1)
                start_idx = self.get_interval_index(start)
                end_idx = self.get_interval_index(end)
                disc_intervals.update(xrange(start_idx, end_idx))
            for int_idx in disc_intervals:
                int_busy[silos_name][int_idx] += 1
                int_busy['Totals'][int_idx] += 1
            total_slaves[silos_name].add(slave.name)
        totals = dict([(silos_name, len(total_slaves[silos_name]))
            for silos_name in total_slaves])
        totals['Totals'] = self.total_slaves()
        return int_busy, totals
    def get_avg_busy(self):
        """Average across all slaves of slave busy time percentage
        (lazily computed, cached)."""
        if self._avg_busy_time == SlavesReport.outdated:
            busy_sum = 0
            total = self.total_slaves()
            for slave in self.slaves.values():
                busy_sum += slave.get_ptg_busy()
            self._avg_busy_time = busy_sum / total if total else 0
        return self._avg_busy_time
    def to_dict(self, summary=False):
        """JSON-serializable representation: report bounds plus one summary
        dict per slave."""
        json_obj = {
            'starttime': self.starttime,
            'endtime': self.endtime,
            'last_int_size': self.last_int_size,
            'slaves': [],
        }
        for slave_id in self.slaves:
            slave_obj = self.slaves[slave_id].to_dict(summary=True)
            json_obj['slaves'].append(slave_obj)
        return json_obj
class SlaveDetailsReport(IntervalsReport):
    """Build report for a single slave over [starttime, endtime].

    Tracks result counts (overall and per interval), busy time, the raw
    busy intervals, and statistics for the trailing `last_int_size`
    seconds.  With summary=True the individual Build objects are not
    retained.
    """
    def __init__(self, starttime, endtime, slave_id, name=None, int_size=0,
            last_int_size=0, summary=False):
        IntervalsReport.__init__(self, starttime, endtime, int_size=int_size)
        self.slave_id = slave_id
        self.name = name
        self.last_int_size = last_int_size
        self.summary = summary
        # builds, only when summary=False
        self.builds = []
        # most recently *started* build seen so far (drives endtime_status)
        self.last_build = None
        self._d_sum = 0 # sum of all durations
        self.busy = [] # busy intervals
        # results
        self.total = 0
        self.results = {
            NO_RESULT: 0,
            SUCCESS: 0,
            WARNINGS: 0,
            FAILURE: 0,
            SKIPPED: 0,
            EXCEPTION: 0,
            RETRY: 0,
        }
        self.timeframe = self.endtime - self.starttime \
            if self.starttime and self.endtime else 0
        # per-interval counters; every non-success/warning outcome is folded
        # into FAILURE for the interval series (see add())
        self.int_total = [0] * self.int_no
        self.results_int = {
            SUCCESS: [0] * self.int_no,
            WARNINGS: [0] * self.int_no,
            FAILURE: [0] * self.int_no,
        }
        # last interval
        self.last_int_sum = 0
        self.last_int_total = 0
        self.last_int_fail = 0
    def add(self, build):
        """Update the slave report by analyzing a build's properties."""
        self.total += 1
        # results
        result = build.result if build.result != None else NO_RESULT
        if result in self.results:
            self.results[result] += 1
        # results per interval
        int_idx = self.get_interval_index(build.starttime)
        self.int_total[int_idx] += 1
        # fold every failure-ish outcome into FAILURE for the interval series
        if result in (FAILURE, SKIPPED, EXCEPTION, RETRY, NO_RESULT):
            result = FAILURE
        self.results_int[result][int_idx] += 1
        # sum durations
        self._d_sum += self._busy_time(build)
        if build.starttime:
            # a missing or non-positive-length endtime is recorded as None
            # (open-ended interval)
            endtime = build.endtime if (build.endtime and
                build.endtime > build.starttime) else None
            self.busy.append((build.starttime, endtime, result))
        # last interval
        if build.starttime and self.endtime and self.last_int_size and (
                build.starttime >= self.endtime - self.last_int_size):
            self.last_int_sum += self._busy_time(build)
            self.last_int_total += 1
            if result in (FAILURE, SKIPPED, EXCEPTION, RETRY, NO_RESULT):
                self.last_int_fail += 1
        # status at endtime: remember the most recently started build
        if not self.last_build or (self.last_build and build.starttime and
                build.starttime >= self.last_build.starttime):
            self.last_build = build
        if not self.summary:
            self.builds.append(build)
    def _busy_time(self, build):
        """Build run time within the report's timeframe.

        Clamped to [starttime, endtime]; builds without a duration count 0.
        """
        if build.duration:
            return (min(build.endtime, self.endtime) -
                max(build.starttime, self.starttime))
        return 0
    def endtime_status(self):
        """Slave status at endtime: BUSY or IDLE.

        BUSY when the most recent build is still open or finished after
        the report's endtime.
        """
        if self.last_build and (not self.last_build.endtime or
                self.last_build.endtime > self.endtime):
            return BUSY
        return IDLE
    def get_avg_duration(self):
        """The average (mean) duration of the builds which run on this slave."""
        return int(float(self._d_sum) / self.total) if self.total else 0
    def get_results_fail_all(self):
        """The number of all failing builds (the sum of FAILURE, SKIPPED,
        EXCEPTION, RETRY, NO_RESULT).
        """
        return sum([self.results[result] for result in
            (FAILURE, SKIPPED, EXCEPTION, RETRY, NO_RESULT)])
    def get_ptg_results(self):
        """The results (SUCCESS, WARNINGS, etc. builds) as percentage."""
        if self.total == 0:
            return dict([(result, 0) for result in self.results])
        return dict([(result, n * 100. / self.total) for (result, n) in
            self.results.items()])
    def get_ptg_int_results(self):
        """The results (SUCCESS, WARNINGS, etc. builds) as percentage per each
        time interval (if int_size>0).  (Python 2 code: uses xrange.)
        """
        r = {
            SUCCESS: [0] * self.int_no,
            WARNINGS: [0] * self.int_no,
            FAILURE: [0] * self.int_no,
        }
        for i in xrange(self.int_no):
            total = self.int_total[i]
            for result in self.results_int.keys():
                n = self.results_int[result][i]
                r[result][i] = n * 100. / total if total else 0
        return r
    def get_last_int_ptg_fail(self):
        """The percentage of all failing builds within the last interval size.
        """
        return self.last_int_fail * 100. / self.last_int_total \
            if self.last_int_total else 0
    def get_ptg_busy(self):
        """The percentage of slave busy time; -1 when the timeframe is
        unknown (missing start or end time)."""
        return self._d_sum * 100. / self.timeframe if self.timeframe else -1
    def get_last_int_ptg_busy(self):
        """The percentage of slave busy time within the last interval size."""
        return self.last_int_sum * 100. / self.last_int_size \
            if self.last_int_size else 0
    def to_dict(self, summary=False):
        """JSON-serializable representation; with summary=False the
        individual builds are included too."""
        json_obj = {
            'slave_id': self.slave_id,
            'name': self.name,
            'starttime': self.starttime,
            'endtime': self.endtime,
            'busy': self.get_ptg_busy(),
            'busy_last_int_size': self.get_last_int_ptg_busy(),
            'avg_build_duration': self.get_avg_duration(),
            'total': self.total,
            'results_success': self.results[SUCCESS],
            'results_warnings': self.results[WARNINGS],
            'results_failure_all': self.get_results_fail_all(),
            'results_failure': self.results[FAILURE],
            'results_skipped': self.results[SKIPPED],
            'results_exception': self.results[EXCEPTION],
            'results_retry': self.results[RETRY],
            'int_size': self.int_size,
            'last_int_size': self.last_int_size,
        }
        if not summary:
            json_obj['builds'] = []
            for build in self.builds:
                json_obj['builds'].append(build.to_dict(summary=True))
        return json_obj
class BuildersReport(Report):
    """Contains a summary Builder Report for each builder that had at least
    one build within the specified timeframe.
    """
    def __init__(self, starttime, endtime):
        """starttime/endtime are the UNIX timestamps bounding the report."""
        Report.__init__(self)
        self.starttime = starttime
        self.endtime = endtime
        self.builders = {}  # builder name -> BuilderDetailsReport
    def add(self, build):
        """Update the report by adding a build to the corresponding Builder
        Report, creating the per-builder report on first sight.
        """
        builder_name = build.builder_name
        if builder_name not in self.builders:
            self.builders[builder_name] = BuilderDetailsReport(self.starttime,
                self.endtime, name=builder_name, summary=True)
        self.builders[builder_name].add(build)
    def to_dict(self, summary=False):
        """JSON-serializable representation with one entry per builder."""
        json_obj = {
            'starttime': self.starttime,
            'endtime': self.endtime,
            'builders': [],
        }
        # Bug fix: iterate the report objects, not the dict itself --
        # iterating self.builders yields builder-name strings, which have
        # no to_dict() and raised AttributeError here.
        for builder in self.builders.values():
            json_obj['builders'].append(builder.to_dict(summary=True))
        return json_obj
class BuilderDetailsReport(Report):
    """Build report for a single builder over [starttime, endtime]:
    result counts, average duration, and a per-slave breakdown.
    """
    def __init__(self, starttime, endtime, name=None, summary=False):
        Report.__init__(self)
        self.name = name
        self.starttime = starttime
        self.endtime = endtime
        self.summary = summary
        self.slaves = {}  # slave_id -> SlaveDetailsReport
        # sum of all durations
        self._d_sum = 0
        # results
        self.total = 0
        self.results = {
            NO_RESULT: 0,
            SUCCESS: 0,
            WARNINGS: 0,
            FAILURE: 0,
            SKIPPED: 0,
            EXCEPTION: 0,
            RETRY: 0,
        }
    def add(self, build):
        """Update the builder report by analyzing a build's properties."""
        self.total += 1
        # results
        result = build.result if build.result != None else NO_RESULT
        if result in self.results:
            self.results[result] += 1
        # sum durations
        self._d_sum += build.duration
        # report per slave
        slave_id = build.slave_id
        if slave_id not in self.slaves:
            self.slaves[slave_id] = SlaveDetailsReport(self.starttime,
                self.endtime, slave_id, name=build.slave_name, summary=True)
        # NOTE(review): in summary mode the per-slave report is still
        # created above but never populated -- presumably intentional so
        # to_dict(summary=True) skips it, but worth confirming
        if not self.summary:
            self.slaves[slave_id].add(build)
    def get_avg_duration(self):
        """The average (mean) duration of the builds which run on this slave."""
        return int(float(self._d_sum) / self.total) if self.total else 0
    def get_results_fail_all(self):
        """The number of all failing builds (the sum of FAILURE, SKIPPED,
        EXCEPTION, RETRY, NO_RESULT).
        """
        return sum([self.results[result] for result in
            (FAILURE, SKIPPED, EXCEPTION, RETRY, NO_RESULT)])
    def get_ptg_results(self):
        """The results (SUCCESS, WARNINGS, etc. builds) as percentage."""
        if self.total == 0:
            return dict([(result, 0) for result in self.results])
        return dict([(result, n * 100. / self.total)
            for (result, n) in self.results.items()])
    def to_dict(self, summary=False):
        """JSON-serializable representation; with summary=False the
        per-slave summaries are included too."""
        json_obj = {
            'name': self.name,
            'starttime': self.starttime,
            'endtime': self.endtime,
            'avg_build_duration': self.get_avg_duration(),
            'total': self.total,
            'results_success': self.results[SUCCESS],
            'results_warnings': self.results[WARNINGS],
            'results_failure_all': self.get_results_fail_all(),
            'results_failure': self.results[FAILURE],
            'results_skipped': self.results[SKIPPED],
            'results_exception': self.results[EXCEPTION],
            'results_retry': self.results[RETRY],
        }
        if not summary:
            json_obj['slaves'] = []
            for slave_id in self.slaves:
                slave_obj = self.slaves[slave_id].to_dict(summary=True)
                json_obj['slaves'].append(slave_obj)
        return json_obj
class Build(object):
    """A single build's identity and timing, built from a statusdb row.

    datetime start/end times are converted to UNIX timestamps; missing
    times are stored as None and the duration is clamped at 0.
    """
    def __init__(self, slave_id=None, slave_name=None, result=None,
            builder_id=None, builder_name=None, starttime=None, endtime=None):
        """All parameters default to None so a Build can be constructed
        directly from a row dict: Build(**row).  starttime/endtime are
        datetime objects or None.
        """
        self.slave_id = slave_id
        self.slave_name = slave_name
        self.result = result
        self.builder_id = builder_id
        self.builder_name = builder_name
        self.starttime = time.mktime(starttime.timetuple()) \
            if starttime else None
        self.endtime = time.mktime(endtime.timetuple()) if endtime else None
        # some endtimes are like 1970-01-01 00:00:01, which would give a
        # negative duration -- clamp at 0
        self.duration = max(0, self.endtime - self.starttime
            if self.starttime and self.endtime else 0)
    def to_dict(self, summary=False):
        """JSON-serializable representation.

        Bug fix: accepts (and ignores) a summary flag for parity with the
        other report objects -- SlaveDetailsReport.to_dict() calls
        build.to_dict(summary=True), which previously raised TypeError
        because this method took no such argument.
        """
        json_obj = {
            'slave_id': self.slave_id,
            'slave_name': self.slave_name,
            'builder_id': self.builder_id,
            'starttime': self.starttime if self.starttime else 0,
            'endtime': self.endtime if self.endtime else 0,
            'duration': self.duration,
            'result': self.result,
        }
        return json_obj
|
def distance(strand_a, strand_b):
    """Return the Hamming distance between two equal-length DNA strands.

    Two empty strands have distance 0; otherwise an empty strand or a
    length mismatch raises ValueError.
    """
    if not strand_a and not strand_b:
        return 0
    if not strand_a:
        raise ValueError("The strand_a cannot be empty!")
    if not strand_b:
        raise ValueError("The strand_b cannot be empty!")
    if len(strand_a) != len(strand_b):
        raise ValueError("The strands must be equal!")
    return sum(1 for left, right in zip(strand_a, strand_b) if left != right)
|
# Read files
# Using argv to read filename from users
# Import the argv feature from the sys module.
from sys import argv
# Use argv to take the filename the user wants to open.
script, filename = argv
# !!! Function: open (). Exp.: open(filename, mode ='r')
# mode = 'r' : open for reading
# mode = 'w' : open for writing, truncating the file first
# mode = 'x' : create a new file and open it for writing
# mode = 'a' : open for writing, appending to the end of the file if it exists
# mode = 'b' : binary Mode
# mode = 't' : text mode (default)
# mode = '+' : open a disk file for updating (reading and writing)
# Intact: open(filename, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None)
# Open the file named `filename` and bind the file object to `txt`.
txt = open(filename)
# Print the name of the opened file, interpolated into the string.
# (bug fix: the f-string previously had no placeholder for the filename)
print(f"Here is your file {filename}:")
# Use .read() to get the file's contents as a string.
print(txt.read())
# Close the file when done.
# (bug fix: the original called txt.read() again *after* txt.close(),
# which raises "ValueError: I/O operation on closed file")
txt.close()
# Using input () to read filename
print("Type the filename again:")
# Let the user type the filename of the file to open.
file_again = input(">")
# Open that file and bind it to a variable.
txt_again = open(file_again)
# Read and print the contents of txt_again.
print(txt_again.read())
txt_again.close()
# Notes
# Step1: Using import argv or input() to understand which file the user want to open
# Steo2: Using open () to list the content of the file to a variable
# Step3: Using .read to read and display the content.
|
from __future__ import absolute_import
import collections
from django import template
from django.core.urlresolvers import reverse
from django.utils.encoding import force_unicode
from commis.search.forms import SearchForm
from commis.utils.deleted_objects import get_deleted_objects
register = template.Library()  # template-tag registry for this module
@register.simple_tag(takes_context=True)
def commis_nav_item(context, name, view_name):
    """Render a navigation <li>, marked active when the current request
    path lives under the linked view's URL."""
    target = reverse(view_name)
    is_active = context['request'].path_info.startswith(target)
    css = ' class="active"' if is_active else ''
    return '<li%s><a href="%s">%s</a></li>'%(css, target, name)
@register.inclusion_tag('commis/delete_confirmation.html', takes_context=True)
def commis_delete_confirmation(context, obj):
    """Build the template context for the shared delete-confirmation page."""
    meta_opts = obj._meta
    to_delete, perms_needed, protected = get_deleted_objects(
        obj, context['request'])
    return {
        'object': obj,
        'object_name': force_unicode(meta_opts.verbose_name),
        'deleted_objects': to_delete,
        'perms_lacking': perms_needed,
        'protected': protected,
        'opts': meta_opts,
    }
@register.inclusion_tag('commis/_json.html')
def commis_json(name, obj):
    """Top-level context for rendering *obj* as a JSON tree; the counter
    starts at 0 and is advanced by commis_json_tree."""
    return dict(name=name, obj=obj, count=0)
@register.inclusion_tag('commis/_json_tree.html', takes_context=True)
def commis_json_tree(context, key, value, parent=0):
    """Context for one node of the recursive JSON tree.

    A counter stored in the root context's first dict is bumped per node
    so each rendered node gets a unique id.
    """
    root_context = context.get('root_context', context)
    shared = root_context.dicts[0]
    shared['count'] += 1
    node_id = shared['count']
    return {
        'root_context': root_context,
        'name': context['name'],
        'count': node_id,
        'key': key,
        'value': value,
        'cur_count': node_id,
        'parent': parent,
        'is_dict': isinstance(value, collections.Mapping),
        'is_list': (isinstance(value, collections.Sequence)
                    and not isinstance(value, basestring)),
    }
@register.simple_tag()
def commis_run_list_class(entry):
    """CSS class for a run-list entry, chosen by its type prefix."""
    for prefix, css_class in (('recipe[', 'ui-state-default'),
                              ('role[', 'ui-state-highlight')):
        if entry.startswith(prefix):
            return css_class
    raise ValueError('Unknown entry %s'%entry)
@register.simple_tag()
def commis_run_list_name(entry):
    """Strip the 'recipe[...]' / 'role[...]' wrapper and return the name."""
    bracket_body = entry.split('[', 1)[1]
    return bracket_body.rstrip(']')
@register.inclusion_tag('commis/_header_search.html', takes_context=True)
def commis_header_search(context):
    """Provide the small header search form (the context is not used)."""
    form = SearchForm(size=20)
    return {'form': form}
|
## CONSTANT ##
# Byte offsets into a raw websocket frame.  FIN/RSV1-3/OPCODE all live in
# byte 0; MASK and PAYLOAD_LEN share byte 1.  MASK_KEY_IDX and PAYLOAD_IDX
# are only placeholders here: the real offsets depend on the payload
# length encoding and on whether the frame is masked.
FIN_IDX = 0
RSV_1_IDX = 0
RSV_2_IDX = 0
RSV_3_IDX = 0
OPCODE_IDX = 0
MASK_IDX = 1
PAYLOAD_LEN_IDX = 1
MASK_KEY_IDX = 0
PAYLOAD_IDX = 0
def parse_frame(frame):
    """Parse a raw websocket frame (bytes) into its header fields and payload.

    Returns a dict with keys FIN, RSV1-3, OPCODE, MASK, PAYLOAD_LEN,
    MASK_KEY and PAYLOAD.  For unmasked frames MASK_KEY is b''.

    Bug fix: the original assigned the module-level names MASK_KEY_IDX and
    PAYLOAD_IDX inside the function, making them locals -- an unmasked
    frame (mask == 0) never assigned MASK_KEY_IDX and crashed with
    UnboundLocalError (and would have sliced a bogus mask key anyway).
    """
    ## GET ALL FRAME DETAIL (byte 0: FIN + RSV bits + opcode; byte 1: mask
    ## bit + 7-bit payload length)
    fin = frame[0] >> 7
    rsv1 = (frame[0] >> 6) & 0x01
    rsv2 = (frame[0] >> 5) & 0x01
    rsv3 = (frame[0] >> 4) & 0x01
    opcode = frame[0] & 0x0f
    mask = frame[1] >> 7
    payload_len = frame[1] & 0x7f
    # 126 -> 16-bit extended length follows, 127 -> 64-bit extended length
    if payload_len < 126:
        payload_idx = 2
    elif payload_len == 126:
        payload_idx = 4
    else:  # payload_len == 127
        payload_idx = 10
    ## GET MASK KEY ## (4 bytes right after the length, only when masked)
    mask_key_idx = payload_idx
    if mask == 1:
        mask_key = frame[mask_key_idx:mask_key_idx + 4]
        payload_idx += 4
    else:
        mask_key = b''
    ## GET PAYLOAD ##
    payload = frame[payload_idx:]
    result = {
        "FIN" : fin,
        "RSV1" :rsv1,
        "RSV2" :rsv2,
        "RSV3" :rsv3,
        "OPCODE" : opcode,
        "MASK" : mask,
        "PAYLOAD_LEN" : payload_len,
        "MASK_KEY" : mask_key,
        "PAYLOAD" : payload
    }
    return result
def build_frame(fin, rsv1, rsv2, rsv3, opcode, mask, payload_len, mask_key,
        payload):
    """Assemble a websocket frame as bytes.

    The whole frame is accumulated in one big integer (header bits shifted
    in from the left, then mask key and payload) and serialized big-endian
    at the end.
    """
    size = len(payload)
    # Byte 0: FIN + RSV1-3 (4 bits), then the 4-bit opcode.
    header = (((fin << 3) + (rsv1 << 2) + (rsv2 << 1) + rsv3) << 4) + opcode
    # MASK bit.
    header = (header << 1) + mask
    # PAYLOAD_LEN, with a 16- or 64-bit extended length when needed.
    if size <= 125:
        header = (header << 7) + size
    elif size <= 65535:
        header = (((header << 7) + 126) << 16) + size
    else:
        header = (((header << 7) + 127) << 64) + size
    # 32-bit masking key, only when the frame is masked.
    if mask == 1:
        header = (header << 32) + int.from_bytes(mask_key, byteorder='big')
    # Shift the payload in and serialize the accumulated integer.
    value = (header << size * 8) + int.from_bytes(payload, byteorder='big')
    return value.to_bytes((value.bit_length() + 7) // 8, 'big')
def get_real_payload(mask, mask_key, payload):
    """Unmask a frame payload (XOR with the 4-byte key, cycled).

    Returns the payload unchanged when mask == 0; raises ValueError for
    any other mask value.

    Bug fix: the original built the result from int_to_bytes(byte), and
    int_to_bytes(0) returns b'' -- so every unmasked byte equal to its
    mask byte was silently dropped, corrupting the payload.  bytes(...)
    preserves zero bytes.
    """
    if (mask == 1):
        return bytes(b ^ mask_key[i % 4] for i, b in enumerate(payload))
    elif (mask == 0):
        return payload
    else:
        raise ValueError
def int_to_bytes(x):
    """Big-endian byte string for a non-negative int.

    Note: x == 0 has bit_length 0 and therefore yields b''.
    """
    width = (x.bit_length() + 7) // 8
    return x.to_bytes(width, 'big')
def build_packet_payload(payloads):
packet_payload = b''
for payload in payloads:
packet_payload += payload
return packet_payload
# print(build_packet_payload([b'\x01', b'\x01', b'\x01\x02']))
# print(get_real_payload(1, b'%r\x14d', b'D\x01|\rD\x13u\x14U\x02'))
# print(build_frame(1, 1, 1, 1 , 1, 0, 1, 1, bytes('', 'utf-8')))
|
from django.shortcuts import render, get_object_or_404
# Create your views here.
from .models import Specialmoments
def allmoments(request):
    """List view: hand the Specialmoments manager to the listing template."""
    context = {'moments': Specialmoments.objects}
    return render(request, 'specialmoments/specialmoments.html', context)
def detail2(request, specialmoments_id):
    """Detail view for one Specialmoments row; 404 when the pk is missing."""
    moment = get_object_or_404(Specialmoments, pk=specialmoments_id)
    return render(request, 'specialmoments/detail2.html',
                  {'specialmoments': moment})
|
__author__ = ['sibirrer', 'ajshajib']
import time
import sys
import numpy as np
from lenstronomy.Sampling.Samplers.pso import ParticleSwarmOptimizer
from lenstronomy.Util import sampling_util
import emcee
from schwimmbad import choose_pool
class Sampler(object):
    """
    Class which executes the different sampling methods.

    Available are: MCMC with emcee and a Particle Swarm Optimizer.
    These are examples; depending on your problem, you might find
    other/better solutions. Feel free to sample with your convenient
    sampler!
    """
    def __init__(self, likelihoodModule):
        """
        :param likelihoodModule: instance of LikelihoodModule class
        """
        self.chain = likelihoodModule
        # parameter bounds supplied by the likelihood module
        self.lower_limit, self.upper_limit = self.chain.param_limits
    def pso(self, n_particles, n_iterations, lower_start=None, upper_start=None,
            threadCount=1, init_pos=None, mpi=False, print_key='PSO'):
        """
        Return the best fit for the lens model on catalogue basis with
        particle swarm optimizer.

        :param n_particles: number of PSO particles
        :param n_iterations: number of PSO iterations
        :param lower_start: lower bound for the particle start positions
            (clipped to the parameter limits); defaults to the lower limits
        :param upper_start: upper bound for the particle start positions;
            defaults to the upper limits
        :param threadCount: number of parallel processes (ignored with MPI)
        :param init_pos: initial global-best position; defaults to the
            midpoint of the start bounds
        :param mpi: if True, use an MPI pool instead of multiprocessing
        :param print_key: label used in the progress printout
        :return: (best-fit parameter list,
            [chi2_list, pos_list, vel_list, []])
        """
        if lower_start is None or upper_start is None:
            lower_start, upper_start = np.array(self.lower_limit), np.array(self.upper_limit)
            print("PSO initialises its particles with default values")
        else:
            # clip the requested start box to the hard parameter limits
            lower_start = np.maximum(lower_start, self.lower_limit)
            upper_start = np.minimum(upper_start, self.upper_limit)
        pool = choose_pool(mpi=mpi, processes=threadCount, use_dill=True)
        if mpi is True and pool.is_master():
            print('MPI option chosen for PSO.')
        pso = ParticleSwarmOptimizer(self.chain.logL,
                                     lower_start, upper_start, n_particles,
                                     pool=pool)
        if init_pos is None:
            # midpoint of the start box
            init_pos = (upper_start - lower_start) / 2 + lower_start
        pso.set_global_best(init_pos, [0]*len(init_pos),
                            self.chain.logL(init_pos))
        # only the master process prints progress / results
        if pool.is_master():
            print('Computing the %s ...' % print_key)
        time_start = time.time()
        result, [chi2_list, pos_list, vel_list] = pso.optimize(n_iterations)
        if pool.is_master():
            kwargs_return = self.chain.param.args2kwargs(result)
            print(pso.global_best.fitness * 2 / (max(
                self.chain.effective_num_data_points(**kwargs_return), 1)), 'reduced X^2 of best position')
            print(pso.global_best.fitness, 'logL')
            print(self.chain.effective_num_data_points(**kwargs_return), 'effective number of data points')
            print(kwargs_return.get('kwargs_lens', None), 'lens result')
            print(kwargs_return.get('kwargs_source', None), 'source result')
            print(kwargs_return.get('kwargs_lens_light', None), 'lens light result')
            print(kwargs_return.get('kwargs_ps', None), 'point source result')
            print(kwargs_return.get('kwargs_special', None), 'special param result')
            time_end = time.time()
            print(time_end - time_start, 'time used for ', print_key)
            print('===================')
        return result, [chi2_list, pos_list, vel_list, []]
    def mcmc_emcee(self, n_walkers, n_run, n_burn, mean_start, sigma_start, mpi=False, progress=False, threadCount=1):
        """
        Run MCMC with emcee.

        :param n_walkers: number of walkers in the emcee ensemble
        :type n_walkers: int
        :param n_run: number of sampling iterations (post burn-in)
        :type n_run: int
        :param n_burn: number of burn-in iterations, discarded from the chain
        :type n_burn: int
        :param mean_start: mean of the initial walker ball, one value per
            parameter
        :param sigma_start: spread of the initial walker ball, one value per
            parameter
        :param mpi: if True, use an MPI pool instead of multiprocessing
        :type mpi: bool
        :param progress: if True, show emcee's progress bar
        :type progress: bool
        :param threadCount: number of parallel processes (ignored with MPI)
        :type threadCount: int
        :return: (flattened post-burn-in samples, log-probabilities)
        :rtype: tuple of arrays
        """
        num_param, _ = self.chain.param.num_param()
        # initial walker positions: a Gaussian ball around mean_start
        p0 = sampling_util.sample_ball(mean_start, sigma_start, n_walkers)
        time_start = time.time()
        pool = choose_pool(mpi=mpi, processes=threadCount)
        sampler = emcee.EnsembleSampler(n_walkers, num_param, self.chain.logL,
                                        pool=pool)
        sampler.run_mcmc(p0, n_burn + n_run, progress=progress)
        # discard the burn-in portion and flatten across walkers
        flat_samples = sampler.get_chain(discard=n_burn, thin=1, flat=True)
        dist = sampler.get_log_prob(flat=True, discard=n_burn, thin=1)
        if pool.is_master():
            print('Computing the MCMC...')
            print('Number of walkers = ', n_walkers)
            print('Burn-in iterations: ', n_burn)
            print('Sampling iterations:', n_run)
            time_end = time.time()
            print(time_end - time_start, 'time taken for MCMC sampling')
        return flat_samples, dist
|
import tensorflow as tf
import numpy as np
import random
import networkx as nx
import scipy.io as sio
import os
import sys
import pickle
import walk
from configs import *
def get_batch(arr, n_seqs, n_steps):
    """Yield (x, y) training batches from an encoded corpus matrix.

    The matrix is first trimmed so both dimensions divide evenly into
    n_seqs rows and n_steps columns; each target y is its input x rotated
    left by one step along the time axis (next-token prediction).
    """
    rows = int(arr.shape[0] / n_seqs) * n_seqs
    cols = int(arr.shape[1] / n_steps) * n_steps
    arr = arr[:rows, :cols]
    for r in range(0, arr.shape[0], n_seqs):
        for c in range(0, arr.shape[1], n_steps):
            x = arr[r:r + n_seqs, c:c + n_steps]
            # left-rotate by one: y[:, :-1] = x[:, 1:], y[:, -1] = x[:, 0]
            y = np.roll(x, -1, axis=1)
            yield x, y
def build_encode_arr(corpus, vocab_to_int):
    """Encode a corpus of equal-length token walks as an int32 matrix.

    Each row is one walk with every token replaced by its vocabulary id.
    """
    encoded = np.zeros([len(corpus), len(corpus[0])], dtype=np.int32)
    for row, path in enumerate(corpus):
        ids = [vocab_to_int[token] for token in path]
        encoded[row] = np.array(ids, dtype=np.int32)
    return encoded
def input_layer(n_steps, n_seqs):
    """Create the int32 input/target placeholders and the dropout keep-prob
    scalar for the TF1 graph."""
    batch_shape = (n_seqs, n_steps)
    inputs = tf.placeholder(dtype=tf.int32, shape=batch_shape, name='input')
    targets = tf.placeholder(dtype=tf.int32, shape=batch_shape,
                             name='targets')
    keep_prob = tf.placeholder(dtype=tf.float32, name='keep_prob')
    return inputs, targets, keep_prob
def basic_cell(lstm_cell_num, keep_prob):
    """One LSTM cell (orthogonal init, zero forget bias) wrapped in output
    dropout."""
    cell = tf.contrib.rnn.LSTMCell(lstm_cell_num,
                                   initializer=tf.orthogonal_initializer,
                                   forget_bias=0.0)
    return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
def hidden_layer(lstm_cell_num, lstm_layer_num, n_seqs, keep_prob):
    """Stack lstm_layer_num dropout-wrapped LSTM cells and return the stack
    together with its all-zero initial state for a batch of n_seqs."""
    cells = [basic_cell(lstm_cell_num, keep_prob)
             for _ in range(lstm_layer_num)]
    stacked = tf.contrib.rnn.MultiRNNCell(cells)
    return stacked, stacked.zero_state(n_seqs, tf.float32)
def output_layer(lstm_output, in_size, out_size):
    """Project flattened LSTM outputs to class logits and a softmax
    distribution over the vocabulary."""
    flat = tf.reshape(tf.concat(lstm_output, 1), [-1, in_size])
    with tf.variable_scope('softmax'):
        weights = tf.Variable(
            tf.truncated_normal([in_size, out_size], stddev=0.1))
        bias = tf.Variable(tf.zeros(out_size))
        logits = tf.matmul(flat, weights) + bias
    prob_distrib = tf.nn.softmax(logits, name='predictions')
    return logits, prob_distrib
def cal_loss(logits, targets, class_num):
    """Mean softmax cross-entropy between logits and one-hot targets."""
    labels = tf.reshape(tf.one_hot(targets, class_num), logits.get_shape())
    per_step = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
                                                          labels=labels)
    return tf.reduce_mean(per_step)
def optimizer(loss, learning_rate, grad_clip):
    """Adam train op over all trainable variables with globally norm-clipped
    gradients."""
    with tf.name_scope('gradient'):
        tvars = tf.trainable_variables()
        clipped, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars),
                                            grad_clip)
        adam = tf.train.AdamOptimizer(learning_rate)
        return adam.apply_gradients(zip(clipped, tvars))
class LSTM:
    """LSTM language model over random-walk node sequences (TF1 graph).

    Construction builds the whole graph: input placeholders, an embedding
    lookup, a multi-layer LSTM, a softmax projection, the loss, and the
    gradient-clipped Adam train op.
    """
    def __init__(self, class_num,
                 n_steps,
                 n_seqs,
                 lstm_cell_num,
                 lstm_layer_num,
                 learning_rate,
                 grad_clip,
                 d):
        # class_num: vocabulary size; n_steps/n_seqs: batch geometry;
        # d: embedding dimension.
        # tf.reset_default_graph()
        # input layer
        self.input, self.targets, self.keep_prob = input_layer(n_steps, n_seqs)
        # lstm layer
        lstm_cell, self.initial_state = hidden_layer(
            lstm_cell_num,
            lstm_layer_num,
            n_seqs,
            self.keep_prob)
        # embedding layer: one d-dimensional vector per class; the scope
        # name 'embedding_layer' is shared with the Lap graph (reuse=True)
        with tf.variable_scope('embedding_layer'):
            embedding = tf.get_variable(name='embedding',
                                        shape=[class_num, d],
                                        initializer=tf.glorot_normal_initializer())
            x_input = tf.nn.embedding_lookup(embedding, self.input)
            self.embedding = embedding
        output, state = tf.nn.dynamic_rnn(
            lstm_cell, x_input, initial_state=self.initial_state)
        self.out = output
        self.final_state = state
        # project LSTM outputs to per-class logits and probabilities
        self.logits, self.pred = output_layer(output, lstm_cell_num, class_num)
        with tf.name_scope('loss'):
            self.loss = cal_loss(self.logits, self.targets, class_num)
        self.optimizer = optimizer(self.loss, learning_rate, grad_clip)
class Lap:
    """Laplacian regulariser on the shared node embedding.

    Minimises 2 * tr(E^T L E) over a batch sub-graph, which pulls the
    embeddings of adjacent nodes together. Reuses (reuse=True) the
    'embedding_layer/embedding' variable created by LSTM, so an LSTM
    instance must be constructed before a Lap instance.
    """

    def __init__(self, lr, class_num, d):
        # lr: RMSProp learning rate; class_num / d: embedding table shape.
        self.adj = tf.placeholder(dtype=tf.float32, name='adj')
        self.index = tf.placeholder(dtype=tf.int32, name='index')
        with tf.variable_scope('embedding_layer', reuse=True):
            # reuse=True fetches the variable built by LSTM; the
            # initializer given here is therefore never applied.
            embedding = tf.get_variable(name='embedding',
                                        shape=[class_num, d],
                                        initializer=tf.random_uniform_initializer())
        # Unnormalised graph Laplacian L = D - A of the batch adjacency.
        D = tf.diag(tf.reduce_sum(self.adj, 1))
        L = D - self.adj
        batch_emb = tf.nn.embedding_lookup(embedding, self.index)
        with tf.name_scope('lap_loss'):
            self.lap_loss = 2 * \
                tf.trace(
                    tf.matmul(tf.matmul(tf.transpose(batch_emb), L), batch_emb))
        # Restrict updates to the embedding variables only.
        tvars = tf.trainable_variables('embedding_layer')
        grad = tf.gradients(self.lap_loss, tvars)
        self.lap_optimizer = tf.train.RMSPropOptimizer(lr).apply_gradients(zip(grad, tvars))
if __name__ == '__main__':
    # ---- load the input graph in one of the supported formats ----
    if FLAGS.format == 'mat':
        mat = sio.loadmat(FLAGS.input)['network']
        G = nx.from_scipy_sparse_matrix(mat)
    elif FLAGS.format == 'adjlist':
        G = nx.read_adjlist(FLAGS.input)
    elif FLAGS.format == 'edgelist':
        G = nx.read_edgelist(FLAGS.input)
    else:
        raise Exception(
            "Unkown file format:{}.Valid format is 'mat','adjlist','edgelist'".format(FLAGS.format))
    mat = nx.to_scipy_sparse_matrix(G)
    # ---- cache the degree-biased random-walk corpus on disk ----
    filename = os.path.split(FLAGS.input)[-1]
    file = os.path.splitext(filename)[0]
    picklename = '{}_degree_random_walk_corpus_n{}_p{}_sb{}.pkl'.format(
        file, FLAGS.node_num, FLAGS.path_length, FLAGS.sb)
    picklefile = 'tmp/' + picklename
    if not os.path.exists(picklefile):
        print('Start Walking...\n **********')
        # Reweight each edge by degree similarity before walking.
        # NOTE(review): assumes every edge already carries a 'weight'
        # attribute (true for 'mat' input); confirm for adjlist/edgelist.
        for edge in G.edges():
            sim = min(G.degree(edge[0]), G.degree(edge[1])) / \
                (max(G.degree(edge[0]), G.degree(edge[1]))+FLAGS.sb)
            G[edge[0]][edge[1]]['weight'] *= sim
        G_ = walk.Walk(G)
        G_.preprocess_transition_probs()
        corpus = G_.simulate_walks(FLAGS.node_num, FLAGS.path_length)
        print('Making Pickle File...')
        with open(picklefile, 'wb') as f:
            pickle.dump(corpus, f)
    else:
        print('Find Pickle!')
        with open(picklefile, 'rb') as f:
            corpus = pickle.load(f)
    # ---- encode the corpus and build both models (shared embedding) ----
    vocab = list(G.nodes())
    vocab_to_int = {c: i for i, c in enumerate(vocab)}
    int_to_vocab = dict(enumerate(vocab))
    encode_arr_input = build_encode_arr(corpus, vocab_to_int)
    lstm = LSTM(len(vocab), FLAGS.timesteps, FLAGS.sequences, FLAGS.hidden_size,
                FLAGS.layer, FLAGS.lr, FLAGS.grad_clip, FLAGS.representation_size)
    lap = Lap(FLAGS.lap_lr, len(vocab), FLAGS.representation_size)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())
    print('training..')
    for i in range(FLAGS.epoches):
        # Alternate language-model epochs and Laplacian-regulariser epochs.
        for j in range(FLAGS.lstm_epoches):
            new_state = sess.run(lstm.initial_state, {
                lstm.keep_prob: FLAGS.keep_prob})
            for x, y in get_batch(encode_arr_input, FLAGS.sequences, FLAGS.timesteps):
                feed_dict = {lstm.input: x, lstm.targets: y,
                             lstm.keep_prob: FLAGS.keep_prob, lstm.initial_state: new_state}
                batch_loss, new_state, _ = sess.run(
                    [lstm.loss, lstm.final_state, lstm.optimizer], feed_dict=feed_dict)
        for k in range(FLAGS.lap_epoches):
            adj = mat.toarray()
            for index in range(0, adj.shape[0], FLAGS.batches):
                batch_adj = adj[index:index+FLAGS.batches,
                                index:index+FLAGS.batches]
                # BUG FIX: feed the *instance* placeholders lap.adj /
                # lap.index. The original wrote Lap.adj / Lap.index; the
                # Lap class has no such attributes (they are created in
                # __init__), so this raised AttributeError at runtime.
                feed_dict = {lap.adj: batch_adj, lap.index: np.arange(
                    adj.shape[0])[index:index+FLAGS.batches]}
                lap_loss, _ = sess.run(
                    [lap.lap_loss, lap.lap_optimizer], feed_dict=feed_dict)
    np.save(FLAGS.output, sess.run(lstm.embedding))
    print('Done!')
    print('Save the representation to {}'.format(FLAGS.output))
|
import ajustador as aju
from ajustador.helpers import save_params
from ajustador import drawing
import measurements1 as ms1
import os
#must be in current working directory for this import to work, else use exec
import params_fitness,fit_commands
# a. simplest approach is to use CAPOOL (vs CASHELL, and CASLAB for spines)
# b. no spines
# c. use ghk (and ghkkluge=0.35e-6) once that is working/implemented in moose
# ---- model / optimization settings ----
ghkkluge=1
modeltype='d1d2'
rootdir='/home/avrama/moose/SPN_opt/'
#use 1 and 3 for testing, 200 and 8 for optimization
generations=200
popsiz=8
seed=62938
#after generations, do 25 more at a time and test for convergence
test_size=25
################## neuron /data specific specifications #############
ntype='D2'
dataname='D2_051311'
exp_to_fit = ms1.D2waves051311[[8,17, 19, 22]] #0, 6 are hyperpol
# Work in a per-run output directory named after the data and the seed.
dirname=dataname+'_pas2_'+str(seed)
if not dirname in os.listdir(rootdir):
    os.mkdir(rootdir+dirname)
os.chdir(rootdir+dirname)
######## set up parameters and fitness
# NOTE(review): 'morph_file' is never defined in this script, so the next
# line raises NameError as written -- the morphology-file assignment was
# presumably edited out; define it before running.
params1,fitness=params_fitness.params_fitness(morph_file,ntype,modeltype)
# set-up and do optimization
fit3,mean_dict3,std_dict3,CV3=fit_commands.fit_commands(dirname,exp_to_fit,modeltype,ntype,fitness,params1,generations,popsiz, seed, test_size)
#########look at results
drawing.plot_history(fit3, fit3.measurement)
#Save parameters of good results from end of optimization, and all fitness values
startgood=1000 #set to 0 to print all
threshold=0.8 #set to large number to print all
save_params.save_params(fit3, startgood, threshold)
#to save the fit object
#save_params.persist(fit3,'.')
|
class GitRepo(object):
    """Value object holding a git repository's name and its two remotes."""

    def __init__(self, name, http_addr, ssh_addr):
        self.name = name
        self.http_addr = http_addr
        self.ssh_addr = ssh_addr

    def __str__(self):
        # One field per line: name, HTTP remote, SSH remote.
        fields = (self.name, self.http_addr, self.ssh_addr)
        return '\n'.join(str(field) for field in fields)
|
"""Report NEM emissions intensity using NGER data.
Copyright (C) 2017 Ben Elliston <bje@air.net.au>
"""
import sys
import json
import argparse
import urllib2
import datetime
import pandas as pd
import numpy as np
# Emissions intensities (t CO2-e per MWh) keyed by station name, presumably
# taken from AEMO's NTNDP data, for stations whose NGER records are missing
# or ambiguous in the CSV matched below.
ntndp = {'Broken Hill Gas Turbines': 0.93,
         'Eraring Power Station': 0.88,
         'Jeeralang "B" Power Station': 0.76,
         'Lonsdale Power Station': 1.04,
         'Swanbank E Gas Turbine': 0.36,
         'Tarong Power Station': 0.84,
         'Tarong North Power Station': 0.8,
         'Tamar Valley Combined Cycled Power Station': 0.37,
         'Tamar Valley Peaking Power Station': 0.63,
         'Torrens Island Power Station "A"': 0.64,
         'Torrens Island Power Station "B"': 0.58,
         'Townsville Gas Turbine': 0.43,
         'Wivenhoe Power Station No. 2 Pump': 0,
         'Yarwun Power Station': 0.53}
# APVI API token -- placeholder; substitute a real token before use.
access_token = 'INSERT-TOKEN-HERE'
argparser = argparse.ArgumentParser()
# -d: AEMO dispatch CSV endpoint; -n: NGER facility CSV; -p: APVI live PV JSON.
argparser.add_argument('-d', type=str, default='http://services.aremi.d61.io/aemo/v7/csv/all')
argparser.add_argument('-n', type=str, default='NGERS - Designated generation facility data 2015-16.csv')
argparser.add_argument('-p', type=str, default='http://pv-map.apvi.org.au/api/v1/data/today.json?access_token=%s' % access_token)
args = argparser.parse_args()
def apvi(url):
    """Fetch APVI rooftop-PV data and check it is fresh.

    Returns the second-to-last 'output' record -- presumably because the
    last record is still accumulating (verify against the API). Raises
    AssertionError when the record is more than 30 minutes old.
    """
    urlobj = urllib2.urlopen(url)
    data = json.load(urlobj)
    output = data['output'][-2]
    ts = pd.Timestamp(output['ts'])
    now = pd.Timestamp(pd.datetime.utcnow()).tz_localize('UTC')
    delta = pd.Timedelta(minutes=30)
    assert (now - ts) < delta, "APVI data is stale"
    return output
nger = pd.read_csv(args.n, sep=',')
dispatch = pd.read_csv(urllib2.urlopen(args.d), sep=',')
pvoutput = apvi(args.p)
print '%s,' % datetime.datetime.now().isoformat("T"),
for rgn in ['NSW1', 'QLD1', 'SA1', 'TAS1', 'VIC1', 'ALL']:
emissions = 0
if rgn == 'ALL':
total_output = pvoutput['nsw'] + pvoutput['qld'] + pvoutput['sa'] + \
pvoutput['tas'] + pvoutput['vic']
else:
total_output = pvoutput[rgn.rstrip('1').lower()]
dispatch2 = dispatch if rgn == 'ALL' else dispatch[dispatch['Region'] == rgn]
for row in dispatch2.itertuples():
station_name = row[1]
output_mw = row[2]
lat = row[-2]
lon = row[-1]
if np.isnan(output_mw) or output_mw < 1:
continue
else:
total_output += output_mw
if row[16] in ['Hydro', 'Wind', 'Biomass', 'Landfill / Biogas',
'Renewable/ Biomass / Waste', 'Solar', 'Landfill, Biogas', 'Landfill Gas']:
continue
if station_name in ntndp:
intensity = ntndp[station_name]
else:
selected = nger[np.logical_and(np.isclose(nger['Latitude'], lat),
np.isclose(nger['Longitude'], lon))]
if not selected:
print >>sys.stderr, 'Not matched', station_name, lat, lon
continue
elif len(selected) > 1:
# Take average if there are multiple NGER records
intensity = selected['Emission intensity (t/Mwh)'].mean()
else:
intensity = selected['Emission intensity (t/Mwh)'].iloc[0]
emissions += output_mw * intensity
print '%.3f,' % (emissions / total_output),
print
|
# Configuration constants for the WhatsApp-automation service: everything
# deployment-specific comes from environment variables; the rest are fixed
# route templates, URLs and log-message strings.
import os
# environment variables (all required -- KeyError at import time if absent)
TOKEN = os.environ["TOKEN"]
REDIS_URL = os.environ["REDIS_URL"]
DOCKER_PATH_TO_USER_DIR = os.environ["DOCKER_PATH_TO_USER_DIR"]
DOCKER_PATH_TO_QR_CODE = os.environ["DOCKER_PATH_TO_QR_CODE"]
SLACK_WEBHOOK = os.environ["SLACK_WEBHOOK"]
SLACK_CHANNEL = os.environ["SLACK_CHANNEL"]
SLACK_TOKEN = os.environ["SLACK_TOKEN"]
SLACK_API_FILE_UPLOAD_PATH = os.environ["SLACK_API_FILE_UPLOAD_PATH"]
CHROME_PATH = os.environ["CHROME_PATH"]
# REDIS QUEUE NAME
REDIS_QUEUE_NAME = "ws"
# logger levels
DEBUG = "debug"
INFO = "info"
WARN = "warn"
ERROR = "error"
# API routes (Flask-style URL rules)
HEALTH_CHECK_ROUTE = "/internal/ping"
WHATSAPP_CHECK_ROUTE = "/ws/check/<string:phone_number>/<path:ret_url>"
WHATSAPP_MESSAGE_ROUTE = "/ws/message/<string:phone_number>/<string:message>/<path:ret_url>"
WHATSAPP_LOGIN_ROUTE = "/ws/login/<path:ret_url>"
WHATSAPP_LOGOUT_ROUTE = "/ws/logout/<path:ret_url>"
# API task status
API_SUCCESS = "success"
API_FAILED = "failed"
# API response messages
API_AUTH_FAILED = "Request rejected. Token incorrect or not provided"
API_PONG = "<html><body>pong</body></html>"
API_INVALID_PATH = "Invalid path"
# API service types
WHATSAPP_CHECK = "whatsapp check"
WHATSAPP_MESSAGE = "whatsapp message"
WHATSAPP_LOGIN = "whatsapp login"
WHATSAPP_LOGOUT = "whatsapp logout"
# Search status (aliases for the builtin singletons)
TRUE = True
FALSE = False
NONE = None
# URLs (WhatsApp Web endpoints; {} placeholders filled via str.format)
WHATSAPP_WEB_URL = "http://web.whatsapp.com"
WHATSAPP_CHECK_URL = "https://web.whatsapp.com/send?phone={}"
WHATSAPP_MESSAGE_URL = "https://web.whatsapp.com/send?phone={}&text={}&source=&data="
# log messages
DRIVER_INITIALIZED = "Webdriver initialized"
QR_CODE_SENT = "QR code sent through slack"
WHATSAPP_WEB_ERROR = "Whatsapp web error"
WHATSAPP_CHECK_INITIALIZED = "Whatsapp check initialized"
WHATSAPP_CHECK_NOT_LOGGED_IN = "Whatsapp Web not logged in, please scan qr code"
WHATSAPP_CHECK_COMPLETED = "Whatsapp check completed successfully"
WHATSAPP_CHECK_FAILED = "Whatsapp check failed, exception raised"
WHATSAPP_MESSAGE_INITIALIZED = "Whatsapp messaging initialized"
RECIPIENT_NO_NUMBER = "Recipient number does not have whatsapp"
WHATSAPP_MESSAGE_COMPLETED = "Message sent successfully"
WHATSAPP_MESSAGE_FAILED = "Whatsapp messaging failed, exception raised"
WHATSAPP_LOGIN_INITIALIZED = "Whatsapp login initialized"
ALREADY_LOGGED_IN = "Whatsapp already logged in"
WHATSAPP_LOGIN_COMPLETED = "Log in successful"
WHATSAPP_LOGIN_FAILED = "Whatsapp login failed, exception raised"
WHATSAPP_LOGOUT_INITIALIZED = "Whatsapp logout initialized"
ALREADY_LOGGED_OUT = "Currently not logged in to any account"
WHATSAPP_LOGOUT_COMPLETED = "Log out successful"
WHATSAPP_LOGOUT_FAILED = "Whatsapp logout failed, exception raised"
|
# encoding: utf-8
# Copyright 2013 maker
# License
"""
Identities Cron jobs
"""
from maker.identities import integration
def cron_integration():
    """Run the identities integration sync as a cron job.

    Failures are logged and suppressed so that one broken integration
    does not abort the rest of the cron run (preserves the original
    best-effort behaviour of the bare ``except: pass``).
    """
    import logging
    try:
        integration.sync()
    except Exception:
        # Narrowed from a bare except (which also swallowed SystemExit /
        # KeyboardInterrupt) and the failure is now recorded instead of
        # silently discarded.
        logging.getLogger(__name__).exception("identities integration sync failed")
|
import numpy as np
from Layers.Base import BaseLayer
class SoftMax(BaseLayer):
    """Row-wise softmax activation layer with analytic backward pass."""

    # constructor
    def __init__(self):
        super().__init__()
        # last forward input (kept for API symmetry with other layers)
        self.input = None
        # cached softmax output, needed by backward()
        self.y_pred = None
        # this layer has no trainable weights
        self.weights = None

    def forward(self, input_tensor):
        """Return softmax(input_tensor) computed independently per row.

        BUG FIX: the original subtracted the single *global* max. Softmax
        is invariant to any per-row constant shift, so results are the
        same mathematically -- but a row whose entries are all far below
        the global max underflows exp() to 0 everywhere, producing
        0/0 = NaN. Subtracting the per-row max keeps every row stable.
        """
        shifted = input_tensor - np.max(input_tensor, axis=1, keepdims=True)
        x_exp = np.exp(shifted)
        partition = np.sum(x_exp, axis=1, keepdims=True)
        out = x_exp / partition
        self.y_pred = out
        return out

    # E(n-1) = y_hat * (E_n - sum_j(E_n,j * y_hat_j))
    def backward(self, error_tensor):
        """Propagate error_tensor through the softmax Jacobian."""
        weighted = error_tensor * self.y_pred
        row_sum = np.sum(weighted, axis=1, keepdims=True)
        return self.y_pred * (error_tensor - row_sum)
|
"""Este programa simula um robô de serviços, num restaurante com uma mesa de forma, tamanho e posição
aleatórios. Quando o utilizador clica na área da mesa, o robô inicia o serviço para essa mesa,
consistindo numa ida à mesa para receber um pedido, regresso ao balcão para preparar o pedido,
entrega do pedido à mesa, e regresso ao balcão. O robô tem uma bateria, pelo que tem que
ir a uma Docstation carregar, quando deteta que não vai conseguir finalizar o serviço."""
from graphics import*
import random
import time
import math
import menu
n=0
class Balcao:
    """The counter (balcão): a brown rectangle where orders are prepared."""

    def __init__(self, win, ponto1, ponto2):  # Draw the counter
        self.ponto1 = ponto1
        self.ponto2 = ponto2
        self.balcao = Rectangle(ponto1, ponto2)
        self.balcao.setFill('brown')
        self.balcao.draw(win)
class Mesa:
    """Customer table with random shape (circle/rectangle), size and position."""

    def __init__(self):  # Table state
        self.centroX = []   # X coordinates of the table centre
        self.centroY = []   # Y coordinates of the table centre
        self.semilado = []  # radius / half-side lengths of the table

    def desenhar(self, win):
        """Randomly choose the table's shape, position and size, then draw it."""
        self.forma = random.randint(0, 1)  # 0 -> circular, 1 -> rectangular
        self.centroX.append(random.randint(30, 350))  # centre X in [30, 350]
        self.centroY.append(random.randint(30, 350))  # centre Y in [30, 350]
        # Two half-side values (width/height); the circle only uses [1].
        for i in range(2):
            self.semilado.append(random.randint(18, 40))
        if self.forma == 0:  # circular table
            self.mesa = Circle(Point(self.centroX[0], self.centroY[0]), self.semilado[1])
            self.mesa.setFill('tan')
            self.mesa.draw(win)
        elif self.forma == 1:  # rectangular table
            self.mesa = Rectangle(Point(self.centroX[0]-self.semilado[0], self.centroY[0]-self.semilado[1]),
                                  Point(self.centroX[0]+self.semilado[0], self.centroY[0]+self.semilado[1]))
            self.mesa.setFill('tan')
            self.mesa.draw(win)
class Robot:
    """Service robot: a black circle with a small battery-indicator dot.

    A service is two round trips to the table (take the order, deliver
    it). Before serving, the robot recharges at the Docstation when the
    distance counter shows the battery cannot cover a full service.
    """

    def __init__(self, win, centro, Robotraio):  # Draw the robot
        self.centro = centro
        self.Robotraio = Robotraio
        self.robot = Circle(centro, Robotraio)
        self.robot.setFill('black')
        self.robot.draw(win)
        self.contador = contador = 0  # distance counter (battery usage), starts at 0
        # battery indicator: small dot drawn in the middle of the robot
        self.bateria = Circle(centro, Robotraio/3)
        self.bateria.setFill('lime green')
        self.bateria.draw(win)

    def Carregar(self, lc, hc, cor, contador):
        """One charging-trip leg: 1000 steps of (lc, hc), battery tinted 'cor'."""
        self.bateria.setFill(cor)
        for i in range(1000):
            self.robot.move(lc, hc)
            self.bateria.move(lc, hc)
            update(200)  # cap the animation rate
            # NOTE(review): counted once per step here, giving a total of
            # 1000*(|lc|+|hc|) per leg, which matches Servico's post-loop
            # *1000 total -- confirm against the original indentation.
            self.contador = self.contador+math.fabs(lc)+math.fabs(hc)

    def Servico(self, lm, hm, contador):
        """One service leg: 1000 steps of (lm, hm) towards/away from the table."""
        for i in range(1000):
            self.robot.move(lm, hm)
            self.bateria.move(lm, hm)
            update(200)
        # Total distance of this leg (1000 steps of (lm, hm)).
        self.contador = self.contador+math.fabs(lm*1000)+math.fabs(hm*1000)

    def Deslocacao(self, Mesa):
        """Run a full service, recharging first if the battery would not last.

        3585 is the battery capacity in distance units; a full service is
        4 legs of length sqrt((dx*1000)^2 + (dy*1000)^2).
        """
        if self.contador+4*(math.sqrt((self.dx*1000)**2+(self.dy*1000)**2)) >= 3585:
            self.Carregar(-375/1000, 0, 'red', self.contador)  # red while heading to charge
            self.Carregar(0, 40/1000, 'red', self.contador)
            self.bateria.setFill('blue')  # blue while charging
            self.contador = 0
            time.sleep(2)
            self.Carregar(0, -40/1000, 'lime green', self.contador)  # back to original colour
            self.Carregar(375/1000, 0, 'lime green', self.contador)
        # Two round trips: take the order, then deliver it.
        for i in range(2):
            self.Servico(self.dx, self.dy, self.contador)
            time.sleep(2)
            self.Servico(-self.dx, -self.dy, self.contador)
            time.sleep(2)

    def Move(self, win, Mesa):
        """Click loop: a click on the table starts a service; the 'Voltar'
        button (top-right 50x50 square) closes the window and reopens the menu."""
        mesa = Mesa
        # Per-step displacement towards a point just below the table edge.
        self.dx = (mesa.centroX[0]-self.centro.getX())/1000
        self.dy = (mesa.centroY[0]-self.centro.getY()+mesa.semilado[1]+15)/1000
        while n == 0:
            self.posicao = win.getMouse()
            if mesa.forma == 1:  # rectangular: bounding-box hit test
                if mesa.centroX[0]-mesa.semilado[0] <= self.posicao.getX() <= mesa.centroX[0]+mesa.semilado[0] and \
                        mesa.centroY[0]-mesa.semilado[1] <= self.posicao.getY() <= mesa.centroY[0]+mesa.semilado[1]:
                    self.Deslocacao(Mesa)
            if mesa.forma == 0:  # circular: distance-to-centre hit test
                if math.sqrt((self.posicao.getX()-int(mesa.centroX[0]))**2+(self.posicao.getY()-int(mesa.centroY[0]))**2) <= int(mesa.semilado[1]):
                    self.Deslocacao(Mesa)
            if 450 <= self.posicao.getX() <= 500 and 0 <= self.posicao.getY() <= 50:  # 'Voltar' button
                win.close()
                menu.menu()
class Docstation:
    """Charging station: labelled red rectangle in a corner of the window."""

    def __init__(self, win, vertice):  # Draw the Docstation
        self.vertice = vertice
        self.docstation = Rectangle(Point(0, 500), vertice)
        self.docstation.setFill('red')
        self.docstation.draw(win)
        Text(Point(50, 485), "Docstation").draw(win)
class Voltar:
    """'Voltar' (back) button that returns the user to the main menu."""

    def __init__(self, win):  # Draw the back-to-menu button
        self.botao = Rectangle(Point(450, 0), Point(500, 50))
        self.botao.draw(win)
        Text(Point(475, 25), "Voltar").draw(win)
def terceiraA():
    """Set up the restaurant scene and start the robot's click loop."""
    win = GraphWin("Restaurante", 750, 750)
    win.setCoords(0, 0, 500, 500)  # logical 500x500 coordinate system
    balcaoObj = Balcao(win, Point(350, 440), Point(500, 500))
    docs = Docstation(win, Point(100, 450))
    mesaObj = Mesa()
    mesaObj.desenhar(win)
    Voltar(win)
    robotObj = Robot(win, Point(425, 425), 10)
    # Blocks here handling mouse clicks until the window is closed.
    robotObj.Move(win, mesaObj)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 14 15:31:37 2019
@author: daliana
"""
from keras.models import Model
from keras.layers import Input, concatenate, Convolution2D, MaxPooling2D, core, Conv2DTranspose
def Unet(nClasses = 3, input_width = 128 , input_height = 128 , nChannels = 16):
    """Build a compact 3-level U-Net for per-pixel classification.

    Parameters:
        nClasses: number of output classes (softmax over the channel axis).
        input_width, input_height: spatial size of the input patches.
        nChannels: number of channels of the input patches.
    Returns:
        An uncompiled Keras Model mapping (H, W, nChannels) -> (H, W, nClasses).

    The large commented-out 7-level variant that cluttered the original
    has been removed; recover it from version control if ever needed.
    """
    inputs = Input((input_height, input_width, nChannels))

    # ---- contracting path: three double-conv blocks, each 2x2-pooled ----
    conv1 = Convolution2D(32, (3, 3), activation='relu', padding='same')(inputs)
    conv1 = Convolution2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Convolution2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Convolution2D(128, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    # ---- bottleneck ----
    conv4 = Convolution2D(128, (3, 3), activation='relu', padding='same')(pool3)
    conv4 = Convolution2D(128, (3, 3), activation='relu', padding='same')(conv4)

    # ---- expansive path: upsample, concat the skip connection, double conv ----
    up5 = Conv2DTranspose(128, (3, 3), strides=(2, 2), padding='same')(conv4)
    merge5 = concatenate([conv3, up5], axis=3)
    conv13 = Convolution2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge5)
    conv13 = Convolution2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv13)

    up6 = Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same')(conv13)
    merge6 = concatenate([conv2, up6], axis=3)
    conv14 = Convolution2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
    conv14 = Convolution2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv14)

    up7 = Conv2DTranspose(32, (3, 3), strides=(2, 2), padding='same')(conv14)
    merge7 = concatenate([conv1, up7], axis=3)
    conv15 = Convolution2D(32, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    conv15 = Convolution2D(32, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv15)

    # 1x1 projection to nClasses channels, then per-pixel softmax.
    conv16 = Convolution2D(nClasses, (1, 1), activation='relu', padding='same')(conv15)
    conv17 = core.Activation('softmax')(conv16)

    model = Model(inputs, conv17)
    return model
if __name__ == "__main__":
    # Smoke test: build the model and print its layer summary.
    model = Unet()
    model.summary()
"""Handle Home Assistant requests."""
import logging
import os
from typing import Dict, Optional, Generator
import requests
_LOGGER = logging.getLogger(__name__)
class HomeAssistant:
    """Handle Home Assistant API requests (STT, conversation, TTS)."""

    def __init__(self):
        """Initialize Home Assistant API."""
        # Supervisor-proxied core API endpoint; the bearer token comes
        # from the add-on environment (HASSIO_TOKEN).
        self.url = "http://hassio/homeassistant/api"
        self.headers = {"Authorization": f"Bearer {os.environ.get('HASSIO_TOKEN')}"}

    def send_stt(
        self, data_gen: Generator[bytes, None, None]
    ) -> Optional[Dict[str, Optional[str]]]:
        """Send an audio stream to the STT handler.

        data_gen yields raw audio chunks; the header below declares them
        as 16 kHz / 16-bit / mono PCM WAV, en-US.
        Returns the parsed JSON response, or None on a non-200 status.
        """
        headers = {
            **self.headers,
            "X-Speech-Content": "format=wav; codec=pcm; sample_rate=16000; bit_rate=16; channel=1; language=en-US",
        }
        _LOGGER.info("Sending audio stream to Home Assistant STT")
        req = requests.post(f"{self.url}/stt/cloud", data=data_gen, headers=headers)
        if req.status_code != 200:
            return None
        return req.json()

    def send_conversation(self, text: str) -> Optional[Dict]:
        """Send conversation text to the API.

        Returns the parsed JSON response, or None on failure. (Annotation
        corrected: the body returns req.json() -- a dict, not a str.)
        """
        _LOGGER.info("Send text to Home Assistant conversation")
        req = requests.post(
            f"{self.url}/conversation/process",
            json={"text": text, "conversation_id": "ada"},
            headers=self.headers,
        )
        if req.status_code != 200:
            return None
        return req.json()

    def send_tts(self, text: str) -> Optional[Dict]:
        """Send a text for TTS.

        Returns the parsed JSON response (annotation corrected as above),
        or None on failure.
        """
        _LOGGER.info("Send text to Home Assistant TTS")
        req = requests.post(
            f"{self.url}/tts_get_url",
            json={"platform": "cloud", "message": text},
            headers=self.headers,
        )
        if req.status_code != 200:
            return None
        return req.json()
|
import tkinter
from tkinter import messagebox

# Root window for this one-button demo.
app = tkinter.Tk()


def Display():
    """Pop an information dialog when the button is clicked."""
    messagebox.showinfo("Hi Everone ", message="Need coffee urgently")


CLICK_BUTTON = tkinter.Button(app, text="HEY I AM A BUTTON", bg="red", command=Display)
CLICK_BUTTON.pack()

# Blocks until the window is closed.
app.mainloop()
|
# (C) Copyright 2005-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
from traits.api import Float, HasTraits, Trait
class Part(HasTraits):
    """A component exposing a single 'cost' trait (defaults to 0.0)."""

    cost = Trait(0.0)
class Widget(HasTraits):
    """Two Parts whose costs are summed into 'cost' via trait notification."""

    part1 = Trait(Part)
    part2 = Trait(Part)
    cost = Float(0.0)

    def __init__(self):
        # NOTE(review): HasTraits.__init__ is not invoked via super();
        # the example relies on trait defaults instead -- confirm before
        # reusing this pattern with keyword trait arguments.
        self.part1 = Part()
        self.part2 = Part()
        # Re-total whenever either part's cost changes.
        self.part1.on_trait_change(self.update_cost, "cost")
        self.part2.on_trait_change(self.update_cost, "cost")

    def update_cost(self):
        """Recompute the total cost from both parts."""
        self.cost = self.part1.cost + self.part2.cost
|
from flask_wtf import FlaskForm
from wtforms import SubmitField, RadioField
from flask_wtf.file import FileField, FileAllowed, FileRequired
class UploadForm(FlaskForm):
    """Image-upload form: required jpg/png file plus a musical 'base' choice."""

    upload = FileField('Image', validators=[
        FileRequired(),
        FileAllowed(['jpg', 'png'], 'Images only!')
    ])
    # (value, label) pairs rendered as radio buttons.
    base = RadioField('Base', choices=[('Piano', 'Piano'), ('Acoustic', 'Acoustic'), ("Electronic", "Pop/Electronic")])
    submit = SubmitField("Submit")
import os
import glob
import numpy as np
import lsst.afw.image as afwImage
from astropy.io import fits
import matplotlib.pyplot as plt
def blkavg(arr, x1, x2, y1, y2):
    """Average the window arr[x1:x2, y1:y2] along each row.

    Returns an (x2-x1, 1) column vector whose entries are the mean of the
    selected columns for every selected row.
    """
    window = arr[x1:x2, y1:y2]
    stacked = window.reshape((window.shape[0], -1, 1))
    return np.mean(stacked, axis=1)
def createFlat(flist):
    """Build a normalised special flat from 8 SOFI exposures.

    flist must hold 8 exposure paths ordered so that the pairs
    (0,7), (1,6), (2,5), (3,4) are median-combined into four frames --
    by the variable names presumably lamp-off, lamp-off+mask,
    lamp-on+mask and lamp-on (confirm against the observing template).
    A column-wise bias estimated from two strips is removed from the
    on and off frames; the flat is their difference divided by its
    median.
    """
    tempFlats = []
    for i in range(4):
        temp = []
        # Median-combine each symmetric pair (i, 7-i).
        tmp1 = afwImage.ExposureF(flist[0+i])
        tmp2 = afwImage.ExposureF(flist[7-i])
        temp.append(tmp1.getMaskedImage().getImage().getArray())
        temp.append(tmp2.getMaskedImage().getImage().getArray())
        tempFlats.append(np.median(temp, axis=0))
    tempOff = tempFlats[0]
    tempOffMask = tempFlats[1]
    tempOnMask = tempFlats[2]
    tempOn = tempFlats[3]
    # Lamp-on column bias: (lit strip - masked strip) + masked dark strip.
    tempOnA = blkavg(tempOn, 0, 1024, 500, 600)
    tempOnC = blkavg(tempOnMask, 0, 1024, 500, 600)
    tempOnB = blkavg(tempOnMask, 0, 1024, 50, 150)
    tempOnAC = tempOnA - tempOnC
    tempOnACB = tempOnAC + tempOnB
    # FIX: Python-2 print statement -> print() call (valid on 2 and 3).
    print(tempOnACB.shape)
    tempOn2D = np.repeat(tempOnACB, 1024, axis=1)
    tempOnBias = tempOn - tempOn2D
    # Same bias estimate for the lamp-off frame.
    tempOffA = blkavg(tempOff, 0, 1024, 500, 600)
    tempOffC = blkavg(tempOffMask, 0, 1024, 500, 600)
    tempOffB = blkavg(tempOffMask, 0, 1024, 50, 150)
    tempOffAC = tempOffA - tempOffC
    tempOffACB = tempOffAC + tempOffB
    tempOff2D = np.repeat(tempOffACB, 1024, axis=1)
    tempOffBias = tempOff - tempOff2D
    flat = tempOnBias - tempOffBias
    norm = np.median(flat)
    flat = flat/norm
    return flat
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Create a special flat for SOFI")
    parser.add_argument("--inputdir", default=".", help="Input directory")
    parser.add_argument("--outputdir", default=".", help="Output directory")
    parser.add_argument("--prefix", default="FLAT_", help="Prefix")
    args = parser.parse_args()
    inputdir = args.inputdir
    outputdir = args.outputdir
    filename = args.prefix + "*.fits"
    flist = glob.glob(os.path.join(inputdir, filename))
    # FIX: Python-2 print statement -> print() call (valid on 2 and 3).
    print(flist)
    specialFlat = createFlat(flist)
    hdu = fits.PrimaryHDU(specialFlat)
    fn = args.prefix + ".fits"
    # NOTE(review): 'clobber' was renamed 'overwrite' in astropy 1.3 and
    # later removed; switch once astropy >= 1.3 is guaranteed.
    hdu.writeto(os.path.join(outputdir, fn), clobber=True)
|
import requests
import time
from io import BytesIO
from PIL import Image
import os
import numpy as np
# 获取验证码的网址
CAPT_URL = "http://jwzx.usc.edu.cn/Core/verify_code.ashx"
# 验证码的保存路径
CAPT_PATH = "capt/"
if not os.path.exists(CAPT_PATH):
os.mkdir(CAPT_PATH)
# 将验证码转为灰度图时用到的"lookup table"
THRESHOLD = 165
LUT = [0]*THRESHOLD + [1]*(256 - THRESHOLD)
def capt_fetch():
    """Fetch a CAPTCHA from the site and return it as a PIL Image object."""
    # Download the raw CAPTCHA bytes
    capt_raw = requests.get(CAPT_URL)
    # Wrap the binary payload in an in-memory stream
    f = BytesIO(capt_raw.content)
    # Decode the stream into an Image object
    capt = Image.open(f)
    return capt
def capt_download():
    """Show a fresh CAPTCHA, ask the user to label it, and save it locally.

    The label is encoded in the filename: "<chars>_<ms-timestamp>.png",
    which train() later splits on the underscore.
    """
    capt = capt_fetch()
    capt.show()
    text = input("请输入验证码中的字符:")
    suffix = str(int(time.time() * 1e3))
    capt.save(CAPT_PATH + text + "_" + suffix + ".png")
def capt_process(capt):
    """Preprocess a CAPTCHA: binarise it and split it into 4 character tiles.

    :param capt: CAPTCHA Image object
    :return capt_per_char_list: list of four binary images, one character each
    """
    capt_gray = capt.convert("L")        # greyscale
    capt_bw = capt_gray.point(LUT, "1")  # threshold to a 1-bit image
    capt_per_char_list = []
    # Characters sit on a fixed 20-pixel grid, 24 pixels tall.
    for i in range(4):
        x = i * 20
        capt_per_char = capt_bw.crop((x, 0, x + 20, 24))
        capt_per_char_list.append(capt_per_char)
    return capt_per_char_list
def capt_inference(capt_per_char):
    """Extract a feature vector from a single-character binary image.

    :param capt_per_char: binary image (or array-like) of one character
    :return: list [total_pixels, per-column sums..., per-row sums...]
    """
    pixels = np.array(capt_per_char)
    per_column = pixels.sum(axis=0)
    per_row = pixels.sum(axis=1)
    total = pixels.sum()
    features = np.concatenate(([total], per_column, per_row))
    return features.tolist()
def train():
    """Build the character-feature training set from the labelled images.

    Reads every file in CAPT_PATH, whose names follow the
    "<chars>_<timestamp>.png" convention produced by capt_download().

    :require capt_process(): image preprocessing
    :require capt_inference(): feature extraction
    :return train_table: list of per-character feature vectors
    :return train_labels: the matching single-character labels
    """
    files = os.listdir(CAPT_PATH)
    train_table = []
    train_labels = []
    for f in files:
        # The label is everything before the first underscore,
        # one list element per character.
        train_labels += list(f.split("_")[0])
        capt = Image.open(CAPT_PATH + f)
        capt_per_char_list = capt_process(capt)
        for capt_per_char in capt_per_char_list:
            char_features = capt_inference(capt_per_char)
            train_table.append(char_features)
    return train_table, train_labels
def nnc(train_table, test_vec, train_labels):
    """Nearest-neighbour classification.

    Predicts the class of test_vec as the label of the closest row of
    train_table (squared Euclidean distance).

    :param train_table: pre-classified feature matrix
    :param test_vec: feature vector, same length as the matrix rows
    :param train_labels: class label of each matrix row
    :return: predicted class for test_vec
    """
    differences = np.subtract(train_table, test_vec)
    squared_distances = np.sum(np.square(differences), axis=1)
    nearest = np.argmin(squared_distances)
    return train_labels[nearest]
def test():
    """Fetch a live CAPTCHA and classify its four characters.

    Uses the module-level globals train_table / train_labels (created at
    the bottom of this module via train()); also saves the fetched image
    to test.png.

    :require capt_fetch(): download a CAPTCHA
    :require capt_process(): image preprocessing
    :require capt_inference(): feature extraction
    :return capt: the CAPTCHA Image object
    :return test_labels: the recognised 4-character string
    """
    test_labels = []
    capt = capt_fetch()
    capt_per_char_list = capt_process(capt)
    for capt_per_char in capt_per_char_list:
        char_features = capt_inference(capt_per_char)
        label = nnc(train_table, char_features, train_labels)
        test_labels.append(label)
    test_labels = "".join(test_labels)
    capt.save('test.png')
    return capt, test_labels
# Interactively download and label 200 CAPTCHA images.
# NOTE(review): this runs at import time and requires a human in the loop;
# guard with `if __name__ == '__main__':` if this module is ever imported.
for i in range(200):
    capt_download()
# Train the model and run one live recognition.
train_table, train_labels = train()
test_capt, test_labels = test()
|
import imutils
from imutils import paths
import face_recognition
import pickle
import cv2
import os, sys, inspect
import numpy as np
'''Encoding a new face'''
def new_face(img_path, name):
    """Encode the face(s) in img_path and store them under 'name'.

    Encodings persist in ./encodings.pickle as
    {"encodings": {name: [128-d encodings...]}}.
    """
    # Load existing encodings if the pickle exists, otherwise start fresh
    if os.path.exists('encodings.pickle'):
        data = pickle.loads(open('encodings.pickle', "rb").read())
        knownData = data['encodings']
    else:
        knownData = {}
    # Read the image and convert BGR (OpenCV) -> RGB (face_recognition)
    img = cv2.imread(img_path)
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    name = str(name)  # Identification name
    boxes = face_recognition.face_locations(rgb, model='hog')
    encoding = face_recognition.face_encodings(rgb, boxes)
    # NOTE(review): this stores the *list* of encodings (one per detected
    # face) under the name; identify() compares against that list.
    knownData[name] = encoding
    # Persist the updated encodings back to the pickle file
    data = {"encodings": knownData}
    f = open('encodings.pickle', "wb")
    f.write(pickle.dumps(data))
    f.close()
    return
'''Loss function to compare difference between new image and reference image'''
def compute_loss2(y_truth, y_est):
    """Mean squared difference between a reference encoding and an estimate."""
    squared_diff = np.power(y_truth - y_est, 2)
    return np.sum(squared_diff) / len(y_est)
'''Identifying someone from a given picture'''
def identify(img_path):
    """Identify the first face found in the image at img_path.

    Returns the matched person's name, "Unknown" when no stored encoding
    is close enough (loss > 0.17), or a message string when there are no
    stored faces / no face in the image.
    """
    if not os.path.exists('encodings.pickle'):
        return ("No faces stored in system")
    data = pickle.loads(open('encodings.pickle', "rb").read())
    knownData = data['encodings']
    img = cv2.imread(img_path)
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # BUG FIX: resize the RGB-converted frame. The original resized the
    # raw BGR image into `rgb`, silently discarding the colour conversion
    # done on the previous line.
    rgb = imutils.resize(rgb, width=750)
    r = img.shape[1] / float(rgb.shape[1])
    # detect the (x, y)-coordinates of the bounding boxes
    # corresponding to each face in the input frame, then compute
    # the facial embeddings for each face
    boxes = face_recognition.face_locations(rgb, model='hog')
    encodings = face_recognition.face_encodings(rgb, boxes)
    names = []
    scores = []
    # loop over the facial encodings
    for encoding in encodings:
        # attempt to match each face against every stored encoding
        minDist = 100
        # Guard: keep `identity` bound even when knownData is empty
        # (the original raised NameError at the print below in that case).
        identity = "Unknown"
        for person in knownData:
            value = compute_loss2(encoding, knownData[person])
            if value < minDist:
                minDist = value
                identity = person
        print(identity, minDist)
        if minDist > 0.17:
            identity = "Unknown"
        # update the list of names
        names.append(identity)
        # BUG FIX: '==' instead of 'is' -- identity comparison against a
        # string literal is implementation-dependent in CPython.
        if identity == 'Unknown':
            scores.append(minDist)
        else:
            scores.append(float(minDist))
    # Guard: the original indexed names[0] unconditionally (IndexError
    # when no face was detected).
    if not names:
        return "No face found in image"
    # currently only returns identity of one person per image
    return (names[0])
|
import os, inspect, glob, time
import numpy as np
PACK_PATH = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))+"/.."
class DataSet(object):
    """Loader for the .npy sequence dataset under ``<PACK_PATH>/dataset``.

    Each subdirectory is one record; every ``*.npy`` file inside it is a
    single time-step vector.  ``next_batch`` assembles sliding-window
    sequences, either round-robin over the training keys or for one
    explicit key (evaluation mode).
    """

    def __init__(self, key_tr):
        # key_tr: list of subdirectory names used as the training set.
        print("\n** Prepare the Dataset")
        self.data_path = os.path.join(PACK_PATH, "dataset")
        # Markers embedded in file names that flag anomalous time steps.
        self.symbols = ['_V_', '_F_', '_O_', '_!_', '_e_', '_j_', '_E_', '_P_', '_f_', '_p_', '_Q_']
        self.subdir = glob.glob(os.path.join(self.data_path, "*"))
        self.subdir.sort() # sorting the subdir list is optional.
        for idx, sd in enumerate(self.subdir):
            # Keep only the directory name, dropping the leading path.
            self.subdir[idx] = sd.split('/')[-1]
        # List
        self.key_tr = key_tr
        self.key_tot = self.subdir
        # Dictionary: key -> sorted list of that key's .npy file paths.
        self.list_total = {}
        for ktot in self.key_tot:
            self.list_total["%s" %(ktot)] = glob.glob(os.path.join(self.data_path, "%s" %(ktot), "*.npy"))
            self.list_total["%s" %(ktot)].sort() # Must be sorted.
        # Information of dataset
        self.am_tot = len(self.subdir)
        self.am_tr = len(self.key_tr)
        # NOTE(review): assumes every record has the same file count and every
        # vector the same dimension as the first one — confirm for new data.
        self.am_each = len(self.list_total[self.subdir[0]])
        self.data_dim = np.load(self.list_total[self.subdir[0]][0]).shape[0]
        # Cursors for batch generation (kidx = key index, didx = data index).
        self.kidx_tr = 0
        self.kidx_tot = 0
        self.didx_tr = 0
        self.didx_tot = 0
        print("Total Record : %d" %(self.am_tot))
        print("Trining Set : %d" %(self.am_tr))
        print("Each record was parsed to %d sequence." %(self.am_each))
        print("Each data has %d dimension." %(self.data_dim))

    def next_batch(self, batch_size, sequence_length, v_key=None):
        """Return a batch of shape (batch_size, sequence_length, data_dim).

        v_key is None -> training mode: round-robin over ``key_tr``; returns
            just the batch array.
        v_key given   -> evaluation mode for that key; returns
            (batch, list of source file names), or (None, None) once the key
            is exhausted.
        """
        data_bat = np.zeros((0, sequence_length, self.data_dim), float)
        data_bunch = np.zeros((0, self.data_dim), float)
        if(v_key is None): # training batch
            index_bank = self.didx_tr
            while(True): # collect mini batch set
                while(True): # collect sequence set
                    list_from_key = self.list_total[self.key_tr[self.kidx_tr]]
                    np_data = np.load(list_from_key[self.didx_tr])
                    self.didx_tr = self.didx_tr + 1
                    if(self.didx_tr > (self.am_each - sequence_length)):
                        # End of this key: move to the next key and restart
                        # the partially collected sequence from scratch.
                        self.kidx_tr = (self.kidx_tr + 1) % (self.am_tr)
                        self.didx_tr = 0
                        data_bunch = np.zeros((0, self.data_dim), float)
                    if(data_bunch.shape[0] >= sequence_length): # break the loop when sequences are collected
                        break
                    data_tmp = np_data.reshape((1, self.data_dim))
                    data_bunch = np.append(data_bunch, data_tmp, axis=0)
                data_tmp = data_bunch.reshape((1, sequence_length, self.data_dim))
                data_bat = np.append(data_bat, data_tmp, axis=0)
                if(data_bat.shape[0] >= batch_size): # break the loop when mini batch is collected
                    break
            # Rewind: the next call starts one step after this call's start,
            # producing overlapping stride-1 windows.
            self.didx_tr = (index_bank + 1) % (self.am_each - sequence_length + 1)
            return np.nan_to_num(data_bat) # replace nan to zero using np.nan_to_num
        else: # Usually used with 1 of the batch size.
            list_seqname = [] # it used for confirm the anomaly.
            index_bank = self.didx_tot
            # cnt_ansymb tallies anomaly-marked files but is not returned.
            cnt_ansymb = 0
            while(True): # collect sequence set
                list_from_key = self.list_total[v_key]
                list_seqname.append(list_from_key[self.didx_tot])
                if(data_bunch.shape[0] >= sequence_length): # break the loop when sequences are collected
                    break
                for symb in self.symbols: # count anomaly symbol (maximum 1 at one file).
                    if(symb in list_from_key[self.didx_tot]):
                        cnt_ansymb += 1
                        break
                np_data = np.load(list_from_key[self.didx_tot])
                self.didx_tot = self.didx_tot + 1
                if(self.didx_tot > (self.am_each - sequence_length)):
                    print("Cannot make bunch anymore. (length %d at %d)" %(sequence_length, self.didx_tot))
                    self.didx_tot = 0
                    return None, None
                data_tmp = np_data.reshape((1, self.data_dim))
                data_bunch = np.append(data_bunch, data_tmp, axis=0)
            data_tmp = data_bunch.reshape((1, sequence_length, self.data_dim))
            data_bat = np.append(data_bat, data_tmp, axis=0)
            self.didx_tot = (index_bank + 1) % (self.am_each - sequence_length + 1)
            return np.nan_to_num(data_bat), list_seqname # replace nan to zero using np.nan_to_num
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for C{setup.py}, Twisted's distutils integration file.
"""
from __future__ import division, absolute_import
import os, sys
import twisted
from twisted.trial.unittest import SynchronousTestCase
from twisted.python.filepath import FilePath
from twisted.python.dist import getExtensions
# Get rid of the UTF-8 encoding and bytes topfiles segment when FilePath
# supports unicode. #2366, #4736, #5203. Also #4743, which requires checking
# setup.py, not just the topfiles directory.
# Skip the source-only tests unless setup.py sits next to the topfiles
# directory, i.e. we are running from a source checkout rather than an
# installed Twisted.
if not FilePath(twisted.__file__.encode('utf-8')).sibling(b'topfiles').child(b'setup.py').exists():
    sourceSkip = "Only applies to source checkout of Twisted"
else:
    sourceSkip = None
class TwistedExtensionsTests(SynchronousTestCase):
    """
    Tests for the extension definitions reported by
    L{twisted.python.dist.getExtensions}.
    """
    if sourceSkip is not None:
        skip = sourceSkip

    def setUp(self):
        """
        Change the working directory to the parent of the C{twisted} package so
        that L{twisted.python.dist.getExtensions} finds Twisted's own extension
        definitions.
        """
        self.addCleanup(os.chdir, os.getcwd())
        self.addCleanup
        os.chdir(FilePath(twisted.__file__).parent().parent().path)

    def test_initgroups(self):
        """
        If C{os.initgroups} is present (Python 2.7 and Python 3.3 and newer),
        L{twisted.python._initgroups} is not returned as an extension to build
        from L{getExtensions}.
        """
        target = "twisted.python._initgroups"
        found = None
        for candidate in getExtensions():
            if candidate.name == target:
                found = candidate
        if sys.version_info[:2] >= (2, 7):
            self.assertIdentical(
                None, found,
                "Should not have found twisted.python._initgroups extension "
                "definition.")
        else:
            self.assertNotIdentical(
                None, found,
                "Should have found twisted.python._initgroups extension "
                "definition.")
|
from django.shortcuts import render, redirect
from django.contrib import messages
from .models import Contact
from .forms import ContactForm
from cart.models import Order
def refund_request(request):
    """Handle a refund/contact form submission.

    On a valid POST with a matching completed order, store a Contact record,
    flag the order as refund-requested and redirect home; otherwise redirect
    back to the contact page with a message.  On GET, render the contact page.
    """
    if request.method == 'POST':
        form = ContactForm(request.POST or None)
        if form.is_valid():
            email = request.POST.get("email")
            order_code = request.POST.get("order_code")
            subject = request.POST.get("subject")
            message = request.POST.get("message")
            # FIX: one query instead of the original identical
            # .filter(...).exists() followed by .get(...).
            order = Order.objects.filter(
                user=request.user, ordered=True, code=order_code).first()
            if order is not None:
                contact = Contact(
                    user=request.user,
                    email=email,
                    order_code=order_code,
                    subject=subject,
                    message=message
                )
                contact.save()
                order.refund_requested = True
                order.save()
                # FIX: corrected grammar of the user-facing message.
                messages.info(request, 'thanks for contacting us, we will respond soon')
                return redirect("/")
            else:
                messages.info(request, 'we dont have any orders with this code')
                return redirect("contact:contact")
        else:
            # FIX: corrected "faild" typo in the user-facing message.
            messages.info(request, 'failed, try again')
            return redirect("contact:contact")
    else:
        form = ContactForm()
    template_name = 'contact/contact.html'
    # BUGFIX: expose the form to the template (context was empty).
    context = {'form': form}
    return render(request, template_name, context)
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select

link = "http://suninjuly.github.io/selects1.html"
browser = webdriver.Chrome()
browser.get(link)

# Locate the elements holding the two operands.
# FIX: the find_element_by_* helpers are deprecated and removed in
# Selenium 4 — use the By-based locator API instead.
num1_in_text = browser.find_element(By.ID, "num1")
num2_in_text = browser.find_element(By.ID, "num2")

# Extract the given numbers.
num1 = int(num1_in_text.text)
num2 = int(num2_in_text.text)

# Select the option whose value equals the sum of the two numbers.
select = Select(browser.find_element(By.CLASS_NAME, "custom-select"))
select.select_by_value(str(num1 + num2))

# Submit the form.
send_button = browser.find_element(By.CLASS_NAME, "btn-default")
send_button.click()
|
# http://www.codechef.com/problems/GCD2
# Using Python here isn't cheating right? ;)
def gcd(a, b):
    """Greatest common divisor of *a* and *b* (iterative Euclidean algorithm)."""
    while b:
        a, b = b, a % b
    return a
# Driver loop (Python 2: raw_input / print statement).
# Reads the test count T, then T lines of "a b", printing gcd(a, b) for each.
t = int(raw_input())
while t:
    line = raw_input().split()
    a = int(line[0])
    b = int(line[1])
    print gcd(a, b)
    t -= 1
from setuptools import setup

try:
    from jupyterpip import cmdclass
except ImportError:
    # BUGFIX: catch ImportError specifically instead of a bare `except:`
    # (which also swallowed KeyboardInterrupt/SystemExit), and bootstrap
    # jupyter-pip before retrying the import.
    import importlib
    import pip
    pip.main(['install', 'jupyter-pip'])
    cmdclass = importlib.import_module('jupyterpip').cmdclass

setup(
    name='nbjsmol',
    packages=['nbjsmol'],
    # ... more setup.py stuff here ...
    install_requires=["jupyter-pip"],
    cmdclass=cmdclass('nbjsmol', 'nbjsmol/main'),
)
import warnings
from copy import deepcopy
from dataclasses import make_dataclass, field
from typing import Type, Dict, List, Tuple, TypeVar, NewType, Union
import pydantic
from pydantic import BaseModel, create_model, root_validator, BaseConfig
from pydantic.dataclasses import dataclass as pydantic_dataclass
from sqlalchemy import inspect, PrimaryKeyConstraint
from sqlalchemy.orm import ColumnProperty
from .exceptions import MultipleSingleUniqueNotSupportedException, \
SchemaException, \
CompositePrimaryKeyConstraintNotSupportedException, \
MultiplePrimaryKeyNotSupportedException, \
ColumnTypeNotSupportedException, \
UnknownError, PrimaryMissing
import uuid
from typing import Optional
from fastapi import Body, Query
from sqlalchemy import UniqueConstraint
from .type import MatchingPatternInString, \
RangeFromComparisonOperators, \
Ordering, \
RangeToComparisonOperators, \
ExtraFieldTypePrefix, \
ExtraFieldType, \
ItemComparisonOperators
# Type variable bound to pydantic models, used by the model-cloning helpers.
BaseModelT = TypeVar('BaseModelT', bound=BaseModel)


class OrmConfig(BaseConfig):
    """Pydantic config enabling ``orm_mode`` (build models from ORM rows)."""
    orm_mode = True
def _uuid_to_str(value, values):
if value is not None:
return str(value)
def _add_orm_model_config_into_pydantic_model(pydantic_model):
    """Clone *pydantic_model* with ``OrmConfig`` so the copy supports orm_mode.

    Field types, defaults and validators are carried over unchanged.
    """
    fields = {
        field_name: (model_field.outer_type_, model_field.field_info.default)
        for field_name, model_field in pydantic_model.__fields__.items()
    }
    return create_model(
        f'{pydantic_model.__name__}WithValidators',
        __config__=OrmConfig,
        __validators__=pydantic_model.__validators__,
        **fields,
    )
def _add_validators(model: Type[BaseModelT], validators) -> Type[BaseModelT]:
    """Clone *model* with the given pydantic validators attached.

    Field types, defaults and the model's Config are preserved; only the
    validator set changes.
    """
    fields = {
        field_name: (model_field.outer_type_, model_field.field_info.default)
        for field_name, model_field in model.__fields__.items()
    }
    return create_model(
        f'{model.__name__}WithValidators',
        __config__=model.Config,
        __validators__={**validators},
        **fields,
    )
def _model_from_dataclass(kls: 'StdlibDataclass') -> Type[BaseModel]:
    """Convert a stdlib dataclass into the equivalent pydantic ``BaseModel``.

    Wraps *kls* with pydantic's dataclass decorator and returns the
    generated ``__pydantic_model__``.
    """
    return pydantic_dataclass(kls).__pydantic_model__
def _filter_out_none(_, values):
values_ = deepcopy(values)
for k, v in values_.items():
if v is None:
values.pop(k)
return values
def _original_data_to_alias(alias_name_dict):
def core(_, values):
for original_name, alias_name in alias_name_dict.items():
if original_name in values:
values[alias_name] = values.pop(original_name)
return values
return core
def _to_require_but_default(model: Type[BaseModelT]) -> Type[BaseModelT]:
    """Clone *model* keeping every field required but carrying its default.

    Field types and default values are copied as-is from *model*.
    """
    cloned_fields = {
        field_name: (model_field.outer_type_, model_field.field_info.default)
        for field_name, model_field in model.__fields__.items()
    }
    return create_model(f'RequireButDefault{model.__name__}',
                        __config__=model.Config,
                        **cloned_fields)  # type: ignore[arg-type]
def _filter_none(request_or_response_object):
received_request = deepcopy(request_or_response_object.__dict__)
if 'insert' in received_request:
insert_item_without_null = []
for received_insert in received_request['insert']:
received_insert_ = deepcopy(received_insert)
for received_insert_item, received_insert_value in received_insert_.__dict__.items():
if hasattr(received_insert_value, '__module__'):
if received_insert_value.__module__ == 'fastapi.params' or received_insert_value is None:
delattr(received_insert, received_insert_item)
elif received_insert_value is None:
delattr(received_insert, received_insert_item)
insert_item_without_null.append(received_insert)
setattr(request_or_response_object, 'insert', insert_item_without_null)
else:
for name, value in received_request.items():
if hasattr(value, '__module__'):
if value.__module__ == 'fastapi.params' or value is None:
delattr(request_or_response_object, name)
elif value is None:
delattr(request_or_response_object, name)
class ApiParameterSchemaBuilder:
    """Build pydantic/dataclass request and response schemas for CRUD
    endpoints from a SQLAlchemy declarative model."""
    # Column types refused outright.
    unsupported_data_types = ["BLOB"]
    # Column types accepted for CRUD but excluded from query parameters.
    partial_supported_data_types = ["INTERVAL", "JSON", "JSONB"]
    def __init__(self, db_model: Type, exclude_column=None):
        """Inspect *db_model* and precompute field/alias/primary-key metadata.

        db_model: SQLAlchemy declarative model class.
        exclude_column: column names to omit from the generated schemas.
        """
        if exclude_column is None:
            self._exclude_column = []
        else:
            self._exclude_column = exclude_column
        self.__db_model = db_model
        # Mapping of original column name -> alias (Column.info['alias_name']).
        self.alias_mapper: Dict[str, str] = self._alias_mapping_builder()
        self.primary_key_str, self._primary_key_dataclass_model, self._primary_key_field_definition = self._extract_primary()
        self.unique_fields: List[str] = self._extract_unique()
        # Per-python-type column buckets; populated by _extract_all_field().
        self.uuid_type_columns = []
        self.str_type_columns = []
        self.number_type_columns = []
        self.datetime_type_columns = []
        self.timedelta_type_columns = []
        self.bool_type_columns = []
        self.json_type_columns = []
        self.array_type_columns = []
        self.all_field = self._extract_all_field()
def _alias_mapping_builder(self) -> Dict[str, str]:
# extract all field and check the alias_name in info and build a mapping
# return dictionary
# key: original name
# value : alias name
alias_mapping = {}
mapper = inspect(self.__db_model)
for attr in mapper.attrs:
if isinstance(attr, ColumnProperty):
if attr.columns:
column, = attr.columns
if 'alias_name' in column.info:
name = column.info['alias_name']
alias_mapping[attr.key] = name
return alias_mapping
    def _extract_unique(self) -> List[str]:
        """Return unique-constrained column names, alias-mapped.

        Sources:
          * composite ``UniqueConstraint`` entries in ``__table_args__``
          * per-column ``unique=True`` flags

        Raises:
            SchemaException: single uniques and a composite constraint are
                mixed inconsistently.
            MultipleSingleUniqueNotSupportedException: more than one column
                declares ``unique=True`` without a composite constraint.
        """
        unique_column_list = []
        composite_unique_constraint = []
        # Collect columns named by composite UniqueConstraint declarations.
        if hasattr(self.__db_model, '__table_args__'):
            for constraints in self.__db_model.__table_args__:
                if isinstance(constraints, UniqueConstraint):
                    for constraint in constraints:
                        column_name = constraint.key
                        if column_name in self.alias_mapper:
                            unique_column_name = self.alias_mapper[column_name]
                        else:
                            unique_column_name = column_name
                        composite_unique_constraint.append(unique_column_name)
        # Collect columns flagged unique=True directly on the column.
        mapper = inspect(self.__db_model)
        for attr in mapper.attrs:
            if isinstance(attr, ColumnProperty):
                if attr.columns:
                    column, = attr.columns
                    if column.unique:
                        column_name = attr.key
                        if column_name in self.alias_mapper:
                            unique_column_name = self.alias_mapper[column_name]
                        else:
                            unique_column_name = column_name
                        unique_column_list.append(unique_column_name)
        if unique_column_list and composite_unique_constraint:
            # Mixing is only tolerated when every single unique also appears
            # inside the composite constraint.
            invalid = set(unique_column_list) - set(composite_unique_constraint)
            if invalid:
                raise SchemaException("Use single unique constraint and composite unique constraint "
                                      "at same time is not supported ")
        if len(unique_column_list) > 1 and not composite_unique_constraint:
            raise MultipleSingleUniqueNotSupportedException(
                " In case you need composite unique constraint, "
                "FastAPi CRUD builder is not support to define multiple unique=True "
                "but specifying UniqueConstraint(…) in __table_args__."
                f'''
                __table_args__ = (
                    UniqueConstraint({''.join(unique_column_list)}),
                )
                ''')
        return unique_column_list or composite_unique_constraint
    def _extract_primary(self) -> Tuple[str, Type, Tuple]:
        """Extract the model's single primary-key column.

        Returns a 3-tuple (the original ``-> str`` annotation was wrong):
        (alias-mapped primary key name, dataclass model wrapping the key as a
        query parameter, (name, type, default) field definition).

        Raises:
            CompositePrimaryKeyConstraintNotSupportedException: table-level
                PrimaryKeyConstraint present.
            MultiplePrimaryKeyNotSupportedException: more than one PK column.
            ColumnTypeNotSupportedException: PK type cannot be mapped.
            PrimaryMissing: no primary key found.
        """
        primary_columns_model = None
        primary_field_definitions = {}
        primary_column_name = None
        primary = False
        if hasattr(self.__db_model, '__table_args__'):
            for constraints in self.__db_model.__table_args__:
                if isinstance(constraints, PrimaryKeyConstraint):
                    raise CompositePrimaryKeyConstraintNotSupportedException(
                        'Primary Key Constraint not supported')
        mapper = inspect(self.__db_model)
        for attr in mapper.attrs:
            if isinstance(attr, ColumnProperty):
                if attr.columns:
                    column, = attr.columns
                    if column.primary_key:
                        if not primary:
                            column_type = str(column.type)
                            try:
                                python_type = column.type.python_type
                                if column_type in self.unsupported_data_types:
                                    raise ColumnTypeNotSupportedException(
                                        f'The type of column {attr.key} ({column_type}) not supported yet')
                                if column_type in self.partial_supported_data_types:
                                    warnings.warn(
                                        f'The type of column {attr.key} ({column_type}) '
                                        f'is not support data query (as a query parameters )')
                            except NotImplementedError:
                                # Types with no python_type mapping: only UUID is recognised.
                                if column_type == "UUID":
                                    python_type = uuid.UUID
                                else:
                                    raise ColumnTypeNotSupportedException(
                                        f'The type of column {attr.key} ({column_type}) not supported yet')
                            # handle if python type is UUID
                            if python_type.__name__ in ['str',
                                                        'int',
                                                        'float',
                                                        'Decimal',
                                                        'UUID',
                                                        'bool',
                                                        'date',
                                                        'time',
                                                        'datetime']:
                                column_type = python_type
                            else:
                                raise ColumnTypeNotSupportedException(
                                    f'The type of column {attr.key} ({column_type}) not supported yet')
                            default = self._extra_default_value(column)
                            if default is ...:
                                warnings.warn(
                                    f'The column of {attr.key} has not default value '
                                    f'and it is not nullable but in exclude_list'
                                    f'it may throw error when you write data through Fast-qucikcrud greated API')
                            if attr.key in self._exclude_column:
                                continue
                            column_name = attr.key
                            if column_name in self.alias_mapper:
                                primary_column_name = self.alias_mapper[column_name]
                            else:
                                primary_column_name = attr.key
                            primary_field_definitions = (primary_column_name, column_type, default)
                            # Wrap the key as a dataclass whose __post_init__
                            # stringifies any UUID values.
                            primary_columns_model = make_dataclass('PrimaryKeyModel',
                                                                   [(primary_field_definitions[0],
                                                                     primary_field_definitions[1],
                                                                     Query(primary_field_definitions[2]))],
                                                                   namespace={
                                                                       '__post_init__': lambda
                                                                           self_object: self._value_of_list_to_str(
                                                                           self_object,
                                                                           self.uuid_type_columns)
                                                                   })
                            primary = True
                        else:
                            raise MultiplePrimaryKeyNotSupportedException(
                                f'multiple primary key not supported; {str(mapper.mapped_table)} ')
        if not primary_column_name:
            raise PrimaryMissing("Primary key is required")
        assert primary_column_name and primary_columns_model and primary_field_definitions
        return primary_column_name, primary_columns_model, primary_field_definitions
    @staticmethod
    def _value_of_list_to_str(request_or_response_object, columns):
        """Stringify values (or lists of values) of the given columns, in place.

        Used as a __post_init__ hook so UUID fields are serialised as str.
        ``columns`` may be a single name or a list; matching is by substring
        of the attribute name.
        """
        received_request = deepcopy(request_or_response_object.__dict__)
        if isinstance(columns, str):
            columns = [columns]
        if 'insert' in request_or_response_object.__dict__:
            # Bulk-insert payload: convert inside every inserted item.
            insert_str_list = []
            for insert_item in request_or_response_object.__dict__['insert']:
                for column in columns:
                    for insert_item_column, _ in insert_item.__dict__.items():
                        if column in insert_item_column:
                            value_ = insert_item.__dict__[insert_item_column]
                            if value_ is not None:
                                if isinstance(value_, list):
                                    str_value_ = [str(i) for i in value_]
                                else:
                                    str_value_ = str(value_)
                                setattr(insert_item, insert_item_column, str_value_)
                insert_str_list.append(insert_item)
            setattr(request_or_response_object, 'insert', insert_str_list)
        else:
            # Flat payload: convert matching top-level attributes.
            for column in columns:
                for received_column_name, received_column_value in received_request.items():
                    if column in received_column_name:
                        value_ = received_request[received_column_name]
                        if value_ is not None:
                            if isinstance(value_, list):
                                str_value_ = [str(i) for i in value_]
                            else:
                                str_value_ = str(value_)
                            setattr(request_or_response_object, received_column_name, str_value_)
    @staticmethod
    def _get_many_string_matching_patterns_description_builder():
        """Return the OpenAPI description HTML for string matching-pattern params."""
        return '''<br >Composite string field matching pattern<h5/>
                   <br /> Allow to select more than one pattern for string query
                   <br /> <a> https://www.postgresql.org/docs/9.3/functions-matching.html <a/>'''
    @staticmethod
    def _get_many_order_by_columns_description_builder(all_columns, regex_validation, primary_name):
        """Return the OpenAPI description HTML for the order_by_columns param."""
        return f'''<br> support column:
            <br> {all_columns} <hr><br> support ordering:
            <br> {list(map(str, Ordering))}
            <hr>
            <br> field input validation regex
            <br> {regex_validation}
            <hr>
            <br />example:
            <br />&emsp;&emsp;{primary_name}:ASC
            <br />&emsp;&emsp;{primary_name}: DESC
            <br />&emsp;&emsp;{primary_name}    :    DESC
            <br />&emsp;&emsp;{primary_name} (default sort by ASC)'''
@staticmethod
def _extra_default_value(column):
if not column.nullable:
if column.default is not None:
default = column.default.arg
elif column.server_default is not None:
default = None
elif column.primary_key:
default = None
else:
default = ...
else:
if column.default is not None:
default = column.default.arg
else:
default = None
return default
    def _extract_all_field(self) -> List[dict]:
        """Build [{'column_name', 'column_type', 'column_default'}, ...] for
        every non-excluded column, and bucket each name into the per-type
        lists (str/uuid/number/datetime/...) used for query-param building.

        Raises:
            ColumnTypeNotSupportedException: for unsupported column types.
        """
        fields: List[dict] = []
        mapper = inspect(self.__db_model)
        for attr in mapper.attrs:
            if isinstance(attr, ColumnProperty):
                if attr.columns:
                    column, = attr.columns
                    default = self._extra_default_value(column)
                    if attr.key in self._exclude_column:
                        continue
                    column_name = attr.key
                    if column_name in self.alias_mapper:
                        column_name = self.alias_mapper[column_name]
                    column_type = str(column.type)
                    try:
                        python_type = column.type.python_type
                        if column_type in self.unsupported_data_types:
                            raise ColumnTypeNotSupportedException(
                                f'The type of column {attr.key} ({column_type}) not supported yet')
                        if column_type in self.partial_supported_data_types:
                            warnings.warn(
                                f'The type of column {attr.key} ({column_type}) '
                                f'is not support data query (as a query parameters )')
                    except NotImplementedError:
                        # Types without python_type: only UUID is recognised.
                        if column_type == "UUID":
                            python_type = uuid.UUID
                        else:
                            raise ColumnTypeNotSupportedException(
                                f'The type of column {attr.key} ({column_type}) not supported yet')
                    # string filter
                    if python_type.__name__ in ['str']:
                        self.str_type_columns.append(column_name)
                    # uuid filter
                    elif python_type.__name__ in ['UUID']:
                        self.uuid_type_columns.append(column_name)
                    # number filter
                    elif python_type.__name__ in ['int', 'float', 'Decimal']:
                        self.number_type_columns.append(column_name)
                    # date filter
                    elif python_type.__name__ in ['date', 'time', 'datetime']:
                        self.datetime_type_columns.append(column_name)
                    # timedelta filter
                    elif python_type.__name__ in ['timedelta']:
                        self.timedelta_type_columns.append(column_name)
                    # bool filter
                    elif python_type.__name__ in ['bool']:
                        self.bool_type_columns.append(column_name)
                    # json filter
                    elif python_type.__name__ in ['dict']:
                        self.json_type_columns.append(column_name)
                    # array filter
                    elif python_type.__name__ in ['list']:
                        self.array_type_columns.append(column_name)
                        base_column_detail, = column.base_columns
                        if hasattr(base_column_detail.type, 'item_type'):
                            # Array columns expose their element type.
                            item_type = base_column_detail.type.item_type.python_type
                            fields.append({'column_name': column_name,
                                           'column_type': List[item_type],
                                           'column_default': default})
                            continue
                    else:
                        raise ColumnTypeNotSupportedException(
                            f'The type of column {attr.key} ({column_type}) not supported yet')
                    if column_type == "JSONB":
                        # JSONB accepts either an object or an array payload.
                        fields.append({'column_name': column_name,
                                       'column_type': Union[python_type, list],
                                       'column_default': default})
                    else:
                        fields.append({'column_name': column_name,
                                       'column_type': python_type,
                                       'column_default': default})
        return fields
@staticmethod
def _assign_str_matching_pattern(field_of_param: dict, result_: List[dict]) -> List[dict]:
for i in [
{'column_name': field_of_param['column_name'] + ExtraFieldTypePrefix.Str + ExtraFieldType.Matching_pattern,
'column_type': Optional[List[MatchingPatternInString]],
'column_default': [MatchingPatternInString.case_sensitive]},
{'column_name': field_of_param['column_name'] + ExtraFieldTypePrefix.Str,
'column_type': Optional[List[field_of_param['column_type']]],
'column_default': None}
]:
result_.append(i)
return result_
@staticmethod
def _assign_list_comparison(field_of_param, result_: List[dict]) -> List[dict]:
for i in [
{
'column_name': field_of_param[
'column_name'] + f'{ExtraFieldTypePrefix.List}{ExtraFieldType.Comparison_operator}',
'column_type': Optional[ItemComparisonOperators],
'column_default': ItemComparisonOperators.In},
{'column_name': field_of_param['column_name'] + ExtraFieldTypePrefix.List,
'column_type': Optional[List[field_of_param['column_type']]],
'column_default': None}
]:
result_.append(i)
return result_
@staticmethod
def _assign_range_comparison(field_of_param, result_: List[dict]) -> List[dict]:
for i in [
{'column_name': field_of_param[
'column_name'] + f'{ExtraFieldTypePrefix.From}{ExtraFieldType.Comparison_operator}',
'column_type': Optional[RangeFromComparisonOperators],
'column_default': RangeFromComparisonOperators.Greater_than_or_equal_to},
{'column_name': field_of_param[
'column_name'] + f'{ExtraFieldTypePrefix.To}{ExtraFieldType.Comparison_operator}',
'column_type': Optional[RangeToComparisonOperators],
'column_default': RangeToComparisonOperators.Less_than.Less_than_or_equal_to},
]:
result_.append(i)
for i in [
{'column_name': field_of_param['column_name'] + ExtraFieldTypePrefix.From,
'column_type': Optional[NewType(ExtraFieldTypePrefix.From, field_of_param['column_type'])],
'column_default': None},
{'column_name': field_of_param['column_name'] + ExtraFieldTypePrefix.To,
'column_type': Optional[NewType(ExtraFieldTypePrefix.To, field_of_param['column_type'])],
'column_default': None}
]:
result_.append(i)
return result_
    def _get_fizzy_query_param(self, exclude_column: List[str] = None) -> List[dict]:
        """Expand every non-excluded field into fuzzy query-param descriptors
        (string matching, list membership, range comparison) according to the
        field's type bucket.
        """
        if not exclude_column:
            exclude_column = []
        # NOTE: self.all_field is a list of dicts (annotation corrected).
        fields_: List[dict] = deepcopy(self.all_field)
        result = []
        for field_ in fields_:
            if field_['column_name'] in exclude_column:
                continue
            # Query parameters are always optional.
            field_['column_default'] = None
            if field_['column_name'] in self.str_type_columns:
                result = self._assign_str_matching_pattern(field_, result)
                result = self._assign_list_comparison(field_, result)
            elif field_['column_name'] in self.uuid_type_columns or \
                    field_['column_name'] in self.bool_type_columns:
                result = self._assign_list_comparison(field_, result)
            elif field_['column_name'] in self.number_type_columns or \
                    field_['column_name'] in self.datetime_type_columns:
                result = self._assign_range_comparison(field_, result)
                result = self._assign_list_comparison(field_, result)
        return result
    def _assign_pagination_param(self, result_: List[dict]) -> List[dict]:
        """Append limit/offset/order_by_columns query parameters to result_.

        ``order_by_columns`` accepts "<column>[:ASC|DESC]" strings, validated
        by a regex assembled from the model's column names.
        """
        all_column_ = [i['column_name'] for i in self.all_field]
        regex_validation = "(?=(" + '|'.join(all_column_) + r")?\s?:?\s*?(?=(" + '|'.join(
            list(map(str, Ordering))) + r"))?)"
        columns_with_ordering = pydantic.constr(regex=regex_validation)
        for i in [
            ('limit', Optional[int], Query(None)),
            ('offset', Optional[int], Query(None)),
            ('order_by_columns', Optional[List[columns_with_ordering]], Query(
                # [f"{self._primary_key}:ASC"],
                None,
                description=self._get_many_order_by_columns_description_builder(
                    all_columns=all_column_,
                    regex_validation=regex_validation,
                    primary_name=self.primary_key_str)))
        ]:
            result_.append(i)
        return result_
    def upsert_one(self) -> Tuple:
        """Build (None, request-body model, response model) for the
        single-row upsert endpoint.

        The request body holds every column plus an optional
        ``on_conflict.update_columns`` list naming the columns to overwrite
        when the unique constraint collides.
        """
        request_validation = [lambda self_object: _filter_none(self_object)]
        request_fields = []
        response_fields = []
        # Create on_conflict Model
        all_column_ = [i['column_name'] for i in self.all_field]
        conflict_columns = ('update_columns',
                            Optional[List[str]],
                            Body(set(all_column_) - set(self.unique_fields),
                                 description='update_columns should contain which columns you want to update '
                                             f'when the unique columns got conflict'))
        conflict_model = make_dataclass('Upsert_one_request_update_columns_when_conflict_request_body_model',
                                        [conflict_columns])
        on_conflict_handle = [('on_conflict', Optional[conflict_model],
                               Body(None))]
        # Create Request and Response Model
        all_field = deepcopy(self.all_field)
        for i in all_field:
            request_fields.append((i['column_name'],
                                   i['column_type'],
                                   Body(i['column_default'])))
            response_fields.append((i['column_name'],
                                    i['column_type'],
                                    Body(i['column_default'])))
        # Ready the uuid to str validator
        if self.uuid_type_columns:
            request_validation.append(lambda self_object: self._value_of_list_to_str(self_object,
                                                                                     self.uuid_type_columns))
        #
        request_body_model = make_dataclass('Upsert_one_request_model',
                                            request_fields + on_conflict_handle,
                                            namespace={
                                                '__post_init__': lambda self_object: [i(self_object)
                                                                                      for i in request_validation]
                                            })
        response_model_dataclass = make_dataclass('Upsert_one_response_model',
                                                  response_fields)
        response_model_pydantic = _model_from_dataclass(response_model_dataclass)
        # Response fields are all required but carry the column defaults.
        response_model = _to_require_but_default(response_model_pydantic)
        if self.alias_mapper and response_model:
            # Rename original column names to their aliases on the way out.
            validator_function = root_validator(pre=True, allow_reuse=True)(_original_data_to_alias(self.alias_mapper))
            response_model = _add_validators(response_model, {"root_validator": validator_function})
        return None, request_body_model, response_model
    def upsert_many(self) -> Tuple:
        """Build (None, request-body model, response model) for the bulk
        upsert endpoint.

        The request body carries an ``insert`` list of row items plus an
        optional ``on_conflict.update_columns`` list; the response is the
        list of upserted rows.
        """
        insert_fields = []
        response_fields = []
        # Create on_conflict Model
        all_column_ = [i['column_name'] for i in self.all_field]
        conflict_columns = ('update_columns',
                            Optional[List[str]],
                            Body(set(all_column_) - set(self.unique_fields),
                                 description='update_columns should contain which columns you want to update '
                                             f'when the unique columns got conflict'))
        conflict_model = make_dataclass('Upsert_many_request_update_columns_when_conflict_request_body_model',
                                        [conflict_columns])
        on_conflict_handle = [('on_conflict', Optional[conflict_model],
                               Body(None))]
        # Ready the Request and Response Model
        all_field = deepcopy(self.all_field)
        for i in all_field:
            insert_fields.append((i['column_name'],
                                  i['column_type'],
                                  field(default=Body(i['column_default']))))
            response_fields.append((i['column_name'],
                                    i['column_type'],
                                    Body(i['column_default'])))
        #
        # # Ready uuid_to_str validator
        # if self.uuid_type_columns:
        #     for uuid_name in self.uuid_type_columns:
        #         validator_function = validator(uuid_name, allow_reuse=True)(_uuid_to_str)
        #         request_validator_dict[f'{uuid_name}_validator'] = validator_function
        #
        # # Add filter out none field validator and uuid_to_str validaor
        # request_validator_dict['root_validator'] = root_validator(allow_reuse=True)(
        #     _filter_out_none)  # <- should be check none has filted and uuid is str
        #
        # insert_item_field = make_dataclass('UpsertManyInsertItemRequestModel',
        #                                    insert_fields
        #                                    )
        # insert_item_field_model_pydantic = _model_from_dataclass(insert_item_field)
        # insert_item_field_model_pydantic = _add_validators(insert_item_field_model_pydantic, request_validator_dict)
        request_validation = [lambda self_object: _filter_none(self_object)]
        if self.uuid_type_columns:
            request_validation.append(lambda self_object: self._value_of_list_to_str(self_object,
                                                                                     self.uuid_type_columns))
        insert_item_field_model_pydantic = make_dataclass('UpsertManyInsertItemRequestModel',
                                                          insert_fields
                                                          )
        # Create List Model with contains item
        insert_list_field = [('insert', List[insert_item_field_model_pydantic], Body(...))]
        request_body_model = make_dataclass('UpsertManyRequestBody',
                                            insert_list_field + on_conflict_handle
                                            ,
                                            namespace={
                                                '__post_init__': lambda self_object: [validator_(self_object)
                                                                                      for validator_ in
                                                                                      request_validation]}
                                            )
        response_model_dataclass = make_dataclass('UpsertManyResponseItemModel',
                                                  response_fields)
        response_model_pydantic = _model_from_dataclass(response_model_dataclass)
        response_item_model = _to_require_but_default(response_model_pydantic)
        if self.alias_mapper and response_item_model:
            # Rename original column names to their aliases on the way out.
            validator_function = root_validator(pre=True, allow_reuse=True)(_original_data_to_alias(self.alias_mapper))
            response_item_model = _add_validators(response_item_model, {"root_validator": validator_function})
        response_model = create_model(
            'UpsertManyResponseListModel',
            **{'__root__': (List[response_item_model], None)}
        )
        return None, request_body_model, response_model
    def find_many(self) -> Tuple:
        """Build (query model, None, response model) for the list endpoint:
        fuzzy filters plus limit/offset/order_by pagination parameters.
        """
        query_param: List[dict] = self._get_fizzy_query_param()
        query_param: List[dict] = self._assign_pagination_param(query_param)
        response_fields = []
        all_field = deepcopy(self.all_field)
        for i in all_field:
            response_fields.append((i['column_name'],
                                    i['column_type'],
                                    None))
            # i['column_type']))
        request_fields = []
        for i in query_param:
            # Pagination params arrive as tuples, fuzzy params as dicts.
            if isinstance(i, Tuple):
                request_fields.append(i)
            elif isinstance(i, dict):
                request_fields.append((i['column_name'],
                                       i['column_type'],
                                       Query(i['column_default'])))
            else:
                raise UnknownError(f'Unknown error, {i}')
        request_validation = [lambda self_object: _filter_none(self_object)]
        if self.uuid_type_columns:
            request_validation.append(lambda self_object: self._value_of_list_to_str(self_object,
                                                                                     self.uuid_type_columns))
        request_query_model = make_dataclass('FindManyRequestBody',
                                             request_fields,
                                             namespace={
                                                 '__post_init__': lambda self_object: [validator_(self_object)
                                                                                       for validator_ in
                                                                                       request_validation]}
                                             )
        response_model_dataclass = make_dataclass('FindManyResponseItemModel',
                                                  response_fields,
                                                  )
        response_list_item_model = _model_from_dataclass(response_model_dataclass)
        if self.alias_mapper and response_list_item_model:
            # Rename original column names to their aliases on the way out.
            validator_function = root_validator(pre=True, allow_reuse=True)(_original_data_to_alias(self.alias_mapper))
            response_list_item_model = _add_validators(response_list_item_model, {"root_validator": validator_function})
        # Enable orm_mode so ORM rows can be returned directly.
        response_list_item_model = _add_orm_model_config_into_pydantic_model(response_list_item_model)
        response_model = create_model(
            'FindManyResponseListModel',
            **{'__root__': (List[response_list_item_model], None)}
        )
        return request_query_model, None, response_model
def find_one(self) -> Tuple:
    """Build models for FIND-ONE.

    Returns (primary-key model, request query model, None, response model).
    """
    query_param: List[dict] = self._get_fizzy_query_param(self.primary_key_str)
    response_fields = []
    all_field = deepcopy(self.all_field)
    for i in all_field:
        response_fields.append((i['column_name'],
                                i['column_type'],
                                Body(i['column_default'])))
    request_fields = []
    for i in query_param:
        if isinstance(i, Tuple):
            request_fields.append(i)
        elif isinstance(i, dict):
            request_fields.append((i['column_name'],
                                   i['column_type'],
                                   Query(i['column_default'])))
        else:
            raise UnknownError(f'Unknown error, {i}')
    # Drop None values; stringify UUID columns when present.
    request_validation = [lambda self_object: _filter_none(self_object)]
    if self.uuid_type_columns:
        request_validation.append(lambda self_object: self._value_of_list_to_str(self_object,
                                                                                 self.uuid_type_columns))
    request_query_model = make_dataclass('FindOneRequestBody',
                                         request_fields,
                                         namespace={
                                             '__post_init__': lambda self_object: [validator_(self_object)
                                                                                   for validator_ in
                                                                                   request_validation]
                                         }
                                         )
    response_model_dataclass = make_dataclass('FindOneResponseModel',
                                              response_fields,
                                              namespace={
                                                  '__post_init__': lambda self_object: [validator_(self_object)
                                                                                        for validator_ in
                                                                                        request_validation]}
                                              )
    response_model = _model_from_dataclass(response_model_dataclass)
    # Alias original column names in the response when an alias mapper is set.
    if self.alias_mapper and response_model:
        validator_function = root_validator(pre=True, allow_reuse=True)(_original_data_to_alias(self.alias_mapper))
        response_model = _add_validators(response_model, {"root_validator": validator_function})
    response_model = _add_orm_model_config_into_pydantic_model(response_model)
    return self._primary_key_dataclass_model, request_query_model, None, response_model
def delete_one(self) -> Tuple:
    """Build models for DELETE-ONE.

    Returns (primary-key model, request query model, None, response model);
    the response echoes only the primary-key column of the deleted row.
    """
    query_param: List[dict] = self._get_fizzy_query_param(self.primary_key_str)
    response_fields = []
    all_field = deepcopy(self.all_field)
    for i in all_field:
        response_fields.append((i['column_name'],
                                i['column_type'],
                                Body(i['column_default'])))
    request_fields = []
    for i in query_param:
        if isinstance(i, Tuple):
            request_fields.append(i)
        elif isinstance(i, dict):
            request_fields.append((i['column_name'],
                                   i['column_type'],
                                   Query(i['column_default'])))
        else:
            raise UnknownError(f'Unknown error, {i}')
    request_validation = [lambda self_object: _filter_none(self_object)]
    if self.uuid_type_columns:
        request_validation.append(lambda self_object: self._value_of_list_to_str(self_object,
                                                                                 self.uuid_type_columns))
    # NOTE(review): unlike request_validation, this validator is registered
    # even when uuid_type_columns is empty — presumably the helper tolerates
    # an empty column list; confirm.
    response_validation = [lambda self_object: self._value_of_list_to_str(self_object,
                                                                          self.uuid_type_columns)]
    request_query_model = make_dataclass('DeleteOneRequestBody',
                                         request_fields,
                                         namespace={
                                             '__post_init__': lambda self_object: [validator_(self_object)
                                                                                   for validator_ in
                                                                                   request_validation]
                                         }
                                         )
    # Response carries only the (required) primary-key field.
    response_model = make_dataclass('DeleteOneResponseModel',
                                    [(self._primary_key_field_definition[0],
                                      self._primary_key_field_definition[1],
                                      ...)],
                                    namespace={
                                        '__post_init__': lambda self_object: [validator_(self_object)
                                                                              for validator_ in
                                                                              response_validation]}
                                    )
    response_model = _model_from_dataclass(response_model)
    return self._primary_key_dataclass_model, request_query_model, None, response_model
def delete_many(self) -> Tuple:
    """Build models for DELETE-MANY.

    Returns (None, request query model, None, response model); the response
    is a list of primary-key-only items, one per deleted row.
    """
    query_param: List[dict] = self._get_fizzy_query_param()
    response_fields = []
    all_field = deepcopy(self.all_field)
    for i in all_field:
        response_fields.append((i['column_name'],
                                i['column_type'],
                                Body(i['column_default'])))
    request_fields = []
    for i in query_param:
        if isinstance(i, Tuple):
            request_fields.append(i)
        elif isinstance(i, dict):
            request_fields.append((i['column_name'],
                                   i['column_type'],
                                   Query(i['column_default'])))
        else:
            raise UnknownError(f'Unknown error, {i}')
    request_validation = [lambda self_object: _filter_none(self_object)]
    if self.uuid_type_columns:
        request_validation.append(lambda self_object: self._value_of_list_to_str(self_object,
                                                                                 self.uuid_type_columns))
    # NOTE(review): registered unconditionally (cf. delete_one) — confirm the
    # helper tolerates an empty uuid_type_columns list.
    response_validation = [lambda self_object: self._value_of_list_to_str(self_object,
                                                                          self.uuid_type_columns)]
    request_query_model = make_dataclass('DeleteManyRequestBody',
                                         request_fields,
                                         namespace={
                                             '__post_init__': lambda self_object: [validator_(self_object)
                                                                                   for validator_ in
                                                                                   request_validation]
                                         }
                                         )
    response_model = make_dataclass('DeleteManyResponseModel',
                                    [(self._primary_key_field_definition[0],
                                      self._primary_key_field_definition[1],
                                      ...)],
                                    namespace={
                                        '__post_init__': lambda self_object: [validator_(self_object)
                                                                              for validator_ in
                                                                              response_validation]}
                                    )
    response_model = _model_from_dataclass(response_model)
    # Wrap the item model in a list ("__root__") model.
    response_model = create_model(
        'DeleteManyResponseListModel',
        **{'__root__': (List[response_model], None)}
    )
    return None, request_query_model, None, response_model
def patch(self) -> Tuple:
    """Build models for PATCH-ONE.

    Returns (primary-key model, request query model, request body model,
    response model).  The request body excludes the primary-key column and
    every field defaults to None, so callers may send a partial update.
    """
    query_param: List[dict] = self._get_fizzy_query_param(self.primary_key_str)
    response_fields = []
    all_field = deepcopy(self.all_field)
    request_body_fields = []
    for i in all_field:
        response_fields.append((i['column_name'],
                                i['column_type'],
                                Body(i['column_default'])))
        # BUG FIX: was `is not`, an identity check on strings that only
        # works when both happen to be interned; use value equality.
        if i['column_name'] != self.primary_key_str:
            request_body_fields.append((i['column_name'],
                                        i['column_type'],
                                        Body(None)))
    request_query_fields = []
    for i in query_param:
        if isinstance(i, dict):
            request_query_fields.append((i['column_name'],
                                         i['column_type'],
                                         Query(i['column_default'])))
        else:
            raise UnknownError(f'Unknown error, {i}')
    # Validators applied in __post_init__: drop None values and stringify
    # UUID columns when present.
    request_validation = [lambda self_object: _filter_none(self_object)]
    if self.uuid_type_columns:
        request_validation.append(lambda self_object: self._value_of_list_to_str(self_object,
                                                                                 self.uuid_type_columns))
    request_query_model = make_dataclass('PatchOneRequestQueryBody',
                                         request_query_fields,
                                         namespace={
                                             '__post_init__': lambda self_object: [validator_(self_object)
                                                                                   for validator_ in
                                                                                   request_validation]
                                         }
                                         )
    request_body_model = make_dataclass('PatchOneRequestBodyBody',
                                        request_body_fields,
                                        namespace={
                                            '__post_init__': lambda self_object: [validator_(self_object)
                                                                                  for validator_ in
                                                                                  request_validation]
                                        }
                                        )
    response_model_dataclass = make_dataclass('PatchOneResponseModel',
                                              response_fields,
                                              namespace={
                                                  '__post_init__': lambda self_object: [validator_(self_object)
                                                                                        for validator_ in
                                                                                        request_validation]}
                                              )
    response_model = _model_from_dataclass(response_model_dataclass)
    # Alias original column names in the response when an alias mapper is set.
    if self.alias_mapper and response_model:
        validator_function = root_validator(pre=True, allow_reuse=True)(_original_data_to_alias(self.alias_mapper))
        response_model = _add_validators(response_model, {"root_validator": validator_function})
    return self._primary_key_dataclass_model, request_query_model, request_body_model, response_model
def update_one(self) -> Tuple:
    """Build models for UPDATE-ONE (full replace).

    Returns (primary-key model, request query model, request body model,
    response model).  Unlike patch(), every non-primary-key field in the
    request body is required (Body(...)).
    """
    query_param: List[dict] = self._get_fizzy_query_param(self.primary_key_str)
    response_fields = []
    all_field = deepcopy(self.all_field)
    request_body_fields = []
    for i in all_field:
        response_fields.append((i['column_name'],
                                i['column_type'],
                                Body(i['column_default'])))
        # BUG FIX: was `is not`, an identity check on strings that only
        # works when both happen to be interned; use value equality.
        if i['column_name'] != self.primary_key_str:
            request_body_fields.append((i['column_name'],
                                        i['column_type'],
                                        Body(...)))
    request_query_fields = []
    for i in query_param:
        # if isinstance(i, Tuple):
        #     request_query_fields.append(i)
        #     request_body_fields.append()
        if isinstance(i, dict):
            request_query_fields.append((i['column_name'],
                                         i['column_type'],
                                         Query(i['column_default'])))
        else:
            raise UnknownError(f'Unknown error, {i}')
    # Validators applied in __post_init__: drop None values and stringify
    # UUID columns when present.
    request_validation = [lambda self_object: _filter_none(self_object)]
    if self.uuid_type_columns:
        request_validation.append(lambda self_object: self._value_of_list_to_str(self_object,
                                                                                 self.uuid_type_columns))
    request_query_model = make_dataclass('UpdateOneRequestQueryBody',
                                         request_query_fields,
                                         namespace={
                                             '__post_init__': lambda self_object: [validator_(self_object)
                                                                                   for validator_ in
                                                                                   request_validation]
                                         }
                                         )
    request_body_model = make_dataclass('UpdateOneRequestBodyBody',
                                        request_body_fields,
                                        namespace={
                                            '__post_init__': lambda self_object: [validator_(self_object)
                                                                                  for validator_ in
                                                                                  request_validation]
                                        }
                                        )
    response_model_dataclass = make_dataclass('UpdateOneResponseModel',
                                              response_fields,
                                              namespace={
                                                  '__post_init__': lambda self_object: [validator_(self_object)
                                                                                        for validator_ in
                                                                                        request_validation]}
                                              )
    response_model = _model_from_dataclass(response_model_dataclass)
    # Alias original column names in the response when an alias mapper is set.
    if self.alias_mapper and response_model:
        validator_function = root_validator(pre=True, allow_reuse=True)(_original_data_to_alias(self.alias_mapper))
        response_model = _add_validators(response_model, {"root_validator": validator_function})
    return self._primary_key_dataclass_model, request_query_model, request_body_model, response_model
def update_many(self) -> Tuple:
    '''
    Build models for UPDATE-MANY: update some columns to the same value for
    every row within a scope.  The scope is given by the request query; the
    request body holds the new values for every column except the primary
    key.  The response lists the updated rows.

    :return: (None, request query model, request body model, response model)
    '''
    query_param: List[dict] = self._get_fizzy_query_param()
    response_fields = []
    all_field = deepcopy(self.all_field)
    request_body_fields = []
    for i in all_field:
        response_fields.append((i['column_name'],
                                i['column_type'],
                                Body(i['column_default'])))
        # Primary key is excluded from the updatable body fields.
        if i['column_name'] not in [self.primary_key_str]:
            request_body_fields.append((i['column_name'],
                                        i['column_type'],
                                        Body(...)))
    request_query_fields = []
    for i in query_param:
        # if isinstance(i, Tuple):
        #     request_query_fields.append(i)
        #     request_body_fields.append()
        if isinstance(i, dict):
            request_query_fields.append((i['column_name'],
                                         i['column_type'],
                                         Query(i['column_default'])))
        else:
            raise UnknownError(f'Unknown error, {i}')
    # Drop None values; stringify UUID columns when present.
    request_validation = [lambda self_object: _filter_none(self_object)]
    if self.uuid_type_columns:
        request_validation.append(lambda self_object: self._value_of_list_to_str(self_object,
                                                                                 self.uuid_type_columns))
    request_query_model = make_dataclass('UpdateManyRequestQueryBody',
                                         request_query_fields,
                                         namespace={
                                             '__post_init__': lambda self_object: [validator_(self_object)
                                                                                   for validator_ in
                                                                                   request_validation]
                                         }
                                         )
    request_body_model = make_dataclass('UpdateManyRequestBodyBody',
                                        request_body_fields,
                                        namespace={
                                            '__post_init__': lambda self_object: [validator_(self_object)
                                                                                  for validator_ in
                                                                                  request_validation]
                                        }
                                        )
    response_model_dataclass = make_dataclass('UpdateManyResponseModel',
                                              response_fields,
                                              )
    response_model_pydantic = _model_from_dataclass(response_model_dataclass)
    # Alias original column names in the response when an alias mapper is set.
    if self.alias_mapper and response_model_dataclass:
        validator_function = root_validator(pre=True, allow_reuse=True)(_original_data_to_alias(self.alias_mapper))
        response_model_pydantic = _add_validators(response_model_pydantic, {"root_validator": validator_function})
    # Wrap the item model in a list ("__root__") model.
    response_model = create_model(
        'UpdateManyResponseListModel',
        **{'__root__': (List[response_model_pydantic], None)}
    )
    return None, request_query_model, request_body_model, response_model
def patch_many(self) -> Tuple:
    '''
    Build models for PATCH-MANY: partially update rows within a scope.
    The scope is given by the request query; the request body holds the
    values to change (all optional, primary key excluded).  The response
    lists the updated rows.

    :return: (None, request query model, request body model, response model)
    '''
    query_param: List[dict] = self._get_fizzy_query_param()
    response_fields = []
    all_field = deepcopy(self.all_field)
    request_body_fields = []
    for i in all_field:
        response_fields.append((i['column_name'],
                                i['column_type'],
                                Body(i['column_default'])))
        # Primary key is excluded; fields default to None (partial update).
        if i['column_name'] not in [self.primary_key_str]:
            request_body_fields.append((i['column_name'],
                                        i['column_type'],
                                        Body(None)))
    request_query_fields = []
    for i in query_param:
        if isinstance(i, dict):
            request_query_fields.append((i['column_name'],
                                         i['column_type'],
                                         Query(i['column_default'])))
        else:
            raise UnknownError(f'Unknown error, {i}')
    # Drop None values; stringify UUID columns when present.
    request_validation = [lambda self_object: _filter_none(self_object)]
    if self.uuid_type_columns:
        request_validation.append(lambda self_object: self._value_of_list_to_str(self_object,
                                                                                 self.uuid_type_columns))
    request_query_model = make_dataclass('PatchManyRequestQueryBody',
                                         request_query_fields,
                                         namespace={
                                             '__post_init__': lambda self_object: [validator_(self_object)
                                                                                   for validator_ in
                                                                                   request_validation]
                                         }
                                         )
    request_body_model = make_dataclass('PatchManyRequestBodyBody',
                                        request_body_fields,
                                        namespace={
                                            '__post_init__': lambda self_object: [validator_(self_object)
                                                                                  for validator_ in
                                                                                  request_validation]
                                        }
                                        )
    response_model_dataclass = make_dataclass('PatchManyResponseModel',
                                              response_fields,
                                              namespace={
                                                  '__post_init__': lambda self_object: [validator_(self_object)
                                                                                        for validator_ in
                                                                                        request_validation]}
                                              )
    response_model_pydantic = _model_from_dataclass(response_model_dataclass)
    # Alias original column names in the response when an alias mapper is set.
    if self.alias_mapper and response_model_dataclass:
        validator_function = root_validator(pre=True, allow_reuse=True)(_original_data_to_alias(self.alias_mapper))
        response_model_pydantic = _add_validators(response_model_pydantic, {"root_validator": validator_function})
    # Wrap the item model in a list ("__root__") model.
    response_model = create_model(
        'PatchManyResponseListModel',
        **{'__root__': (List[response_model_pydantic], None)}
    )
    return None, request_query_model, request_body_model, response_model
def post_redirect_get(self) -> Tuple:
    """Build models for POST-redirect-GET.

    Returns (None, request body model, response model); all columns appear
    in both the request body and the response.
    """
    request_validation = [lambda self_object: _filter_none(self_object)]
    request_body_fields = []
    response_body_fields = []
    # Create Request and Response Model
    all_field = deepcopy(self.all_field)
    for i in all_field:
        request_body_fields.append((i['column_name'],
                                    i['column_type'],
                                    Body(i['column_default'])))
        response_body_fields.append((i['column_name'],
                                     i['column_type'],
                                     Body(i['column_default'])))
    # Ready the uuid to str validator
    if self.uuid_type_columns:
        request_validation.append(lambda self_object: self._value_of_list_to_str(self_object,
                                                                                 self.uuid_type_columns))
    #
    request_body_model = make_dataclass('PostAndRedirectRequestModel',
                                        request_body_fields,
                                        namespace={
                                            '__post_init__': lambda self_object: [i(self_object)
                                                                                  for i in request_validation]
                                        })
    response_model_dataclass = make_dataclass('PostAndRedirectResponseModel',
                                              response_body_fields)
    response_model = _model_from_dataclass(response_model_dataclass)
    # Alias original column names in the response when an alias mapper is set.
    if self.alias_mapper and response_model:
        validator_function = root_validator(pre=True, allow_reuse=True)(_original_data_to_alias(self.alias_mapper))
        response_model = _add_validators(response_model, {"root_validator": validator_function})
    return None, request_body_model, response_model
|
import setuptools

name = "bdendro"

# Read __version__ from bdendro/version.py without importing the package.
variables = {}
with open("{}/version.py".format(name), mode="r") as f:
    exec(f.read(), variables)
version = variables["__version__"]

setuptools.setup(
    name=name,
    version=version,
    packages=setuptools.find_packages(),
    python_requires=">= 3.6",
    install_requires=["bokeh >= 1.1.0",
                      "numpy >= 1.10.1",
                      "scipy >= 1.0.0"],
    # BUG FIX: the setuptools keyword is 'extras_require'; the previous
    # 'extra_requires' was silently ignored, so `pip install bdendro[doc]`
    # never installed the doc dependencies.
    extras_require={"doc": ["bokeh >= 2.0.0",
                            "selenium",
                            "Sphinx >= 2.0.0",
                            "sphinxcontrib-fulltoc >= 1.2.0"]},
    description="Bokeh helpers for visualizing a dendrogram.",
    author="naubuan",
    url="https://github.com/naubuan/bdendro",
    classifiers=["License :: OSI Approved :: Apache Software License",
                 "Programming Language :: Python :: 3.6",
                 "Programming Language :: Python :: 3.7",
                 "Programming Language :: Python :: 3.8",
                 # Typo fix: "Engineearing" is not a valid Trove classifier.
                 "Topic :: Scientific/Engineering :: Visualization"])
|
import random as rnd
import math as ma
from scipy import stats

# Runs (up-and-down) test for independence of a pseudo-random sequence.
m = 10000
aleatorios = []
for i in range(m):
    aleatorios.append(rnd.random())
# Mark '+' when the sequence rises, '-' when it falls.
subcadenas = []
for i in range(0,m-1):
    if aleatorios[i] < aleatorios[i+1]:
        subcadenas.append('+')
    else:
        subcadenas.append('-')
#print(aleatorios,subcadenas)
# Count maximal runs of identical signs.
total_cadenas = 1
ini = subcadenas[0]
for i in range(0,len(subcadenas)-1):
    if ini != subcadenas[i+1]:
        total_cadenas = total_cadenas + 1
        ini = subcadenas[i+1]
print("Total Cadenas: ",total_cadenas)
# Theoretical mean and variance of the number of runs for m samples.
media = (2*m - 1)/3
varianza = (16*m - 29)/90
desvio = ma.sqrt(varianza)
print("media: ",media)
print("varianza: ",varianza)
print("desvio: ",desvio)
# Compute the Z statistic.
z_calculado = ma.fabs((total_cadenas - media)/desvio)
print("Z calculado: ",z_calculado)
alpha = 0.05
c = 1-(alpha/2)
# Two-tailed critical value Z at 1 - alpha/2.
z_alpha2 = stats.norm.ppf(c)
print("Z en 1 - (alpha/2): ",z_alpha2)
if z_calculado <= z_alpha2:
    print("La secuencia de numeros es independiente y por lo tanto la secuencia es aleatoria")
else:
    print("La secuencia de numeros NO es Independiente y por lo tanto la secuencia NO es aleatoria")
|
# -*- coding: utf-8 -*-
# Compute single-character and bigram Shannon entropies of a Georgian text.
# Usage: script.py <input-text-file> <output-file>
import sys,math
a=open(sys.argv[1]).read()
b=open(sys.argv[2], 'w')
# Alphabet: space plus the Georgian letters.
alp=' აბგდევზთიკლმნოპჟრსტუფქღყშჩცძწჭხჯჰ'
singles={}
couples={}
for x in alp:
    singles[x]=0
    for y in alp:
        couples[x+y]=0
# Accumulate relative frequencies of characters and character pairs.
# NOTE(review): raises KeyError for any character outside `alp` (e.g. a
# newline) — presumably the input is pre-cleaned; confirm.
for x in range(len(a)-1):
    singles[a[x]]+=1/len(a)
    couples[a[x:x+2]]+=1/(len(a)-1)
singles[a[len(a)-1]]+=1/len(a)
# Entropies to 7 decimal places; zero-probability entries contribute 0.
entSin=format(-sum([singles[x]*math.log2(singles[x]) if singles[x]>0 else 0 for x in singles]), '.7f')
entCou=format(-sum([couples[x]*math.log2(couples[x]) if couples[x]>0 else 0 for x in couples]), '.7f')
b.write(entSin+'\n'+entCou+'\n'+format(float(entCou)-float(entSin), '.7f'))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-02 22:40
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the trader app: Advice, Session and Trade tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Advice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pair', models.CharField(max_length=30)),
                ('position', models.CharField(max_length=30)),
                ('diff', models.FloatField(default=0)),
                ('price', models.FloatField()),
                # 280 chars: sized to fit a full tweet.
                ('tweet', models.CharField(max_length=280)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Session',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.CharField(choices=[('RUNNING', 'Running'), ('ENDED', 'Ended')], max_length=25)),
                ('ma1', models.PositiveSmallIntegerField()),
                ('ma2', models.PositiveSmallIntegerField()),
                ('data_range', models.CharField(default='3h', max_length=25)),
                ('data_group', models.CharField(default='1m', max_length=25)),
                ('data_interval', models.CharField(default='10', max_length=25)),
                ('btc_balance_at_start', models.FloatField(default=0)),
                ('euro_balance_at_start', models.FloatField(default=0)),
                ('btc_balance', models.FloatField(default=0)),
                ('euro_balance', models.FloatField(default=0)),
                ('started_at', models.DateTimeField(null=True)),
                ('ended_at', models.DateTimeField(null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Trade',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_id', models.IntegerField(default=0)),
                ('amount', models.FloatField()),
                ('price', models.FloatField()),
                ('total', models.FloatField()),
                ('fee', models.FloatField()),
                ('type', models.CharField(choices=[('BUY', 'Buy'), ('SELL', 'Sell')], max_length=25)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('session', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, related_name='trades', to='trader.Session')),
            ],
        ),
    ]
|
from chem21repo.api_clients import C21RESTRequests, RESTError
from chem21repo.drupal import drupal_node_factory
from chem21repo.repo.models import UniqueFile, Question
from django.core.management.base import BaseCommand
import os
class Command(BaseCommand):
help = 'Download all drupal files'
def handle(self, *args, **options):
files = UniqueFile.objects.exclude(remote_id__isnull=True).exclude(
remote_id=0).filter(type__in=["video", "image"])
# print [(f.filename, f.questions.all()) for f in files]
"""
for q in Question.objects.all():
if q.video and not q.video.remote_id:
print "Storing remote ID for %s" % q.title
text = q.text
try:
q.drupal.pull()
except RESTError, e:
print "Unable to pull file"
print q.drupal.api.response
q.text = text
q.save()
"""
for f in UniqueFile.objects.exclude(remote_id__isnull=True).exclude(
remote_id=0).filter(type__in=["video", "image"]):
download = False
for path in f.local_paths:
if UniqueFile.storage.exists(path):
print "Exists: %s" % path
else:
download = True
if not download:
"File already downloaded"
continue
print f.filename
print f.questions.all()
try:
f.drupal.pull()
except RESTError, e:
print "Unable to pull file"
|
import time
def validarNumero():
    """Prompt repeatedly until the user types a non-negative integer; return it."""
    while True:
        entrada = input('Ingresa un número que quieras saber su raíz cuadrada: ')
        if entrada.isdigit():
            return int(entrada)
        print('ERROR: Ingresa un numero entero por favor:')
def raiz_cuadrada_normal():
    """Exhaustive upward search for an exact integer square root, with timing."""
    objetivo = validarNumero()
    tiempo_inicial = time.time()
    resultado = 0
    # Count upward until resultado**2 reaches or passes the target; the loop
    # condition alone is sufficient to stop at an exact root.
    while resultado ** 2 < objetivo:
        resultado += 1
    if resultado ** 2 == objetivo:
        print(f'raiz cuadrada de {objetivo} es {resultado}')
    else:
        print(f'No existe una raíz exacta de {objetivo}')
    print(f'El programa demoró {time.time() - tiempo_inicial} segundos ')
def aproximacion():
    # Approximate the square root by stepping from 0 in fixed increments.
    objetivo = validarNumero()
    epsilon = 0.01
    paso = epsilon**2  # step size 0.0001 (exhaustive enumeration)
    respuesta = 0.0
    tiempo_inicio = time.time()
    # Stop when within epsilon of the target or once respuesta passes it.
    # NOTE(review): prints on every iteration (very slow for large targets)
    # and cannot find roots of targets < 1 — confirm this is intentional.
    while abs(respuesta**2 - objetivo) >= epsilon and respuesta <= objetivo:
        print(abs(respuesta**2 - objetivo), respuesta)
        respuesta += paso
    tiempo_total = time.time() - tiempo_inicio
    if abs(respuesta**2 - objetivo) >= epsilon:
        print(f'No se encontro la raiz cuadrada del objetivo')
        print(f'Tardo {tiempo_total} segundos')
    else:
        print(f'La raiz cuadrada de {objetivo} es {respuesta}')
        print(f'Tardo {tiempo_total} segundos')
def busqueda_binaria():
    # Bisection search for the square root, reporting iterations and timing.
    objetivo = validarNumero()
    inicio = time.time()
    epsilon = 0.001
    bajo = 0.0
    # max(1.0, ...) keeps the bracket valid for targets below 1.
    alto = max(1.0, objetivo)
    respuesta = (alto + bajo) / 2
    num = 0  # iteration counter
    while abs(respuesta**2 - objetivo) >= epsilon:
        print(f'bajo={bajo}, alto={alto}, respuesta={respuesta}')
        if respuesta**2 < objetivo:
            bajo = respuesta
        else:
            alto = respuesta
        respuesta = (alto + bajo) / 2
        num += 1
    print(f'La raiz cuadrada de {objetivo} es {respuesta}')
    fin = time.time()
    print(f'Para resolver hizo {num} iteraciones y se demoro {fin - inicio} segundos\nInicio: {time.asctime(time.localtime(inicio))}\nFin: {time.asctime(time.localtime(fin))}')
def run():
    """Interactive menu loop for the square-root finders; exits on option 0."""
    punto_acceso = 0  # set to 1 when the user chooses to quit
    while True:
        try:
            while True:
                menu = """ Encuentra la raiz cuadrada 💰
    [1] - Busqueda Normal
    [2] - Busqueda Binaria
    [3] - Busqueda Aproximada
    [0] - Salir
    Elije una opciones : """
                opcion = int(input(menu))
                # Validate option
                if opcion >= 0 and opcion <= 3:
                    print('Opcion Correcta')
                    break
                else:
                    print('* * * * * * E R R O R * * * * * *')
                    print('Por favor, Ingresa solo valores del Menu')
            # Menu actions
            if opcion == 0:
                punto_acceso = 1
                print('Bye bye, vuelve pronto!')
            elif opcion == 1:
                raiz_cuadrada_normal()
            elif opcion == 2:
                busqueda_binaria()
            else:
                aproximacion()
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit so Ctrl-C could not quit the program.
        except Exception:
            print('* * * * * * E R R O R * * * * * *')
        if punto_acceso == 1:
            break
if __name__ == '__main__':
    try:
        run()
    # BUG FIX: removed stray trailing junk token and narrowed the bare
    # `except:` so Ctrl-C does not trigger the retry.
    except Exception:
        # Retry once if the first run crashed.
        run()
def read_csv(path_to_csv_file, delimiter=","):
    """Read a delimited text file into a list of rows (lists of strings).

    Fields wrapped in double quotes may contain the delimiter; such fields
    are re-assembled across the naive split and the quotes are stripped.
    Prints a message and returns [] when the file does not exist.
    """
    try:
        with open(path_to_csv_file) as source:
            rows = []
            for raw_line in source:
                raw_line = raw_line.strip()
                cells = raw_line.split(delimiter)
                if '"' in raw_line:
                    merged = []
                    pending = ''  # accumulates a quoted field split across cells
                    for cell in cells:
                        if cell[0] == '"' and cell[-1] == '"':
                            # Fully quoted single cell: strip both quotes.
                            merged.append(cell[1:-1])
                        elif cell[0] == '"':
                            # Opening cell of a quoted field spanning the split.
                            pending += cell[1:] + delimiter
                        elif cell[-1] == '"':
                            # Closing cell: flush the re-assembled field.
                            pending += cell[:-1]
                            merged.append(pending)
                            pending = ''
                        elif pending:
                            # Interior cell of the quoted field.
                            pending += cell + delimiter
                        else:
                            merged.append(cell)
                    cells = merged
                rows.append(cells)
            return rows
    except FileNotFoundError:
        print("Error, such file doesn't exist")
        return []
def write_csv(path_to_csv_file: str, data: list, delimiter=','):
    """Write `data` (a list of rows, each a list of strings) to a delimited file.

    Fields containing the delimiter are wrapped in double quotes so that
    read_csv can re-assemble them.  Prints usage help on bad argument types.
    """
    if type(path_to_csv_file) != str or type(data) != list or type(delimiter) != str:
        print("Arguments: path_to_csv_file (str, create/rewrite file), data (list), delimeter (str: 'symbol')")
    else:
        with open(path_to_csv_file, "w") as file:
            for elem in data:
                for i in range(len(elem) - 1):
                    if delimiter in elem[i]:
                        # BUG FIX: quoted fields previously lost their trailing
                        # delimiter, corrupting every row that needed quoting
                        # (e.g. ['a','b,c','d'] was written as a,"b,c"d).
                        file.write('"' + elem[i] + '"' + delimiter)
                    else:
                        file.write(elem[i] + delimiter)
                # Last field gets no trailing delimiter.
                if delimiter in elem[-1]:
                    file.write('"' + elem[-1] + '"')
                else:
                    file.write(elem[-1])
                file.write('\n')
|
#!/usr/bin/env python
'''
$: line break
*: paragraph break
'''
import os, sys, string, re, json
# Map each passage item number to its school grade level (1-4).
grade_1 = [1401, 1603, 1903, 1505, 1804, 1807, 1502, 1703, 2001, '0000']
grade_2 = [350, 2203, 2401, 330, 2201, 2202, 310, 2102, 2302]
grade_3 = [420, 2403, 2701, 320, 450, 2503, 410, 2101, 2504]
grade_4 = [520, 2402, 2904, 2601, 2801, 3105, 2803, 2902, 3106]
item_to_grade = {}
for idx, grade in enumerate([grade_1, grade_2, grade_3, grade_4]):
    for item in grade:
        item_to_grade[str(item)] = idx + 1

# Passages TSV: "$$" marks a paragraph break, "$" a line break (see module
# docstring); everything from '#' onward is a trailer and is dropped.
with open('../data/moby-passages-36/passages-with-line-breaks.tsv') as f:
    item_to_passage = {}
    for line in f:
        item_number, passage = line.strip().split('\t')
        passage = passage[:passage.index('#')].replace('$$', ' * ').replace('$', ' $ ')
        item_to_passage[item_number] = passage

# Letter-to-sound lexicon; patch in a handful of words missing from the JSON.
with open('../data/moby-passages-36/from-susan/lts-20200703.json') as f:
    LTS = json.load(f)
LTS['jane'] = {'sight_word': [], 'decodable': 1, 'grade_level_if_decodable': 1}
LTS['dad'] = {'sight_word': -1, 'decodable': 1, 'grade_level_if_decodable': 1}
LTS['hill'] = {'sight_word': 1, 'decodable': 1, 'grade_level_if_decodable': 1}
LTS['yet'] = {'sight_word': [], 'decodable': 1, 'grade_level_if_decodable': 1}
LTS['top'] = {'sight_word': 1, 'decodable': 1, 'grade_level_if_decodable': 1}
LTS['flowers'] = {'sight_word': 1, 'decodable': 1, 'grade_level_if_decodable': 1}

with open('../data/moby-passages-36/item_to_complete_passage_revised.json') as f:
    item_to_complete_passage = json.load(f)

# Emit one LaTeX file per passage, underlining words by decodability class.
for item_number, passage in item_to_passage.items():
    words = [x for x in passage.split(' ') if x]
    folder = 'passage_' + item_number
    if not os.path.exists(folder):
        os.makedirs(folder)
    f = open(os.path.join(folder, 'passage_' + item_number + '.tex'), 'w')
    # First element of the complete-passage entry is the title.
    passage_title = item_to_complete_passage[item_number][0]
    # LaTeX preamble: colored underline macro \colorul{color}{text}.
    print('''\\documentclass{article}[12pt]
\\usepackage{geometry}
\\usepackage{xcolor}
\\usepackage{times}
\\usepackage{ulem}
\\definecolor{darkgreen}{rgb}{.1,.6,.1}
\\usepackage{helvet}
\\setlength\\parindent{0pt}
\\renewcommand{\\familydefault}{\\sfdefault}
\\renewcommand{\\baselinestretch}{1.7}
\\renewcommand{\\ULthickness}{1pt}
\\def\colorul#1#2{\\color{#1}\\uline{{\\color{black}#2}}\\color{black}}
\\begin{document}
''', file=f)
    # print(item_to_grade)
    print('\\textbf{' + passage_title +'. Grade ' + str(item_to_grade[item_number])+ '.}\n\n{\\colorul{darkgreen}{Not sight word}, \\colorul{red}{Not decodable}, \\colorul{blue}{Not decodable at grade level}\\\\\n\n', file=f)
    for word in words:
        # print(word)
        if word == '$':
            # Line break marker.
            print('\\\\', file=f)
            continue
        if word == '*':
            # Paragraph break marker (currently rendered as a line break).
            # print('', file=f)
            print('\\\\', file=f)
            # print('', file=f)
            continue
        # Normalize: lowercase, strip punctuation and surrounding quotes.
        w = word.lower().replace(',', '').replace('.', '').replace('"', '').replace('!', '').replace('?', '')
        if w[0] == w[-1] == '\'':
            w = w[1:-1]
        # NOTE(review): sight_word values appear as [], 1, -1 and [-1] in the
        # lexicon; only [-1] (and digits) short-circuit here — confirm intended.
        if w.isdigit() or LTS[w]['sight_word'] == [-1]:
            NotSightWord = NotDecodable = NotDecodableAtGradeLevel = False
        else:
            NotSightWord = not LTS[w]['sight_word']
            NotDecodable = not LTS[w]['decodable']
            NotDecodableAtGradeLevel = False
            if not NotDecodable and LTS[w]['grade_level_if_decodable'] > item_to_grade[item_number]:
                NotDecodableAtGradeLevel = True
        # Nest one underline per flag that applies to the word.
        output_word = word
        if NotSightWord:
            output_word = '\colorul{darkgreen}{' + output_word + '}'
        if NotDecodable:
            output_word = '\colorul{red}{' + output_word + '}'
        if NotDecodableAtGradeLevel:
            output_word = '\colorul{blue}{' + output_word + '}'
        print(output_word, end='\,\,\,', file=f)
    print('\n\\end{document}', file=f)
    f.close()
    # break
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.