text stringlengths 38 1.54M |
|---|
# [Bisect-Lower-Bound, Classic]
# https://leetcode.com/problems/find-k-th-smallest-pair-distance/
# 719. Find K-th Smallest Pair Distance
# https://www.youtube.com/watch?v=WHfljqX61Y8&t=1180s
# Given an integer array, return the k-th smallest distance among all the
# pairs. The distance of a pair (A, B) is defined as the absolute difference
# between A and B.
#
# Example 1:
# Input:
# nums = [1,3,1]
# k = 1
# Output: 0
# Explanation:
# Here are all the pairs:
# (1,3) -> 2
# (1,1) -> 0
# (3,1) -> 2
# Then the 1st smallest distance pair is (1,1), and its distance is 0.
#
# Note:
# 2 <= len(nums) <= 10000.
# 0 <= nums[i] < 1000000.
# 1 <= k <= len(nums) * (len(nums) - 1) / 2.
# Heap
import heapq
class Solution1(object):
    def smallestDistancePair(self, nums, k):
        """Return the k-th smallest pair distance using a min-heap.

        After sorting, seed the heap with each element's nearest right
        neighbour; whenever pair (base, j) is popped, its successor
        (base, j + 1) is pushed, so pairs surface in distance order.

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        nums.sort()
        n = len(nums)
        candidates = [(nums[j + 1] - nums[j], j, j + 1)
                      for j in range(n - 1)]
        heapq.heapify(candidates)
        # The k-th pop is the k-th smallest distance overall.
        for _ in range(k):
            dist, base, nxt = heapq.heappop(candidates)
            if nxt + 1 < n:
                heapq.heappush(candidates,
                               (nums[nxt + 1] - nums[base], base, nxt + 1))
        return dist
# Binary Search
class Solution2(object):
    def _le_m_count(self, m, nums):
        """Count pairs (i, j), i < j, in sorted `nums` with distance <= m.

        Two-pointer sweep: `j` never moves backwards, so the whole count
        is O(n) per call.
        """
        count = 0
        j = 1
        for i in range(len(nums) - 1):
            while j < len(nums) and nums[j] - nums[i] <= m:
                j += 1
            count += j - i - 1
        return count

    def smallestDistancePair(self, nums, k):
        """Return the k-th smallest pair distance.

        Binary-searches the answer over [0, max - min] using the monotone
        predicate "at least k pairs have distance <= m" (lower bound).

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        nums.sort()
        l = 0
        r = nums[-1] - nums[0] + 1
        while l < r:
            # BUG FIX: the original used '/', which produces a float
            # midpoint in Python 3 and breaks the integer bisection.
            m = l + (r - l) // 2
            m_count = self._le_m_count(m, nums)
            if m_count >= k:
                r = m
            else:
                l = m + 1
        return l
|
import json
import pyrebase
from helpers import find
import time
import re
class Etl:
    """Extract/transform/load job that mirrors scraped coin-farming JSON
    data (sites, images, apr/apy readings) into a Firebase realtime DB."""

    def __init__(self, data_path=None, config_path=None):
        """
        Args:
            data_path: Directory prefix for sites.json / images.json /
                data.json. Defaults to the working directory ("").
            config_path: Directory prefix for config.json (must contain a
                'firebase' key). Defaults to "../".
        """
        # BUG FIX: self.data_path was only assigned when data_path was
        # None, so passing an explicit path crashed with AttributeError
        # on the open() calls below.
        self.data_path = "" if data_path is None else data_path
        if config_path is None:
            config_path = "../"
        with open(self.data_path + "sites.json") as f:
            self.SITES = json.load(f)
        with open(self.data_path + "images.json") as f:
            self.IMAGES = json.load(f)
        with open(config_path + "config.json") as f:
            config = json.load(f)
        self.firebase = pyrebase.initialize_app(config['firebase'])
        self.db = self.firebase.database()

    def get_coins(self):
        """Reload data.json and return the de-duplicated coin names seen
        under any tag."""
        with open(self.data_path + "data.json") as f:
            self.DATA = json.load(f)
        coins = []
        for tag in self.DATA:
            coins = coins + list(self.DATA[tag].keys())
        return list(set(coins))

    def create_coins_dict(self, coins_list):
        """Build {coin: {"info": [...], "image_uri": ...}} for the given
        coins, collecting per-tag site entries plus a display image."""
        tokens = self.IMAGES.keys()
        # Coins named like "<base><token>" fall back to the known token's
        # image via helpers.find below.
        reg_str = "^(\\w+)({})$".format("|".join([t for t in tokens]))
        with open(self.data_path + "data.json") as f:
            self.DATA = json.load(f)
        coins_dict = {}
        for coin in coins_list:
            S = []
            for tag in self.DATA:
                if coin in self.DATA[tag]:
                    _data = self.DATA[tag][coin]
                    d = {"site": _data["name"],
                         "url": _data["url"],
                         "token_earned": _data["earn"],
                         "tag": _data["tag"]
                         }
                    # A site entry carries either an APY or an APR.
                    if "apy" in _data:
                        d["apy"] = _data["apy"]
                    elif "apr" in _data:
                        d["apr"] = _data["apr"]
                    S.append(d)
            dtop = {"info": S}
            img_coin = coin
            if re.search(reg_str, coin):
                img_coin = find(coin, tokens)
            try:
                img_uri = self.IMAGES[img_coin]
                if img_uri is None:
                    img_uri = self.IMAGES["BNB"]
            except KeyError:
                # Unknown coin: fall back to the BNB image.
                img_uri = self.IMAGES["BNB"]
            dtop["image_uri"] = img_uri
            coins_dict[coin] = dtop
        return coins_dict

    def update_coins_db(self):
        """Rebuild the 'coins' node from the current data.json contents."""
        coins_list = self.get_coins()
        coins_dict = self.create_coins_dict(coins_list)
        self.db.update({"coins": coins_dict})

    def update_historic_db(self):
        """Append the current apr/apy reading of every tag/coin to the
        'historic' node, creating the node the first time a coin appears."""
        with open(self.data_path + "data.json") as f:
            self.DATA = json.load(f)
        for tag in self.DATA:
            for coin in self.DATA[tag]:
                _coin_keys = self.DATA[tag][coin].keys()
                out = self.db.child("historic").child(
                    tag).child(coin).get().val()
                if out is not None:
                    out = dict(out)
                    vals = out["values"]
                    t = out["t"]
                    t.append(time.time())
                    if "apr" in _coin_keys:
                        vals.append(self.DATA[tag][coin]["apr"])
                    elif "apy" in _coin_keys:
                        vals.append(self.DATA[tag][coin]["apy"])
                    self.db.child("historic").child(
                        tag).child(coin).update({"t": t})
                    self.db.child("historic").child(tag).child(
                        coin).update({"values": vals})
                else:
                    # BUG FIX: 'val' was referenced before assignment when
                    # a coin had neither 'apr' nor 'apy'; default to None
                    # so such coins are skipped instead of raising.
                    val = None
                    if "apr" in _coin_keys:
                        val = self.DATA[tag][coin]["apr"]
                    elif "apy" in _coin_keys:
                        val = self.DATA[tag][coin]["apy"]
                    if isinstance(val, float):
                        newd = {"t": [time.time()], "values": [val]}
                        self.db.child("historic").child(
                            tag).child(coin).set(newd)
if __name__ == "__main__":
    # Script entry point: refresh the Firebase 'coins' node from the local
    # JSON data files (requires the data/config files and network access).
    e = Etl()
    e.update_coins_db()
|
import sys

# Usage: python <script> <used_counter>
# Reads the first counter from counters.txt, then rewrites the file with the
# current counter on line one and the supplied counter on line two.
if len(sys.argv) != 2:
    # BUG FIX: the original printed the error but kept running, which then
    # failed with IndexError on sys.argv[1]; exit early instead.
    print('Incorrect argument count')
    sys.exit(1)
# Context managers guarantee the file handles are closed.
with open('counters.txt', 'r') as fp:
    counters = [int(line.strip()) for line in fp.readlines()]
currentCounter = counters[0]
usedCounter = sys.argv[1]
# BUG FIX: the original ended with 'fp.close' (no parentheses), so the
# write handle was never explicitly closed.
with open('counters.txt', 'w') as fp:
    fp.write(str(currentCounter) + '\n' + str(usedCounter))
|
# This code has to be added to __init__.py in folder .../devices/sensor
class Power():
    """Abstract power sensor exposing Watt readings through the REST
    decorators; concrete drivers implement __getWatt__."""

    def __family__(self):
        return "Power"

    def __getWatt__(self):
        # Concrete subclasses must return the measured power in Watts.
        raise NotImplementedError

    @api("Power", 0)
    @request("GET", "sensor/power/*")
    @response(contentType=M_JSON)
    def powerWildcard(self):
        """Return the current reading in every supported unit as JSON."""
        values = {}
        power = self.__getWatt__()
        # BUG FIX: kW and mW conversions were swapped (kW used power * 1000
        # and mW used power / 1000), contradicting getKilowatt and
        # getMilliwatt below.
        values["kW"] = "%.03f" % (power / 1000)
        values["W"] = "%.03f" % power
        values["mW"] = "%.03f" % (power * 1000)
        return values

    @api("Power")
    @request("GET", "sensor/power/kW")
    @response("%.03f")
    def getKilowatt(self):
        """Current power in kilowatts."""
        return self.__getWatt__() / 1000

    @api("Power")
    @request("GET", "sensor/power/W")
    @response("%.03f")
    def getWatt(self):
        """Current power in watts."""
        return self.__getWatt__()

    @api("Power")
    @request("GET", "sensor/power/mW")
    @response("%.03f")
    def getMilliwatt(self):
        """Current power in milliwatts."""
        return self.__getWatt__() * 1000
|
from lesson_package import utils
def sing():
    """Return the canned nonsense lyric string."""
    lyric = 'fdklgoirhkshj'
    return lyric
def cry():
    """Return the package helper's doubled version of the nonsense string."""
    sound = 'fkoguoujnsbwrg'
    return utils.say_twice(sound)
import numpy as np
from tfsnippet.dataflows import DataMapper
from tfsnippet.utils import generate_random_seed
__all__ = ['BaseSampler', 'BernoulliSampler', 'UniformNoiseSampler']
class BaseSampler(DataMapper):
    """Abstract :class:`DataMapper` that maps each array to a sample."""

    def sample(self, x):
        """Draw a sample conditioned on `x`.

        Args:
            x (np.ndarray): The input `x` array.

        Returns:
            np.ndarray: The sampled array.
        """
        raise NotImplementedError()

    def _transform(self, x):
        # DataMapper expects a tuple of output arrays.
        return (self.sample(x),)
class BernoulliSampler(BaseSampler):
    """
    Sampler that draws 0/1 integers, treating each element of the input as
    the probability of drawing a 1. Inputs are assumed to be floats lying
    in [0, 1) or [0, 1].
    """

    def __init__(self, dtype=np.int32, random_state=None):
        """
        Construct a new :class:`BernoulliSampler`.

        Args:
            dtype: Output dtype of the sampled array (default `np.int32`).
            random_state (RandomState): Optional numpy RandomState to draw
                from; a fresh seeded one is created when omitted.
        """
        self._dtype = dtype
        self._random_state = \
            random_state or np.random.RandomState(generate_random_seed())

    @property
    def dtype(self):
        """Data type of the sampled array."""
        return self._dtype

    def sample(self, x):
        """Draw element-wise Bernoulli samples with probabilities `x`."""
        rng = self._random_state or np.random
        noise = rng.uniform(0., 1., size=x.shape)
        return np.asarray(noise < x, dtype=self._dtype)
class UniformNoiseSampler(BaseSampler):
    """
    Sampler that adds element-wise uniform noise to the input array.
    The output keeps the input's dtype unless `dtype` was given at
    construction time.
    """

    def __init__(self, minval=0., maxval=1., dtype=None, random_state=None):
        """
        Construct a new :class:`UniformNoiseSampler`.

        Args:
            minval: Lower bound of the uniform noise (included).
            maxval: Upper bound of the uniform noise (excluded).
            dtype: Optional output dtype; defaults to the input's dtype.
            random_state (RandomState): Optional numpy RandomState to draw
                from; a fresh seeded one is created when omitted.
        """
        self._minval = minval
        self._maxval = maxval
        self._dtype = np.dtype(dtype) if dtype is not None else None
        self._random_state = \
            random_state or np.random.RandomState(generate_random_seed())

    @property
    def minval(self):
        """Lower bound of the uniform noise (included)."""
        return self._minval

    @property
    def maxval(self):
        """Upper bound of the uniform noise (excluded)."""
        return self._maxval

    @property
    def dtype(self):
        """Data type of the sampled array (None means "match input")."""
        return self._dtype

    def sample(self, x):
        """Return `x` plus uniform noise in [minval, maxval)."""
        rng = self._random_state or np.random
        out_dtype = self._dtype or x.dtype
        noise = rng.uniform(self._minval, self._maxval, size=x.shape)
        return np.asarray(x + noise, dtype=out_dtype)
|
#coding=utf-8
# 1. Import the selenium library
from selenium import webdriver
# 2. Start the browser to automate (Chrome)
br=webdriver.Chrome()
# 3. Open the target page
br.get("https://www.baidu.com")
# Locate by id
# br.find_element_by_id("kw").send_keys("55开")
# Locate by name
# br.find_element_by_name("wd").send_keys("美国大选")
# Locate by class name
# br.find_element_by_class_name("s_ipt").send_keys("塔朗普")
# Locate by tag name
# br.find_element_by_tag_name("input").send_keys("拜登")
# Locate by link text
# br.find_element_by_link_text("hao123").click()
# Locate by partial link text
# br.find_element_by_partial_link_text("闻").click()
# Locate by xpath
# br.find_element_by_xpath("//*[@id='kw']").send_keys("巧碧螺")
# Locate by css selector and type a query into the search box
# NOTE(review): the find_element_by_* helpers were removed in Selenium 4 —
# confirm the pinned selenium version still provides them.
br.find_element_by_css_selector("#kw").send_keys("张大仙")
|
from app import app, db
import pandas as pd
import sqlalchemy as sa
from uszipcode import ZipcodeSearchEngine
import numpy as np
import datetime
from sklearn import preprocessing
import xml.etree.ElementTree as ET
#to check if the db is empty
def is_db_empty():
    """Return True when the configured SQLAlchemy database has no tables."""
    engine = sa.create_engine(app.config['SQLALCHEMY_DATABASE_URI'])
    table_names = sa.inspect(engine).get_table_names()
    return table_names == []
#code to initialise the db first time to run the app
def initialise_database():
    """Populate the database from the bundled CSV datasets.

    Loads the zipcode, crime and weather CSVs, attaches each zipcode's KML
    boundary (converted to lat/lng dicts by getCoordinates), and
    (re)creates the 'zipcode', 'crime_data' and 'weather' tables.
    """
    # reads the csv files to import the data ('Unnamed: 0' is the stray
    # index column written by an earlier to_csv)
    zipcode_data = pd.read_csv('datasets/app_zipcode_data.csv').drop('Unnamed: 0', axis = 1)
    crime_data = pd.read_csv('datasets/app_crime_data.csv').drop('Unnamed: 0', axis = 1)
    weather_data = pd.read_csv('datasets/app_weather_data.csv').drop('Unnamed: 0', axis = 1)
    zipcodes_boundaries = pd.read_csv('datasets/zipcodes_boundaries.csv')
    # used to import the dataframe data into database
    con = sa.create_engine(app.config['SQLALCHEMY_DATABASE_URI'])
    # add for each of the zipcodes in the table the boundary from the dataframe
    zipcode_data = (pd.merge(zipcode_data, zipcodes_boundaries, left_on=['zipcode'], right_on=['ZIP'])).drop(['ZIP'], axis = 1)
    zipcode_data['geometry'] = zipcode_data['geometry'].apply(getCoordinates)
    # stringified so the list-of-dicts fits in a plain text column
    zipcode_data['geometry'] = zipcode_data['geometry'].astype(str)
    # populating the tables from the dataframes
    zipcode_data.to_sql(name='zipcode', if_exists='replace', index=False, con=con)
    crime_data.to_sql(name='crime_data', if_exists='replace', index=False, con=con)
    weather_data.to_sql(name='weather', if_exists='replace', index=False, con=con)
#gets coordinates of the zipcode boundary from KML text
def getCoordinates(s):
    """Parse a KML geometry fragment into Google-Maps-style points.

    Args:
        s: KML XML string, either a bare <Polygon> or a wrapper element
           with the coordinates nested one level deeper.

    Returns:
        List of {"lat": float, "lng": float} dicts for the boundary.
    """
    root = ET.fromstring(s)
    # A bare Polygon nests <coordinates> one level shallower than a
    # wrapped geometry element does.
    if root.tag == 'Polygon':
        coord_text = root[0][0][0].text
    else:
        coord_text = root[0][0][0][0].text
    points = []
    # KML stores "lng,lat[,alt]" triples separated by spaces.
    for triple in coord_text.split(' '):
        parts = triple.split(',')
        points.append({"lat": float(parts[1]), "lng": float(parts[0])})
    return points
#adding crime data to the database
def add_crime_data_to_DB(crimes_raw):
    """Split the raw crime feed into two subsets and write both to the
    'crime_data' table.

    Args:
        crimes_raw: Raw crime DataFrame (see import_crime_data for the
            expected columns).
    """
    crimes_cut = import_crime_data(crimes_raw)
    # First code group presumably covers violent crime, second burglary —
    # confirm against the source's crime-code dictionary.
    vcrime = crimes_cut.loc[
        crimes_cut['crime_code'].isin(['210', '220', '230', '231', '623', '624', '110', '120', '121'])]
    burglary = crimes_cut.loc[crimes_cut['crime_code'].isin(['310', '320'])]
    # used to import the dataframe data into database
    con = sa.create_engine(app.config['SQLALCHEMY_DATABASE_URI'])
    # crime_type 1 replaces the table, crime_type 2 appends to it.
    data = import_crime_type(vcrime, 1)
    data.to_sql(name='crime_data', if_exists='replace', index=False, con=con)
    data = import_crime_type(burglary, 2)
    data.to_sql(name='crime_data', if_exists='append', index=False, con=con)
#preprocess the imported data
def import_crime_data(crimes_raw):
    """Preprocess the raw crime feed.

    Selects/renames the needed columns, filters by date window and crime
    code, buckets the hour into three 8-hour ranges, splits the location
    string into coordinates, and resolves each record to a zipcode.

    Args:
        crimes_raw: DataFrame with 'Date Occurred', 'Time Occurred',
            'Crime Code' and 'Location ' columns (note the trailing space).

    Returns:
        DataFrame with date, time (0/1/2), crime_code, latitude, longitude
        and zipcode columns.
    """
    # selecting only the columns we will be interested in
    crimes_cut = crimes_raw[['Date Occurred', 'Time Occurred', 'Crime Code', 'Location ']]
    # renaming the columns
    crimes_cut.rename(
        columns={'Date Occurred': 'date', 'Time Occurred': 'time', 'Crime Code': 'crime_code', 'Location ': 'location'},
        inplace=True)
    # getting the data from the last two years
    # NOTE(review): the comparison is lexicographic on strings — works only
    # if 'date' is formatted YYYY-MM-DD; confirm upstream format.
    dates_filter = (crimes_cut['date'] > '2015-12-31') & (crimes_cut['date'] < '2018-06-01')
    crimes_cut = crimes_cut[dates_filter]
    # getting the data for certain crime codes (in this case violent crime and burglaries)
    crimes_cut = crimes_cut.loc[
        crimes_cut['crime_code'].isin(['210', '220', '230', '231', '623', '624', '110', '120', '121', '310', '320'])];
    # changing values of time feature: HHMM integer -> hour of day
    crimes_cut['time'] = (crimes_cut['time']/100).astype(int)
    # NOTE(review): is_copy is deprecated/removed in newer pandas — this
    # only silences the SettingWithCopy warning.
    crimes_cut.is_copy = False
    # creating categories for the hours (by 8 hour groups)
    crimes_cut.loc[(crimes_cut['time'] >= 0) & (crimes_cut['time'] < 8), 'time'] = 0
    crimes_cut.loc[(crimes_cut['time'] >= 8) & (crimes_cut['time'] < 16), 'time'] = 1
    crimes_cut.loc[(crimes_cut['time'] >= 16) & (crimes_cut['time'] < 24), 'time'] = 2
    # creating latitude and longitude columns from "(lat, lng)" strings
    crimes_cut[['latitude', 'longitude']] = crimes_cut['location'].str.split(',\s+', expand=True)
    crimes_cut['latitude'] = crimes_cut['latitude'].str.replace("(", '').astype(float)
    crimes_cut['longitude'] = crimes_cut['longitude'].str.replace(")", '').astype(float)
    crimes_cut = crimes_cut.drop(['location'], axis=1)
    # get the zipcodes based on coordinates
    search = ZipcodeSearchEngine()
    # deleting the records that have null values or 0 in the relevant columns
    crimes_cut = crimes_cut.dropna(subset=['date', 'time', 'crime_code', 'latitude', 'longitude'])
    crimes_cut = crimes_cut[(crimes_cut['latitude'] != 0) & (crimes_cut['longitude'] != 0)]
    # one reverse-geocode per record; this is the slow step of the import
    codes = [(search.by_coordinate(lat, lng, returns = 1))[0].Zipcode for lat, lng in zip(crimes_cut['latitude'], crimes_cut['longitude'])]
    crimes_cut['zipcode'] = codes
    return crimes_cut
#create a dataset that contains all the combinations of zipcode, date and time
def df_crossjoin(df1, df2, **kwargs):
    """Return the Cartesian product of two dataframes.

    Implements the classic constant-join-key trick: both inputs get a
    temporary '_tmpkey' column, are merged on it, and the helper column is
    stripped from the result and from the callers' frames again.
    """
    df1['_tmpkey'] = 1
    df2['_tmpkey'] = 1
    crossed = pd.merge(df1, df2, on='_tmpkey', **kwargs)
    crossed = crossed.drop('_tmpkey', axis=1)
    # Restore the callers' frames (merge keys were added in place).
    df1.drop('_tmpkey', axis=1, inplace=True)
    df2.drop('_tmpkey', axis=1, inplace=True)
    return crossed
# add to the database the crime data by crime type
def import_crime_type(df, type):
    """Aggregate one crime subset into the per-zipcode feature table.

    Builds every (zipcode, date, time) combination, attaches trailing
    1/6/24-month counts (per location, per location+time, per time),
    min-max normalises them, and returns only the current month's rows
    tagged with `type` (caller convention: 1 = violent crime,
    2 = burglary).

    NOTE(review): the parameter name `type` shadows the builtin; renaming
    it would change the call interface, so it is kept as-is.
    """
    # counting the crimes by zipcode, date and time range
    counts = pd.DataFrame({'count': df.groupby(['zipcode', 'date', 'time']).size()}).reset_index()
    dfzipcode = pd.DataFrame({'zipcode' : df['zipcode'].unique()}).sort_values(by = 'zipcode')
    dfdate = pd.DataFrame({'date' : df['date'].unique()}).sort_values(by = 'date')
    dftime = pd.DataFrame({'time' : df['time'].unique()}).sort_values(by = 'time')
    dfx = df_crossjoin(dfzipcode, dfdate, suffixes=('_orig', '_dest'))
    aux = df_crossjoin(dfx, dftime, suffixes=('_orig', '_dest'))
    # create a dataset that contains all the combinations of zipcode, date and time
    data = pd.merge(aux, counts, on=['zipcode', 'date', 'time'], how='outer')
    # combinations with no recorded crimes get a zero count
    data['count'].fillna(0, inplace=True)
    # adding a column with year and month as it is needed for countings
    # NOTE(review): x.year/x.month require 'date' to hold datetime-like
    # values — confirm the dates are parsed before reaching this point.
    data['year_month'] = data['date'].map(lambda x: 100 * x.year + x.month)
    data.rename(columns={'count': 'vcrime'}, inplace=True)
    temp = count_by_loc(data)
    temp_time = count_by_loc_time(data)
    temp_t = count_by_time(data)
    # merging into the train dataset to contain the number of robberies happened the previous month, and the previous 6 months
    data = pd.merge(data, temp, on=['zipcode', 'year_month'])
    # merging into the train dataset to contain the number of robberies happened at different times the previous 6 months
    data = pd.merge(data, temp_time, on=['zipcode', 'year_month', 'time'])
    # merging into the train dataset to contain the number of robberies happened at different times the previous 6 months
    data = pd.merge(data, temp_t, on=['year_month', 'time'])
    zipcodes = pd.read_sql_table("zipcode", con=app.config['SQLALCHEMY_DATABASE_URI'])
    zipcodes = zipcodes[["zipcode", "population"]]
    # changing the type from object to int and then merge with data
    data['zipcode'] = data['zipcode'].astype('int')
    zipcodes['zipcode'] = zipcodes['zipcode'].astype('int')
    data = pd.merge(data, zipcodes, on='zipcode')
    # for normalising the data values
    scaler = preprocessing.MinMaxScaler()
    scaled_data = scaler.fit_transform(data.loc[:, ['count_1m_loc', 'count_6m_loc', 'count_2y_loc',
                                                    'count_1m_loc_time', 'count_6m_loc_time', 'count_2y_loc_time',
                                                    'count_1m_time', 'count_6m_time', 'count_2y_time',
                                                    ]])
    scaled_data = pd.DataFrame(scaled_data, columns=['count_1m_loc', 'count_6m_loc', 'count_2y_loc',
                                                     'count_1m_loc_time', 'count_6m_loc_time', 'count_2y_loc_time',
                                                     'count_1m_time', 'count_6m_time', 'count_2y_time',
                                                     ])
    # reset the index so the positional assignment below lines up with the
    # scaler output (which has a fresh 0..n-1 index)
    data = data.reset_index().drop('index', axis=1)
    data.loc[:,
             ['count_1m_loc', 'count_6m_loc', 'count_2y_loc', 'count_1m_loc_time', 'count_6m_loc_time', 'count_2y_loc_time',
              'count_1m_time', 'count_6m_time', 'count_2y_time']] = scaled_data.loc[:,
                                                                                    ['count_1m_loc', 'count_6m_loc', 'count_2y_loc',
                                                                                     'count_1m_loc_time', 'count_6m_loc_time',
                                                                                     'count_2y_loc_time',
                                                                                     'count_1m_time', 'count_6m_time', 'count_2y_time',
                                                                                     ]]
    # to only extract this months details
    now = datetime.datetime.now()
    date = 100 * now.year + now.month
    data['year_month'] = data['year_month'].astype(int)
    data = data[data['year_month'] == date]
    # creating an additional column (guarded against division by zero below)
    data.loc[:,'crimes_per_pop'] = data['count_2y_loc']/data['population']
    data = data.replace(np.inf, 0)
    # dropping the unnecessary columns and the resulting duplicates
    data = data.drop(['date', 'vcrime', 'population'], axis=1)
    data = data.drop_duplicates()
    data.loc[:,'crime_type'] = int(type)
    scaled_data = scaler.fit_transform(data.loc[:, ['crimes_per_pop']])
    scaled_data = pd.DataFrame(scaled_data, columns=['crimes_per_pop'])
    data.loc[:, ['crimes_per_pop']] = scaled_data.loc[:, ['crimes_per_pop']]
    data = data.replace(np.inf, 0)
    data = data.fillna(0)
    return data
# counting the robberies by zipcode that happened prev month, 6 months and 2 years
def count_by_loc(data):
    """Attach trailing crime counts per zipcode.

    For every (zipcode, year_month) row, computes the total 'vcrime' over
    the previous 1, 6 and 24 months (current month excluded via shift).

    Args:
        data: DataFrame with 'zipcode', 'year_month' and 'vcrime' columns.

    Returns:
        DataFrame with zipcode, year_month and count_{1m,6m,2y}_loc columns.
    """
    temp = data.groupby(['zipcode', 'year_month']).agg({'vcrime': 'sum'}).reset_index()
    temp['order_within_group'] = temp.groupby('zipcode').cumcount()

    def _trailing_sum(series, window):
        # Sum over the previous `window` rows, excluding the current one.
        return series.rolling(window=window, min_periods=0).sum().shift().fillna(0)

    # BUG FIX / modernization: pd.rolling_sum was removed from pandas;
    # use Series.rolling via groupby().transform (index-aligned).
    grouped = temp.groupby('zipcode')['vcrime']
    temp['count_1m_loc'] = grouped.transform(lambda x: _trailing_sum(x, 1))
    # counting the v crimes in the previous 6 months for each zipcode
    temp['count_6m_loc'] = grouped.transform(lambda x: _trailing_sum(x, 6))
    temp['count_2y_loc'] = grouped.transform(lambda x: _trailing_sum(x, 24))
    temp = temp.drop(['vcrime', 'order_within_group'], axis=1)
    return temp
# counting the robberies by zipcode, time and month in prev month, 6 months and 2 years
def count_by_loc_time(data):
    """Attach trailing crime counts per (zipcode, time) slot.

    For every (zipcode, time, year_month) row, computes the total 'vcrime'
    over the previous 1, 6 and 24 months within the same 8-hour time slot
    (current month excluded via shift).

    Args:
        data: DataFrame with 'zipcode', 'time', 'year_month', 'vcrime'.

    Returns:
        DataFrame with zipcode, time, year_month and
        count_{1m,6m,2y}_loc_time columns.
    """
    temp_time = data.groupby(['zipcode', 'time', 'year_month']).agg({'vcrime': 'sum'}).reset_index()
    temp_time['order_within_group'] = temp_time.groupby('zipcode').cumcount()

    def _trailing_sum(series, window):
        # Sum over the previous `window` rows, excluding the current one.
        return series.rolling(window=window, min_periods=0).sum().shift().fillna(0)

    # BUG FIX / modernization: pd.rolling_sum was removed from pandas;
    # use Series.rolling via groupby().transform (index-aligned).
    grouped = temp_time.groupby(['zipcode', 'time'])['vcrime']
    temp_time['count_1m_loc_time'] = grouped.transform(lambda x: _trailing_sum(x, 1))
    # counting the robberies in the previous 6 months for each zipcode and time range
    temp_time['count_6m_loc_time'] = grouped.transform(lambda x: _trailing_sum(x, 6))
    temp_time['count_2y_loc_time'] = grouped.transform(lambda x: _trailing_sum(x, 24))
    # droping columns
    temp_time = temp_time.drop(['vcrime', 'order_within_group'], axis=1)
    return temp_time
# counting the robberies by time in prev month, 6 months and 2 years
def count_by_time(data):
    """Attach city-wide trailing crime counts per time slot.

    For every (time, year_month) row, computes the total 'vcrime' over the
    previous 1, 6 and 24 months within the same 8-hour time slot (current
    month excluded via shift).

    Args:
        data: DataFrame with 'time', 'year_month' and 'vcrime' columns.

    Returns:
        DataFrame with time, year_month and count_{1m,6m,2y}_time columns.
    """
    # counting the robberies by time and month across all zipcodes
    temp_t = data.groupby(['time', 'year_month']).agg({'vcrime': 'sum'}).reset_index()
    # temp_t['order_within_group'] = temp_t.groupby('zipcode').cumcount()

    def _trailing_sum(series, window):
        # Sum over the previous `window` rows, excluding the current one.
        return series.rolling(window=window, min_periods=0).sum().shift().fillna(0)

    # BUG FIX / modernization: pd.rolling_sum was removed from pandas;
    # use Series.rolling via groupby().transform (index-aligned).
    grouped = temp_t.groupby(['time'])['vcrime']
    temp_t['count_1m_time'] = grouped.transform(lambda x: _trailing_sum(x, 1))
    # counting the robberies in the previous 6 months for each time range
    temp_t['count_6m_time'] = grouped.transform(lambda x: _trailing_sum(x, 6))
    temp_t['count_2y_time'] = grouped.transform(lambda x: _trailing_sum(x, 24))
    # droping columns
    temp_t = temp_t.drop(['vcrime'], axis=1)
    return temp_t
import torch.nn as nn
import torch.nn.functional as F
class NetworkNvidia(nn.Module):
    """PyTorch implementation of the NVIDIA end-to-end steering model.

    Five convolutional feature-extraction layers followed by dropout and
    four fully connected layers that regress a single steering angle.

    Layer summary (as implemented below):
        Conv 5x5, 24 filters, stride 2, ELU
        Conv 5x5, 36 filters, stride 2, ELU
        Conv 5x5, 48 filters, stride 2, ELU
        Conv 3x3, 64 filters, ELU
        Conv 3x3, 64 filters
        Dropout(0.5)
        FC 100, ELU
        FC 50, ELU
        FC 10
        FC 1 (output)

    NOTE(review): unlike the paper summary, no ELU follows the fifth conv
    or the 50->10 linear layer — confirm this is intentional before
    "fixing" it (changing it would invalidate saved checkpoints).

    The ELU activation mitigates the vanishing-gradient problem; the conv
    stack handles feature extraction and the FC head predicts the angle.
    """

    def __init__(self):
        """Build the convolutional and fully connected sub-networks."""
        super(NetworkNvidia, self).__init__()
        self.conv_layers = nn.Sequential(
            nn.Conv2d(3, 24, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(24, 36, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(36, 48, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(48, 64, 3),
            nn.ELU(),
            nn.Conv2d(64, 64, 3),
            nn.Dropout(0.5)
        )
        # 64 channels x 2 x 33 spatial positions for a 70x320 input image.
        self.linear_layers = nn.Sequential(
            nn.Linear(in_features=64 * 2 * 33, out_features=100),
            nn.ELU(),
            nn.Linear(in_features=100, out_features=50),
            nn.ELU(),
            nn.Linear(in_features=50, out_features=10),
            nn.Linear(in_features=10, out_features=1)
        )

    def forward(self, input):
        """Forward pass.

        Args:
            input: Tensor reshapeable to (batch, 3, 70, 320).

        Returns:
            Tensor of shape (batch, 1): the predicted steering angle.
        """
        input = input.view(input.size(0), 3, 70, 320)
        output = self.conv_layers(input)
        # print(output.shape)
        output = output.view(output.size(0), -1)
        output = self.linear_layers(output)
        return output
class LeNet(nn.Module):
    """Classic LeNet-5-style CNN: two conv+pool stages, three FC layers."""

    def __init__(self):
        """Build the feature extractor and the classifier head."""
        super(LeNet, self).__init__()
        # Feature extractor: 3-channel input -> 6 -> 16 maps, 5x5 kernels.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Classifier head: 16*5*5 flattened features -> 10 class scores.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Run a forward pass and return the raw class scores."""
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)
|
"""Utilities for stitching south east asia domains
Example Use:
stitch_and_save(year=2017, month=1,
input_pattern="wrfout_d0{domain}_{year}-{month}*",
out_pattern="{year}/{month}/", overlap=15, n_domains=4,
max_levels=10)
This will combine 4 domains, for Jan 2017, using an overlap of 15 grid
points the blend the domain edges, and save only the first 10 pressure
levels. The stitched files will be saved in the directory specified with
out_pattern.
"""
# -*- coding: utf-8 -*-
import xarray as xr
import numpy as np
import logging
from importlib import import_module
import glob
import os
logger = logging.getLogger(__name__)
SAVE_FEATURES = ['U', 'V', 'PHB', 'PH', 'HGT', 'P', 'PB', 'T', 'Times']
class Regridder:
    """Regridder class for stitching domains.

    Wraps an xesmf bilinear regridder that maps a curvilinear WRF grid
    onto a regular lat/lon grid spanning the requested bounding box.
    """
    # Third-party packages required beyond the base environment.
    DEPENDENCIES = ['xesmf']

    def __init__(self, lats, lons, min_lat, max_lat, min_lon, max_lon,
                 n_lats, n_lons):
        """
        Parameters
        ----------
        lats : ndarray
            Array of latitudes for input grid
        lons : ndarray
            Array of longitudes for input grid
        min_lat : float
            Minimum lat for output grid
        max_lat : float
            Maximum lat for output grid
        min_lon : float
            Minimum lon for output grid
        max_lon : float
            Maximum lon for output grid
        n_lats : int
            Number of lats for output grid
        n_lons : int
            Number of lons for output grid
        """
        self.check_dependencies()
        import xesmf as xe
        self.grid_in = {'lat': lats, 'lon': lons}
        # Build the regular target grid as 2D meshes (xesmf convention).
        lons, lats = np.meshgrid(np.linspace(min_lon, max_lon, n_lons),
                                 np.linspace(min_lat, max_lat, n_lats))
        self.grid_out = {'lat': lats, 'lon': lons}
        # Stacked (n_lats, n_lons, 2) array of target coordinates, kept for
        # downstream consumers.
        self.new_lat_lon = np.zeros((*lats.shape, 2))
        self.new_lat_lon[..., 0] = lats
        self.new_lat_lon[..., 1] = lons
        self.regridder = xe.Regridder(self.grid_in, self.grid_out,
                                      method='bilinear')

    @classmethod
    def check_dependencies(cls):
        """Check special dependencies for stitching module.

        Raises
        ------
        ModuleNotFoundError
            If any package in DEPENDENCIES cannot be imported.
        """
        missing = []
        for name in cls.DEPENDENCIES:
            try:
                import_module(name)
            except ModuleNotFoundError:
                missing.append(name)
        if any(missing):
            msg = ('The sup3r stitching module depends on the following '
                   'special dependencies that were not found in the active '
                   'environment: {}'.format(missing))
            logger.error(msg)
            raise ModuleNotFoundError(msg)

    def regrid_data(self, data_in):
        """Regrid data to output grid.

        Parameters
        ----------
        data_in : xarray.Dataset
            input data handle

        Returns
        -------
        data_out : xarray.Dataset
            output data handle, renamed back to WRF-style dimension and
            coordinate names (XLAT/XLONG with a leading Time axis)
        """
        # Times pass through unregridded; re-attach them afterwards.
        times = data_in.Times.values
        data_out = self.regridder(data_in)
        data_out = data_out.rename({'lat': 'XLAT', 'lon': 'XLONG'})
        data_out = data_out.rename({'x': 'west_east', 'y': 'south_north'})
        data_out['Times'] = ('Time', times)
        # WRF stores XLAT/XLONG with a Time dimension: tile the static
        # 2D coordinate arrays across all time steps.
        data_out['XLAT'] = (('Time', 'south_north', 'west_east'),
                            np.repeat(np.expand_dims(data_out['XLAT'].values,
                                                     axis=0),
                                      len(times), axis=0))
        data_out['XLONG'] = (('Time', 'south_north', 'west_east'),
                             np.repeat(np.expand_dims(data_out['XLONG'].values,
                                                      axis=0),
                                       len(times), axis=0))
        return data_out
def get_files(year, month, input_pattern, out_pattern, n_domains=4):
    """Resolve per-domain input files and the matching stitched output names.

    Parameters
    ----------
    year : int
        Year for input files
    month : int
        Month for input files
    input_pattern : str
        Input file glob pattern containing {month}, {year} and {domain}
    out_pattern : str
        Output directory pattern containing {month} and {year}
    n_domains : int
        Number of domains to stitch together

    Returns
    -------
    input_files : dict
        Input file lists keyed by zero-based domain index (key 0 is
        domain 1, the largest)
    out_files : list
        Output file name for each time step of the largest domain
    """
    month_str = str(month).zfill(2)
    # One glob pattern per domain, in domain order 1..n_domains.
    patterns = [input_pattern.format(year=year, month=month_str, domain=d)
                for d in range(1, n_domains + 1)]
    input_files = {d: sorted(glob.glob(patterns[d]))
                   for d in range(n_domains)}
    out_dir = out_pattern.format(year=year, month=month_str)
    # One stitched output per time step of the outer domain's file list.
    out_files = []
    for fpath in input_files[0]:
        name = os.path.basename(fpath).replace('custom_wrfout_d01',
                                               'stitched_wrfout')
        out_files.append(os.path.join(out_dir, name))
    return input_files, out_files
def get_handles(input_files):
    """Open every domain file, keeping only the fields needed for stitching.

    Parameters
    ----------
    input_files : list
        One file per domain; the first entry must belong to the largest
        domain.

    Returns
    -------
    handles : list
        xarray.Dataset per domain, restricted to SAVE_FEATURES.
    """
    handles = []
    for path in input_files:
        logger.info(f'Getting handle for {path}')
        # Subset immediately so unused fields never stay in memory.
        dataset = xr.open_dataset(path)[SAVE_FEATURES]
        handles.append(dataset)
    return handles
def unstagger_vars(handles):
    """Average staggered fields onto the unstaggered (mass) grid.

    Parameters
    ----------
    handles : list
        List of xarray.Dataset objects for each domain

    Returns
    -------
    handles : list
        Same list, with U, V, PHB and PH replaced by forward averages
        along their respective staggered axes.
    """
    dims = ('Time', 'bottom_top', 'south_north', 'west_east')
    # Each staggered field and the axis it is staggered on:
    # U on west_east (3), V on south_north (2), PHB/PH on bottom_top (1).
    staggered = (('U', 3), ('V', 2), ('PHB', 1), ('PH', 1))
    for i, handle in enumerate(handles):
        for name, axis in staggered:
            handles[i][name] = (dims, np.apply_along_axis(forward_avg, axis,
                                                          handle[name]))
    return handles
def prune_levels(handles, max_level=15):
    """Drop upper pressure levels to reduce the memory footprint.

    Parameters
    ----------
    handles : list
        List of xarray.Dataset objects for each domain
    max_level : int
        Highest bottom_top index to keep (inclusive)

    Returns
    -------
    handles : list
        Same list, each dataset restricted to levels 0..max_level.
    """
    keep = dict(bottom_top=slice(0, max_level))
    for idx, dataset in enumerate(handles):
        handles[idx] = dataset.loc[keep]
    return handles
def regrid_main_domain(handles):
    """Regrid the outer (largest) domain onto a 3x-refined regular grid.

    Parameters
    ----------
    handles : list
        List of xarray.Dataset objects for each domain

    Returns
    -------
    handles : list
        Same list, with handles[0] replaced by its regridded version.
    """
    outer = handles[0]
    lat_min = np.min(outer.XLAT)
    lon_min = np.min(outer.XLONG)
    lat_max = np.max(outer.XLAT)
    lon_max = np.max(outer.XLONG)
    n_lons = outer.XLAT.shape[-1]
    n_lats = outer.XLAT.shape[1]
    # Triple the native resolution so nested domains can later be imputed
    # onto matching sub-grids.
    main_regridder = Regridder(outer.XLAT[0], outer.XLONG[0],
                               lat_min, lat_max, lon_min, lon_max,
                               3 * n_lats, 3 * n_lons)
    handles[0] = main_regridder.regrid_data(outer)
    return handles
def forward_avg(array_in):
    """Midpoint average of consecutive samples (one element shorter);
    used to unstagger WRF fields onto the mass grid."""
    return 0.5 * (array_in[:-1] + array_in[1:])
def blend_domains(arr1, arr2, overlap=50):
    """Blend smaller domain edges.

    The weight ramps linearly from pure arr1 at the outermost row/column
    (alpha = 0) to pure arr2 `overlap` points in, on all four edges of the
    last two axes.

    Parameters
    ----------
    arr1 : ndarray
        Data array for largest domain
    arr2 : ndarray
        Data array for nested domain to stitch into larger domain
    overlap : int
        Number of grid points to use for blending edges

    Returns
    -------
    out : ndarray
        Data array with smaller domain blended into larger domain
    """
    out = arr2.copy()
    for i in range(overlap):
        alpha = i / overlap
        beta = 1 - alpha
        out[..., i, :] = out[..., i, :] * alpha + arr1[..., i, :] * beta
        # BUG FIX: the original indexed the far edges with -i, and -0 == 0,
        # so the far edge's outermost row/column was never blended while
        # index 0 was written twice; -(i + 1) mirrors the near edge.
        out[..., -(i + 1), :] = (out[..., -(i + 1), :] * alpha
                                 + arr1[..., -(i + 1), :] * beta)
        out[..., :, i] = out[..., :, i] * alpha + arr1[..., :, i] * beta
        out[..., :, -(i + 1)] = (out[..., :, -(i + 1)] * alpha
                                 + arr1[..., :, -(i + 1)] * beta)
    return out
def get_domain_region(handles, domain_num):
    """Locate a nested domain's footprint within the main-domain grid.

    Parameters
    ----------
    handles : list
        List of xarray.Dataset objects for each domain
    domain_num : int
        Domain number to get grid range for

    Returns
    -------
    lat_range : slice
        Main-grid latitude index range covered by the nested domain
    lon_range : slice
        Main-grid longitude index range covered by the nested domain
    min_lat : float
        Minimum lat for smaller domain
    max_lat : float
        Maximum lat for smaller domain
    min_lon : float
        Minimum lon for smaller domain
    max_lon : float
        Maximum lon for smaller domain
    n_lats : int
        Number of main-grid lats inside the nested domain's bbox
    n_lons : int
        Number of main-grid lons inside the nested domain's bbox
    """
    # Main domain is regular after regridding: 1D lat/lon axes suffice.
    main_lats = handles[0].XLAT[0, :, 0]
    main_lons = handles[0].XLONG[0, 0, :]
    nested = handles[domain_num]
    min_lat = np.min(nested.XLAT.values)
    min_lon = np.min(nested.XLONG.values)
    max_lat = np.max(nested.XLAT.values)
    max_lon = np.max(nested.XLONG.values)
    # Main-grid indices whose coordinates fall inside the nested bbox.
    lat_hits = np.flatnonzero((min_lat <= main_lats)
                              & (main_lats <= max_lat))
    lon_hits = np.flatnonzero((min_lon <= main_lons)
                              & (main_lons <= max_lon))
    lat_range = slice(lat_hits[0], lat_hits[-1] + 1)
    lon_range = slice(lon_hits[0], lon_hits[-1] + 1)
    return (lat_range, lon_range, min_lat, max_lat, min_lon, max_lon,
            len(lat_hits), len(lon_hits))
def impute_domain(handles, domain_num, overlap=50):
    """Impute smaller domain in largest domain.

    Regrids the nested domain onto the exact sub-grid of the main domain
    it covers, then blends each field into handles[0] in place.

    Parameters
    ----------
    handles : list
        List of xarray.Dataset objects for each domain
    domain_num : int
        Domain number to stitch into largest domain
    overlap : int
        Number of grid points to use for blending edges

    Returns
    -------
    handles : list
        List of xarray.Dataset objects for each domain; handles[0] has the
        nested domain blended in, handles[domain_num] is regridded.
    """
    out = get_domain_region(handles, domain_num)
    (lat_range, lon_range, min_lat, max_lat, min_lon,
     max_lon, n_lats, n_lons) = out
    # Match the nested domain to the main grid's spacing over its bounding
    # box so the arrays line up index-for-index for blending.
    regridder = Regridder(handles[domain_num].XLAT[0],
                          handles[domain_num].XLONG[0],
                          min_lat, max_lat, min_lon, max_lon, n_lats, n_lons)
    handles[domain_num] = regridder.regrid_data(handles[domain_num])
    for field in handles[0]:
        # Times is 1D metadata and must not be blended.
        if field not in ['Times']:
            arr1 = handles[0][field].loc[dict(south_north=lat_range,
                                              west_east=lon_range)]
            arr2 = handles[domain_num][field]
            out = blend_domains(arr1, arr2, overlap=overlap)
            handles[0][field].loc[dict(south_north=lat_range,
                                       west_east=lon_range)] = out
    return handles
def stitch_domains(year, month, time_step, input_files, overlap=50,
                   n_domains=4, max_level=15):
    """Stitch all smaller domains into largest domain.

    Pipeline: open one file per domain for the requested time step,
    unstagger, prune pressure levels, regrid the main domain, then impute
    each finer domain into ``handles[0]`` in order.

    Parameters
    ----------
    year : int
        Year for input files
    month : int
        Month for input files
    time_step : int
        Time step for input files for the specified month. e.g. if year=2017,
        month=3, time_step=0 this will select the file for the first time step
        of 2017-03-01. If None then stitch and save will be done for full
        month.
    input_files : dict
        Dictionary of input files with keys corresponding to domain number
    overlap : int
        Number of grid points to use for blending edges
    n_domains : int
        Number of domains to stitch together
    max_level : int
        Max pressure level index

    Returns
    -------
    handles : list
        List of xarray.Dataset objects with smaller domains stitched into
        handles[0]
    """
    logger.info(f'Getting domain files for year={year}, month={month},'
                f' timestep={time_step}.')
    # One file per domain for this single time step.
    step_files = [input_files[d][time_step] for d in range(n_domains)]
    logger.info(f'Getting data handles for files: {step_files}')
    handles = get_handles(step_files)
    logger.info('Unstaggering variables for all handles')
    handles = unstagger_vars(handles)
    logger.info(f'Pruning pressure levels to level={max_level}')
    handles = prune_levels(handles, max_level=max_level)
    logger.info(f'Regridding main domain for year={year}, month={month}, '
                f'timestep={time_step}')
    handles = regrid_main_domain(handles)
    # Impute finer domains in order (domain 0 is the largest/base domain).
    for j in range(1, n_domains):
        logger.info(f'Imputing domain {j + 1} for year={year}, '
                    f'month={month}, timestep={time_step}')
        handles = impute_domain(handles, j, overlap=overlap)
    return handles
def stitch_and_save(year, month, input_pattern, out_pattern,
                    time_step=None, overlap=50, n_domains=4, max_level=15,
                    overwrite=False):
    """Stitch all smaller domains into largest domain and save output.

    Parameters
    ----------
    year : int
        Year for input files
    month : int
        Month for input files
    time_step : int
        Time step for input files for the specified month. e.g. if year=2017,
        month=3, time_step=0 this will select the file for the first time step
        of 2017-03-01. If None then stitch and save will be done for full
        month.
    input_pattern : str
        Pattern for input files. Assumes pattern contains {month}, {year}, and
        {domain}
    out_pattern : str
        Pattern for output files
    overlap : int
        Number of grid points to use for blending edges
    n_domains : int
        Number of domains to stitch together
    max_level : int
        Max pressure level index
    overwrite : bool
        Whether to overwrite existing files
    """
    logger.info(f'Getting file patterns for year={year}, month={month}')
    input_files, out_files = get_files(year, month, input_pattern,
                                       out_pattern, n_domains=n_domains)
    # BUG FIX: the previous slice `out_files[time_step - 1: time_step]`
    # produced an empty list for the documented 0-based time_step=0 case and
    # was off by one for every other value.
    out_files = (out_files if time_step is None
                 else out_files[time_step:time_step + 1])
    for i, out_file in enumerate(out_files):
        # BUG FIX: when a single time_step is requested, out_files holds one
        # entry and i is always 0, so the matching input index is time_step
        # itself — previously `i` was passed, stitching the wrong input file.
        step = i if time_step is None else time_step
        if not os.path.exists(out_file) or overwrite:
            handles = stitch_domains(year, month, step, input_files,
                                     overlap=overlap, n_domains=n_domains,
                                     max_level=max_level)
            basedir = os.path.dirname(out_file)
            os.makedirs(basedir, exist_ok=True)
            handles[0].to_netcdf(out_file)
            logger.info(f'Saved stitched file to {out_file}')
|
# Global counter shared by every traced function.
calls = 0


def tracer(func):
    """Decorator that counts and announces each call to *func*.

    Increments the module-level ``calls`` counter and prints a trace line
    before delegating to the wrapped function.
    """
    from functools import wraps

    @wraps(func)  # FIX: preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):
        global calls
        calls += 1
        print(f"call {calls} to {func.__name__}")
        return func(*args, **kwargs)

    return wrapper
@tracer
def spam(a, b, c):  # same as : spam = tracer(spam)
    """Print the sum of the three arguments (call is traced by `tracer`)."""
    print(f"{a + b + c} inside spam")


@tracer
def eggs(x, y):
    """Print x raised to the power y (call is traced by `tracer`)."""
    print(f"{x ** y} inside eggs")


# Demo: triggers one trace line, then prints "6 inside spam".
spam(a=1, b=2, c=3)
|
def dot_product(vec1, vec2):
    """Return the dot product of two equal-length numeric vectors.

    Empty vectors yield 0.  Note: like ``zip``, extra trailing elements of
    the longer vector are ignored; the original raised IndexError only when
    ``vec2`` was shorter.
    """
    # FIX: avoid shadowing the builtin `sum` and iterate pairwise with zip
    # instead of indexing by range(len(...)).
    return sum(a * b for a, b in zip(vec1, vec2))


print(dot_product([1, 1], [1, 1]))
print(dot_product([1, 2], [1, 4]))
print(dot_product([1, 2, 1], [1, 4, 3]))
|
from django.http import HttpResponse, Http404,HttpResponseRedirect
from django.shortcuts import render,redirect
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, Http404,HttpResponseRedirect
from .forms import NewPostForm,ProfileForm,CommentForm,UserForm
from .models import Post,Profile,Comments
from django.contrib.auth.models import User
from django.db import transaction
# Create your views here.
@login_required(login_url='/accounts/login')
def index(request):
    """Home feed: render all posts and all comments (login required)."""
    gram = Post.this_post()
    comment = Comments.this_comment()
    return render(request, "index.html", {"gram": gram, "comment": comment})
def comment(request):
    """Create a comment from POSTed form data, or show an empty form on GET.

    NOTE(review): a second ``def comment`` later in this module shadows this
    view, making it unreachable under this name — confirm and rename one.
    NOTE(review): after a successful save the bound form is re-rendered
    instead of redirecting (double-submit risk) — confirm intent.
    """
    current_user = request.user
    if request.method == 'POST':
        form = CommentForm(request.POST, request.FILES)
        if form.is_valid():
            comment = form.save(commit=False)
            # Attach the author before persisting.
            comment.user = current_user
            comment.save()
    else:
        form = CommentForm()
    return render(request, 'comment.html', {"form": form})
@login_required(login_url='/accounts/login')
def post(request, post_id):
    """Display a single post by id; raise 404 if it does not exist."""
    try:
        post = Post.objects.get(id=post_id)
    # BUG FIX: `DoesNotExit` was an undefined name (typo), so a missing post
    # raised NameError instead of returning a 404. Django attaches the
    # exception to the model class as `Post.DoesNotExist`.
    except Post.DoesNotExist:
        raise Http404()
    return render(request, "post.html", {"post": post})
@login_required(login_url='/accounts/login/')
def new_post(request):
    """Create a post from POSTed form data, or show an empty form on GET.

    NOTE(review): on a valid POST the bound form is re-rendered rather than
    redirecting — confirm intent.
    """
    current_user = request.user
    if request.method == 'POST':
        form = NewPostForm(request.POST, request.FILES)
        if form.is_valid():
            post = form.save(commit=False)
            # Attach the author before persisting.
            post.user = current_user
            post.save()
    else:
        form = NewPostForm()
    return render(request, 'new_post.html', {"form": form},)
@login_required
@transaction.atomic
def update_profile(request):
    """Update the signed-in user's User and Profile atomically.

    GET renders both forms pre-filled; a valid POST saves both inside one
    transaction and redirects to the home page; an invalid POST re-renders
    the bound forms with errors.
    """
    if request.method == 'POST':
        user_form = UserForm(request.POST, instance=request.user)
        profile_form = ProfileForm(request.POST, request.FILES,
                                   instance=request.user.profile)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            return redirect('/')
    else:
        user_form = UserForm(instance=request.user)
        profile_form = ProfileForm(instance=request.user.profile)
    return render(request, 'profile.html', {
        'user_form': user_form,
        'profile_form': profile_form
    })
def comment(request):
    """Render the home feed template.

    NOTE(review): this redefinition shadows the comment-creation view of the
    same name defined earlier in this module — almost certainly unintentional.
    Rename one of them and update urls.py accordingly.
    """
    return render(request, 'index.html')
|
# vim:tw=50
"""Numbers
There are several numeric types built into Python,
including integers (types |int| and |long|),
floating point numbers (type |float|), and complex
numbers (type |complex|).
10 # This is an 'int'
10.5 # This is a 'float'
6 + 3.2j # This is a 'complex'
The interactive Python interpreter makes a nice
calculator, and unlike this tutorial, you don't
even have to type |print| there - the |repr| of
every operation is output automatically. Basic
math is easy - you can do addition, subtraction,
multiplication, division, and exponentiation,
among other things. Parentheses do what you would
expect.
Exercises
- Print the number of atoms in the
sun, as a large integer: |119 * 10 ** 55|.
Bonus Work
- Try opening an interactive Python prompt (in a
terminal, not here) and using it as a
calculator.
"""
# Basic numeric types: int, float, complex.
print("I'm an int:", 10)
print("I'm a float:", 2.79)
print("I'm complex:", 3.14 + 1j)

# Math is easy.
a = 1000.0

# Some basic math operators (each prints the label and the result):
print()
print("Basic Math Operators:")
print("Div:", a / 10.0)  # Divide by 10
print("Mul:", a * 10)  # Multiply by 10
print("Add:", a + 12)  # Add 12
print("Sub:", a - 15)  # Subtract 15
print("Exp:", a ** 5)  # Take a to the 5th power.

# Grouping: parentheses override the usual operator precedence.
print()
print("Parentheses:")
print("Multiplication before addition:", 3 + 2 * 5)
print("Force addition to come first:", (3 + 2) * 5)
|
# coding=utf-8
from datetime import datetime
from random import randint
from sqlalchemy.exc import IntegrityError
from faker import Faker
from . import db
from .models import User, Post, Comment, Tag
fake = Faker(locale='zh-CN')
def users(count=100):
    """Seed the database with *count* fake users.

    Unique-constraint collisions from the faker (duplicate email/username)
    are rolled back and retried until *count* users have been committed.
    """
    created = 0
    while created < count:
        user = User(email=fake.email(),
                    username=fake.user_name(),
                    password='123456',
                    confirmed=True,
                    name=fake.name(),
                    location=fake.city(),
                    about_me=fake.text(),
                    member_since=fake.past_date())
        db.session.add(user)
        try:
            db.session.commit()
        except IntegrityError:
            # Duplicate email/username — discard and try again.
            db.session.rollback()
        else:
            created += 1
def posts(count=100):
    """Seed the database with *count* fake posts.

    Each post gets a random existing author and one random existing tag;
    its timestamp falls after the author's member_since date.
    """
    n_users = User.query.count()
    n_tags = Tag.query.count()
    for _ in range(count):
        author = User.query.offset(randint(0, n_users - 1)).first()
        tag = Tag.query.offset(randint(0, n_tags - 1)).first()
        db.session.add(Post(
            body_html=fake.text(),
            title=fake.sentence(),
            timestamp=fake.date_time_between(start_date=author.member_since),
            author=author,
            tags=[tag]))
    db.session.commit()
def comments(count=100):
    """Seed the database with *count* fake comments.

    Each comment gets a random existing author and post; its timestamp is
    no earlier than both the author's member_since and the post's timestamp.
    """
    n_users = User.query.count()
    n_posts = Post.query.count()
    for _ in range(count):
        author = User.query.offset(randint(0, n_users - 1)).first()
        target = Post.query.offset(randint(0, n_posts - 1)).first()
        db.session.add(Comment(
            body_html=fake.sentence(),
            timestamp=fake.date_time_between(
                start_date=max(author.member_since, target.timestamp)),
            author=author,
            post=target))
    db.session.commit()
def tags(count=10):
    """Seed the database with *count* fake tags (single committed batch)."""
    for _ in range(count):
        db.session.add(Tag(name=fake.word()))
    db.session.commit()
|
from sys import stderr, exit, argv
from flask import Flask, jsonify, request, session, escape
app = Flask(__name__, static_url_path='/static')
@app.route('/feedback', methods=['POST'])
def store_feedback():
    """Append the posted JSON feedback blob to a local log file.

    Echoes the payload to stderr for debugging, appends it to
    INTERACTION_DATA.json, and returns an empty JSON body with 200.
    """
    post = request.get_json()
    # BUG FIX: the original used Python-2 `print >> stream, value`, which
    # under Python 3 parses as a right-shift expression and raises TypeError
    # at runtime. Use the print() function with file= instead.
    print(post, file=stderr)
    with open('INTERACTION_DATA.json', 'a') as f:
        print(post, file=f)
    return jsonify({}), 200
@app.after_request
def after_request(response):
    """Attach permissive CORS headers to every outgoing response."""
    headers = response.headers
    headers.add('Access-Control-Allow-Origin', '*')
    headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
    headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
    return response
@app.route('/')
def root():
    """Serve the static tetris page as the site root."""
    return app.send_static_file('tetris.html')


if __name__ == '__main__':
    # NOTE(review): debug=True must not be used in production deployments.
    app.run(debug=True)
|
# -*- coding: utf-8 -*-
import cv2
import os
from matplotlib import pyplot as plt
from PIL import Image, ImageEnhance, ImageFilter
import pytesseract
from PIL import ImageFont, ImageDraw
import numpy as np
# Simple frame-differencing viewer: shows the difference between adaptive
# thresholds of consecutive frames until the video ends or Esc is pressed.
video_src = 'D:/PROJECTS/Python/HUMAN COUNT/dataset/VID_20191029_185101.mp4'
# NOTE(review): this assignment immediately overwrites the path above —
# keep only one source, or select it via a command-line argument.
video_src = 'E:/PROJECT ALL/kaggle/project/human Count/dataset/PCDS/106_20150509_back/noisy/uncrowd/2015_05_09_15_05_40BackColor.avi'
#outputpath = 'D:/PROJECTS/Python/dataExtract/VID7/'
#video_src = 'D:/PROJECTS/Python/HUMAN COUNT/dataset/VID_20191029_185101.mp4'
#video_src = 'E:/Google Drive/PCDS/106_20150509_back/noisy/uncrowd/2015_05_09_15_05_40BackColor.avi'
#outputpath = 'D:/PROJECTS/Python/dataExtract/VID7/'
cap = cv2.VideoCapture(video_src)
count = 0  # NOTE(review): never used below — dead variable
# NOTE(review): assumes the first read succeeds; ret_pre is never checked,
# so an unreadable file crashes on cvtColor below.
ret_pre, img_pre = cap.read()
img_pre_gray = cv2.cvtColor(img_pre, cv2.COLOR_BGR2GRAY)
# Adaptive threshold of the previous frame, used for frame-to-frame diffs.
th2_pre = cv2.adaptiveThreshold(img_pre_gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                cv2.THRESH_BINARY, 11, 2)
while True:
    ret, img = cap.read()
    # End of stream: VideoCapture.read returns (False, None).
    if (type(img) == type(None)):
        break
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Grayscale difference against the previous frame (uint8 wraparound applies).
    img_sub = img_pre_gray - img_gray
    ret, thresh = cv2.threshold(img_sub, 50, 120, cv2.THRESH_TRUNC)
    th2 = cv2.adaptiveThreshold(img_gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                cv2.THRESH_BINARY, 11, 2)
    img_sub = img_pre - img
    # Difference of the adaptive-threshold images — this is what is displayed.
    th2_sub = th2_pre - th2
    ret, thresh11 = cv2.threshold(th2_sub, 127, 255, cv2.THRESH_TRUNC)
    cv2.imshow('img', th2_sub)
    # cv2.imshow('img',cv2.cvtColor(img,cv2.COLOR_BAYER_GR2RGB ) )
    # Roll the "previous frame" state forward.
    img_pre_gray = img_gray
    th2_pre = th2
    img_pre = img
    # ~30 fps playback; 27 == Esc key.
    if cv2.waitKey(33) == 27:
        break
cv2.destroyAllWindows()
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# Generated code (AutoRest): prefer the typed Python-3 model classes; on a
# Python-2 runtime the `_models_py3` module fails to import (SyntaxError on
# annotations) and the untyped `_models` variants are used instead.
try:
    from ._models_py3 import Application
    from ._models_py3 import ApplicationGroup
    from ._models_py3 import ApplicationGroupList
    from ._models_py3 import ApplicationGroupPatch
    from ._models_py3 import ApplicationList
    from ._models_py3 import ApplicationPatch
    from ._models_py3 import CloudErrorProperties
    from ._models_py3 import Desktop
    from ._models_py3 import DesktopList
    from ._models_py3 import DesktopPatch
    from ._models_py3 import ExpandMsixImage
    from ._models_py3 import ExpandMsixImageList
    from ._models_py3 import HostPool
    from ._models_py3 import HostPoolList
    from ._models_py3 import HostPoolPatch
    from ._models_py3 import Identity
    from ._models_py3 import LogSpecification
    from ._models_py3 import MigrationRequestProperties
    from ._models_py3 import MsixImageUri
    from ._models_py3 import MsixPackage
    from ._models_py3 import MsixPackageApplications
    from ._models_py3 import MsixPackageDependencies
    from ._models_py3 import MsixPackageList
    from ._models_py3 import MsixPackagePatch
    from ._models_py3 import OperationProperties
    from ._models_py3 import Plan
    from ._models_py3 import RegistrationInfo
    from ._models_py3 import RegistrationInfoPatch
    from ._models_py3 import Resource
    from ._models_py3 import ResourceModelWithAllowedPropertySet
    from ._models_py3 import ResourceModelWithAllowedPropertySetIdentity
    from ._models_py3 import ResourceModelWithAllowedPropertySetPlan
    from ._models_py3 import ResourceModelWithAllowedPropertySetSku
    from ._models_py3 import ResourceProviderOperation
    from ._models_py3 import ResourceProviderOperationDisplay
    from ._models_py3 import ResourceProviderOperationList
    from ._models_py3 import ScalingHostPoolReference
    from ._models_py3 import ScalingPlan
    from ._models_py3 import ScalingPlanList
    from ._models_py3 import ScalingPlanPatch
    from ._models_py3 import ScalingSchedule
    from ._models_py3 import SendMessage
    from ._models_py3 import ServiceSpecification
    from ._models_py3 import SessionHost
    from ._models_py3 import SessionHostHealthCheckFailureDetails
    from ._models_py3 import SessionHostHealthCheckReport
    from ._models_py3 import SessionHostList
    from ._models_py3 import SessionHostPatch
    from ._models_py3 import Sku
    from ._models_py3 import StartMenuItem
    from ._models_py3 import StartMenuItemList
    from ._models_py3 import UserSession
    from ._models_py3 import UserSessionList
    from ._models_py3 import Workspace
    from ._models_py3 import WorkspaceList
    from ._models_py3 import WorkspacePatch
except (SyntaxError, ImportError):
    from ._models import Application  # type: ignore
    from ._models import ApplicationGroup  # type: ignore
    from ._models import ApplicationGroupList  # type: ignore
    from ._models import ApplicationGroupPatch  # type: ignore
    from ._models import ApplicationList  # type: ignore
    from ._models import ApplicationPatch  # type: ignore
    from ._models import CloudErrorProperties  # type: ignore
    from ._models import Desktop  # type: ignore
    from ._models import DesktopList  # type: ignore
    from ._models import DesktopPatch  # type: ignore
    from ._models import ExpandMsixImage  # type: ignore
    from ._models import ExpandMsixImageList  # type: ignore
    from ._models import HostPool  # type: ignore
    from ._models import HostPoolList  # type: ignore
    from ._models import HostPoolPatch  # type: ignore
    from ._models import Identity  # type: ignore
    from ._models import LogSpecification  # type: ignore
    from ._models import MigrationRequestProperties  # type: ignore
    from ._models import MsixImageUri  # type: ignore
    from ._models import MsixPackage  # type: ignore
    from ._models import MsixPackageApplications  # type: ignore
    from ._models import MsixPackageDependencies  # type: ignore
    from ._models import MsixPackageList  # type: ignore
    from ._models import MsixPackagePatch  # type: ignore
    from ._models import OperationProperties  # type: ignore
    from ._models import Plan  # type: ignore
    from ._models import RegistrationInfo  # type: ignore
    from ._models import RegistrationInfoPatch  # type: ignore
    from ._models import Resource  # type: ignore
    from ._models import ResourceModelWithAllowedPropertySet  # type: ignore
    from ._models import ResourceModelWithAllowedPropertySetIdentity  # type: ignore
    from ._models import ResourceModelWithAllowedPropertySetPlan  # type: ignore
    from ._models import ResourceModelWithAllowedPropertySetSku  # type: ignore
    from ._models import ResourceProviderOperation  # type: ignore
    from ._models import ResourceProviderOperationDisplay  # type: ignore
    from ._models import ResourceProviderOperationList  # type: ignore
    from ._models import ScalingHostPoolReference  # type: ignore
    from ._models import ScalingPlan  # type: ignore
    from ._models import ScalingPlanList  # type: ignore
    from ._models import ScalingPlanPatch  # type: ignore
    from ._models import ScalingSchedule  # type: ignore
    from ._models import SendMessage  # type: ignore
    from ._models import ServiceSpecification  # type: ignore
    from ._models import SessionHost  # type: ignore
    from ._models import SessionHostHealthCheckFailureDetails  # type: ignore
    from ._models import SessionHostHealthCheckReport  # type: ignore
    from ._models import SessionHostList  # type: ignore
    from ._models import SessionHostPatch  # type: ignore
    from ._models import Sku  # type: ignore
    from ._models import StartMenuItem  # type: ignore
    from ._models import StartMenuItemList  # type: ignore
    from ._models import UserSession  # type: ignore
    from ._models import UserSessionList  # type: ignore
    from ._models import Workspace  # type: ignore
    from ._models import WorkspaceList  # type: ignore
    from ._models import WorkspacePatch  # type: ignore
# Enumerations used by the model classes above (generated by AutoRest).
from ._desktop_virtualization_api_client_enums import (
    ApplicationGroupType,
    ApplicationType,
    CommandLineSetting,
    HealthCheckName,
    HealthCheckResult,
    HostPoolType,
    LoadBalancerType,
    Operation,
    PersonalDesktopAssignmentType,
    PreferredAppGroupType,
    RegistrationTokenOperation,
    RemoteApplicationType,
    ScalingScheduleDaysOfWeekItem,
    SessionHostLoadBalancingAlgorithm,
    SessionState,
    SkuTier,
    SsoSecretType,
    Status,
    StopHostsWhen,
    UpdateState,
)
# Public API of this package: model classes first, then enumerations.
# Generated by AutoRest — edits here are lost on regeneration.
__all__ = [
    'Application',
    'ApplicationGroup',
    'ApplicationGroupList',
    'ApplicationGroupPatch',
    'ApplicationList',
    'ApplicationPatch',
    'CloudErrorProperties',
    'Desktop',
    'DesktopList',
    'DesktopPatch',
    'ExpandMsixImage',
    'ExpandMsixImageList',
    'HostPool',
    'HostPoolList',
    'HostPoolPatch',
    'Identity',
    'LogSpecification',
    'MigrationRequestProperties',
    'MsixImageUri',
    'MsixPackage',
    'MsixPackageApplications',
    'MsixPackageDependencies',
    'MsixPackageList',
    'MsixPackagePatch',
    'OperationProperties',
    'Plan',
    'RegistrationInfo',
    'RegistrationInfoPatch',
    'Resource',
    'ResourceModelWithAllowedPropertySet',
    'ResourceModelWithAllowedPropertySetIdentity',
    'ResourceModelWithAllowedPropertySetPlan',
    'ResourceModelWithAllowedPropertySetSku',
    'ResourceProviderOperation',
    'ResourceProviderOperationDisplay',
    'ResourceProviderOperationList',
    'ScalingHostPoolReference',
    'ScalingPlan',
    'ScalingPlanList',
    'ScalingPlanPatch',
    'ScalingSchedule',
    'SendMessage',
    'ServiceSpecification',
    'SessionHost',
    'SessionHostHealthCheckFailureDetails',
    'SessionHostHealthCheckReport',
    'SessionHostList',
    'SessionHostPatch',
    'Sku',
    'StartMenuItem',
    'StartMenuItemList',
    'UserSession',
    'UserSessionList',
    'Workspace',
    'WorkspaceList',
    'WorkspacePatch',
    'ApplicationGroupType',
    'ApplicationType',
    'CommandLineSetting',
    'HealthCheckName',
    'HealthCheckResult',
    'HostPoolType',
    'LoadBalancerType',
    'Operation',
    'PersonalDesktopAssignmentType',
    'PreferredAppGroupType',
    'RegistrationTokenOperation',
    'RemoteApplicationType',
    'ScalingScheduleDaysOfWeekItem',
    'SessionHostLoadBalancingAlgorithm',
    'SessionState',
    'SkuTier',
    'SsoSecretType',
    'Status',
    'StopHostsWhen',
    'UpdateState',
]
|
# Generated by Django 3.2.3 on 2021-10-13 13:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration creating the `Cash` invoice table.

    NOTE(review): generated file — do not hand-edit field definitions; the
    heavily denormalized TextField columns (price1_1 … total6) presumably
    mirror a fixed-layout invoice form; confirm against the Sam app models.
    """

    dependencies = [
        ('Sam', '0012_asset_expences_income_liabilities'),
    ]

    operations = [
        migrations.CreateModel(
            name='Cash',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('invoice_number', models.TextField(max_length=100)),
                ('date', models.CharField(max_length=100)),
                ('internal_ref_no', models.TextField(max_length=100)),
                ('cash', models.TextField(max_length=100)),
                ('user_id', models.TextField(max_length=100)),
                ('account', models.TextField(max_length=100)),
                ('customer_id', models.TextField(max_length=100)),
                ('customer_name', models.TextField(max_length=100)),
                ('item_id1', models.TextField(max_length=100)),
                ('item_id2', models.TextField(max_length=100)),
                ('item_details1', models.TextField(max_length=100)),
                ('item_details2', models.TextField(max_length=100)),
                ('price1_1', models.TextField(max_length=100)),
                ('price1_2', models.TextField(max_length=100)),
                ('price2_1', models.TextField(max_length=100)),
                ('price2_2', models.TextField(max_length=100)),
                ('quantity1', models.TextField(max_length=100)),
                ('quantity2', models.TextField(max_length=100)),
                ('quantity3', models.TextField(max_length=100)),
                ('quantity4', models.TextField(max_length=100)),
                ('amount1', models.TextField(max_length=100)),
                ('amount2', models.TextField(max_length=100)),
                ('sales_ex1', models.TextField(max_length=100)),
                ('sales_ex2', models.TextField(max_length=100)),
                ('job1', models.TextField(max_length=100)),
                ('job2', models.TextField(max_length=100)),
                ('labour_charge', models.TextField(max_length=100)),
                ('other_charge', models.TextField(max_length=100)),
                ('total1', models.TextField(max_length=100)),
                ('total2', models.TextField(max_length=100)),
                ('total3', models.TextField(max_length=100)),
                ('total4', models.TextField(max_length=100)),
                ('total5', models.TextField(max_length=100)),
                ('total6', models.TextField(max_length=100)),
                ('discount', models.TextField(max_length=100)),
                ('tax', models.TextField(max_length=100)),
            ],
        ),
    ]
|
import os
from flask import Flask, session, render_template, request, flash, redirect, url_for, jsonify
from flask_sqlalchemy import SQLAlchemy
# Flask application and SQLAlchemy configuration.
app = Flask(__name__)
# NOTE(review): hard-coded secret key — load from an environment variable
# for any non-development deployment.
app.config['SECRET_KEY'] = 'hard to guess secure key'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# setup SQLAlchemy: SQLite database file next to this module.
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
db = SQLAlchemy(app)
# define database tables
class Group(db.Model):
    """A dog-breed group; deleting a group also deletes its breeds."""
    __tablename__ = 'group'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))
    about = db.Column(db.Text)
    # One-to-many: Breed rows reference this group via the `group` backref;
    # cascade="delete" removes a group's breeds when the group is deleted.
    breed = db.relationship('Breed', backref='group', cascade="delete")
class Breed(db.Model):
    """A single dog breed belonging to one Group.

    NOTE(review): the column is named `color`, but several views in this
    module reference `colors` — align the attribute name with the views.
    """
    __tablename__ = 'breed'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(256))
    size = db.Column(db.String(64))
    color = db.Column(db.String(64))
    description = db.Column(db.Text)
    # Foreign key to the owning Group (backref `group` defined on Group).
    group_id = db.Column(db.Integer, db.ForeignKey('group.id'))
@app.route('/')
def index():
    """Render the site landing page."""
    return render_template('index.html')
@app.route('/group')
def show_all_groups():
    """List every breed group."""
    group = Group.query.all()
    return render_template('group-all.html', group=group)
@app.route('/group/add', methods=['GET','POST'])
def add_group():
    """Render the add-group form (GET) or create a group from it (POST)."""
    if request.method == 'GET':
        return render_template('group-add.html')
    if request.method == 'POST':
        # Build and persist the new group from the submitted form fields.
        new_group = Group(name=request.form['name'],
                          about=request.form['about'])
        db.session.add(new_group)
        db.session.commit()
        return redirect(url_for('show_all_groups'))
@app.route('/ajax/group/add', methods=['POST'])
def add_ajax_group():
    """Create a group from AJAX form data and return its id/name as JSON."""
    # get data from the form
    name = request.form['name']
    about = request.form['about']
    # BUG FIX: the original did `Group = Group(name=..., about=...)`, which
    # rebinds the model class name, and then referenced an undefined `group`
    # — raising UnboundLocalError/NameError on every request.
    group = Group(name=name, about=about)
    # insert the data into the database
    db.session.add(group)
    db.session.commit()
    # flash message type: success, info, warning, and danger from bootstrap
    flash('Group Inserted', 'success')
    return jsonify({"id": str(group.id), "name": group.name})
@app.route('/group/edit/<int:id>', methods=['GET', 'POST'])
def edit_group(id):
    """Render the edit form for a group (GET) or apply the edits (POST)."""
    group = Group.query.filter_by(id=id).first()
    if request.method == 'GET':
        return render_template('group-edit.html', group=group)
    if request.method == 'POST':
        # update data based on the form data
        group.name = request.form['name']
        group.about = request.form['about']
        # update the database
        db.session.commit()
        return redirect(url_for('show_all_groups'))
@app.route('/group/delete/<int:id>', methods=['GET', 'POST'])
def delete_group(id):
    """Confirm (GET) then perform (POST) deletion of a group by id."""
    group = Group.query.filter_by(id=id).first()
    if request.method == 'GET':
        return render_template('group-delete.html', group=group)
    if request.method == 'POST':
        # delete the groups by id
        # all related breeds are deleted as well (cascade on Group.breed)
        db.session.delete(group)
        db.session.commit()
        return redirect(url_for('show_all_groups'))
@app.route('/ajax/group/<int:id>', methods=['DELETE'])
def delete_ajax_group(id):
    """Delete a group by id via AJAX (404 if absent); echo id/name as JSON."""
    doomed = Group.query.get_or_404(id)
    db.session.delete(doomed)
    db.session.commit()
    return jsonify({"id": str(doomed.id), "name": doomed.name})
@app.route('/breed')
def show_all_breeds():
    """List every breed."""
    breed = Breed.query.all()
    return render_template('breed-all.html', breed=breed)
@app.route('/breed/add', methods=['GET', 'POST'])
def add_breeds():
    """Render the add-breed form (GET) or create a breed from it (POST)."""
    if request.method == 'GET':
        group = Group.query.all()
        return render_template('breed-add.html', group=group)
    if request.method == 'POST':
        # get data from the form
        name = request.form['name']
        size = request.form['size']
        colors = request.form['colors']
        description = request.form['description']
        group_name = request.form['group']
        group = Group.query.filter_by(name=group_name).first()
        # BUG FIX: the Breed model's column is `color`; passing `colors=`
        # raised TypeError on every submission.
        breed = Breed(name=name, size=size, color=colors,
                      description=description, group=group)
        # insert the data into the database
        db.session.add(breed)
        db.session.commit()
        return redirect(url_for('show_all_breeds'))
@app.route('/breed/edit/<int:id>', methods=['GET', 'POST'])
def edit_breed(id):
    """Render the edit form for a breed (GET) or apply the edits (POST)."""
    breed = Breed.query.filter_by(id=id).first()
    group = Group.query.all()
    if request.method == 'GET':
        return render_template('breed-edit.html', breed=breed, group=group)
    if request.method == 'POST':
        # update data based on the form data
        breed.name = request.form['name']
        breed.size = request.form['size']
        # BUG FIX: the model column is `color`; assigning `breed.colors` set
        # a stray attribute that was never persisted.
        breed.color = request.form['colors']
        breed.description = request.form['description']
        group_name = request.form['group']
        group = Group.query.filter_by(name=group_name).first()
        # BUG FIX: the relationship backref is `group`, not `groups`, so the
        # group change was silently dropped.
        breed.group = group
        # update the database
        db.session.commit()
        return redirect(url_for('show_all_breeds'))
@app.route('/breed/delete/<int:id>', methods=['GET', 'POST'])
def delete_breed(id):
    """Confirm (GET) then perform (POST) deletion of a breed by id."""
    breed = Breed.query.filter_by(id=id).first()
    group = Group.query.all()
    if request.method == 'GET':
        return render_template('breed-delete.html', breed=breed, group=group)
    if request.method == 'POST':
        db.session.delete(breed)
        db.session.commit()
        return redirect(url_for('show_all_breeds'))
@app.route('/ajax/breed/<int:id>', methods=['DELETE'])
def delete_ajax_breed(id):
    """Delete a breed by id via AJAX (404 if absent); echo id/name as JSON."""
    breed = Breed.query.get_or_404(id)
    db.session.delete(breed)
    db.session.commit()
    return jsonify({"id": str(breed.id), "name": breed.name})
@app.route('/about')
def about():
    """Render the static about page."""
    return render_template('about.html')


if __name__ == '__main__':
    app.run()
|
import requests
def get_url_status(url):
    """Issue a GET request to *url* and return the HTTP status code."""
    response = requests.get(url)
    return response.status_code
def run():
    """Check each URL in a fixed list and print whether it returned HTTP 200."""
    url_list = ["http://www.baidu.com", "http://www.163.com"]
    for url in url_list:
        status = get_url_status(url)
        if status == 200:
            print("{} 正常".format(url))  # "normal" / reachable
        else:
            print("{} 异常".format(url))  # "abnormal" / unexpected status


if __name__ == '__main__':
    run()
import torch.optim as optim
import torchvision
import torch
from torch.autograd import Variable
import numpy as np
from torch.nn import functional as F
import dataset
import helpers
import models
from config import *
# assert(file_name == 'triplet' or file_name == 'bigbottle' or file_name == 'tripletpro')
# Experiment setup: `gpu`, `file_name`, `experiment_name`, `lr` etc. come
# from `from config import *` — presumably a flat config module; confirm.
writer, save_dir = helpers.init(gpu, file_name, experiment_name)
# model = models.Triplet().cuda()
# model = models.BigBottle2(n_layer=n_layer).cuda()
model = models.TripletPro().cuda()
model.apply(models.init_weights)  # xavier init
optimizer = optim.Adam(model.parameters(), lr=lr)
def run(n_epochs):
    """Train the TripletPro model for *n_epochs* over the frame-triplet data.

    Per batch: reconstruct the middle frame (frame1) from its neighbours
    (frame0, frame2) plus a random frame, minimise L1 reconstruction loss,
    and log it to TensorBoard. Every `log_interval` epochs, compute the test
    loss and log qualitative reconstructions/interpolations.

    NOTE(review): uses the pre-0.4 PyTorch API (`Variable`, `loss.data[0]`)
    — fine for the pinned environment, broken on modern torch; confirm.
    """
    for epoch in range(n_epochs):
        for n, (frame0, frame1, frame2, frame_rand) in enumerate(dataset.train_loader):
            niter = epoch * len(dataset.train_loader) + n  # count gradient updates
            model.train()
            frame0 = Variable(frame0).cuda()
            frame1 = Variable(frame1).cuda()
            frame2 = Variable(frame2).cuda()
            frame_rand = Variable(frame_rand).cuda()
            # Appearance constancy loss
            # a1 = model.appearance(frame0)
            # a2 = model.appearance(frame_rand)
            # loss_appear = F.l1_loss(a1, a2) # two frames in video should have same appearance
            # Pose constancy loss
            # p1 = model.pose(frame0)
            # frame_trans = frame0 # insert some transform here e.g. contrast, color inversion, small transl/rotations
            # p2 = model.pose(frame_trans)
            # loss_pose = F.l1_loss(p1, p2) # pose should not change under transformations
            # Reconstruction Loss
            optimizer.zero_grad()
            # output, mu, log_var = model(frame0, frame2)
            # loss = helpers.vae_loss(output, frame1, mu=mu, logvar=log_var, batch_size=batch_size, img_size=img_size,
            #                         nc=nc)
            output = model(frame0, frame2, frame_rand)
            loss_reconst = F.l1_loss(output, frame1)  # TODO make a proper VAE loss
            loss = loss_reconst  # + loss_appear # + loss_pose
            loss.backward()
            optimizer.step()
            train_loss = loss.data[0]
            writer.add_scalar('Loss/Train', train_loss, niter)
        if epoch % log_interval == 0:
            if print_output:
                print("Epoch [{}/{}], Gradient Step: {}, Train Loss: {:.4f}"
                      .format(epoch, n_epochs, (epoch + 1) * len(dataset.train_loader), train_loss))
            # test loss (no-grad evaluation over the whole test loader)
            model.eval()
            test_loss = 0
            for n, (frame0, frame1, frame2, frame_rand) in enumerate(dataset.test_loader):
                frame0 = Variable(frame0).cuda()
                frame1 = Variable(frame1).cuda()
                frame2 = Variable(frame2).cuda()
                frame_rand = Variable(frame_rand).cuda()
                # output, mu, log_var = model(frame0, frame2)
                # loss = helpers.vae_loss(output, frame1, mu=mu, logvar=log_var, batch_size=batch_size, img_size=img_size,
                #                         nc=nc)
                output = model(frame0, frame2, frame_rand)
                loss = F.l1_loss(output, frame1)
                test_loss += loss.data[0]
            test_loss /= len(dataset.test_loader)
            writer.add_scalar('Loss/Test', test_loss, epoch)
            # test reconstruction quality for images from train and test set
            # TODo new eval.py for inspecting latent space
            # phases = ['train', 'test']
            phases = ['train']
            for phase in phases:
                if phase == 'train':
                    evalset = dataset.trainset
                else:
                    evalset = dataset.testset
                # Test triplet reconstruction
                # get random subset
                idx = np.random.choice(range(evalset.num_subsets))  # random index of triplet
                frames = evalset.get_subset(idx)  # triplet from train data
                inputs = list(f.view([1] + [i for i in f.shape]) for f in frames)  # format for batch
                frames = list(Variable(frame).cuda() for frame in inputs)
                outputs = list()
                outputs.append(model.reconstruct(frames[0]))
                outputs.append(model.forward(frames[0], frames[2], frames[3]))
                outputs.append(model.reconstruct(frames[2]))
                outputs.append(model.reconstruct(frames[3]))  # also reconstruct random frame
                outputs = [out.data.cpu() for out in outputs]
                show_images(inputs+outputs, 4, 'Interpolation', epoch)
                # Test pose and appearance switch
                # video = np.random.choice(evalset.sequences) # same video
                # a, b = np.random.choice(video, 2)
                # a = evalset.get_image(None, img_path=a)
                # b = evalset.get_image(None, img_path=b)
                video1, video2 = np.random.choice(evalset.sequences, 2)  # different video
                a = np.random.choice(video1)
                b = np.random.choice(video2)
                a = evalset.get_image(None, img_path=a)
                b = evalset.get_image(None, img_path=b)
                a = a.view([1] + [i for i in a.shape])
                b = b.view([1] + [i for i in b.shape])
                p_a = model.pose(Variable(a).cuda())
                p_b = model.pose(Variable(b).cuda())
                a_a = model.appearance(Variable(a).cuda())
                a_b = model.appearance(Variable(b).cuda())
                x_ab = model.generate(p_a, a_b)  # pose a, appearance b
                x_ba = model.generate(p_b, a_a)
                x_ab = x_ab.data.cpu()
                x_ba = x_ba.data.cpu()
                show_images([a, b, x_ab, x_ba], 2, 'Switch Pose/Appearance', epoch)
                # Test interpolation: blend pose codes of first/last frame of a clip
                length = 5
                seq = video1[0:length]
                seq = [evalset.get_image(None, img_path=path) for path in seq]
                seq = [img.view([1] + [i for i in img.shape]) for img in seq]
                appear = model.appearance(Variable(seq[0]).cuda())
                p_init = model.pose(Variable(seq[0]).cuda())
                p_end = model.pose(Variable(seq[-1]).cuda())
                alpha = [float(i) / (length-1) for i in range(0, length)]
                poses = [alpha[i] * p_init + (1-alpha[i]) * p_end for i in range(0, length)]
                images = [model.generate(p, appear) for p in poses]
                show_images(images, length, 'Linear Interpolation in Pose space', epoch)
def show_images(img_list, how_many_in_one_row, description, iter):
    """Concatenate image tensors into one grid and log it to TensorBoard."""
    batch = torch.cat(img_list, 0)
    grid = torchvision.utils.make_grid(batch, how_many_in_one_row)
    writer.add_image(description, helpers.convert_image_np(grid), iter)


run(num_epochs)
|
# first line: 12
@memory.cache
def detrend(rasterclass):
    """Remove the large-scale trend from rasterclass.raster, in place.

    The trend is estimated as a wide Gaussian blur of the raster and
    subtracted, leaving the high-frequency residual. The result is cached
    by the module-level joblib `memory`.
    NOTE(review): sigma=200 is in *array cells*; it corresponds to 200 m
    only if the raster resolution is 1 m/cell — confirm.
    """
    print("\n Detrending \n")
    #perform detrending by applying a gaussian filter with a std of 200m, and detrend
    trend = gaussian_filter(rasterclass.raster,sigma=200)
    rasterclass.raster -= trend
    # flag consumed elsewhere to mark that detrending has been applied
    rasterclass.detrend_ = True
|
from django.db import models
from simple_history.models import HistoricalRecords
import pandas as pd
class Table(models.Model):
id = models.AutoField(primary_key=True)
col_1 = models.CharField(max_length=250, blank=True, null=True, verbose_name="№")
col_2 = models.CharField(max_length=250, blank=True, null=True, verbose_name="ФИО")
col_3 = models.CharField(max_length=250, blank=True, null=True, verbose_name="ИИН")
col_4 = models.CharField(max_length=250, blank=True, null=True, verbose_name="Дата рождения")
col_5 = models.CharField(max_length=250, blank=True, null=True, verbose_name="Номер паспорта")
col_6 = models.CharField(max_length=250, blank=True, null=True, verbose_name="Гражданство")
col_7 = models.CharField(max_length=250, blank=True, null=True, verbose_name="прошли проверку ИИН")
col_8 = models.CharField(max_length=250, blank=True, null=True, verbose_name="Номер мобильного телефона")
col_9 = models.CharField(max_length=250, blank=True, null=True, verbose_name="Номер2")
col_10 = models.CharField(max_length=250, blank=True, null=True, verbose_name="Место и сроки пребывания в последние 14 дней до прибытия в Казахстан (укажите страну, область, штат и т.д.)")
col_11 = models.CharField(max_length=250, blank=True, null=True, verbose_name="номер по базе")
col_12 = models.CharField(max_length=250, blank=True, null=True, verbose_name="поставлен на учет в прогу")
col_13 = models.CharField(max_length=250, blank=True, null=True, verbose_name="дата постановки в прогу")
col_14 = models.CharField(max_length=250, blank=True, null=True, verbose_name="Регион")
col_15 = models.CharField(max_length=250, blank=True, null=True, verbose_name="Место жительство, либо предпологаемое место проживания")
col_16 = models.CharField(max_length=250, blank=True, null=True, verbose_name="Место работы")
col_17 = models.CharField(max_length=250, blank=True, null=True, verbose_name="заражен")
col_18 = models.CharField(max_length=250, blank=True, null=True, verbose_name="Госпитализирован (да/нет)")
col_19 = models.CharField(max_length=250, blank=True, null=True, verbose_name="Место госпитализации")
col_20 = models.CharField(max_length=250, blank=True, null=True, verbose_name="ИИН, ФИО, моб. тел проживающие вместе в домашнем карантине")
col_21 = models.CharField(max_length=250, blank=True, null=True, verbose_name="Найден (да/нет)")
col_22 = models.CharField(max_length=250, blank=True, null=True, verbose_name="находится на дозвоне (да/нет)")
col_23 = models.CharField(max_length=250, blank=True, null=True, verbose_name="Выбыл из РК")
history = HistoricalRecords()
def __str__(self):
return str(self.id)
class Meta:
ordering = ['id']
verbose_name = 'Таблица'
verbose_name_plural = 'Таблица'
class Document(models.Model):
    """An uploaded spreadsheet whose rows are imported into Table on save."""
    id = models.AutoField(primary_key=True)
    description = models.CharField(max_length=255, blank=True, null=True, verbose_name="Описание")
    document = models.FileField(upload_to='documents/', verbose_name="Документ")
    uploaded_at = models.DateTimeField(auto_now_add=True, verbose_name="Время загрузки")
    # '0' = import failed / not processed, '1' = imported successfully
    document_status = [
        ('0', 'Провал'),
        ('1', 'Успеч'),
    ]
    status = models.CharField(max_length=1, choices=document_status, default='0', verbose_name="Статус документа")
    history = HistoricalRecords()

    def save(self, *args, **kwargs):
        """Save the document and, unless update=True, import its .xlsx rows.

        The keyword `update` is consumed here (not passed to Django); pass
        update=True when only model fields changed, to skip re-importing.

        Fixes over the original:
        - operates on `self` instead of re-fetching via
          Document.objects.latest('id'), which races when two documents
          are saved concurrently;
        - uses endswith('.xlsx') instead of slicing the last 5 characters;
        - the "not xlsx" message now fires only for non-xlsx files (the
          original's try/except/else printed it on *success*);
        - catches Exception (not a bare except) and reports the cause.
        """
        update = kwargs.pop('update', None)
        super(Document, self).save(*args, **kwargs)
        if update:
            return
        if not str(self.document).endswith('.xlsx'):
            print('not xlsx')
            return
        try:
            df = pd.read_excel('media/' + str(self.document))
            # Positional column names; column 0 (the sheet's own row
            # numbering) is intentionally not imported.
            df.columns = list(range(24))
            for i in range(len(df)):
                Table(**{'col_%d' % c: df[c][i] for c in range(1, 24)}).save()
            self.status = '1'
            self.save(update=True)  # update=True avoids re-importing
        except Exception as exc:
            # Leave status at '0' ("Провал") and report the failure.
            print('something wrong:', exc)

    def __str__(self):
        return str(self.id)

    class Meta:
        ordering = ['id']
        verbose_name = 'Загрузка документа'
        verbose_name_plural = 'Загрузка документа'
|
# Longest non-decreasing subsequence via binary search (patience method),
# computed on the *reversed* input; prints the subsequence length and the
# chosen 1-based positions in the original order.
n = int(input())
x = [int(i) for i in input().split(' ')][::-1]
p = [0] * n       # p[i]: index of the previous element of the subsequence ending at i
d = [0] * (n + 1) # d[L]: index (into x) of the tail of a length-L subsequence
longest_subs = 0
for i in range(n):
    lo = 1
    hi = longest_subs
    # binary search for longest subs able to hold curr elem
    while lo <= hi:
        mid = (lo + hi) // 2
        if x[d[mid]] <= x[i]:  # non-strict: equal values may extend
            lo = mid + 1
        else:
            hi = mid - 1
    longest_with_curr = lo
    p[i] = d[longest_with_curr - 1]
    d[longest_with_curr] = i
    if longest_with_curr > longest_subs:
        longest_subs = longest_with_curr
# Walk the predecessor chain back from the tail of the best subsequence,
# mapping each reversed index k to position n - k in the original order.
result = [0] * longest_subs
k = d[longest_subs]
for i in range(longest_subs - 1, -1, -1):
    result[i] = n - k
    k = p[k]
print(len(result))
print(*result[::-1])
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from .source import SourcePardot
__all__ = ["SourcePardot"]
|
# Tile-map legend and world-state defaults. Identifiers are French,
# matching the rest of the project; the comments below are inferred from
# the names — confirm against the code that consumes them.
liste_caracteres_bloques = ["1", "2", "3", "5", "6", "i", "j", "k", "l", "u", "v"]  # presumably impassable tiles
liste_caracteres_fleche = ["w", "x", "y", "z"]  # presumably arrow tiles
liste_caracteres_maison = []  # house tiles (populated elsewhere)
coordonnees_interieur_maison = []  # coordinates inside the house
coordonnees_porte_maison = []  # house door coordinates
maison_shop = False  # whether the current house is a shop
maison_grotte = False  # whether the current house is a cave
niveau_monstres = 1  # monster level
fond_ecran_combat = "imagesCombat/fondEcranPlage.png"  # combat background image
|
import sys
import oracledb
oracledb.version = "8.3.0"
sys.modules["cx_Oracle"] = oracledb
import cx_Oracle
import urllib3
urllib3.disable_warnings()
from .base import *
from logging.config import dictConfig
DEBUG = True # Always run in debug mode locally
# Dummy secret key value for testing and local usage
SECRET_KEY = "q9frwftd7&)vn9zonjy2&vgmq1i9csn20+f0r5whb%%u-mzm_i"
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
CANVAS_EMAIL_NOTIFICATION['course_migration_success_subject'] += ' (TEST, PLEASE IGNORE)'
CANVAS_EMAIL_NOTIFICATION['course_migration_failure_subject'] += ' (TEST, PLEASE IGNORE)'
CANVAS_EMAIL_NOTIFICATION['support_email_subject_on_failure'] += ' (TEST, PLEASE IGNORE)'
CANVAS_EMAIL_NOTIFICATION['support_email_address'] = 'tltqaemails@g.harvard.edu'
ALLOWED_HOSTS = ['*']
INSTALLED_APPS += ['django_extensions']
# If you want to use the Django Debug Toolbar, uncomment the following block:
'''
INSTALLED_APPS += ['debug_toolbar']
MIDDLEWARE = ['debug_toolbar.middleware.DebugToolbarMiddleware'] + MIDDLEWARE
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
'''
# Sessions
SESSION_COOKIE_SAMESITE = 'None'
# Allows the REST API passthrough to successfully negotiate an SSL session
# with an unverified certificate, e.g. the one that ships with django-sslserver
ICOMMONS_REST_API_SKIP_CERT_VERIFICATION = True
SELENIUM_CONFIG = {
'account_admin': {
'relative_url': 'accounts/10/external_tools/79', # dev (Admin Console)
#'relative_url': 'accounts/10/external_tools/99', # qa (Admin Console)
},
'canvas_site_creator': {
'test_data': {
'course_code': 'Selenium-Automated', # defaults to ILE
'course_group': 'Anthropology',
'course_short_title': 'Selenium Auto Test 101',
'course_title': 'Selenium Automated Test Course 101',
'template': 'None', # No Template
# Term used to be by value, but since the tool is displaying
# two different dropdown element values
# (see: https://github.com/Harvard-University-iCommons/
# canvas_account_admin_tools/pull/176#discussion_r90055379)
# the term value is modified to look by term display text.
'term': 'Fall 2016',
#TLT-2522 - Testing course with and without registrar_code_display
'course_with_registrar_code_display': {
'registrar_code_display': 'Automated_Test',
'sis_id_value': '362568',
},
'course_without_registrar_code_display': {
'registrar_code_display': '117138',
'sis_id_value': '360031',
},
},
},
'canvas_base_url': CANVAS_URL,
'course_info_tool': {
# 'relative_url': 'accounts/8/external_tools/68', # local
'relative_url': 'accounts/10/external_tools/79', # dev (Admin Console)
'test_course': {
'cid': '353035',
'term': 'Spring',
'title': 'Caribbean Poetics',
'registrar_code_display': 'HDS 2430',
'school': 'Harvard Divinity School',
'type': 'All courses',
'year': '2016',
},
'test_course_with_registrar_code_display_not_populated_in_db': {
'cid': '353457',
'registrar_code': 'selenium_test',
'school': 'Harvard College/GSAS',
},
# only SB/ILE courses support editing through the course info tool at the
# moment, so use this course for testing edit functionality
'test_course_SB_ILE': {
'cid': '354962', # Canvas course 3591
'term': 'Spring',
'title': 'HDS Spring 2016',
'school': 'Harvard Divinity School',
'type': 'All courses',
'year': '2016',
},
'test_users': {
'existing': {
'role_id': '9',
'user_id': '20299916'
},
}
},
'icommons_rest_api': {
'base_path': 'api/course/v2'
},
'publish_courses': {
'test_course': {
'relative_url': 'courses/2601'
},
'test_terms': {
'with_unpublished_courses': 'Summer 2017',
'with_all_published_courses': 'Full Year 2016',
},
# When testing on a term with large number of courses, test in dry
# run mode first to verify that numbers match expected results.
'op_config': {
'account': 10,
#'courses': [],
#'dry_run': False
'term': "sis_term_id:2016-0",
'published': 'false',
},
},
'run_locally': SECURE_SETTINGS.get('selenium_run_locally', False),
'selenium_username': SECURE_SETTINGS.get('selenium_user'),
'selenium_password': SECURE_SETTINGS.get('selenium_password'),
'selenium_grid_url': SECURE_SETTINGS.get('selenium_grid_url'),
'use_htmlrunner': SECURE_SETTINGS.get('selenium_use_htmlrunner', True),
}
dictConfig(LOGGING)
|
from os import getenv
from pymongo import MongoClient
# Default database name to use
DEFAULT_DATABASE = getenv("SMART_SCHOOL_DEFAULT_DB", "SmartSchool")
class DBClient:
    """Thin wrapper around pymongo.MongoClient with a default database."""

    def __init__(self, connection):
        """
        Initialize the client with the given MongoDB connection string.
        """
        self.client = MongoClient(connection)

    def getClient(self):
        """
        :return the underlying MongoDB client object
        """
        return self.client

    def getDataBase(self):
        """
        :return the MongoDB database named by module-level DEFAULT_DATABASE
            (overridable via the SMART_SCHOOL_DEFAULT_DB env var)
        """
        return self.client[DEFAULT_DATABASE]
|
from keras.datasets import mnist
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
print(X_train[1])
print(Y_test[1])
print(X_train.shape) # (60000,28,28)
print(X_test.shape) # (10000,28,28)
print(Y_train.shape) # (60000,)
print(Y_test.shape) # (10000,)
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.callbacks import ModelCheckpoint, EarlyStopping
import numpy as np
import os
import tensorflow as tf
# cnn 모델에 넣기 위해 행,a,b,1 을 맞추기 위해 reshape를 수행
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32') / 255 # (60000,28,28,1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32') / 255 # (10000,28,28,1)
'''
분류 : classification
원-핫 인코딩 : to_categorical 함수 사용
단어 집합의 크기를 벡터의 차원으로 하고, 표현하고 싶은 단어의 인덱스에 1의 값을 부여하고,
다른 인덱스에는 0을 부여하는 단어의 벡터 표현 방식입니다. 이렇게 표현된 벡터를 원-핫 벡터(One-hot vector)
원-핫 인코딩을 두 가지 과정으로 정리해보겠습니다.
(1) 각 단어에 고유한 인덱스를 부여합니다. (정수 인코딩)
(2) 표현하고 싶은 단어의 인덱스의 위치에 1을 부여하고, 다른 단어의 인덱스의 위치에는 0을 부여
'''
Y_train = np_utils.to_categorical(Y_train)
Y_test = np_utils.to_categorical(Y_test)
print(Y_test[0]) #[0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]
print(Y_train.shape) # (60000,10)
print(Y_test.shape) # (10000,10)
# 컨볼루션 신경망의 설정
model = Sequential()
# 아웃 shape : 28(인풋컬럼) - 3(커널사이즈) + 1
# input : 28 가로, 28 세로, 1 피쳐
model.add(Conv2D(32, kernel_size=(3,3), input_shape=(28,28,1), activation='relu')) # (None,26,26,32)
model.add(Conv2D(64, (3,3), activation='relu')) # (None,24,24,64)
# 2x2 에서 가장 큰 숫자를 pool 에 넣어서 효율성을 높임.
model.add(MaxPooling2D(pool_size=2)) # (None,12,12,64)
model.add(Dropout(0.25)) # 크기에 영향 없음. 노드는 존재하나 작동을 시키지 않음.
model.add(Flatten()) # (None, 9216)
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
# activation 디폴트는 linear, relu 는 평타는 침.
# 분류모델은 무조건 softmax 나 sigmoid 사용, (sigmoid 는 0과 1로만 출력), 강제적으로 하나를 선택하게 함.
model.add(Dense(10, activation='softmax'))
# 로또 예측 모델이라면 to_categorical을 이용해 컬럼을 45로 변환하고, 마지막 레이어를 아래와 같이 구성.
# model.add(Dense(45, activation='softmax'))
# model.summary()
# softmax 를 사용하는 경우 loss 는 categorical_crossentropy 를 사용함.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
early_stopping_callback = EarlyStopping(monitor='val_loss', patience=10)
# 모델의 실행
history = model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=2, batch_size=200, verbose=1,
callbacks=[early_stopping_callback]) # checkpointer
# 테스트 정확도 출력
print("\n Test Accuracy: %.4f" % (model.evaluate(X_test, Y_test)[1]))
|
"""Strategy factory: builds a single stock's strategy from the config file or runtime arguments."""
from singleton import singleton
from pc_parity_strategy import PcParityStrategy

@singleton
class StrategyFactory:
    def __init__(self):
        pass

    def create(self):
        """Create the strategy for a stock (currently always PcParityStrategy)."""
        return PcParityStrategy()
|
__author__ = 'grant'

# Teaching script: demonstrates Python truthiness in if statements.
a = 10
if a - 10 == 0:
    print('a is ten')
if True:
    print('tis true')
if 1:
    print('its a one')
if 't':
    print('the non-empty is treated as true')
# NOTE(review): `a / 3 == 0` is only true for a == 0; the message suggests
# the intent was `a == 3` (or `a % 3 == 0`) — confirm.
if a / 3 == 0:
    print('variable a is probably 3')
else:
    print('variable a is something else')
weather = 'rainy'
if weather == 'sunny':
    __doc__ = "this tests if the weather variable is nice out" \
        "this is also a doc string just for the if statement."
"""
ternary operators are a nice shortcut for writing if statements,
but remember, always air on the side of readability. Complex
ternary statements can be hard to debug and understand later.
"""
x = 10 if 's' in 'string' else 5
print(x)
|
def find_max(a):
    """Return the index of the first maximum element of list a.

    Returns 0 for an empty list (preserving the original behavior).
    Fix: the original used `max` as a local variable, shadowing the builtin.
    """
    best = 0
    for i in range(1, len(a)):
        if a[i] > a[best]:
            best = i
    return best


def sort(a):
    """Selection sort, descending.

    NOTE: consumes the input — `a` is emptied by repeated pop(); the
    sorted values are returned in a new list. (list.sort()/sorted() are
    the idiomatic tools; this exists for teaching purposes.)
    """
    result = []
    while a:
        result.append(a.pop(find_max(a)))
    return result
d = [2, 4, 5, 1, 3]
print(sort(d))
|
#!/usr/bin/env python
# Python 2 demo: recover 3-D indices (i, j, k) from a flat index idx.
nx, ny, nz = 4, 5, 6
for idx in xrange(nx*ny*nz):
    i = idx/(ny*nz)
    # NOTE(review): j is missing a modulo — (idx/nz) % ny; j2/k2 below
    # compute the corrected pair, and both are printed for comparison.
    j = idx/nz
    k = idx%nz
    j2 = (idx - i*ny*nz)/nz
    k2 = idx - i*ny*nz - j2*nz
    print i,j,k,'\t',j2,k2
|
import metar
import matplotlib.dates as mdates
import datetime as dt
stations = [('KDLS', 'The Dalles', 'OR'),
('KHRI', 'Hermiston', 'OR'),
('KPSC', 'Pasco', 'WA')]
startdate = dt.datetime(1980,1,1)
enddate = dt.datetime(2012,5,27)
timestep = dt.timedelta(days=1)
for station in stations:
outfilename = '%s_raw.csv' % (station[0],)
procfilename = '%s_processed.csv' % (station[0],)
errfillename = '%s_errors.log' % (station[0],)
outfile = open(outfilename, 'w')
errorfile = open(errfillename, 'a')
sta = metar.Station.station(station[0], city=station[1], state=station[2])
for n, date in enumerate(mdates.drange(startdate, enddate, timestep)):
dateobj = mdates.num2date(date)
url = sta.urlByDate(dateobj)
if n%100 == 0:
print('%s - %s' % (station[0], dateobj))
if n == 0:
sta.getHourlyData(url, outfile, errorfile, keepheader=True)
else:
sta.getHourlyData(url, outfile, errorfile, keepheader=False)
outfile.close()
metar.Station.processWundergroundFile(outfilename, procfilename, errorfile)
errorfile.close()
|
import re
from unicodedata import normalize
from photosandtext2 import app, db
from photosandtext2.models.photo import *
from photosandtext2.models.user import *
import datetime
ALLOWED_EXTENSIONS = app.config["ALLOWED_EXTENSIONS"]
def init_env():
    """
    Used to initialize the environment. (Need to import photo models and run db.create_all() first.)

    Seeds the database with the standard crop sizes and a default user.
    NOTE(review): height=0 presumably means "derive from aspect ratio" —
    confirm against CropSettings.
    """
    crop1 = CropSettings(name="thumb200",height=200,width=200)
    crop2 = CropSettings(name="thumb400",height=400,width=400)
    crop3 = CropSettings(name="home400",height=0,width=400)
    crop4 = CropSettings(name="home600",height=0,width=600)
    crop5 = CropSettings(name="home800",height=0,width=800)
    crop6 = CropSettings(name="display1280",height=0,width=1280)
    crop7 = CropSettings(name="display1600",height=0,width=1600)
    crop8 = CropSettings(name="display_t",height=0,width=100)
    crop9 = CropSettings(name="display_m",height=0,width=240)
    crop10 = CropSettings(name="display_n",height=0,width=320)
    crop11 = CropSettings(name="display",height=0,width=500)
    crop12 = CropSettings(name="display_z",height=0,width=640)
    crop13 = CropSettings(name="display_c",height=0,width=800)
    crop14 = CropSettings(name="display_b",height=0,width=1024)
    for crop in (crop1, crop2, crop3, crop4, crop5, crop6, crop7, crop8, crop9, crop10, crop11, crop12, crop13, crop14):
        db.session.add(crop)
    # NOTE(review): password passed in plaintext — confirm User hashes it.
    user = User(username="jared", password="password")
    db.session.add(user)
    db.session.commit()
def allowed_file(filename):
    """Return True iff *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    parts = filename.rsplit('.', 1)
    return len(parts) == 2 and parts[1] in ALLOWED_EXTENSIONS
#Returns a string of a friendly date range
def date_format(date1, date2):
    """Return a friendly string for a date or a date range.

    Same day -> "March 05, 2020"; same month and year ->
    "March 05 to 09, 2020"; otherwise -> "March 05 to April 02, 2021".
    """
    if date1.strftime("%B %d %Y") == date2.strftime("%B %d %Y"):
        return date1.strftime("%B %d, %Y")
    # Fix: compare the year along with the month — the original compared
    # only "%B", so e.g. March 2020 .. March 2021 collapsed into one month.
    if date1.strftime("%B %Y") == date2.strftime("%B %Y"):
        return date1.strftime("%B %d") + " to " + date2.strftime("%d, %Y")
    return date1.strftime("%B %d") + " to " + date2.strftime("%B %d, %Y")
def save_galleries():
    """Re-save every gallery that has thumbnails, most recently updated first.

    Presumably re-saving regenerates derived/cached fields — confirm
    against Gallery.save().
    """
    for gallery in Gallery.query.filter(Gallery.thumbnails!=None).order_by(Gallery.updated.desc()):
        gallery.save()
from time import sleep, time
from config import config
class Session:
    """Authenticated Wild Apricot API session (OAuth2 client-credentials flow)."""
    from oauthlib.oauth2 import BackendApplicationClient
    from requests_oauthlib import OAuth2Session

    # Wild Apricot uses "APIKEY" as the client ID
    CLIENT_ID = 'APIKEY'

    def __init__(self):
        client = self.BackendApplicationClient(client_id = self.CLIENT_ID)
        self.oauth2_session = self.OAuth2Session(client = client)
        token = self.oauth2_session.fetch_token(
            config['auth-endpoint'],
            client_id = self.CLIENT_ID,
            client_secret = config['secret'],
            scope = 'auto')
        # Account id comes from config, falling back to the token's permissions.
        self.account = int(config['account'] or token['Permissions'][0]['AccountId'])
        # Timestamp of the last request, used for rate limiting below.
        self.last_request = 0.0

    # request synchronously communicates with the Wild Apricot API
    def request(self, verb, endpoint, params = None, data = None, rpc = False):
        """Issue an API request and return the decoded JSON response.

        Raises Exception on any non-2xx/3xx response.
        Fix: the original used mutable default arguments ({}) and mutated
        `params` in place, leaking '$async' both into the shared default
        and into the caller's dict. Copies are used instead; passing an
        empty/omitted `data` still sends a JSON `{}` body, as before.
        """
        params = dict(params) if params else {}
        data = dict(data) if data else {}
        path_prefix = 'rpc' if rpc else 'accounts'
        endpoint = f'/v2.1/{path_prefix}/{self.account}/{endpoint}'
        params['$async'] = False
        # Rate-limiting because Wild Apricot limits API requests to 60 per
        # minute.
        if time() < self.last_request + 1:
            sleep(1)
        self.last_request = time()
        response = self.oauth2_session.request(
            verb,
            config['api-host'] + endpoint,
            params = params,
            json = data)
        if not response.ok:
            raise Exception(f'{response.status_code}: {response.reason}')
        return response.json()
|
import random
SIX_HANDS = []
PLAYER_LIST = []
if len(PLAYER_LIST) > 6:
PLAYER_LIST.pop()
class Deck(object):
    """A standard 52-card deck supporting a simplified Texas hold'em deal."""

    def __init__(self,):
        self.two_hands = []  # dealt hands; each entry is a list of 2 cards
        self.current = ['2c', '3c', '4c', '5c', '6c', '7c', '8c', '9c', '10c', 'Jc', 'Qc', 'Kc', 'Ac',
                        '2s', '3s', '4s', '5s', '6s', '7s', '8s', '9s', '10s', 'Js', 'Qs', 'Ks', 'As',
                        '2d', '3d', '4d', '5d', '6d', '7d', '8d', '9d', '10d', 'Jd', 'Qd', 'Kd', 'Ad',
                        '2h', '3h', '4h', '5h', '6h', '7h', '8h', '9h', '10h', 'Jh', 'Qh', 'Kh', 'Ah']
        self.hand = []  # scratch space while dealing a single hand
        self.flop = []  # community cards (flop, then turn/river)
        self.pile = []  # burned cards

    def shuffle(self):
        """Return all dealt cards to the deck, then shuffle in place.

        Fixes over the original: two_hands holds *lists* of cards, so it
        must be flattened (the original extended `current` with the hand
        lists themselves, nesting lists inside the deck), and the dealt
        piles must be emptied afterwards (the original left them
        populated, duplicating cards on the next deal).
        """
        for hand in self.two_hands:
            self.current.extend(hand)
        self.current.extend(self.flop)
        self.current.extend(self.pile)
        self.two_hands = []
        self.flop = []
        self.pile = []
        # Fisher-Yates shuffle: walk from the top of the deck, swapping
        # each position with a uniformly random position at or below it.
        for i in range(len(self.current)-1, 0, -1):
            r = random.randint(0, i)
            self.current[i], self.current[r] = self.current[r], self.current[i]

    def deal_hands(self):
        """Deal two hands of two cards each from the top of the deck."""
        # TODO: update this function to simulate dealing one round of cards
        # to each player, then a second round
        for _ in range(2):
            self.hand = [self.current.pop(), self.current.pop()]
            self.two_hands.append(self.hand)
        self.hand = []

    def deal_flop(self):
        """Burn one card, then deal three cards to the flop."""
        self.pile.append(self.current.pop())
        for _ in range(3):
            self.flop.append(self.current.pop())

    def burn1_deal1(self):
        """Burn one card and deal one (used for the turn and the river)."""
        self.pile.append(self.current.pop())
        self.flop.append(self.current.pop())
def my_hand(players, hands):
    """Prompt for a player name and print that player's hand.

    `players` and `hands` are parallel lists linked by shared index.
    Raises ValueError if the entered name is not in `players`.
    """
    # input player name, output their hand
    # the easiest way to do this:
    # link the player and hand by their shared indexes in PLAYER_LIST and SIX_HANDS
    name = input('Who are you?')
    a = players.index(name)
    print(f'Hello {name}, your hand is {hands[a]}')
if __name__ == "__main__":
# TODO: i left off here. trying to initialize the game. will still need to deal flop cards and implement chips.
pass
# poop
|
#!/usr/bin env python
# -*- coding: utf-8 -*-
#Project Neutrino
#Por Cleiton Lima <cleitonlima@fedoraproject.org>
#This file is part of Neutrino Project.
# Neutrino is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Neutrino is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Neutrino. If not, see <http://www.gnu.org/licenses/>.
#
#This script will install extra fonts, present in the system and in others ways.
#
#Elementary Gnome-Shell Theme
from os import environ, path, getenv

# Pick the desktop backend from the session type.
# Fix: DESKTOP_SESSION may be unset, making environ.get() return None and
# the substring tests below raise TypeError; default to '' instead.
desktoptype = environ.get('DESKTOP_SESSION') or ''
if "gnome" in desktoptype :
    from api.base import GBase
    base = GBase()
elif "kde" in desktoptype:
    from api.base import KBase
    base = KBase()
else:
    # Unsupported desktop: `base` stays undefined; install() would fail.
    pass
def install():
    """Apply the 'gs-elementary' Gnome-Shell theme, downloading it first if
    it is not already under ~/.themes/.

    Uses the module-level `base` (GBase/KBase); on unsupported desktops
    `base` is undefined and this raises NameError.
    """
    home = str(getenv("HOME"))
    paths = str("/.themes/")
    if path.isdir(home+paths+str("gs-elementary")) == True:
        base.gshell_theme_apply("gs-elementary")
    else:
        base.gshell_theme_install("http://cleitonlima.com.br/neutrino/packages/gs-elementary.zip", "gs-elementary.zip")
        base.gshell_theme_apply("gs-elementary")

# User-facing description (Portuguese), kept verbatim.
gshell_elementary_DESCRIPTION = str("Tema Elementary para o Gnome-Shell, por half-left. Após atualizar o Gnome-Shell, será necessário reinstalar o tema para que ele volte a aparecer.")
import dash
from dash.dependencies import Output, Input, State
import dash_core_components as dcc
import dash_html_components as html
import plotly
import random
import plotly.graph_objs as go
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(external_stylesheets=external_stylesheets)
server = app.server
colors = {'background': '#ffffff', 'text': '#33B5FF'}
app.layout = html.Div(style={'backgroundColor': colors['background'], 'color': colors['text'], 'height':'100vh', 'width':'100%', 'height':'100%', 'top':'0px', 'left':'0px'},
children=[
# Heading/Title
html.H1(children='WSYB - The app that helps you decide what your next purchase should be.'),
html.Div(style={'width':'100%'},
children=[
# Output
html.Div(id='output_container', style={'display':'inline-block', 'float':'right', 'padding':50}),
html.H4(children='Mandatory selections'),
# Q1
html.Div(children="What are you looking for?"),
html.Div(style={'height': '5px'}),
dcc.Dropdown(
id='category_dropdown',
style={'width': '35%'},
options=[
{'label': 'Camera', 'value': 'Camera'},
{'label': 'Laptop', 'value': 'Laptop'},
{'label': 'Phone', 'value': 'Phone'}
],
),
html.Div(style={'height': '15px'}),
# Q2
html.Div(children="What's your budget?"),
html.Div(style={'height': '5px'}),
dcc.Dropdown(
id='budget_dropdown',
style={'width': '35%'},
options=[
{'label': '< $1k', 'value': 'under_1k'},
{'label': '$1k to 2k', 'value': '1_2k'},
{'label': '> $2k', 'value': 'above_2k'}
],
),
html.Div(style={'height': '15px'}),
# Q3
html.Div(children="Are you ok with used products, or do you want only brand new?"),
html.Div(style={'height': '5px'}),
dcc.Dropdown(
id='used_or_new_dropdown',
style={'width': '35%'},
options=[
{'label': 'Used only', 'value': 'used_only'},
{'label': 'New only', 'value': 'new_only'},
{'label': 'Both used or new', 'value': 'both'}
],
),
html.Div(style={'height': '15px'}),
html.H4(children='Optional selections'),
# Q4
html.Div(children="What do you like to shoot?"),
html.Div(style={'height': '5px'}),
dcc.Checklist(
id='shooting_dropdown',
options=[
{'label': 'Portrait', 'value': 'portrait'},
{'label': 'Landscape', 'value': 'landscape'},
{'label': 'Street', 'value': 'street'},
{'label': 'General/Daily stuff', 'value': 'general'},
],
),
html.Div(style={'height': '15px'}),
# Q5
html.Div(children="Are you looking for any specific sensor size?"),
dcc.Checklist(
id='sensor_dropdown',
options=[
{'label': 'APSC', 'value': 'apsc'},
{'label': 'Full Frame', 'value': 'full_frame'},
{'label': 'Micro four-thirds', 'value': 'm43'},
{'label': 'Medium Format', 'value': 'medium_format'},
],
),
html.Div(style={'height': '10px'}),
# Q6
html.Div(children="Are you looking for any specific brand(s)?"),
html.Div(style={'height': '5px'}),
dcc.Checklist(
id='brand_dropdown',
options=[
{'label': 'Nikon', 'value': 'nikon'},
{'label': 'Canon', 'value': 'canon'},
{'label': 'Sony', 'value': 'sony'},
{'label': 'Leica', 'value': 'leica'},
{'label': 'Fujifilm', 'value': 'fujifilm'},
{'label': 'Olympus', 'value': 'olympus'}
],
),
# Submit button
html.Button('Submit', id='submit_button'),
]),
])
@app.callback(
Output('output_container', 'children'),
[Input('submit_button', 'n_clicks')],
[State('category_dropdown', 'value'),
State('budget_dropdown', 'value'),
State('used_or_new_dropdown', 'value'),
])
def display_buy(n_clicks, category_dropdown, budget_dropdown, used_or_new_dropdown):
if n_clicks:
if category_dropdown=='Camera':
if budget_dropdown=='under_1k':
if used_or_new_dropdown=='both':
return html.H3(children="You should buy: \n1. Fujifilm X-T2\n2. Nikon D5700\n3. Canon 80D"),
elif used_or_new_dropdown=='new_only':
return html.H3(children="You should buy: \n1. Nikon D5700\n2. Canon 80D"),
else:
return html.H3(children="You should buy: \n1. Fujifilm X-T2\n2. Nikon D750"),
elif budget_dropdown=='1_2k':
if used_or_new_dropdown=='both':
return html.H3(children="You should buy: \n1. Fujifilm X-T3\n2. Nikon Z7\n3. Canon EOS R\n4. Fujifilm X-T4\n5. Sony A7 III"),
elif used_or_new_dropdown=='new_only':
return html.H3(children="You should buy: \n1. Nikon D760\n2. Canon R6"),
else:
return html.H3(children="You should buy: \n1. Fujifilm X-T3\n2. Sony A7 III\n3. Nikon Z7"),
elif budget_dropdown=='above_2k':
if used_or_new_dropdown=='both':
return html.H3(children="You should buy: \n1. Sony A7R IV\n2. Fujifilm X-T4\n3. Nikon Z7\n4. Canon R5"),
elif used_or_new_dropdown=='new_only':
return html.H3(children="You should buy: \n1. Nikon Z7\n2. Canon R5"),
else:
return html.H3(children="You should buy: \n1. Leica Q\n2. Sony A7R IV"),
else:
return dash.no_update
if __name__ == '__main__':
app.run_server(debug=True) |
# Fill an array with random numbers
import random

def generuvaty_masyv( n, min, max ):
    """Return a list of n random ints in [min, max] (params shadow builtins)."""
    m = []
    for _ in range(0,n):
        m.append(random.randint(min,max))
    return m

N = 20
masyv = generuvaty_masyv( N, 1, 20 )
print("Заданий масив:", masyv)

# Problem (story version):
# 20 containers are queued for loading onto a ship. Each container's
# weight (1..20 tons) is in the array, and the array order is the queue —
# container 0 is taken first, then the rest in order. The ship carries at
# most 50 tons per trip. All containers must be shipped, loading them in
# queue order without exceeding the per-trip limit. Print which container
# numbers go on each trip and how many trips the ship needs.

# Problem (technical version):
# Split the array into consecutive groups, starting from element 0, so
# that each group's sum does not exceed 50. Print the grouping and the
# number of groups.

# ------------- Simplified output, no formatting
reys = 1
suma = 0
print("Рейс:", reys)
for i in range(0,N):
    if (suma + masyv[i]) <= 50:
        # it fits: add it to the current trip
        suma = suma + masyv[i]
        print(i)
    else:
        # it does not fit: start a new trip with it
        print("Cума:", suma)
        reys = reys + 1
        print("Рейс:", reys)
        suma = masyv[i]
        print(i)
    if i==(N-1):
        print("Cума:", suma)

print("Всього рейсів:", reys)
|
from django.conf.urls import patterns, include, url
from views import GLogListView, GLogDetailView, GLogCreateView, GLogUpdateView

# URL routes for the glog app (pre-Django-1.8 patterns() style).
# The catch-all slug route is last so 'create/' and 'edit/<id>/' match first.
urlpatterns = patterns('',
    url(r'^$', GLogListView.as_view(), name='glog-list'),
    url(r'^create/$', GLogCreateView.as_view(), name='glog-create'),
    url(r'^edit/(?P<id>\d+)/$', GLogUpdateView.as_view(), name='glog-edit'),
    url(r'^(?P<slug>[\w-]+)/$', GLogDetailView.as_view(), name='glog-detail'),
)
from django.core.management.base import BaseCommand, CommandError
import sisyphus.models
import sisyphus.analytics
import django.http
import json
import time
import re
class Command(BaseCommand):
args = "<file_to_load file_to_load ...>"
help = "Load analytics from Google Analytics Top Content report in CSV format."
clean_keys = False
def parse_day(self, day):
# "Saturday, March 10, 2007",0
date, pv = day.split('",')
ts = int(time.mktime(time.strptime(date[1:], "%A, %B %d, %Y")))
return ts, int(pv.strip().replace(',','').replace('"',""))
def parse_url(self, url):
# /entry/2008/jul/12/polishing-up-our-django-boss-search-service/,64965,62184,77.61242753623188,0.5748977339133823,0.575155853151697,0.0
path, views, unique_views, avg_time, bounce, exit, rev = url.split(',')
path_parts = path.split('/')
if path.startswith('/entry/') and len(path_parts) == 7:
return { 'path': path, 'slug':path_parts[-2], 'views':views, 'unique_views': unique_views }
def handle(self, *args, **kwargs):
cli = sisyphus.models.redis_client()
if self.clean_keys:
keys = cli.keys("analytics.*")
if keys:
cli.delete(*keys)
for file in args:
print "Loading data from %s..." % (file,)
with open(file) as fin:
days = []
urls = []
in_days = False
in_urls = False
for line in fin.readlines():
if line.strip().startswith("#") or line.strip() == '':
in_days = False
in_urls = False
elif line.strip() == "Day,Pageviews":
in_days = True
in_urls = False
elif line.strip() == "Page,Pageviews,Unique Pageviews,Avg. Time on Page,Bounce Rate,% Exit,$ Index":
in_urls = True
in_days = False
elif in_days:
days.append(line)
elif in_urls:
urls.append(line)
for day in days:
ts, pv = self.parse_day(day)
day_bucket = ts / (60 * 60 * 24)
cli.zincrby(sisyphus.analytics.ANALYTICS_PAGEVIEW_BUCKET, day_bucket, pv)
for url in urls:
url_data = self.parse_url(url)
if url_data:
cli.zincrby(sisyphus.analytics.ANALYTICS_PAGEVIEW, url_data['slug'], url_data['views'])
cli.zincrby(sisyphus.analytics.ANALYTICS_REFER,
sisyphus.analytics.HISTORICAL_REFERRER,
url_data['views'])
cli.zincrby(sisyphus.analytics.ANALYTICS_REFER_PAGE % url_data['slug'],
sisyphus.analytics.HISTORICAL_REFERRER,
url_data['views'])
print url_data
print "%s days, %s urls" % (len(days), len(urls))
|
#!/usr/bin/python3
# safely-remove: A command-line tool to eject external data devices.
#
# (c) 2020 Alicia Boya Garcia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__version__ = "1.2"
import os
import logging
import sys
from gi.repository import GLib
from pydbus import SystemBus
# Single shared connection to the system D-Bus (where UDisks2 lives).
bus = SystemBus()
# Module-wide logger; enabled at DEBUG level via the -d/--debug flag.
log = logging.getLogger("safely-remove")
def decode_path(path_bytes):
    """Convert a NUL-terminated byte array (as handed over by the UDisks2
    D-Bus API for mount points and device paths) into a str path."""
    assert path_bytes[-1] == 0
    trimmed = bytes(path_bytes[:-1])
    return os.fsdecode(trimmed)
def mount_point_contains_path(mount_point, real_path):
    """Return True when *real_path* lies at or below *mount_point*.

    *mount_point* is a NUL-terminated byte array from D-Bus; *real_path*
    is an already-resolved str path.
    """
    resolved_mount = os.path.realpath(decode_path(mount_point))
    # Appending "/" to both sides prevents "/mnt/usb2" matching "/mnt/usb".
    return (real_path + "/").startswith(resolved_mount + "/")
def find_device_by_mount_point(block_devices, real_path_containing_mount_point):
    """Find the unique block device with a mount point containing the path.

    Returns an (object_path, interfaces) tuple, or None when nothing matches.
    """
    matches = []
    for object_path, interfaces in block_devices.items():
        filesystem = interfaces.get("org.freedesktop.UDisks2.Filesystem", {})
        mount_points = filesystem.get("MountPoints", [])
        if any(mount_point_contains_path(mp, real_path_containing_mount_point)
               for mp in mount_points):
            matches.append((object_path, interfaces))
    # A path can only live inside one mount point's subtree.
    assert len(matches) <= 1, matches
    return matches[0] if matches else None
def find_device_by_raw_device_path(block_devices, real_path):
    """Find the unique block device whose Block.Device equals *real_path*.

    Returns an (object_path, interfaces) tuple, or None when nothing matches.
    """
    matches = []
    for object_path, interfaces in block_devices.items():
        device = interfaces.get("org.freedesktop.UDisks2.Block", {}).get("Device")
        if device is not None and decode_path(device) == real_path:
            matches.append((object_path, interfaces))
    # Device paths are unique, so at most one entry can match.
    assert len(matches) <= 1, matches
    return matches[0] if matches else None
# Returns something like '/org/freedesktop/UDisks2/block_devices/dm_2d1',
# offering a org.freedesktop.UDisks2.Filesystem interface that can be used to
# unmount, and a org.freedesktop.UDisks2.Block interface that can be used to
# check for CryptoBackingDevice.
def find_device_by_path(block_devices, path, search_by_mount_points=True):
    """Resolve *path* to a UDisks2 block device.

    First tries to interpret *path* as a raw device path (e.g. /dev/sdc1);
    when that fails and *search_by_mount_points* is set, falls back to
    matching a mount point that contains the path.

    Returns an (object_path, interfaces) tuple, or None when not found.
    """
    real_path = os.path.realpath(path)
    log.debug("Searching device by raw device path: %s" % real_path)
    match = find_device_by_raw_device_path(block_devices, real_path)
    if not match and search_by_mount_points:
        log.debug("Searching for device with a mount point containing: %s" % real_path)
        match = find_device_by_mount_point(block_devices, real_path)
    if match:
        log.debug("Found device: %s" % match[0])
    else:
        # Fixed log message grammar ("Did not found" -> "Did not find").
        log.debug("Did not find the device.")
    return match
def get_block_devices():
    """Return a {object_path: interfaces} dict of all UDisks2 block devices."""
    udisks = bus.get(
        "org.freedesktop.UDisks2",
        "/org/freedesktop/UDisks2"
    )
    prefix = "/org/freedesktop/UDisks2/block_devices/"
    return {
        object_path: interfaces
        for object_path, interfaces in udisks.GetManagedObjects().items()
        if object_path.startswith(prefix)
    }
def find_drive_by_path(path, search_by_mount_points=True):
    """Resolve *path* to the object path of its UDisks2 Drive, or None.

    Walks down through any crypto (e.g. LUKS) mapping layers so the
    returned drive is the physical device backing the path.
    """
    filesystem_match = find_device_by_path(get_block_devices(), path, search_by_mount_points=search_by_mount_points)
    if not filesystem_match:
        return None
    # Found a device, probably with a file system.
    device_name = filesystem_match[0]
    # Traverse any crypto backing devices
    while True:
        crypto_backing_device = bus.get("org.freedesktop.UDisks2", device_name)['org.freedesktop.UDisks2.Block'].CryptoBackingDevice
        # "/" is the D-Bus convention for "no backing device".
        if crypto_backing_device == "/":
            break
        log.debug(f"Traversing crypto backing device: {crypto_backing_device}")
        device_name = crypto_backing_device
    drive = bus.get("org.freedesktop.UDisks2", device_name).Drive
    log.debug(f"Found drive: {drive}")
    return drive
def find_block_devices_using_drive(drive):
    """Return every block device object path belonging to *drive*.

    Includes encrypted devices stacked on top of the drive's own devices.
    The result is reversed so callers can unmount/lock outermost-first.
    """
    all_block_devices = get_block_devices()
    drive_block_devices = []
    log.debug(f"Looking for the entire tree of block devices using drive: {drive}")
    for block_device, interfaces in all_block_devices.items():
        if interfaces['org.freedesktop.UDisks2.Block']['Drive'] == drive:
            log.debug(f"Found block device: {block_device}")
            drive_block_devices.append(block_device)
    # Let's look for encrypted filesystems using the drive block devices we know
    while True:
        need_rescan = False
        for block_device, interfaces in all_block_devices.items():
            if block_device in drive_block_devices:
                continue
            if interfaces['org.freedesktop.UDisks2.Block']['CryptoBackingDevice'] in drive_block_devices:
                # In theory there may even be recursive crypto devices
                need_rescan = True
                log.debug(f"Found encrypted block device: {block_device}")
                drive_block_devices.append(block_device)
        if not need_rescan:
            break
    # Devices must be unmounted/locked in reverse order they were found
    return list(reversed(drive_block_devices))
def support_interface(dbus_object, interface_name):
    """Return True when *dbus_object* exposes *interface_name*.

    pydbus proxy objects raise KeyError on unknown interfaces, so probing
    by subscript is the simplest membership test.
    """
    try:
        dbus_object[interface_name]
    except KeyError:
        return False
    return True
def safely_remove(path, search_by_mount_points=True):
    """Unmount, lock, and eject/power-off the drive backing *path*.

    Mirrors what a desktop "safely remove" action does: every filesystem
    on the drive is unmounted, every unlocked crypto device is locked,
    then the drive is ejected (removable media) or powered off. Exits the
    process with a non-zero status on any failure.
    """
    drive_name = find_drive_by_path(path, search_by_mount_points)
    if not drive_name:
        print(f"Could not find a drive for {path}", file=sys.stderr)
        sys.exit(1)
    drive = bus.get("org.freedesktop.UDisks2", drive_name)
    # Safety check: never eject an internal (non-removable) disk.
    if not drive['org.freedesktop.UDisks2.Drive'].Removable:
        print(f"Refusing to umount non-removable drive: {drive_name}", file=sys.stderr)
        sys.exit(2)
    block_devices = find_block_devices_using_drive(drive_name)
    log.debug("Proceeding to unmount all block devices in the drive.")
    try:
        for block_device_name in block_devices:
            block_device = bus.get("org.freedesktop.UDisks2", block_device_name)
            if support_interface(block_device, 'org.freedesktop.UDisks2.Filesystem'):
                mounted = len(block_device['org.freedesktop.UDisks2.Filesystem'].MountPoints) > 0
                log.debug(f"Found filesystem, {'unmounting' if mounted else 'already unmounted'}: {block_device_name}")
                if mounted:
                    block_device['org.freedesktop.UDisks2.Filesystem'].Unmount({})
            if support_interface(block_device, 'org.freedesktop.UDisks2.Encrypted'):
                # CleartextDevice == "/" means the device is currently locked.
                unlocked = block_device['org.freedesktop.UDisks2.Encrypted'].CleartextDevice != "/"
                log.debug(f"Found crypto backing device, {'locking' if unlocked else 'already unlocked'}: {block_device_name}")
                if unlocked:
                    block_device['org.freedesktop.UDisks2.Encrypted'].Lock({})
        if drive['org.freedesktop.UDisks2.Drive'].MediaRemovable:
            # Eject the media. This will turn off the light on "has media" light in a SD card
            # reader and eject optical disks from CD drives.
            # UDisk2 under the hood calls the `eject` command, which can fail for hard drives.
            log.debug(f"Ejecting drive: {drive_name}")
            drive['org.freedesktop.UDisks2.Drive'].Eject({})
        else:
            # If the drive does not use removable media, power it off completely.
            log.debug(f"Powering off non-removable media drive: {drive_name}")
            drive['org.freedesktop.UDisks2.Drive'].PowerOff({})
    except GLib.GError as error:
        # Catch errors thrown by the bus (e.g. target is busy)
        print(error, file=sys.stderr)
        sys.exit(1)
if __name__ == '__main__':
    from argparse import ArgumentParser

    # Command-line entry point: one positional path plus debug/raw flags.
    parser = ArgumentParser(description="Safely ejects a device, just like Nautilus does. Doesn't require root.")
    parser.add_argument("--version", action="version", version=f"safely-remove {__version__}")
    parser.add_argument("--raw-device", dest="raw_device", action="store_true", help="interpret path as a device path, don't try to resolve a mount point")
    parser.add_argument("-d", "--debug", dest="debug", action="store_true", help="show debug info")
    parser.add_argument("path",
                        help="path of the device to unmount, be it raw (e.g. /dev/sdc1), a mount point (/var/mount/...) or a path within a mount point")
    args = parser.parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    # --raw-device disables the mount-point fallback search.
    safely_remove(args.path, search_by_mount_points=not args.raw_device)
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Frederik M.J. Vestre
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY <copyright holder> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ResponseWaiter import *;
from SocketStreams import *;
from SimpleRpcController import *;
import Exprintconfig_pb2;
import socket
import sys
from inspect import *
# Python 2 example client for the Exprintserver protobuf-RPC service.
# Connects to a local server, sends a printer configuration, and waits
# for the response.
chan=SocketChannel("localhost",1246);
chan.start()
#Create a rpc controller to send with the rpc method
cont= SimpleRpcController();
#Create a service that wraps the client channel
service = Exprintconfig_pb2.Exprintserver_Stub(chan);
#Create a responsewaiter that can block while waiting a response from the server
waiter = ResponseWaiter(chan,cont);
#Create and build the message that we send to the method, see the proto buffer documentation for more information
reqbld= Exprintconfig_pb2.Exprintconfig();
reqbld.printer="Morrohjornet"
expb = reqbld.exprints.add();
expb.paralell="HappyHour";
expb.subjectcode="TFY4125";
expb = reqbld.exprints.add();
expb.subjectcode="TDT4100"
#Run the RPC method
service.set_config(cont, reqbld,waiter.Callback());
# NOTE(review): `if(True):` appears to stand in for the commented-out
# try/except below, keeping the indented block runnable.
#try:
if(True):
    #Wait for response, if the response is null, the method is canceled or has failed and the rpc controller will report what happened.
    rpo =waiter.Await();
    #Clean up the waiter, free a pointer to the RpcChannel so it may be garbage collected.
    #Remember to reset the waiter if you want to use it again.
    waiter.cleanup();
    if(rpo is None):
        if(cont.Failed()):
            print "Call failed:"+cont.ErrorText();
        elif(cont.IsCanceled()):
            print "Call canceled";
        else:
            print "Channel broken";
    else:
        #Print out the response code from the response
        print rpo.responsecode
    #
    print "Finished"
    chan.shutdown(True);
#except Exception, inst:
#    print"Error"+str(inst)
|
from django.shortcuts import render
from rest_framework import generics
from core.models import Comunidad, Evento,AgentesPatorales,Solicitud
from .serializers import ComunidadSerializer, EventoSerializer,AgenteSerializer,SolicitudSerializer
# Create your views here.
class ComunidadLista(generics.ListCreateAPIView):
    """GET: list all Comunidad records; POST: create a new one."""
    queryset = Comunidad.objects.all()
    serializer_class = ComunidadSerializer
class ComunidadDetalle(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE a single Comunidad by primary key."""
    queryset = Comunidad.objects.all()
    serializer_class = ComunidadSerializer
class EventoLista(generics.ListCreateAPIView):
    """GET: list all Evento records; POST: create a new one."""
    queryset = Evento.objects.all()
    serializer_class = EventoSerializer
class EventoDetalle(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE a single Evento by primary key."""
    queryset = Evento.objects.all()
    serializer_class = EventoSerializer
class SolicitudLista(generics.ListCreateAPIView):
    """GET: list all Solicitud records; POST: create a new one."""
    queryset = Solicitud.objects.all()
    serializer_class = SolicitudSerializer
class SolicitudDetalle(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE a single Solicitud by primary key."""
    queryset = Solicitud.objects.all()
    serializer_class = SolicitudSerializer
class AgenteLista(generics.ListCreateAPIView):
    """GET: list all AgentesPatorales records; POST: create a new one."""
    queryset = AgentesPatorales.objects.all()
    serializer_class = AgenteSerializer
class AgenteDetalle(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE a single AgentesPatorales by primary key."""
    queryset = AgentesPatorales.objects.all()
    serializer_class = AgenteSerializer
|
from __future__ import unicode_literals
import logging
from mopidy import backend
from mopidy.models import SearchResult
logger = logging.getLogger(__name__)
class SubsonicLibraryProvider(backend.LibraryProvider):
    """Mopidy library provider backed by a remote Subsonic server.

    All data access is delegated to ``self.backend.remote`` (the Subsonic
    API wrapper supplied by the backend).
    """

    def __init__(self, *args, **kwargs):
        super(SubsonicLibraryProvider, self).__init__(*args, **kwargs)
        # Shortcut to the backend's Subsonic API client.
        self.remote = self.backend.remote

    def _find_exact(self, query=None, uris=None):
        """Exact search; an empty query means "browse all artists"."""
        if not query:
            # Fetch all artists (browse library)
            return SearchResult(
                uri='subsonic:search',
                tracks=self.remote.get_artists())
        return SearchResult(
            uri='subsonic:tracks',
            tracks=self.remote.get_tracks_by(
                query.get('artist'), query.get('album')))

    def search(self, query=None, uris=None, exact=False):
        """Free-text search; each query field arrives as a list, only the
        first value is used."""
        if exact:
            return self._find_exact(query=query, uris=uris)
        logger.debug('Query "%s":' % query)
        artist = query['artist'][0] if 'artist' in query else None
        album = query['album'][0] if 'album' in query else None
        title = query['track'][0] if 'track' in query else None
        # Renamed from `any` to avoid shadowing the builtin any().
        any_term = query['any'][0] if 'any' in query else None
        return SearchResult(
            uri='subsonic:tracks',
            tracks=self.remote.search_tracks(artist, album, title, any_term))

    def lookup(self, uri):
        """Resolve a single subsonic:// URI to a one-track list; [] on failure."""
        try:
            song_id = uri.split("subsonic://")[1]
            track = self.remote.get_song(song_id)
            return [track]
        except Exception as error:
            logger.debug('Failed to lookup "%s": %s' % (uri, error))
            return []
|
class Solution(object):
    def __init__(self):
        # Number of complete permutations generated so far by dfs().
        self.k = 0

    '''
    这种方法超时了,让求第k个,没必要把所有的排列都求出来
    '''
    # (Translation: this approach times out — to find only the k-th
    # permutation there is no need to enumerate every permutation.)
    def getPermutation(self, n, k):
        """
        :type n: int
        :type k: int
        :rtype: str
        """
        # Digit pool [1..n] in ascending order.
        nums = []
        for i in xrange(1, n + 1):
            nums.append(i)
        res = []
        self.dfs(nums, [], res, k)
        # Join the digits of the k-th permutation into a string.
        s = ''
        for i in res[k - 1]:
            s += str(i)
        return s

    def dfs(self, nums, path, res, k):
        # An empty pool means `path` is a complete permutation.
        if not nums:
            self.k += 1
            res.append(path)
        for i in xrange(len(nums)):
            # Stop branching as soon as the k-th permutation exists.
            if self.k == k:
                break
            self.dfs(nums[:i] + nums[i + 1:], path + [nums[i]], res, k)
# Ad-hoc smoke test (Python 2): prints the 54494th permutation of 1..9.
sol = Solution()
print sol.getPermutation(9, 54494)
|
from os.path import join as pjoin
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 1
_version_micro = '' # use '' for first of series, number for 1 and above
_version_extra = 'dev'
# _version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 2 - Pre-Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Information Analysis"]
# Description should be a one-liner:
description = "afmMiner: an image analysis tool for discovering materials properties from atomic force microscopy"
# Long description will go up on the pypi page
long_description = """
AfmMiner
========
AfmMiner uses **Random Forests**, a kind of non-parametric supervised learning algorithm, to discover materials properties from atomic force microscopy (AFM) images. In particular, afmMiner can predict the photoluminesence of a material given some topographical and electrical AFM data. AfmMiner addresses a fundamental question: **can we predict PL from things that are not difraction limited?**.
To get started using these components in your own software, please go to the
repository README_.
.. _README: https://github.com/wesleybeckner/afm-miner/README.md
License
=======
``afmMiner`` is licensed under the terms of the MIT license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2015--, Wesley Beckner, The University of Washington
eScience Institute.
"""
NAME = "AfmMiner"
MAINTAINER = "Wesley Beckner"
MAINTAINER_EMAIL = "wesleybeckner@gmail.com"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/wesleybeckner/afm-miner"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "Wesley Beckner"
AUTHOR_EMAIL = "wesleybeckner@gmail.com"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGE_DATA = {'afmMiner': [pjoin('data', '*')]}
REQUIRES = ["numpy"]
|
b = 7


def verdubbelB():
    """Double the module-level variable ``b`` in place."""
    # BUG FIX: without `global`, the assignment makes `b` local and the
    # read raises UnboundLocalError.
    global b
    b = b + b


verdubbelB()
print(b)

import time

# Print the current wall-clock time as HH:MM:SS.
print(time.strftime(("%H:%M:%S")))


def f(y):
    """Return 2*y + 1."""
    return 2*y + 1


def g(x):
    """Return x + 15."""
    return 5 + x + 10


# BUG FIX: this call was placed before g() was defined, raising NameError
# at import time; it now runs after both functions exist.
print(f(3)+g(3))
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WikipediaToxicitySubtypes from Jigsaw Toxic Comment Classification Challenge."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
# BibTeX citation for the paper behind the underlying talk-page corpus.
_CITATION = """
@inproceedings{10.1145/3038912.3052591,
author = {Wulczyn, Ellery and Thain, Nithum and Dixon, Lucas},
title = {Ex Machina: Personal Attacks Seen at Scale},
year = {2017},
isbn = {9781450349130},
publisher = {International World Wide Web Conferences Steering Committee},
address = {Republic and Canton of Geneva, CHE},
url = {https://doi.org/10.1145/3038912.3052591},
doi = {10.1145/3038912.3052591},
booktitle = {Proceedings of the 26th International Conference on World Wide Web},
pages = {1391-1399},
numpages = {9},
keywords = {online discussions, wikipedia, online harassment},
location = {Perth, Australia},
series = {WWW '17}
}
"""

# Dataset description shown in the TFDS catalog.
_DESCRIPTION = """
This version of the Wikipedia Toxicity Subtypes dataset provides access to the
primary toxicity label, as well the five toxicity subtype labels annotated by
crowd workers. The toxicity and toxicity subtype labels are binary values
(0 or 1) indicating whether the majority of annotators assigned that
attributes to the comment text.
The comments in this dataset come from an archive of Wikipedia talk pages
comments. These have been annotated by Jigsaw for toxicity, as well as a variety
of toxicity subtypes, including severe toxicity, obscenity, threatening
language, insulting language, and identity attacks. This dataset is a replica of
the data released for the Jigsaw Toxic Comment Classification Challenge on
Kaggle, with the training set unchanged, and the test dataset merged with the
test_labels released after the end of the competition. Test data not used for
scoring has been dropped. This dataset is released under CC0, as is the
underlying comment text.
See the Kaggle documentation or
https://figshare.com/articles/Wikipedia_Talk_Labels_Toxicity/4563973 for more
details.
"""

# Zip archive containing the train/test CSV files.
_DOWNLOAD_URL = 'https://storage.googleapis.com/jigsaw-unintended-bias-in-toxicity-classification/wikipedia_toxicity_subtypes.zip'
class WikipediaToxicitySubtypes(tfds.core.GeneratorBasedBuilder):
    """Classification of 220K Wikipedia talk page comments for types of toxicity.

    This version of the Wikipedia Toxicity Subtypes dataset provides access to the
    primary toxicity label, as well the five toxicity subtype labels annotated by
    crowd workers. The toxicity and toxicity subtype labels are binary values
    (0 or 1) indicating whether the majority of annotators assigned that
    attributes to the comment text.

    See the Kaggle documentation or
    https://figshare.com/articles/Wikipedia_Talk_Labels_Toxicity/4563973 for more
    details.
    """

    VERSION = tfds.core.Version('0.2.0', 'Updated features for consistency with '
                                'CivilComments dataset.')

    def _info(self):
        # One text input plus six binary (0/1) labels stored as floats.
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict({
                'text': tfds.features.Text(),
                'toxicity': tf.float32,
                'severe_toxicity': tf.float32,
                'obscene': tf.float32,
                'threat': tf.float32,
                'insult': tf.float32,
                'identity_attack': tf.float32,
            }),
            supervised_keys=('text', 'toxicity'),
            homepage='https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/data',
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Both splits live in one zip archive; download/extract it once.
        dl_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
        return [
            tfds.core.SplitGenerator(
                name=tfds.Split.TRAIN,
                gen_kwargs={
                    'filename': os.path.join(dl_path, 'wikidata_train.csv')
                },
            ),
            tfds.core.SplitGenerator(
                name=tfds.Split.TEST,
                gen_kwargs={'filename': os.path.join(dl_path, 'wikidata_test.csv')},
            ),
        ]

    def _generate_examples(self, filename):
        """Yields examples.

        Each example contains a text input and then six annotation labels.

        Args:
          filename: the path of the file to be read for this split.

        Yields:
          A dictionary of features, all floating point except the input text.
        """
        with tf.io.gfile.GFile(filename) as f:
            reader = csv.DictReader(f)
            for row in reader:
                example = {}
                example['text'] = row['comment_text']
                example['toxicity'] = float(row['toxic'])
                example['severe_toxicity'] = float(row['severe_toxic'])
                # CSV column 'identity_hate' maps to the 'identity_attack'
                # feature name used by CivilComments.
                example['identity_attack'] = float(row['identity_hate'])
                for label in ['obscene', 'threat', 'insult']:
                    example[label] = float(row[label])
                yield row['id'], example
|
from airflow import DAG
from datetime import datetime, timedelta
from airflow.providers.amazon.aws.operators.glue import AwsGlueJobOperator

# Default task arguments applied to every task in this DAG.
default_args = {
    "owner": "airflow-user",
    # BUG FIX: a dynamic start_date (datetime.today()) is an Airflow
    # anti-pattern — the scheduler can never establish a stable first
    # data interval, so @daily runs may never fire. Use a static date.
    "start_date": datetime(2021, 1, 1),
    "depends_on_past": False,
    "email_on_failure": False,
    "email_on_retry": False,
    "email": "<your-email-address>",
    "retries": 1,
    "retry_delay": timedelta(minutes=5)
}

# Daily DAG with a single task that starts an AWS Glue job.
with DAG(dag_id="start-glue-job", schedule_interval="@daily", default_args=default_args, catchup=False) as dag:
    glue_job = AwsGlueJobOperator(
        task_id='my_glue_job',
        job_name='<your-Glue-job-name>',
        num_of_dpus=5,
        region_name='<region>'
    )
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implementation of various module magics"""
from __future__ import absolute_import
from __future__ import unicode_literals
try:
import IPython
import IPython.core.magic
except ImportError:
raise Exception('This module can only be loaded in ipython.')
import sys
import types
from . import _commands
from . import _utils
@IPython.core.magic.register_line_cell_magic
def pymodule(line, cell=None):
    """Creates and subsequently auto-imports a python module.
    """
    # Parse the magic's arguments (-n/--name); the actual module creation
    # is delegated to _pymodule_cell via set_defaults.
    parser = _commands.CommandParser.create('pymodule')
    parser.add_argument('-n', '--name',
                        help='the name of the python module to create and import')
    parser.set_defaults(func=_pymodule_cell)
    return _utils.handle_magic_line(line, cell, parser)
def _pymodule_cell(args, cell):
    """Create a module from the cell body and expose it in the notebook.

    Raises when the magic is used without a cell body.
    """
    if cell is None:
        raise Exception('The code for the module must be included')
    module_name = args['name']
    module = _create_python_module(module_name, cell)
    # Bind the new module to a variable of the same name in the user
    # namespace, so it is immediately importable/usable.
    IPython.get_ipython().push({module_name: module})
def _create_python_module(name, code):
# By convention the module is associated with a file name matching the module name
module = types.ModuleType(str(name))
module.__file__ = name
module.__name__ = name
exec(code, module.__dict__)
# Hold on to the module if the code executed successfully
sys.modules[name] = module
return module
|
import sys

# Redirect stdin so input() reads the tree description from input.txt.
sys.stdin = open('input.txt')
def preorder(node):
    """Print root -> left -> right; '.' marks a missing child."""
    if node == '.':
        return
    print(node, end='')
    left, right = graph[node]
    preorder(left)
    preorder(right)
def inorder(node):
    """Print left -> root -> right; '.' marks a missing child."""
    if node == '.':
        return
    left, right = graph[node]
    inorder(left)
    print(node, end='')
    inorder(right)
def postorder(node):
    """Print left -> right -> root; '.' marks a missing child."""
    if node == '.':
        return
    left, right = graph[node]
    postorder(left)
    postorder(right)
    print(node, end='')
# Read N nodes, each given as "parent left-child right-child" ('.' means
# the child is absent), then print the three traversals from root 'A'.
N = int(input())
graph = {}
for _ in range(N):
    parent, child1, child2 = input().split()
    graph[parent] = [child1, child2]
preorder('A')
print()
inorder('A')
print()
postorder('A')
print()
|
import boto3
import uuid
import sys

s3 = boto3.resource('s3')

bucket_name = "image-pattern"
bucket = s3.Bucket(bucket_name)

# List the keys currently in the working bucket.
for obj in bucket.objects.all():
    print(obj.key)

# BUG FIX: get_bucket_location is a *client* API, not a method on the
# resource object; calling it on the resource raises AttributeError.
response = s3.meta.client.get_bucket_location(
    Bucket=bucket_name
)

# LIST AVAILABLE BUCKETS
for bucket in s3.buckets.all():
    # BUG FIX: was a Python 2 print statement in an otherwise
    # print()-style script; this form works on both 2 and 3.
    print("Bucket: " + bucket.name)
    # for obj in bucket.objects.all():
    #     print(obj.key)

sys.exit(0)

# NOTE(review): everything below is unreachable because of sys.exit(0)
# above — presumably kept for manual one-off runs.
# DELETE BUCKET IF AVAILABLE
bucket = s3.Bucket(bucket_name)
if bucket in s3.buckets.all():
    bucket.delete()

# CREATE BUCKET
s3.create_bucket(
    Bucket=bucket_name,
    CreateBucketConfiguration={
        'LocationConstraint': 'us-west-2'
    },
)

# LOAD IMAGES PATTERNS
s3.Object(bucket_name, 'pink.jpg').put(Body=open('images/pink.jpg', 'rb'))
|
# Generated by Django 2.0.9 on 2018-11-10 15:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace Project.duration with a nullable end_date DateField.

    Auto-generated by Django 2.0.9; avoid editing by hand.
    """

    dependencies = [
        ('crm', '0017_auto_20181110_1456'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='project',
            name='duration',
        ),
        migrations.AddField(
            model_name='project',
            name='end_date',
            field=models.DateField(blank=True, null=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
import sys
from dill import dill
from MyAPI.InderScience import InderScience
from MyAPI.MyBs import MyBs
from MyAPI.MySele import MySele
from MyAPI.ScienceDirect import ScienceDirect
from MyAPI.Tandfonline import Tandfonline
# Tandfonline journal (vece20): volume number -> number of issues to fetch.
tand = {
    40: 4,
    41: 4,
    42: 4,
    43: 4,
    44: 4,
    45: 4,
    46: 4,
    47: 4,
    48: 4,
    49: 2
}

# ScienceDirect IREE journal: volume number -> number of issues to fetch.
# (Volume 16 is absent here on purpose: it is split into parts PA/PB and
# handled separately in create_urls_sd.)
sd = {
    8: 2,
    9: 2,
    10: 2,
    11: 2,
    12: 1,
    13: 1,
    14: 1,
    15: 1,
    17: 1,
    18: 1,
    19: 1,
    20: 1,
    21: 1,
    22: 1,
    23: 1,
    24: 1,
    25: 1,
    26: 1,
    27: 1
}
def create_urls_tand():
    """Build the Tandfonline table-of-contents URL for every issue of
    every volume listed in the module-level ``tand`` mapping."""
    base_url = "https://www.tandfonline.com/toc/vece20/"
    return [
        base_url + str(volume) + "/" + str(issue + 1)
        for volume, issue_count in tand.items()
        for issue in range(issue_count)
    ]
def create_urls_sd():
    """Build ScienceDirect IREE issue URLs from the ``sd`` mapping, plus
    the two special part-URLs for volume 16."""
    template = "https://www.sciencedirect.com/journal/international-review-of-economics-education/vol/{:d}/issue/{:d}"
    urls = [
        template.format(volume, issue + 1)
        for volume, issue_count in sd.items()
        for issue in range(issue_count)
    ]
    # Volume 16 is published in two parts instead of numbered issues.
    urls.append("https://www.sciencedirect.com/journal/international-review-of-economics-education/vol/16/part/PA")
    urls.append("https://www.sciencedirect.com/journal/international-review-of-economics-education/vol/16/part/PB")
    return urls
def create_urls_is():
    """Build the InderScience IJPEE table-of-contents URLs (volumes 1-8).

    Volume 1 has irregular issues (a double issue 1/2 in 2009, issues 3
    and 4 in 2010); volumes 2-8 each have four issues, published in the
    year 2009 + volume.
    """
    base = ("http://www.inderscience.com/info/inarticletoc.php"
            "?jcode=ijpee&year={}&vol={}&issue={}")
    urls = [
        base.format(2009, 1, "1/2"),
        base.format(2010, 1, 3),
        base.format(2010, 1, 4),
    ]
    for vol in range(2, 9):
        for issue in range(1, 5):
            urls.append(base.format(2009 + vol, vol, issue))
    return urls
def get_source_tand():
    """Fetch every Tandfonline issue page via Selenium and pickle them."""
    sele = MySele()
    pages = []
    for url in create_urls_tand():
        page = Tandfonline()
        page.set_src(sele.get_source(url))
        page.url = url
        pages.append(page)
    Tandfonline.save(pages, "tand.dat")
def get_source_sd():
    """Fetch every ScienceDirect issue page via Selenium and pickle them."""
    sele = MySele()
    pages = []
    for url in create_urls_sd():
        page = ScienceDirect()
        page.src = sele.get_source(url)
        page.url = url
        pages.append(page)
    ScienceDirect.save(pages, "sd.dat")
def get_source_is():
    """Fetch every InderScience issue page and pickle them, printing a
    simple progress counter along the way."""
    urls = create_urls_is()
    bs = MyBs()
    pages = []
    print("combien ? " + str(len(urls)))
    for index, url in enumerate(urls):
        print(str(index) + " / " + str(len(urls)))
        page = InderScience()
        page.src = bs.get_source(url)
        page.url = url
        pages.append(page)
    InderScience.save(pages, "is.dat")
def tandfonline():
    """Unpickle the cached Tandfonline pages and parse each one."""
    sys.setrecursionlimit(10000)  # pickled page objects recurse deeply
    # get_source_tand()  # uncomment to re-fetch the sources first
    pages = Tandfonline.load("tand.dat")
    for page in pages:
        page.parse()
    return pages
def science_direct():
    """Unpickle the cached ScienceDirect pages and parse each one."""
    sys.setrecursionlimit(10000)  # pickled page objects recurse deeply
    # get_source_sd()  # uncomment to re-fetch the sources first
    pages = ScienceDirect.load("sd.dat")
    for page in pages:
        page.parse()
    return pages
def inder_science():
    """Unpickle the cached InderScience pages and parse each one."""
    sys.setrecursionlimit(10000)  # pickled page objects recurse deeply
    # get_source_is()  # uncomment to re-fetch the sources first
    pages = InderScience.load("is.dat")
    for page in pages:
        page.parse()
    return pages
def main():
    """Parse all three cached journal archives, then export each to CSV."""
    parsed = []
    for loader, label, csv_name in (
            (tandfonline, "TAND", "JEE.csv"),
            (science_direct, "SD", "IREE.csv"),
            (inder_science, "IS", "IJPEE.csv")):
        pages = loader()
        print(label + " DONE")
        parsed.append((pages, csv_name))
    # All parsing happens before any CSV is written, as in the original flow.
    for pages, csv_name in parsed:
        for page in pages:
            page.save_to_csv(csv_name)


if __name__ == "__main__":
    main()
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth.models import User
from notifications.signals import notify
from django.shortcuts import reverse
# On registration, send a notification to the freshly created account.
@receiver(post_save, sender=User)
def send_user_save_notifications(sender, instance, **kwargs):
    """post_save receiver: welcome a new User with a link to their profile.

    The notification verb text is user-facing Chinese ("registration
    succeeded — click to view your user info").
    """
    # Only fire on creation, not on subsequent saves.
    if kwargs['created']:
        verb = '注册成功!点击查看你的用户信息!'
        url = reverse('user_info')
        notify.send(instance, recipient=instance, verb=verb, action_object=instance, url=url)
|
# Rotate a matrix by 90 degrees clockwise.
def rotate_a_matrix_by_90_degree(a):
    """Return *a* rotated 90 degrees clockwise.

    *a* is an n x m list-of-lists; the result is m x n.
    """
    n = len(a)     # number of rows
    m = len(a[0])  # number of columns
    # BUG FIX: rotating an n x m matrix yields an m x n matrix; the
    # original allocated n x m, which raises IndexError for m != n.
    result = [[0] * n for _ in range(m)]
    for i in range(n):
        for j in range(m):
            result[j][n-1-i] = a[i][j]
    return result

# a = [[1, 2], [3, 4]]
# print(rotate_a_matrix_by_90_degree(a))
def check(length, new_lock):
    """Return True when the central length x length region of *new_lock*
    (a 3*length x 3*length grid) contains only 1s."""
    centre = range(length, 2 * length)
    return all(new_lock[row][col] == 1 for row in centre for col in centre)
def solution(key, lock):
    """Return True if some rotation/translation of key opens lock.

    The lock is embedded in the centre of a board three times its size;
    the key is stamped at every offset, and the lock opens when the centre
    becomes all 1s (every hole filled, no pin overlapped).
    """
    for _ in range(4):
        key = rotate_a_matrix_by_90_degree(key)
        size = len(lock)
        ksize = len(key)
        # Fresh 3x board with the lock copied into the middle.
        board = [[0] * (3 * size) for _ in range(3 * size)]
        for r in range(size):
            for c in range(size):
                board[r + size][c + size] = lock[r][c]
        for x in range(3 * size - ksize + 1):
            for y in range(3 * size - ksize + 1):
                # Stamp the key onto the board at offset (x, y).
                for r in range(ksize):
                    for c in range(ksize):
                        board[r + x][c + y] += key[r][c]
                if check(size, board):
                    return True
                # Undo the stamp before trying the next offset.
                for r in range(ksize):
                    for c in range(ksize):
                        board[r + x][c + y] -= key[r][c]
    return False
# For test solution
# Smoke test: rotating the key fills the lock's single hole -> expect True.
key = [[0, 0],[0, 1]]
lock = [[1, 1], [1, 0]]
print(solution(key, lock))
|
from django.core.management.base import BaseCommand
from ktapp import models
class Command(BaseCommand):
    """Merge one KTUser into another: move votes and comments, then repair
    both users' cached comment metadata."""

    help = "Merge users"

    def add_arguments(self, parser):
        parser.add_argument("source_user_id", type=int)
        parser.add_argument("target_user_id", type=int)

    def handle(self, *args, **options):
        """Entry point: resolve both users and run the merge steps in order."""
        self.source_user = models.KTUser.objects.get(id=options["source_user_id"])
        self.target_user = models.KTUser.objects.get(id=options["target_user_id"])
        self.stdout.write(
            "Merging user {} into user {}...".format(self.source_user, self.target_user)
        )
        self.move_votes()
        self.move_comments()
        self.stdout.write("User merged.")

    def move_votes(self):
        # If the target has no vote for the film, adopt the source's rating;
        # if the target already voted, keep theirs. The source vote is
        # deleted either way.
        for vote in models.Vote.objects.filter(user=self.source_user).order_by("id"):
            self.stdout.write(
                "Moving vote {} for film {}...".format(vote.rating, vote.film)
            )
            models.Vote.objects.get_or_create(
                film=vote.film,
                user=self.target_user,
                defaults={"rating": vote.rating, "shared_on_facebook": False},
            )
            vote.delete()

    def move_comments(self):
        """Reassign every comment to the target user, syncing film-comment
        ratings with the target's (possibly just-moved) votes."""
        for comment in models.Comment.objects.filter(
            created_by=self.source_user
        ).order_by("id"):
            self.stdout.write(
                "Moving comment {} at {} on {} {}...".format(
                    comment.id,
                    comment.created_at,
                    comment.domain,
                    comment.domain_object,
                )
            )
            comment.created_by = self.target_user
            if comment.domain == models.Comment.DOMAIN_FILM:
                # Film comments display the commenter's rating; refresh it
                # from the target user's vote (or clear it).
                try:
                    vote = models.Vote.objects.get(
                        film=comment.film, user=self.target_user
                    )
                    comment.rating = vote.rating
                except models.Vote.DoesNotExist:
                    comment.rating = None
            comment.save()
        self.fix_comment_metadata(self.target_user)
        self.fix_comment_metadata(self.source_user)

    def fix_comment_metadata(self, user):
        """Recompute the user's cached latest-comment list, per-domain comment
        counts, and each comment's per-user serial number."""
        self.stdout.write("Fixing comment metadata for user {}...".format(user))
        user.latest_comments = ",".join(
            [
                # BUG FIX: unicode() exists only on Python 2 (NameError on
                # Python 3); str() yields the same digits on both.
                str(comment.id)
                for comment in user.comment_set.all().order_by("-created_at", "-id")[
                    :100
                ]
            ]
        )
        user.number_of_comments = user.comment_set.count()
        user.number_of_film_comments = user.comment_set.filter(
            domain=models.Comment.DOMAIN_FILM
        ).count()
        user.number_of_topic_comments = user.comment_set.filter(
            domain=models.Comment.DOMAIN_TOPIC
        ).count()
        user.number_of_poll_comments = user.comment_set.filter(
            domain=models.Comment.DOMAIN_POLL
        ).count()
        user.save(
            update_fields=[
                "latest_comments",
                "number_of_comments",
                "number_of_film_comments",
                "number_of_topic_comments",
                "number_of_poll_comments",
            ]
        )
        # Renumber the user's comments chronologically, starting at 1.
        for idx, comment in enumerate(
            models.Comment.objects.filter(created_by=user).order_by("created_at", "id")
        ):
            comment.serial_number_by_user = idx + 1
            comment.save()
|
#!/usr/bin/python
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from tensorflow.python.platform import gfile
def reformat(dataset, labels):
    """Reshape images for CNN or flat input and one-hot encode the labels.

    labels column 0 holds the digit count (1..num_digits); columns 1..3 hold
    the three digits (num_labels classes each). Returns (dataset, labels)
    where labels is the horizontal stack of all four one-hot encodings.
    """
    if use_cnn:
        shape = (-1, image_sizeX, image_sizeY, num_channels)
    else:
        shape = (-1, image_sizeX * image_sizeY * num_channels)
    dataset = dataset.reshape(shape).astype(np.float32)

    def one_hot(column, depth):
        # (values == arange) broadcast trick -> one-hot rows
        return (np.arange(depth) == column[:, None]).astype(np.float32)

    encoded = [one_hot(labels[:, 0] - 1, num_digits)]
    for col in (1, 2, 3):
        encoded.append(one_hot(labels[:, col], num_labels))
    return dataset, np.hstack(encoded)
# ---- Dataset constants and model configuration ----
pickle_file = './svhn_3digits_gray.pickle'
use_cnn = True
image_sizeX =32
image_sizeY = 32
num_channels = 1
num_digits = 3
num_labels = 11
# Start by reading in the pickle datasets
print("Reading pickle file {}".format(pickle_file))
with open(pickle_file, 'rb') as f:
    save = pickle.load(f)
    train_dataset = save['train_dataset']
    train_labels = save['train_labels']
    valid_dataset = save['valid_dataset']
    valid_labels = save['valid_labels']
    test_dataset = save['test_dataset']
    test_labels = save['test_labels']
    del save  # hint to help gc free up memory
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
graph = tf.Graph()
# NOTE(review): batch_size is never defined in this script, so the next line
# raises NameError as written -- confirm the intended value (the checkpoint
# name below suggests bs=128).
tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_sizeX, image_sizeY, num_channels))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_digits + num_digits * num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
model_name = "./logs/svhm_cnn_dep_16_ps_5_reg_0.01_lr_0.002_nnl1_1024_nnl2_512_bs_128_ts_full_06.01PM_November_03_2016_"
with tf.Session(graph=graph) as session:
    print("Load graph...")
    # Restore the serialized GraphDef, then map variables from the checkpoint.
    with gfile.FastGFile(model_name + ".proto", 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        # NOTE(review): persisted_sess is undefined (NameError); this was
        # presumably meant to be session.graph.as_default() -- confirm.
        persisted_sess.graph.as_default()
        tf.import_graph_def(graph_def, name='')
    print("Map variables...")
    saver = tf.train.Saver()
    saver.restore(session, "./logs/svhm_cnn_dep_16_ps_5_reg_0.01_lr_0.002_nnl1_1024_nnl2_512_bs_128_ts_full_06.01PM_November_03_2016_")
    # NOTE(review): the time module is never imported in this file -- confirm.
    start = time.time()
    # Single-sample inference on the first test image.
    input_image = test_dataset[0]
    input_label = test_labels[0]
    if use_cnn:
        input_placeholder = tf.placeholder(tf.float32, shape=(1, image_sizeX, image_sizeY, num_channels))
    else:
        input_placeholder = tf.placeholder(tf.float32, shape=(1, image_sizeX * image_sizeY * num_channels))
    label_placeholder = tf.placeholder(tf.float32, shape=(1, num_digits + num_digits * num_labels))
    input_image = input_image[np.newaxis, ...]
    input_label = input_label[np.newaxis, ...]
    # NOTE(review): keep_prob and full_model are not defined in this script;
    # they presumably come from the training script this was copied from.
    feed_dict = {input_placeholder : input_image, label_placeholder : input_label, keep_prob: 1.0}
    test_pnd, test_pd1, test_pd2, test_pd3 = full_model(input_placeholder)
    pnd, pd1, pd2, pd3 = session.run([test_pnd, test_pd1, test_pd2, test_pd3], feed_dict=feed_dict)
    end = time.time()
    print("Time taken for single inference : {} seconds".format(end - start))
    print("Test image labels: {}".format(test_labels[0]))
    print("Neural Network predicted : {}".format([pnd, pd1, pd2, pd3]))
    print("Predicted num digits : {}".format(np.argmax(pnd)+1))
    print("Predicted number (0=noNum, 1...10= 1 to 0) : {}{}{}".format(np.argmax(pd1), np.argmax(pd2), np.argmax(pd3)))
    # NOTE(review): matplotlib.pyplot is never imported here (plt undefined).
    plt.imshow(input_image.reshape(32, 32), cmap=plt.cm.binary)
    plt.show()
|
'''
Тест позволяет проверить работу функции, сымитировать набор данных на виртуальной клавиатуре пользователем.
Данные берутся из внешнего файла -- data_OGZ_plane.tsv.
'''
# Инициализация полей ввода
x_field = "{container=':mainWidget.qstw_mode_QStackedWidget' name='qle_ogz_x' type='QLineEdit' visible='1'}"
y_field = "{container=':mainWidget.qstw_mode_QStackedWidget' name='qle_ogz_y' type='QLineEdit' visible='1'}"
x1_field = "{container=':mainWidget.qstw_mode_QStackedWidget' name='qle_ogz_x_2' type='QLineEdit' visible='1'}"
y1_field = "{container=':mainWidget.qstw_mode_QStackedWidget' name='qle_ogz_y_2' type='QLineEdit' visible='1'}"
def init():
    """Squish hook: load the shared helper script before the test runs."""
    source(findFile("scripts", "script_file_squish.py"))
    # NOTE(review): bare all() here looks like a helper defined in the
    # sourced script, not the builtin -- confirm.
    all()
# Default click coordinates; main() passes its own per-field values, so
# these module-level defaults appear unused -- confirm before removing.
point1 = 143
point2 = 12
def cliking(x, x_field, point1, point2):  # Type a value on the virtual keyboard
    """Click into *x_field* at (point1, point2), then type the characters of
    *x* by pressing the matching on-screen keyboard buttons."""
    value_X = []  # Start with an empty list
    for i in x:
        value_X.append (i)  # Split the table value into individual characters
    mouseClick(waitForObject(x_field), point1, point2, 0, Qt.LeftButton)  # Put the cursor into the target field
    for key in value_X:  # Look up each character on the virtual keyboard
        clickButton((dict_keyboard[key]))  # Press the key for this character
        snooze(0.2)  # Delay, otherwise the clicks outpace the keyboard
def main():
    """Data-driven Squish test: for each row of data_OGZ_plane.tsv, type the
    four coordinates via the virtual keyboard and compare the computed S and
    A results against the expected values from the file."""
    num = 0
    startApplication("ArmkApplication")
    clickButton(waitForObject(route_b))
    clickButton(waitForObject(geo_calc_b))
    clickButton(waitForObject(OGZ_rb))
    # Fill in the OGZ-plane (inverse geodetic problem) mode
    for record in testData.dataset("data_OGZ_plane.tsv"):
        x = testData.field(record, "X")
        y = testData.field(record, "Y")
        x1 = testData.field(record, "X1")
        y1 = testData.field(record, "Y1")
        result_S_n = testData.field(record, "result_S")
        result_A_n = testData.field(record, "result_A")
        x11_point = 143  # (x; y) cursor coordinates for each input field
        x12_point = 12
        y11_point = 142
        y12_point = 15
        x21_point = 152
        x22_point = 17
        y21_point = 152
        y22_point = 13
        # Counter of executed test cases
        num += 1
        test.log("Тест №", str(num))
        cliking(x, x_field, x11_point, x12_point)
        cliking(y, y_field, y11_point, y12_point)
        cliking(x1, x1_field, x21_point, x22_point)
        cliking(y1, y1_field, y21_point, y22_point)
        # Compare the displayed result with the expected result_S_n / result_A_n
        # columns from data_OGZ_plane.tsv.
        test.compare(str(waitForObjectExists(":qstw_mode.qle_ogz_s_QLineEdit").displayText), result_S_n)
        test.compare(str(waitForObjectExists(":qstw_mode.qle_ogz_a_QLineEdit").displayText), result_A_n)
        # The "Result in DU" checkbox must be disabled: enabled = False
        test.compare(waitForObjectExists(":mainWidget.qchk_resultInDE_QCheckBox").enabled, False)
    test.log("Всего тестов выполнено", str(num))
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
# --- Part 1: original image and its intensity histogram --------------------
image = cv2.imread("lena.bmp", cv2.IMREAD_GRAYSCALE)
cv2.imwrite("1_original_lena.bmp", image)
plt.hist(image.reshape(image.size), bins=range(256))
plt.savefig("1_original_histogram.png")

# --- Part 2: intensities divided by 3 (darkened image) ---------------------
darkened = image / 3
cv2.imwrite("2_devide3_lena.bmp", darkened)
plt.clf()
plt.hist(darkened.reshape(darkened.size), bins=range(256))
plt.savefig("2_devide3_histogram.png")

# --- Part 3: histogram equalization of the darkened image ------------------
# Cumulative count of each intensity value.
cnt = [0] * 255
for value in darkened.reshape(darkened.size):
    cnt[int(value)] += 1
for i in range(1, len(cnt)):
    cnt[i] += cnt[i - 1]
# Map every pixel through the normalized cumulative distribution.
histoList = [255 * cnt[int(v)] / darkened.size for v in darkened.reshape(darkened.size)]
histoEq = np.array(histoList).reshape(512, 512)
cv2.imwrite("3_histo_lena.bmp", histoEq)
plt.clf()
plt.hist(histoList, bins=range(256))
plt.savefig("3_histo_histogram.png")
#!/usr/bin/python3
"""find the number that occure only one time in an array"""
def solution(A):
    """Return the element of A that occurs exactly once (O(n^2) scan)."""
    for candidate in A:
        if A.count(candidate) == 1:
            return candidate
#This algorithm is correct but it will take O(n²) to pass, which is not so optimal
#another solution is:
def solution(A):
    """Second attempt: sort A, then look for an element strictly between its
    neighbours (pairs sort adjacent, so only the unique value qualifies).

    NOTE(review): A.sort() inside the loop is wasted work after the first
    pass; at i == 0 the A[i - 1] comparison wraps to the last element; and
    when the unique element sorts last, A[i + 1] raises IndexError -- this
    version is shown as a flawed stepping stone, not a correct solution.
    """
    for i in range(len(A)):
        A.sort()
        if A[i] > A[i - 1] and A[i] < A[i + 1]:
            return A[i]
# Also not very optimal: the built-in sort() alone costs O(n log n).
def solution(A):
    """Third attempt: bubble-sort via sort_it, then scan for the element
    strictly between its neighbours.

    NOTE(review): same boundary flaws as the previous version -- at i == 0
    the comparison wraps to A[-1], and a unique element in the last scanned
    position is missed; kept here as another illustrative (flawed) attempt.
    """
    A = sort_it(A)
    for i in range(len(A) - 1):
        if A[i] > A[i - 1] and A[i] < A[i + 1]:
            return A[i]
# Correct but O(n^2): a hand-rolled bubble sort used by the attempt above.
def sort_it(arr):
    """Bubble-sort *arr* in place and return it."""
    n = len(arr)
    for done in range(n):
        # After `done` passes, the last `done` elements are already in place.
        for j in range(n - done - 1):
            # Swap neighbours that are out of order.
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr
### The perfect answer is:
def solution(A):
    """XOR every element: paired values cancel out, leaving the unique one.

    O(n) time, O(1) extra space.
    """
    acc = 0
    for value in A:
        acc ^= value
    return acc
### Another good answer: count occurrences with a dict, O(n) time.
def solution(A):
    """Return the value of A that occurs exactly once, using a count dict.

    Returns 0 when no element occurs exactly once (the original default).
    """
    counts = {}
    for item in A:
        counts[item] = counts.get(item, 0) + 1
    value = 0
    for key in counts:
        if counts[key] == 1:
            value = key
    # BUG FIX: the original returned the loop variable `j` (whatever key the
    # dict iterated last) instead of the recorded unique value.
    return value
|
#!/usr/bin/env python3
import re
import sys
import json
import base64
import ipaddress
def decode_address(address):
    """Convert a /proc/net 'HEXIP:HEXPORT' token into (ip_string, port)."""
    hex_ip, hex_port = address.split(':')
    if len(hex_ip) == 32:
        # IPv6: regroup the 32 hex digits into eight 4-digit fields.
        groups = [hex_ip[i:i + 4] for i in range(0, 32, 4)]
        ip = ipaddress.ip_address(":".join(groups)).exploded
    else:
        # IPv4: the kernel stores the address little-endian, so reverse
        # the decoded bytes before joining them with dots.
        octets = reversed(list(base64.b16decode(hex_ip)))
        ip = ".".join(str(octet) for octet in octets)
    port = int.from_bytes(base64.b16decode(hex_port), byteorder='big')
    return ip, port
def _read_tcp(path='/proc/net/tcp'):
lines = []
with open(path, 'r') as f:
for line in f:
line = line.strip()
line = re.sub(' +', ' ', line)
lines.append(line.split())
sock_list = []
header = lines[0]
for line in lines[1:]:
line_struct = {}
for idx, head in enumerate(header):
key = 'remote_address' if head == 'rem_address' else head
line_struct[key] = line[idx]
sock_list.append(line_struct)
return sock_list
def sockets():
    """Return decoded local/remote (ip, port) pairs for all TCP/TCP6 sockets."""
    rows = _read_tcp() + _read_tcp('/proc/net/tcp6')
    decoded = []
    for row in rows:
        entry = {}
        entry['local_ip'], entry['local_port'] = decode_address(row['local_address'])
        entry['rem_ip'], entry['rem_port'] = decode_address(row['remote_address'])
        decoded.append(entry)
    return decoded
def main():
    """Print the socket list as a JSON document and exit with status 0."""
    print(json.dumps({'sockets': sockets()}))
    sys.exit(0)


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from ana_bsec import log
import torch,copy,math,numpy
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class NN_EXP(nn.Module):
    """Tiny 1-4-1 MLP used to fit exp(x)."""

    def __init__(self):
        super(NN_EXP, self).__init__()
        self.fc1 = nn.Linear(1, 4)
        self.fc2 = nn.Linear(4, 1)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        return self.fc2(hidden)

    def num_paras(self):
        """Total number of learnable scalar parameters."""
        return sum(p.numel() for p in self.parameters())

    def num_layers(self):
        """Number of direct child modules (here: the two linear layers)."""
        return sum(1 for _ in self.named_children())

    def __str__(self):
        # One (in, out) shape tuple per child layer, e.g. NN_EXP [(1, 4), (4, 1)] 13
        shapes = [tuple(child.state_dict()['weight'].t().size())
                  for _, child in self.named_children()]
        return "%s %s %s" % (self.__class__.__name__, shapes, self.num_paras())
def loss_func(netout, target):
    """Mean-squared-error training/testing loss."""
    return F.mse_loss(netout, target)
def train(NetClass):
    """Train an instance of NetClass to approximate exp(x).

    Training points are (x, exp(x)) for x in [0, log(1000)] (1001 samples,
    full batch); the test grid extends slightly beyond the training range
    on both sides. Runs 30000 epochs of SGD with a step-decayed learning
    rate, logs the losses every 1000 epochs, then pickles the whole model.
    """
    traindata=[[torch.tensor([i,],dtype=torch.float32),torch.tensor([numpy.exp(i),],dtype=torch.float32)]
        for i in numpy.linspace(0,numpy.log(1000),1001)]
    batch_size=1001  # full-batch: one optimizer step per epoch
    trainloader=torch.utils.data.DataLoader(traindata,shuffle=True,batch_size=batch_size,drop_last=True)
    train_iter_num=int(len(traindata)/batch_size)
    # Test range [0.8, 1200] deliberately extends past the training range.
    testdata=[[torch.tensor([i,],dtype=torch.float32),torch.tensor([numpy.exp(i),],dtype=torch.float32)]
        for i in numpy.linspace(numpy.log(0.8),numpy.log(1200),101)]
    testloder=torch.utils.data.DataLoader(testdata,batch_size=len(testdata))
    net=NetClass()
    #net=net.double()
    log(net)
    # Single full-size test batch, reused at every evaluation.
    test_i=testloder.__iter__().__next__()
    netout=net(test_i[0])
    test_loss=loss_func(netout,test_i[1])
    log("random init: %f"%(test_loss))
    log("epoch num: train_loss test_loss")
    optimizer=optim.SGD(net.parameters(),lr=0.0001,momentum=0)
    optimizer.zero_grad()
    # Learning rate decays x0.2 at epochs 5000, 10000 and 15000.
    scheduler=torch.optim.lr_scheduler.MultiStepLR(optimizer,[5000,10000,15000],gamma=0.2)
    for epoch in range(30000):
        running_loss=0
        for i in trainloader:
            netout=net(i[0])
            loss=loss_func(netout,i[1])
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            running_loss+=loss.item()
        scheduler.step()
        if epoch%1000==0:
            # Periodic evaluation without building a graph.
            with torch.no_grad():
                netout=net(test_i[0])
                test_loss=loss_func(netout,test_i[1])
            log("%3d: %f %f"%(epoch,running_loss/train_iter_num,test_loss))
    #return
    # Persist the full module (architecture + weights).
    save_name='%s_%s_%s.ckpt'%(net.__class__.__name__,net.num_layers(),net.num_paras())
    torch.save(net,save_name)
    log("saved net to %s"%(save_name))
def draw_fit(netfile):
    """Load <netfile>.ckpt, evaluate it on the test grid and plot fit vs exp(x)."""
    net = torch.load(netfile + '.ckpt')
    testdata = [[torch.tensor([v, ], dtype=torch.float32),
                 torch.tensor([numpy.exp(v), ], dtype=torch.float32)]
                for v in numpy.linspace(numpy.log(0.8), numpy.log(1200), 101)]
    loader = torch.utils.data.DataLoader(testdata, batch_size=len(testdata))
    batch = loader.__iter__().__next__()
    prediction = net(batch[0])
    test_loss = loss_func(prediction, batch[1]).item()
    xs = batch[0].view(1, -1).tolist()[0]
    ys_corr = batch[1].view(1, -1).tolist()[0]
    ys_fit = prediction.view(1, -1).tolist()[0]
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax1 = fig.subplots(1)
    marker_size = 3
    ax1.plot(xs, ys_corr, 'o', markersize=marker_size, label="exp(x)")
    ax1.plot(xs, ys_fit, 'x', markersize=marker_size, label=netfile)
    ax1.legend(loc=2)
    plt.title("%s: %.1f" % (str(net), test_loss))
    plt.savefig("%s.png" % (netfile))
    plt.show()
if __name__=="__main__":
    #train(NN_EXP)
    # Plot the previously trained checkpoint (2 layers, 33 parameters).
    draw_fit("NN_EXP_3_33")
from PyQt5.QtCore import pyqtSignal, QObject
from DataSocket import TCPReceiveSocket
import time
# a client socket
class QDataSocket(QObject):
    """Qt wrapper around TCPReceiveSocket that re-emits received payloads
    as a new_data signal, timestamped on arrival."""

    # Declared but not emitted within this class -- presumably consumed
    # elsewhere; confirm.
    reconnecting = pyqtSignal()
    # Payload of new_data: (data, receive_timestamp)
    new_data = pyqtSignal(tuple)

    def __init__(self, tcp_port, tcp_ip='localhost'):
        """Create the receive socket; call start() to begin listening."""
        super().__init__()
        self.socket = TCPReceiveSocket(tcp_ip=tcp_ip, tcp_port=tcp_port, handler_function=self._data_received)

    def _data_received(self, data):
        # Callback invoked by TCPReceiveSocket: stamp the arrival time and
        # fan the payload out to Qt listeners.
        self.new_data.emit((data, time.time()))

    def start(self):
        """Start receiving on the underlying socket."""
        self.socket.start()

    def stop(self):
        """Stop the underlying socket."""
        self.socket.stop()
|
from numpy import *
from ConvNet import *
import time
import struct
import os
#mnist has a training set of 60,000 examples, and a test set of 10,000 examples.
#log檔作用:紀錄檔案(logfile)是一個記錄了發生在執行中的作業系統或其他軟體中的事件的檔案
#
def train_net(train_covnet, logfile, cycle, learn_rate, case_num = -1) :
    """Train the ConvNet on the MNIST training set.

    train_covnet: the network to train (fw_prop/bw_prop interface).
    logfile: open file object; tab-separated progress is appended to it.
    cycle: number of passes over the data; learn_rate: one rate per cycle.
    case_num: cap on samples per cycle (-1 = use the whole set).
    """
    # Read data
    # Change it to your own dataset path
    trainim_filepath = './data/raw/train-images.idx3-ubyte'  # training images
    trainlabel_filepath = './data/raw/train-labels.idx1-ubyte'  # training labels
    trainimfile = open(trainim_filepath, 'rb')  # open() the file
    trainlabelfile = open(trainlabel_filepath, 'rb')  # 'rb' reads raw bytes without decoding to characters
    train_im = trainimfile.read()  # f.read(size) returns the file content;
    train_label = trainlabelfile.read()  # omitting size reads the whole file
    im_index = 0
    label_index = 0
    # IDX headers: magic number, item count, then row/column counts for images.
    magic, numImages , numRows , numColumns = struct.unpack_from('>IIII' , train_im , im_index)
    magic, numLabels = struct.unpack_from('>II', train_label, label_index)
    print ('train_set:', numImages)
    train_btime = time.time()
    logfile.write('learn_rate:' + str(learn_rate) + '\t')
    logfile.write('train_cycle:' + str(cycle) + '\t')
    ########################################################################################################
    # Begin to train
    for c in range(cycle) :
        # Rewind both read offsets to just past the headers.
        im_index = struct.calcsize('>IIII')
        label_index = struct.calcsize('>II')
        train_case_num = numImages
        if case_num != -1 and case_num < numImages :
            train_case_num = case_num
        logfile.write("trainset_num:" + str(train_case_num) + '\t')
        for case in range(train_case_num) :
            im = struct.unpack_from('>784B', train_im, im_index)
            label = struct.unpack_from('>1B', train_label, label_index)
            im_index += struct.calcsize('>784B')
            label_index += struct.calcsize('>1B')
            im = array(im)
            im = im.reshape(28,28)
            # Embed the 28x28 digit into a 32x32 frame: background -0.1,
            # ink 1.175 (LeNet-style input normalization).
            bigim = list(ones((32, 32)) * -0.1)
            for i in range(28) :
                for j in range(28) :
                    if im[i][j] > 0 :
                        bigim[i+2][j+2] = 1.175
            im = array([bigim])
            label = label[0]
            print (case, label)
            # Forward pass, then backprop with this cycle's learning rate.
            train_covnet.fw_prop(im, label)
            train_covnet.bw_prop(im, label, learn_rate[c])
    print ('train_time:', time.time() - train_btime)
    logfile.write('train_time:'+ str(time.time() - train_btime) + '\t')
######################################################################################################
def test_net(train_covnet, logfile, case_num = -1) :
    """Evaluate the ConvNet on the MNIST test set and log the accuracy.

    case_num caps the number of test samples (-1 = the full 10k set).
    """
    # Read data
    # Change it to your own dataset path
    testim_filepath = './data/raw/t10k-images.idx3-ubyte'
    testlabel_filepath = './data/raw/t10k-labels.idx1-ubyte'
    testimfile = open(testim_filepath, 'rb')
    testlabelfile = open(testlabel_filepath, 'rb')
    test_im = testimfile.read()
    test_label = testlabelfile.read()
    im_index = 0
    label_index = 0
    # IDX headers, as in train_net.
    magic, numImages , numRows , numColumns = struct.unpack_from('>IIII' , test_im , im_index)
    magic, numLabels = struct.unpack_from('>II', test_label, label_index)
    print('test_set:', numImages)
    im_index += struct.calcsize('>IIII')
    label_index += struct.calcsize('>II')
    correct_num = 0
    testcase_num = numImages
    if case_num != -1 and case_num < numImages:
        testcase_num = case_num
    logfile.write("testset_num:" + str(testcase_num) + '\t')
    # To test
    for case in range(testcase_num) :
        im = struct.unpack_from('>784B', test_im, im_index)
        label = struct.unpack_from('>1B', test_label, label_index)
        im_index += struct.calcsize('>784B')
        label_index += struct.calcsize('>1B')
        im = array(im)
        im = im.reshape(28,28)
        # Same 28x28 -> 32x32 embedding as during training.
        bigim = list(ones((32, 32)) * -0.1)
        for i in range(28) :
            for j in range(28) :
                if im[i][j] > 0 :
                    bigim[i+2][j+2] = 1.175
        im = array([bigim])
        label = label[0]
        print( case, label)
        train_covnet.fw_prop(im)
        # Prediction = argmax over the output layer's class activations.
        if argmax(train_covnet.outputlay7.maps[0][0]) == label :
            correct_num += 1
    correct_rate = correct_num / float(testcase_num)
    print('test_correct_rate:', correct_rate)
    logfile.write('test_correct_rate:'+ str(correct_rate) + '\t')
    logfile.write('\n')
# Script entry: snapshot the initial weights, train one cycle on 50 samples,
# snapshot the trained weights, then test on 50 samples.
log_timeflag = time.time()
train_covnet = CovNet()
# Create a folder named 'log' beforehand to save the history files.
train_covnet.print_netweight('log/origin_weight' + str(log_timeflag) + '.log')
logfile = open('log/nanerrortestcase.log', 'w')
logfile.write("train_time:" + str(log_timeflag) + '\t')
train_net(train_covnet, logfile, 1, [0.0001, 0.0001], 50)
train_covnet.print_netweight('log/trained_weight' + str(log_timeflag) + '.log')
test_net(train_covnet, logfile, 50)
logfile.write('\n')
logfile.close()
|
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
# URL routes: authentication, home/room pages, profiles, and CRUD views
# for rooms, messages and the user profile.
urlpatterns = [
    path('login/', views.loginPage , name='login'),
    path('logout/', views.logoutUser , name='logout'),
    path('register/',views.registerUser , name='register'),
    path('', views.home, name='home'),
    path('room/<int:pk>', views.room, name='room'),
    path('profile/<str:pk>',views.userProfile , name='user-profile' ),
    path('create-room' , views.createRoom , name='create-room'),
    path('update-room/<int:pk>' , views.updateRoom , name='update-room'),
    path('delete-room/<int:pk>' , views.deleteRoom , name='delete-room'),
    path('delete-message/<int:pk>' , views.deleteMessage , name='delete-message'),
    path('updateuser/' , views.updateUser , name='update-user'),
]
# Serve user-uploaded media files (development setting).
urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
#!/usr/bin/env python
import os
import sys
from recipes.examples import Examples
from recipes.apport import Apport
from recipes.packages import Packages
from recipes.bashrc import Bashrc
from recipes.node import Node
from recipes.gedit import Gedit
from recipes.git import Git
from recipes.fstab import Fstab
from recipes.restricted import Restricted
def check_if_root():
    """Exit with status 1 (after a message) unless running as root (euid 0)."""
    if os.geteuid() != 0:
        # BUG FIX: the Python-2 print statement is a SyntaxError on
        # Python 3; the call form prints identically on both versions.
        print('You must be root to run this script.')
        sys.exit(1)
def get_user():
    """Prompt for a username and verify that user's home directory exists.

    NOTE(review): raw_input exists only on Python 2 (NameError under
    Python 3 -- use input there). The assert is stripped when running with
    python -O; an explicit raise would be safer for validation.
    """
    user = raw_input('What user do you wish to install for: ')
    # expanduser('~user') resolves to the user's home directory path.
    assert os.path.exists(os.path.expanduser('~' + user))
    return user
def execute_recipe(recipe):
    """Run a recipe, then verify it; print an error and exit(1) on failure."""
    recipe.execute()
    if recipe.is_valid():
        return
    print('ERROR: the %s recipe did not execute properly' %
          recipe.__class__.__name__)
    sys.exit(1)
if __name__ == '__main__':
    # Must run as root, but installs into the chosen user's environment.
    check_if_root()
    user = get_user()
    # Run every provisioning recipe in order; any failure aborts the script
    # (execute_recipe calls sys.exit on an invalid recipe).
    execute_recipe(Examples(user))
    execute_recipe(Apport(user))
    execute_recipe(Packages(user))
    execute_recipe(Bashrc(user))
    execute_recipe(Node(user))
    execute_recipe(Gedit(user))
    execute_recipe(Git(user))
    execute_recipe(Fstab(user))
    execute_recipe(Restricted(user))
|
# A stack that also exposes its current minimum via min().
class stack():
    """LIFO stack that tracks the running minimum alongside each element.

    Interface (unchanged): push(item), pop() -> popped item, min() ->
    current minimum, print_all(). pop()/min() on an empty stack print a
    message and return None, matching the original behavior.
    """

    def __init__(self):
        self._item = []  # stored values
        self._min = []   # _min[i] == min(_item[:i+1])

    def pop(self):
        """Remove and return the top item (message + None when empty)."""
        if self._item:
            self._min.pop()
            return self._item.pop()
        else:
            print("stack is empty!")

    def push(self, item):
        """Push item, updating the running minimum in O(1).

        (The original recomputed min(self._item) on every push -- O(n) per
        push; comparing against the previous minimum yields the same values
        in O(1).)
        """
        self._item.append(item)
        if self._min and self._min[-1] < item:
            self._min.append(self._min[-1])
        else:
            self._min.append(item)

    def min(self):
        """Return the minimum of the current contents (message + None when empty)."""
        if self._min:
            return self._min[-1]
        else:
            print("min stack is empty!")

    def print_all(self):
        print("this is stack: ", self._item)
        print("this is min_stack: ", self._min)
if __name__ == "__main__":
    # Demo: exercise push/pop while printing the tracked minimum each step.
    # NOTE(review): rebinding the name 'stack' shadows the class -- harmless
    # here, but confusing.
    stack = stack()
    stack.min()
    stack.print_all()
    stack.push(3)
    stack.min()
    stack.print_all()
    stack.push(4)
    stack.min()
    stack.print_all()
    stack.push(2)
    stack.min()
    stack.print_all()
    stack.push(1)
    stack.min()
    stack.print_all()
    stack.pop()
    stack.min()
    stack.print_all()
    stack.pop()
    stack.min()
    stack.print_all()
    stack.push(0)
    stack.min()
    stack.print_all()
|
import random
def intercalar_iguais(v1, v2, vf):
    """Interleave equally sized v1 and v2 into vf: v1[0], v2[0], v1[1], ...

    vf is filled in place and also returned.
    """
    pos = 0
    for i in range(len(v1)):
        vf[pos], vf[pos + 1] = v1[i], v2[i]
        pos += 2
    return vf
def contem(v, qtd, e):
    """Return True iff *e* occurs among the first *qtd* elements of *v*."""
    return any(v[i] == e for i in range(qtd))
def gerar(v1, v2):
    """Fill v1 and then v2 with distinct random ints in [1, 5*(len(v1)+len(v2))].

    No value repeats within or across the two lists; both are filled in place.
    """
    limite = (len(v1) + len(v2)) * 5
    preenchidos1 = 0
    preenchidos2 = 0
    # Rejection sampling: draw until the candidate is new to both lists.
    while preenchidos1 < len(v1):
        candidato = random.randint(1, limite)
        if not contem(v1, preenchidos1, candidato) and not contem(v2, preenchidos2, candidato):
            v1[preenchidos1] = candidato
            preenchidos1 += 1
    while preenchidos2 < len(v2):
        candidato = random.randint(1, limite)
        if not contem(v1, preenchidos1, candidato) and not contem(v2, preenchidos2, candidato):
            v2[preenchidos2] = candidato
            preenchidos2 += 1
# Read the size, generate two distinct random vectors, and print their
# interleaving.
n = int(input('Número de elementos em v1 e em v2: '))
v1 = [0] * n
v2 = [0] * n
vf = [0] * (len(v1) + len(v2))
gerar(v1, v2)
print('v1:', v1)
print('v2:', v2)
intercalar_iguais(v1,v2,vf)
print('vf:', vf)
import fractions
from functools import reduce
def gcd_list(numbers):
    """Return the greatest common divisor of every number in *numbers*."""
    # BUG FIX: fractions.gcd was deprecated in Python 3.5 and removed in
    # 3.9; math.gcd is the supported replacement and agrees for the
    # positive integers used here.
    import math
    return reduce(math.gcd, numbers)
# Read n and the extra point x0, then the n positions. After sorting, each
# position's offset from the smallest (tmp stays x[0]) is collected and the
# answer is the gcd of those offsets.
n, x0 = map(int, input().split())
x = list(map(int,input().split()))
x.append(x0)
x.sort()
xdiff = []
tmp = x[0]
for xs in x:
    # Skip zero offsets (the minimum itself and duplicates of it).
    if xs - tmp == 0: continue
    xdiff.append(xs-tmp)
print(gcd_list(xdiff))
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 19 17:28:13 2021
@author: gawe
"""
import numpy as _np
mu0 = 4.0*_np.pi*1e-7 # [H/m], approximate permeability of free space
cc = 299792458 # [m/s], defined speed of light, weird flex: 3e8 m/s is fine
eps0 = 1.0/(mu0*(cc**2.0)) # [F/m], permittivity of free space
"""
Note:
In "rectangular" waveguide generally "a" is measured parallel to the
traditional H-field (B-field in free space) direction for a fundamental
TE10 mode (transverse electric wave travelling in the z-direction).
    TEmn where m, n are mode numbers
In this sense "b" is measured parallel to the traditional D-field
(E-field in free space) direction for a fundamental TE10 mode.
if a>b (rectangular waveguide)
TE10 is the dominant mode
if a=b (square waveguide)
TE10 and TE01 are both dominant (degenerate)
if a<b (rectangular waveguide with long dimension parallel to the E-field
direction)
TE01 is the dominant mode (Transverse magnetic wave
"""
# ----
def beta2(freq, mu=None, eps=None):
    """Free-space wavenumber squared at *freq* (defaults: vacuum mu0/eps0)."""
    mu = mu0 if mu is None else mu
    eps = eps0 if eps is None else eps
    omega = 2 * _np.pi * freq
    return omega ** 2.0 * (eps * mu)
def betax(m, a):
    """Transverse wavenumber along x: m*pi/a for mode index m, width a."""
    return m * _np.pi / a


def betay(n, b):
    """Transverse wavenumber along y: n*pi/b for mode index n, height b."""
    return n * _np.pi / b
def cutoff_wavenumber(a, b, m=1, n=0):
    """Cutoff wavenumber kc of mode (m, n) in an a x b rectangular guide."""
    kx = betax(m, a)
    ky = betay(n, b)
    return _np.sqrt(kx ** 2.0 + ky ** 2.0)
def cutoff_frequency(a, b, m=1, n=0, mu=None, eps=None):
    """Cutoff frequency of mode (m, n): kc / (2*pi*sqrt(eps*mu))."""
    mu = mu0 if mu is None else mu
    eps = eps0 if eps is None else eps
    return cutoff_wavenumber(a, b, m, n) / (2.0 * _np.pi * _np.sqrt(eps * mu))
# ----
def betaz(freq, a, b, m=1, n=0, mu=None, eps=None):
    """Guide (z-direction) wavenumber: sqrt(beta^2 - kc^2) for mode (m, n)."""
    total = beta2(freq, mu=mu, eps=eps)
    kc = cutoff_wavenumber(a, b, m, n)
    return _np.sqrt(total - kc ** 2.0)
# ----
def lambdax_TE(m, a):
    """Transverse TE wavelength along x: 2a/m."""
    return 2.0 * a / m


def lambday_TE(n, b):
    """Transverse TE wavelength along y: 2b/n."""
    return 2.0 * b / n
def lambda_freespace(freq, mu=None, eps=None):
    """Free-space wavelength at *freq*: 2*pi / beta."""
    beta = _np.sqrt(beta2(freq, mu=mu, eps=eps))
    return 2.0 * _np.pi / beta
def lambdaz_TE(freq, a, b, m=1, n=0, mu=None, eps=None):
    """Guide wavelength: wavelength in the z-direction (along the waveguide).

    lambda_g = 2*pi / beta_z for the TE(m, n) mode.
    """
    # BUG FIX: this called betaz_TE(), which does not exist anywhere in
    # this module (NameError at call time); the guide-wavenumber helper
    # defined above is betaz().
    return 2.0*_np.pi/betaz(freq, a, b, m=m, n=n, mu=mu, eps=eps)
# ----
def TE10_cutoff(a, mu=None, eps=None):
    """Cutoff frequency (Fc)10 of the dominant TE10 mode for guide width a."""
    mu = mu0 if mu is None else mu
    eps = eps0 if eps is None else eps
    return 1.0 / (2.0 * a * _np.sqrt(mu * eps))
def cutoff_TE(a, b, m=1, n=0, mu=None, eps=None):
    """TE(m, n) cutoff frequency scaled from the TE10 cutoff."""
    base = TE10_cutoff(a, mu=mu, eps=eps)
    return base * _np.sqrt(m ** 2.0 + (n * a / b) ** 2.0)
# ----
def TM11_cutoff(a, b, mu=None, eps=None):
    """Cutoff frequency (Fc)11 of the lowest TM mode (TM11)."""
    if mu is None: mu = mu0  # end if
    if eps is None: eps = eps0  # end if
    base = TE10_cutoff(a, mu=mu, eps=eps)
    return base * _np.sqrt(1 + (a / b) ** 2.0)
def cutoff_TM(a, b, m=1, n=1, mu=None, eps=None):
    """TM(m, n) cutoff frequency (same scaling formula as cutoff_TE, but
    with both mode defaults at 1, since TM requires m, n >= 1)."""
    base = TE10_cutoff(a, mu=mu, eps=eps)
    return base * _np.sqrt(m ** 2.0 + (n * a / b) ** 2.0)
# ----
def wave_impedance_TEmn(freq, a, b, m=1, n=0, mu=None, eps=None):
    """Wave impedance Z_w in the +z-direction for the TE(m, n) mode.

    f > fc: real and greater than the intrinsic impedance of the medium
            in the guide.
    f = fc: infinite wave impedance.
    f < fc: reactively inductive (imaginary) -- below cutoff the guide is
            an inductive storage element for TEmn waves travelling in +z.
    """
    if mu is None: mu = mu0  # end if
    # BUG FIX: this called betaz_TE(), which is not defined in this module;
    # the z-wavenumber function is betaz().
    return 2.0*_np.pi*freq*mu/betaz(freq, a, b, m=m, n=n, mu=mu, eps=eps)
# ====================================================================== #
|
from django.db import models
class Ornanization(models.Model):
    """Organisation contact record (name, department, email, duty, phones).

    NOTE(review): the class name looks like a typo for 'Organization', but
    renaming would break existing references and migrations -- confirm
    before changing. IntegerField does not take max_length (it is ignored
    or rejected depending on Django version), and phone numbers are
    conventionally CharField to preserve leading zeros -- confirm the
    intended schema.
    """
    name = models.CharField(max_length=20)
    department = models.CharField(max_length=40)
    email = models.EmailField(max_length=50)
    duty = models.CharField(max_length=20)
    cell_phone = models.IntegerField(max_length=11)
    plane_number = models.IntegerField(max_length=11)
|
# Generated by Django 2.0 on 2018-01-19 09:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for app02: machines, modules, points,
    projects, test configuration/runs and users. Generated migrations
    should not be hand-edited beyond documentation."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Machine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, verbose_name='机器名称')),
                ('ip', models.GenericIPAddressField(verbose_name='ip地址')),
                ('password', models.CharField(max_length=32, verbose_name='Password')),
            ],
        ),
        migrations.CreateModel(
            name='Modular',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, verbose_name='模块名称')),
                ('svn', models.CharField(max_length=128, verbose_name='SVN路径')),
                ('main', models.CharField(max_length=128, verbose_name='主文件')),
            ],
        ),
        migrations.CreateModel(
            name='Point',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, verbose_name='功能点')),
                ('svn', models.CharField(max_length=128, verbose_name='SVN路径')),
                ('main', models.CharField(max_length=128, verbose_name='主文件')),
                ('modular', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app02.Modular', verbose_name='所属模块')),
            ],
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, verbose_name='项目名称')),
                ('svn', models.CharField(max_length=128, verbose_name='SVN路径')),
            ],
        ),
        migrations.CreateModel(
            name='TestConfig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('test_root_dir', models.CharField(max_length=128, verbose_name='测试目录')),
                ('current_stamp', models.IntegerField(verbose_name='当前时间戳')),
            ],
        ),
        migrations.CreateModel(
            name='TestInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('time_stamp', models.IntegerField(verbose_name='时间戳')),
                ('test_list', models.CharField(max_length=1024, verbose_name='测试集合')),
                ('machine_list', models.CharField(max_length=1024, verbose_name='执行机合集')),
                ('status', models.IntegerField(verbose_name='状态')),
            ],
        ),
        migrations.CreateModel(
            name='TestInfo_Detail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('time_stamp', models.IntegerField(verbose_name='时间戳')),
                ('test_case', models.CharField(max_length=10, verbose_name='测试项')),
                ('status', models.IntegerField(verbose_name='状态')),
                ('machine', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app02.Machine', verbose_name='执行机')),
            ],
        ),
        migrations.CreateModel(
            name='UserInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=32)),
                ('password', models.CharField(max_length=32)),
            ],
        ),
        migrations.CreateModel(
            name='UserSave',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, verbose_name='用户名')),
                ('test_case', models.TextField(verbose_name='测试项')),
                ('machine_case', models.TextField(verbose_name='机器')),
            ],
        ),
        # Added after Modular's creation to attach its parent project.
        migrations.AddField(
            model_name='modular',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app02.Project', verbose_name='所属项目'),
        ),
    ]
|
import numpy as np
import sys

# Quick-look helper: print a saved NumPy array (.npy file) and its shape.
# Usage: python quicklook.py <file.npy>
path = sys.argv[1]
array = np.load(path)
print(array)
print('shape:', np.shape(array))
|
def add(num1, num2):
    """Return the sum num1 + num2."""
    total = num1 + num2
    return total


def subtract(num1, num2):
    """Return the difference num1 - num2."""
    difference = num1 - num2
    return difference


def multiply(num1, num2):
    """Return the product num1 * num2."""
    product = num1 * num2
    return product


def divide(num1, num2):
    """Return the quotient num1 / num2 (true division; raises ZeroDivisionError when num2 == 0)."""
    quotient = num1 / num2
    return quotient
# Interactive four-function calculator front-end.
print("Calculator v2")
print("______________________")
num1 = float(input("Enter a number: "))
operand = input("+, -, *, / : ")
num2 = float(input("Enter a number: "))
# Dispatch table replaces the repetitive if/elif chain; each branch printed the
# same three lines, so the common structure is factored out.
_operations = {
    "+": add,
    "-": subtract,
    "*": multiply,
    "/": divide,
}
if operand in _operations:
    print(num1, operand, num2, "=")
    print("______________________")
    try:
        print(_operations[operand](num1, num2))
    except ZeroDivisionError:
        # BUG FIX: the original crashed with an unhandled ZeroDivisionError
        # when dividing by zero; report it instead.
        print("Cannot divide by zero")
else:
    print("Unknown Input")
import os
from flask import Flask
# create_app is an application-factory function.
def create_app(test_config=None):
    """Application factory: create and configure the Flask app.

    Args:
        test_config: optional mapping that overrides the default
            configuration (used by tests).

    Returns:
        The configured Flask application instance.
    """
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        SECRET_KEY="dev",
        DATABASE=os.path.join(app.instance_path, "flask.sqlite"),  # path and file name of the SQLite database
    )
    # print(app.instance_path)
    # C:\Users\Jarvis\Desktop\Python后端学习\flask_tutorial\instance
    if test_config is None:
        # No test configuration supplied: load the instance config file if present.
        app.config.from_pyfile("config.py", silent=True)
    else:
        # A test configuration was supplied: use it.
        app.config.from_mapping(test_config)
    # Ensure the instance folder exists (Flask does not create it automatically).
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass
    # Simple smoke-test page.
    @app.route('/hello')
    def hello():
        return "Hello, World!"
    # Register database helpers with the app.
    from . import db
    db.init_app(app)  # registers the init-db command and connection teardown with the app
    # NOTE(review): the original comment asked "would this also work?" — the two
    # calls below presumably duplicate what db.init_app already does; confirm.
    app.teardown_appcontext(db.close_db)  # register the close-connection hook
    app.cli.add_command(db.init_db_command)  # register the database-initialisation CLI command
    # Register blueprints.
    from . import auth
    app.register_blueprint(auth.bp)
    from . import blog
    app.register_blueprint(blog.bp)
    app.add_url_rule("/", endpoint="index")  # makes url_for('index') and url_for('blog.index') both generate the same '/' URL
    return app
# if __name__ == '__main__':
# create_app().run() |
# Detect whether a word is a palindrome.
un_mot = "kayak"
nb_lettres = len(un_mot)
est_palindrome = True
for i in range(0, nb_lettres):  # Note: scanning only half the word would suffice.
    if not un_mot[i] == un_mot[nb_lettres - 1 - i]:
        est_palindrome = False
        break
print(f"{un_mot} est-il un palindrome? Réponse: {est_palindrome}")

# Same check with a while loop.
un_autre_mot = "kay3ak"
# BUG FIX: the original kept nb_lettres from the first word ("kayak", length 5),
# so the second word's letters were compared against the wrong mirror indices.
nb_lettres = len(un_autre_mot)
i = 0
est_palindrome = True
while i < nb_lettres and est_palindrome:
    est_palindrome = un_autre_mot[i] == un_autre_mot[nb_lettres - 1 - i]
    i += 1
print(f"{un_autre_mot} est-il un palindrome? Réponse: {est_palindrome}")
|
# Backtest a trivial buy-and-hold strategy on SK Hynix (000660.KS) with zipline.
import pandas_datareader.data as web
import datetime
import matplotlib.pyplot as plt
from zipline.api import order_target, record, symbol,set_commission, commission
from zipline.algorithm import TradingAlgorithm

# One month of daily data, January 2016, fetched from Yahoo Finance at import time.
start = datetime.datetime(2016, 1 ,2)
end = datetime.datetime(2016, 1, 31)
data = web.DataReader("000660.KS", "yahoo", start, end)
data = data[['Adj Close']]
data.columns = ["SKHynix"]
data = data.tz_localize("UTC")  # zipline expects timezone-aware (UTC) timestamps
plt.plot(data.index, data['SKHynix'])
plt.show()

def initialize(context): #initial asset and trading pay
    # Record the traded asset and a per-dollar commission of 0.165%.
    context.sym = symbol('SKHynix')
    set_commission(commission.PerDollar(cost=0.00165))

#only buy
def handle_data(context, data):
    # Hold exactly one share at all times (buys once, then no-ops).
    order_target(context.sym, 1)

if __name__ == "__main__":
    algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
    result = algo.run(data)
    print(result[['starting_cash', 'ending_cash', 'ending_value']].tail())
    #Reward
    plt.title('Profit')
    plt.plot(result.index, result.portfolio_value)
    plt.show()
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import numpy as np
from IPython import embed
class ReLU:
    """Rectified linear unit; caches the negative-input mask for the backward pass."""

    def forward(self, x):
        """Clamp negative entries of x to zero and remember where they were."""
        mask = x < 0
        self.grad_mask = mask
        return np.where(mask, 0, x)

    def backward(self, grad_input):
        """Zero the incoming gradient (in place) wherever the forward input was negative."""
        grad_input[self.grad_mask] = 0
        return grad_input
def test_ReLU(input, grad_input):
    """Run one forward/backward pass through a fresh ReLU layer.

    Returns (forward output, masked gradient).
    """
    layer = ReLU()
    out = layer.forward(input)
    grad = layer.backward(grad_input)
    return out, grad
if __name__ == "__main__":
    # Quick manual check: run ReLU on a small ramp of integers, then drop
    # into an IPython shell to inspect val and grad interactively.
    x = np.arange(-5, 5).reshape((2, -1))
    y = np.arange(-5, 5).reshape((2, -1))
    val, grad = test_ReLU(x, y)
    embed(header="main")
|
def input_number():
    """Prompt for an integer and print 10 divided by it.

    Retries on a zero divisor or non-numeric input. The retry is a loop
    instead of the original recursive calls, which grew the call stack
    without bound on repeated bad input. An unexpected exception is
    reported and aborts the function, matching the original behavior.
    """
    x = 10
    while True:
        try:
            y = int(input('請輸入數字:'))
            z = x / y
        except ZeroDivisionError as e:
            print('分母不可 = 0, 請重新輸入~', e)
        except ValueError as e:
            print('輸入資料錯誤, 請重新輸入~', e)
        except Exception as e:
            # The original did not retry on unexpected errors; keep that.
            print("發生了一個我料想不到的錯誤", e)
            return
        else:
            print(z)
            return

if __name__ == '__main__':
    input_number()
import numpy as np
class Utils:
    """Small collection of numeric helpers for neural-network experiments."""

    def Sigmoid(self, z):
        """Logistic sigmoid 1 / (1 + e^-z), applied elementwise."""
        denom = 1 + np.exp(-z)
        return 1 / denom

    def CrossEntropy(self, a, y):
        """Mean of y*log(a) + (1-y)*log(1-a).

        NOTE(review): the value is not negated, so this is the mean
        log-likelihood rather than the (positive) cross-entropy loss —
        confirm callers expect this sign.
        """
        positive_term = y * np.log(a)
        negative_term = (1 - y) * np.log(1 - a)
        return np.mean(positive_term + negative_term)

    def DistEuclidean(self, dato, dato1):
        """Euclidean distance between the single row `dato` and every row of `dato1`.

        The last column of `dato1` is skipped — presumably the class label;
        TODO confirm with callers.
        """
        total = 0
        for col in range(len(dato1.columns) - 1):
            diff = dato.iloc[0, col] - dato1.iloc[:, col]
            total += diff ** 2
        return np.sqrt(total)

    def CrossEntropyLoss_Optimized(self, A2, Y, parameters):
        """Binary cross-entropy cost averaged over the m columns (examples) of Y.

        `parameters` is unused; kept for interface compatibility.
        """
        m = Y.shape[1]
        logprobs = np.multiply(np.log(A2), Y) + (1 - Y) * (np.log(1 - A2))
        cost = -np.sum(logprobs) / m
        return np.squeeze(cost)

    def GradientDescent(self, Hn, learning_rate, dHn):
        """One plain gradient-descent step: Hn - learning_rate * dHn."""
        step = learning_rate * dHn
        return Hn - step
|
from __future__ import annotations
from soda.sodacl.check_cfg import CheckCfg
from soda.sodacl.location import Location
class GroupByCheckCfg(CheckCfg):
    """Configuration for a group-by check: a query grouped on `fields`,
    with per-group sub-checks and a cap on the number of groups."""

    def __init__(
        self,
        source_header: str,
        source_line: str,
        source_configurations: dict | None,
        location: Location,
        name: str,
        query: str,
        fields: dict,
        check_cfgs: list[CheckCfg] | None = None,
        group_limit: int = 1000,
    ):
        super().__init__(source_header, source_line, source_configurations, location, name)
        self.query = query
        self.fields = fields
        # BUG FIX: the original used a mutable default argument (check_cfgs=[]),
        # which is shared across every instance created without the argument.
        # A None sentinel gives each instance its own fresh list.
        self.check_cfgs = check_cfgs if check_cfgs is not None else []
        self.group_limit = group_limit
|
# Production Django settings for the Heroku deployment.
import os
import psycopg2

# Debug must stay off in production.
DEBUG = False
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Static assets and host whitelist are pinned to the Heroku app.
STATIC_URL = 'https://recipe-ingredient-catalog.herokuapp.com/static/'
ALLOWED_HOSTS = ['recipe-ingredient-catalog.herokuapp.com']
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
    }
}
# Connect to heroku's postgres database using a uri and psycopg2:
# NOTE(review): `conn` is never used afterwards — presumably kept as an
# import-time connectivity check; confirm before removing.
DATABASE_URL = os.environ.get('DATABASE_URL', '')
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
# Use dj_database_url to decipher the DATABASE_URL into a format Django can read
import dj_database_url
DATABASES['default'] = dj_database_url.config(conn_max_age=600, ssl_require=True)
# Cross-origin and cookie policy for the separately-hosted frontend.
CORS_ORIGIN_WHITELIST = ['https://ehlerorngard.com', 'https://www.ehlerorngard.com']
CSRF_TRUSTED_ORIGINS = ['ehlerorngard.com', 'www.ehlerorngard.com']
# Allow these headers on the request
CORS_ALLOW_HEADERS = [
    'accept',
    'access-control-request-headers',
    'access-control-request-method',
    'accept-encoding',
    'accept-language',
    'authorization',
    'connection',
    'content-type',
    'cookie',
    'dnt',
    'host',
    'origin',
    'referer',
    'server',
    'user-agent',
    'x-csrftoken',
    'x-requested-with',
]
# Allow cross origin cookies:
SESSION_COOKIE_SAMESITE = None
# BUG FIX: this setting was misspelled CRSF_COOKIE_SAMESITE, so Django never
# saw it and the CSRF cookie kept its default SameSite attribute.
CSRF_COOKIE_SAMESITE = None
# To allow cookies in cross-site HTTP requests:
CORS_ALLOW_CREDENTIALS = True
# Ensure the CSRF cookie is sent from a secure (https) location:
CSRF_COOKIE_SECURE = True
# Allow browsers to ensure that the cookie is only sent under an HTTPS connection:
SESSION_COOKIE_SECURE = True
# Enable XSS filter in the browser, and force it to always block suspected XSS attacks:
SECURE_BROWSER_XSS_FILTER = True
# Redirect all non-HTTPS requests to HTTPS
SECURE_SSL_REDIRECT = True
"""
________[ NOTE ]________
Most of the following are synonymous with the defaults for those settings,
but I like clarity and prefer minimizing ambiguity regarding things that
impact critical functionality, so I am including them for easy future reference.
"""
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_COOKIE_NAME = 'csrftoken'
# Whether to store the CSRF token in the user’s session instead of in a cookie
CSRF_USE_SESSIONS = False
# True would disallow csrf cookies in a response (/ in anything other than in HTTP)
CSRF_COOKIE_HTTPONLY = False
SECURE_CONTENT_TYPE_NOSNIFF = False
X_FRAME_OPTIONS = 'DENY'
|
from include.data_load import DataSets
from include.regression import Regression
import numpy as np
def main() -> None:
    """Train a linear regression on the Boston housing data and score it.

    NOTE(review): train_score, test_score and predict are computed but never
    printed or used — presumably leftovers from experimentation; confirm
    before removing.
    """
    datasets_obj = DataSets()
    datasets_obj.load_boston_housing_dataset()
    train_data, train_target = datasets_obj.get_train_data()
    test_data, test_target = datasets_obj.get_test_data()
    reg = Regression("linear")
    reg.fit(train_data, train_target)
    train_score = reg.get_score(train_data, train_target)
    test_score = reg.get_score(test_data, test_target)
    predict = reg.predict(test_data)

if __name__ == '__main__':
    main()
|
# Generated by Django 2.1.3 on 2019-01-10 10:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema change: drop Game's surrogate id and make
    game_name and Purchase.pid natural (text) primary keys."""

    dependencies = [
        ('store', '0002_auto_20190110_1055'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='game',
            name='id',
        ),
        migrations.AlterField(
            model_name='game',
            name='game_name',
            field=models.TextField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='purchase',
            name='pid',
            field=models.TextField(primary_key=True, serialize=False),
        ),
    ]
|
from django.db import models
class Button(models.Model):
    """A boolean control mapped to a digital I/O pin."""

    value = models.BooleanField(default = True)          # current on/off state
    mapped_io = models.CharField(max_length=25, default = 'a')  # identifier of the mapped pin

    def __str__(self):
        return "Digital: " + str(self.mapped_io)
class Slider(models.Model):
    """An integer-valued control mapped to an analogue I/O pin."""

    value = models.IntegerField(default = 0)             # current slider position
    mapped_io = models.CharField(max_length=25, default = 'a')  # identifier of the mapped pin

    def __str__(self):
        return "Analogue: " + str(self.mapped_io)
# Create your models here.
|
#863. All Nodes Distance K in Binary Tree
#We are given a binary tree (with root node root), a target node, and an integer value K.
#Return a list of the values of all nodes that have a distance K from the target node.
#The answer can be returned in any order.
#Example 1:
#Input: root = [3,5,1,6,2,0,8,null,null,7,4], target = 5, K = 2
#Output: [7,4,1]
#Explanation:
#The nodes that are a distance 2 from the target node (with value 5)
#have values 7, 4, and 1.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def distanceK(self, root: TreeNode, target: TreeNode, K: int) -> List[int]:
        """Return the values of all nodes at distance exactly K from target.

        First annotates every node with a .parent pointer via DFS, then runs
        a BFS outward from target in all three directions (left child, right
        child, parent). Time O(N), space O(N).
        """
        from collections import deque  # O(1) popleft vs list.pop(0)'s O(n)

        def annotate_parents(node, parent):
            # Attach a .parent attribute so the BFS can also walk upward.
            if node:
                node.parent = parent
                annotate_parents(node.left, node)
                annotate_parents(node.right, node)

        annotate_parents(root, None)
        queue = deque([(target, 0)])
        out = []
        seen = {target}
        while queue:
            cur_node, cur_distance = queue.popleft()
            if cur_distance == K:
                out.append(cur_node.val)
            elif cur_distance < K:  # nodes already at distance K need no expansion
                for next_node in (cur_node.left, cur_node.right, cur_node.parent):
                    if next_node and next_node not in seen:
                        queue.append((next_node, cur_distance + 1))
                        seen.add(next_node)
        return out
# Python 2 script: strip cached stiffness/exchange datasets from HDF5 files
# given on the command line, freeing space / forcing recomputation.
import h5py, sys
for fname in sys.argv[1:]:
    print 'loading '+fname+'...'
    # Open read/write ('a') so datasets can be deleted in place.
    with h5py.File(fname, 'a') as h5f:
        for key in h5f.keys():
            if key in ['scstiffness_rho', 'scstiffness_rho3d', 'scstiffness_rho3dinterlayer',
                       'josephsonexchange', 'josephsonexchange_j3d', 'scstiffness']:
                del h5f[key]
                print key, ' removed'
print 'done'
|
from django.contrib import admin
from . models import NetVisCache

# Expose the NetVisCache model in the Django admin site.
admin.site.register(NetVisCache)
|
import csv,sys
from get_relationship import GetRelationship
from add_relationship import AddRelationship
import globals
def read_csv(filepath):
    """Read a CSV file and return its non-empty rows as a list of lists."""
    with open(filepath) as f:
        return [row for row in csv.reader(f) if row]
def request_tree(request_type, data):
    """Dispatch one family-tree command to the add/get relationship handlers.

    Args:
        request_type: 'ADD_SPOUSE', 'ADD_CHILD' or 'GET_RELATIONSHIP'.
        data: positional arguments for the chosen handler.

    Returns:
        The 'msg' field of the handler's response dict (empty string for an
        unrecognised request type).
    """
    adder = AddRelationship()
    getter = GetRelationship()
    output = {'msg': ''}
    if request_type == 'ADD_SPOUSE':
        output = adder.add_spouse(data[0], data[1])
    elif request_type == 'ADD_CHILD':
        output = adder.add_child(data[0], data[1], data[2])
    elif request_type == 'GET_RELATIONSHIP':
        output = getter.get_custom_relationship_wrapper(data[0], data[1])
    return output['msg']
def main():
    # Python 2 entry point: seed the relationship definitions and initial tree,
    # then replay the commands from the file named on the command line.
    # NOTE(review): the opened files (fp) are never closed — acceptable for a
    # short-lived script, but confirm before reuse.
    globals.initialize()
    print ("Welcome to Shan family")
    input_file = sys.argv[1]
    # person_list = []
    # relationship_list = []
    # order3_relationship_definition = {}
    # print person_list
    # Lines beginning with '#' in the data files are treated as comments.
    fp = open('order3_relationships.txt')
    reader = csv.DictReader(filter(lambda row: row[0]!='#', fp))
    for row in reader:
        globals.order3_relationship_definition[row['Name']]=row['Definition'].strip()
    fp = open('initial.txt')
    reader = csv.DictReader(filter(lambda row: row[0]!='#', fp),delimiter=" ")
    for row in reader:
        # row[None] collects the extra (unnamed) columns as the command's arguments.
        out = request_tree(row['type'],row[None])
        # print out
    print "Initial Tree setup done"
    print "-----------------------"
    # ##Any new commands can go in the input.txt file
    fp = open(input_file)
    reader = csv.reader(fp,delimiter = " ")
    for row in reader:
        request_type = row.pop(0)
        out = request_tree(request_type,row)
        print out

if __name__ == '__main__':
    main()
|
"""Cancer data classification
Classifying the Wisconsin cancer data from UCI repository
into benign and malignant classes with k Nearest Neighbors
"""
# -*- coding: utf-8 -*-
#!/usr/bin/env python3
# File : cancer_knn.py
# find whether cancer is malignant or benign using kNN
import time
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics, model_selection, neighbors, preprocessing
warnings.filterwarnings('ignore')
# get the initial time
t_init = time.time()
# url for the Wisconsin Breast Cancer data from UCI
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data'
# set the names of the columns as pulled from the file accompanying the dataset
# which is breast-cancer-wisconsin.names
names = [
"SampleCodeNumber", "ClumpThickness", "UniformityCellSize",
"UniformityCellShape", "MarginalAdhesion", "SingleEpithelialCellSize",
"BareNuclei", "BlandChromatin", "NormalNucleoli", "Mitoses", "Class"
]
print('[INFO] gathering the {} data'.format(url.split('/')[-2]))
df = pd.read_csv(url, names=names)
print('[INFO] shape of the cancer data {}'.format(df.shape))
print('[INFO] information about the cancer database \n{}'.format(df.info()))
print('[INFO] report of the data at a fine grained level \n{}'.format(
df.describe()))
# As per the documentation note of the cancer dataset, there are some
# missing attribute values. There are 16 instances in Groups 1 to 6 that
# contain a single missing (i.e., unavailable) attribute value, now
# denoted by "?".
missing_counts = df.apply(lambda x: x == '?', axis=1).sum()
null_counts = df.apply(lambda x: x.isnull().values.ravel().sum())
isnull_predicate = df.isnull().values.any()
print('[INFO] report of the missing attribute information \n{}'.format(
missing_counts))
print('[INFO] BareNuclei attribute information details \n{}'.format(
df.groupby('BareNuclei').BareNuclei.count()))
print('[INFO] does the dataset has any null values ? {}'.format(
isnull_predicate))
print(
'[INFO] null attribute value information if any \n{}'.format(null_counts))
# As per the above result, BareNuclei has 16 values equal to "?" for which
# we may either discard the rows with missing values or replace them with
# the most common or frequent values in the dataset given by
# df[df.BareNuclei != ‘?’]
# most frequent value of BareNuclei from the table
frequent_value = df['BareNuclei'].value_counts().index[0]
print('[INFO] replacing the ? with most frequent value of {}'.format(
frequent_value))
df['BareNuclei'] = df['BareNuclei'].replace('?', np.NaN)
df['BareNuclei'] = df['BareNuclei'].fillna(frequent_value)
df['BareNuclei'] = df['BareNuclei'].apply(lambda x: int(x))
# Heatmap of the correlation matrix calculated from pandas with index of
# the nlargest = 10
# nlargest represents the n largest values sorted in decreasing order.
plt.figure(1)
fields = df.corr().nlargest(10, 'Class')['Class'].index
corr = df[fields].corr()
sns.heatmap(corr, annot=True, fmt=".2f", linewidths=0.4)
plt.title('Heatmap of Cancer Data Correlation Matrix')
plt.show()
# distribute the dataset between training data and target/labels as under
X = df.drop(['SampleCodeNumber', 'Class'], axis=1)
y = df['Class']
# here we are representing class label 2 as 'benign' and 4 as 'malignant'
df.Class.replace([2, 4], ['benign', 'malignant'], inplace=True)
print('[INFO] target class labels for cancer {}'.format(np.unique(y)))
print('[INFO] count of benign and malignant classes \n{}'.format(
df.Class.value_counts()))
plt.figure(2)
sns.countplot(df['Class'],
label='Count',
palette=sns.color_palette("deep", 10))
plt.show()
# as per the accompanying documentation, the class labels 2 and 4 correspond
# to cancer states, Benign and Malignant as under
# class label = 2 -> Benign
# class label = 4 -> Malignant
# we can encode the labels with scikit learn LabelEncoder though it's
# not needed in this case as it's usually applied in the cases where the
# target labels are all strings
le = preprocessing.LabelEncoder()
labels = le.fit_transform(df['Class'])
print('[INFO] scikit encoded labels {}'.format(np.unique(labels)))
# get a box plot of all the parameters
plt.figure(3)
df.drop('Class',
axis=1).plot(kind='box',
subplots=True,
layout=(4, 3),
sharex=False,
sharey=False,
figsize=(9, 9),
title='Box Plot of individual cancer input variables')
plt.show()
# Feature Scaling - Standardization
# As a part of optimization of the algorithm, we can apply feature
# scaling, by standardizing features using StandardScaler class from
# sklearn's preprocessing module. Scaling will ensure that the features
# will have a 0 mean and standard deviation of 1. This helps in all the
# features contributing equally.
scaler = preprocessing.StandardScaler()
print('[INFO] re-scaling the features with options {}'.format(
scaler.get_params()))
X_std_array = scaler.fit_transform(X.values)
X_std = pd.DataFrame(X_std_array, index=X.index, columns=X.columns)
# now print mean and standard deviation
print("[DEBUG] Dataset Mean:", round(X_std.mean()))
print("[DEBUG] Dataset Standard deviation:", X_std.std())
# For the purpose of checking how well the trained model will perform on
# sample unseen test data, we will split the dataset into separate
# training and testing data sets
# we will split the datasets in 70% - 30% ratio, also the below function
# provides a shuffled data.
(X_train, X_test, y_train,
y_test) = model_selection.train_test_split(X_std,
y,
test_size=0.3,
random_state=1,
stratify=y)
print('[INFO] now evaluating the kNN classifier with minkowski metric...')
model = neighbors.KNeighborsClassifier(n_neighbors=3,
p=2,
metric='minkowski',
weights='uniform')
# fit the model using training and target labels
model.fit(X_train, y_train)
# test data prediction
y_predicted = model.predict(X_test)
target_names = le.classes_
print(
"[DEBUG] kNN Training Confusion Matrix\n\n",
pd.crosstab(y_train,
model.predict(X_train),
rownames=["Actual"],
colnames=["Predicted"]))
print('[DEBUG] Training set Accuracy score: {:0.3%}'.format(
metrics.accuracy_score(y_train, model.predict(X_train))))
print(
'[DEBUG] kNN Testing Confusion Matrix\n\n',
pd.crosstab(y_test,
model.predict(X_test),
rownames=['Actual'],
colnames=['Predicted']))
print('[DEBUG] Testing set Accuracy score: {:0.3%}'.format(
metrics.accuracy_score(y_test, model.predict(X_test))))
clr = metrics.classification_report(y_test,
y_predicted,
target_names=target_names)
print('[INFO] classification metrics for the model \n{}'.format(clr))
# calculate the running time of the model
run_time = time.time() - t_init
print('[INFO] total time taken for the model: %.3f s' % run_time)
|
# Definition of the name-abbreviation function
def name(value=None):
    """Abbreviate a long name numeronym-style (e.g. 'internationalization' -> 'i18n').

    Args:
        value: the name to abbreviate; when None (the default, preserving the
            original interactive behavior) the name is read via input().

    Returns:
        The name unchanged if it has 10 characters or fewer, otherwise
        first letter + count of interior letters + last letter.
    """
    if value is None:
        value = input('Enter name: ')
    if len(value) <= 10:
        return value
    # BUG FIX: the interior length is len(value) - 2 (i.e. value[1:-1]); the
    # original used value[1:-2], undercounting by one ('i17n' instead of 'i18n').
    return value[0] + str(len(value) - 2) + value[-1]
# Print the (possibly abbreviated) name entered by the user.
print(name())
|
"""
step1: choose a starting point x0 and set k = 0
step2: determine a descent direction d_k
step3: determine step size by line search, choose step size tau_k > 0
step4: update x[k + 1] = x[k] + tau_k * d_k, k += 1
repeat until stopping criterion is satisfied
Linear approximation:
Suppose that f is differentiable and \partial f(x) \neq 0
Then we have linear approximation for all delta x \neq 0: f(x + \delta x) \approx f(x) + \partial f(x)^T \
"""
from first_order_algorithm.line_search import armijo_step
import numpy as np
from numba import njit
import matplotlib.pyplot as plt
from first_order_algorithm.fun_grad import f, df
def steepest_descent(x, eps, sigma, beta):
    """Steepest (gradient) descent with Armijo backtracking line search.

    Args:
        x: starting point (numpy array). NOTE: updated in place via ``x += ...``,
            so the caller's array is mutated — pass a copy if that is unwanted.
        eps: stopping tolerance on the gradient norm.
        sigma, beta: Armijo line-search parameters — presumably the
            sufficient-decrease constant and backtracking factor; confirm
            against armijo_step.

    Returns:
        (x, descent_iter, step_size_iter, x_list, y_list): final iterate,
        number of descent steps, total line-search iterations, and the
        history of iterates and objective values.
    """
    d = -df(x)  # steepest-descent direction: negative gradient at x
    descent_iter = 0
    step_size_iter = 0
    x_list = [x.copy()]  # copies, because x itself is mutated in place below
    y_list = [f(x)]
    while np.linalg.norm(d) > eps:
        descent_iter += 1
        step_size, armi_iter = armijo_step(x, d, sigma, beta)
        step_size_iter += armi_iter
        x += step_size * d  # in-place update keeps the same array object
        x_list.append(x.copy())
        y_list.append(f(x))
        d = -df(x)
    return x, descent_iter, step_size_iter, x_list, y_list
def contour_plot(delta, x_range, y_range, x_arr, levels, title):
    """Plot contour lines of f over a grid, overlaid with the iterate path.

    Args:
        delta: grid spacing for both axes.
        x_range, y_range: (min, max) extents of the grid.
        x_arr: array of iterates, shape (n_iter, 2), drawn as a red path.
        levels: contour levels forwarded to matplotlib.
        title: figure title.
    """
    x1 = np.arange(x_range[0], x_range[1], delta)
    x2 = np.arange(y_range[0], y_range[1], delta)
    X1, X2 = np.meshgrid(x1, x2)
    Y = evaluate_mesh(X1, X2)
    fig, ax = plt.subplots()
    CS = ax.contour(X1, X2, Y, levels=levels)
    ax.scatter(x_arr[:, 0], x_arr[:, 1], s=1)       # individual iterates
    ax.plot(x_arr[:, 0], x_arr[:, 1], color='red')  # path connecting them
    ax.clabel(CS, inline=1, fontsize=10)
    ax.set_title(title)
    fig.show()
@njit()
def evaluate_mesh(X1, X2):
    """Evaluate f at every grid point (X1[i, j], X2[i, j]); JIT-compiled with numba."""
    n, m = X1.shape
    Y = np.zeros((n, m))
    for i in range(n):
        for j in range(m):
            # Explicit point-wise loops are cheap under njit compilation.
            Y[i, j] = f([X1[i, j], X2[i, j]])
    return Y
|
# Read the rectangle's dimensions, then report its perimeter and area.
length = float(input("Enter length of rectangle :"))
width = float(input("Enter width of rectangle :"))
perimeter_of_rectangle = 2 * (length + width)
area_of_rectangle = length * width
print("Perimeter of Rectangle: {}".format(perimeter_of_rectangle))
print("Area of Rectangle: {}".format(area_of_rectangle))
__author__ = 'ole'
from core.plugin import Plugin
# This plugin is necessary to keep the connection alive.
class Pong(Plugin):
    """Replies to server PING messages; required to keep the connection alive."""

    def on_ping(self, message):
        # Log the ping payload, then answer with "PONG <payload>\r\n" as bytes.
        self.logger.info("PONG {0}".format(message.content))
        self.send("PONG {0}\r\n".format(message.content).encode())
|
#def char_frequency(str1):
# Python 2 script: count character frequencies in a user-supplied string.
str1=raw_input("enter the string")
dict1 = {}
for n in str1:
    if n in dict1:
        dict1[n] += 1  # character seen before: bump its count
    else:
        dict1[n] = 1   # first occurrence
print dict1
#print(char_frequency('google.com'))
|
from evaluation import fit_and_score
import random
def _query(item):
""" Default query. """
return True
def _accept(pred):
""" Default acceptor. """
return True
class Sampler(object):
    """Base class for batch samplers over a partially-labelled dataset.

    Subclasses implement __call__ to yield a batch of items to label next.
    """

    def __init__(self, pipeline, batch_size=30, query=_query,
                 key=None, accept=_accept):
        self.pipeline = pipeline # a sklearn pipeline or classifier
        self.batch_size = batch_size # number of items per batch
        self.query = query # a function to filter before prediction
        self.key = key # a key function to sort predictions
        self.accept = accept # a function to filter predictions

    def __call__(self, dataset):
        """ Sample a batch from dataset. """
        raise NotImplementedError

    def fit(self, pool):
        # Fit the pipeline on the pool's currently labelled items.
        X_train, y_train = zip(*pool.labelled_items)
        self.pipeline.fit(X_train, y_train)

    def fit_and_score(self, pool, X_test, y_test, n=1):
        """
        Fit model to labelled data from pool and score on train and test.
        If n>1, then run n times with bootstrapped training data.
        """
        return iter(fit_and_score(self.pipeline, pool, X_test, y_test, n=n))
class Random(Sampler):
    """Uniform-random sampler: shuffle the eligible unlabelled items and take a batch."""

    def __call__(self, dataset):
        candidates = [item for item in dataset.unlabelled_items if self.query(item)]
        random.shuffle(candidates)
        for text, label in candidates[:self.batch_size]:
            yield text, label
class Active(Sampler):
    """Active-learning sampler: rank model predictions and yield accepted ones."""

    def __call__(self, dataset):
        # Restrict to unlabelled items that pass the query filter.
        unlabelled = list(filter(self.query, dataset.unlabelled_items))
        if unlabelled:
            X, _ = zip(*unlabelled)
            count = 0
            # Pair each item's class-probability vector with its text.
            predictions = zip(self.pipeline.predict_proba(X), X)
            if self.key:
                # Sorting materialises the lazy zip; the key defines sampling priority.
                predictions = sorted(predictions, key=self.key)
            for probs, text in predictions:
                pred = dict(zip(self.pipeline.classes_, probs))
                if self.accept(pred):
                    yield text, pred
                    count += 1
                    # batch_size of 0/None disables the cap entirely.
                    if self.batch_size and count == self.batch_size:
                        break
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.