seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
37708551559 | import hashlib as hasher
import datetime as date
class Block:
    """A single block of a toy blockchain.

    The block's own hash is a SHA-256 digest over its index, timestamp,
    payload and the hash of the preceding block, chaining blocks together.
    """

    def __init__(self, index, timestamp, data, previous_hash):
        self.index = index
        self.timestamp = timestamp
        self.data = data
        self.previous_hash = previous_hash
        # Derived last so every other field is already set.
        self.hash = self.hash_block()

    def hash_block(self):
        """Return the SHA-256 hex digest of the block's contents."""
        payload = ''.join(
            str(part)
            for part in (self.index, self.timestamp, self.data, self.previous_hash)
        )
        return hasher.sha256(payload.encode("utf-16")).hexdigest()

    def __str__(self):
        return (f'Block: {self.index}, data: {self.data}, '
                f'hash: {self.hash}, prevHash: {self.previous_hash}')
| FollowJack/Blockchain | __old__/models.py | models.py | py | 757 | python | en | code | 0 | github-code | 90 |
70991569576 | import numpy as np
import cv2
from os import listdir
from os.path import isfile, join
from tqdm import tqdm
paths = {"train": "data/train", "val": "data/val"}
def process(path):
    """Load the image at *path* and return it resized to 150x150.

    Returns None when the file cannot be read as an image.
    """
    image = cv2.imread(path)
    if image is None:
        return None
    # Shrink to a fixed size to keep memory usage manageable.
    target_size = (150, 150)
    return cv2.resize(image, target_size)
def prepData(path):
    """Build (x, y) numpy arrays of images and class labels found under *path*.

    The label is the index into addonPaths: 0 = NORMAL, 1 = PNEUMONIA.
    When *path* is the training directory, each image is augmented with its
    90-degree clockwise AND counter-clockwise rotations (the original code
    appended the clockwise rotation twice, so the counter-clockwise variant
    was never used).

    Returns:
        (x_data, y_data): float32 image array and int64 label array.
    """
    x_data = []
    y_data = []
    addonPaths = ["/NORMAL/", "/PNEUMONIA/"]
    for idx, addon in tqdm(enumerate(addonPaths)):
        folder = path + addon
        for f in listdir(folder):
            if not isfile(join(folder, f)):
                continue
            img = process(folder + f)
            if img is None:
                continue
            x_data.append(img)
            y_data.append(idx)
            if path == paths['train']:
                # Data augmentation for training only: add both rotations.
                for rotated in (cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE),
                                cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)):
                    x_data.append(rotated)
                    y_data.append(idx)
    # Convert data lists to numpy arrays (will be needed when saving).
    x_data = np.array(x_data, dtype=np.float32)
    # np.long was removed in NumPy 1.24; np.int64 is what it aliased.
    y_data = np.array(y_data, dtype=np.int64)
    return x_data, y_data
| gabr444/xray | data.py | data.py | py | 1,736 | python | en | code | 0 | github-code | 90 |
39294672394 | from dagster import *
# When partiton X of `a_first` is materialized, it will trigger the
# materialization of X for `a_second` and `a_third`
#
# When multiple partitions of `a_first` are materialized by triggering a
# backfill, only the latest partition will automatically be materialized for
# `a_second` and `a_third`
#
# As all assets have the same partition, we do not need to add much
# configuration. By default, dagster sees that they all have the same partition
# and will map each partiton 1 to 1
@asset(
    partitions_def=DailyPartitionsDefinition(start_date='2023-08-01'),
)
def a_first(context: OpExecutionContext) -> str:
    """Root partitioned asset: logs and returns its own partition key."""
    partition_key = context.asset_partition_key_for_output()
    context.log.info('%s', partition_key)
    return partition_key
@asset(
    partitions_def=DailyPartitionsDefinition(start_date='2023-08-01'),
    auto_materialize_policy=AutoMaterializePolicy.eager(),
)
def a_second(context: OpExecutionContext, a_first: str) -> str:
    """Eagerly materialized downstream asset: doubles the upstream string."""
    doubled = a_first * 2
    context.log.info('%s', a_first)
    return doubled
@asset(
    partitions_def=DailyPartitionsDefinition(start_date='2023-08-01'),
    auto_materialize_policy=AutoMaterializePolicy.eager(),
)
def a_third(context: OpExecutionContext, a_second: str) -> list[str]:
    """Eagerly materialized downstream asset: a three-element list of the upstream string."""
    context.log.info('%s', a_second)
    return [a_second, a_second, a_second]
| meyer1994/minidagster | minidagster/linear_with_partitions.py | linear_with_partitions.py | py | 1,279 | python | en | code | 0 | github-code | 90 |
18531299536 | from aiogram import types
from tgbot.data.strings import user_info
from tgbot.service.repo.repository import SQLAlchemyRepos
from tgbot.service.repo.user_repo import UserRepo
async def my_cabinet(message: types.Message, repo: SQLAlchemyRepos):
    """Reply to the incoming message with the user's personal-cabinet card."""
    users = repo.get_repo(UserRepo)
    profile = await users.get_user(user_id=message.from_user.id)
    bot_username = (await message.bot.get_me()).username
    # Fill the user_info template with the profile fields it expects.
    text = user_info.format(
        date=profile.created_at.strftime('%d.%m.%Y %H:%M'),
        referral_count=profile.referrals,
        username=bot_username,
        user_id=message.from_user.id,
    )
    await message.answer(text=text)
| uicodee/contestmaker | tgbot/handlers/buttons/cabinet.py | cabinet.py | py | 612 | python | en | code | 1 | github-code | 90 |
# Read n (total matchsticks available) and m (number of usable digits),
# then the list of usable digits themselves.
n, m = list(map(int, input().split(' ')))
l = list(map(int, input().split(' ')))
d = {} # key=match, val=num
# Matchstick cost -> digits drawable with exactly that many sticks
# (digits within each list are ordered largest-first).
d[2] = [1]
d[3] = [7]
d[4] = [4]
d[5] = [5, 3, 2]
d[6] = [9, 6]
d[7] = [8]
# enable[digit] is True iff the digit may appear in the answer.
enable = [False] * 10
for a in l:
    enable[a] = True
# Brute-force the largest number that can be built using exactly the
# remaining matchsticks, trying digit costs k (=2..7) from the most
# expensive downwards.
def f(nokori, current):
    # nokori: matchsticks remaining; current: number assembled so far.
    if nokori == 0:
        return current
    if nokori < 0:
        return -1
    temps = []
    for key, nums in list(d.items())[::-1]:
        for num in nums:
            if enable[num]:
                if nokori-key != 0 and current % 10 != 0 and current % 10 < num:
                    # 98 is fine but 89 would only duplicate a permutation,
                    # so skip ascending pairs everywhere except the last digit
                    continue
                temps.append(f(nokori-key, current*10+num))
                break
    return max(temps)
# While there is plenty of budget left, greedily cram in the digit that
# costs the fewest matchsticks (it maximizes the digit count).
def hoge():
    # Return (digit, cost) of the cheapest usable digit.
    for key, nums in list(d.items()):
        for num in nums:
            if enable[num]:
                return (num, key)
num, key = hoge()
cs = []
# Burn matchsticks down to <= 50 with the cheapest digit; the remainder
# is small enough for the brute-force search f() to handle.
while n > 50:
    n -= key
    cs.append(str(num))
# Combine the greedily-packed digits with the brute-forced maximum and
# sort all digits descending to get the lexicographically largest number.
num_ans = str(f(n, 0))
print(''.join(sorted(list(num_ans) + cs)[::-1]))
| Aasthaengg/IBMdataset | Python_codes/p03128/s476249797.py | s476249797.py | py | 1,236 | python | en | code | 0 | github-code | 90 |
40581294096 | """
516. Longest Palindromic Subsequence
Given a string s, find the longest palindromic subsequence's length in s.
A subsequence is a sequence that can be derived from another sequence by deleting some or no elements without changing the order of the remaining elements.
Example 1:
Input: s = "bbbab"
Output: 4
Explanation: One possible longest palindromic subsequence is "bbbb".
Example 2:
Input: s = "cbbd"
Output: 2
Explanation: One possible longest palindromic subsequence is "bb".
Constraints:
1 <= s.length <= 1000
s consists only of lowercase English letters.
"""
class Solution:
    def longestPalindromeSubseq(self, s: str) -> int:
        """Return the length of the longest palindromic subsequence of *s*.

        Computed as the longest common subsequence of *s* and its reverse
        with a bottom-up DP table; O(n^2) time and space.
        """
        n = len(s)
        rev = s[::-1]
        # dp[i][j] = LCS length of s[:i] and rev[:j]; row/col 0 are the
        # empty-prefix base cases.
        dp = [[0] * (n + 1) for _ in range(n + 1)]
        for i in range(1, n + 1):
            for j in range(1, n + 1):
                if s[i - 1] == rev[j - 1]:
                    dp[i][j] = dp[i - 1][j - 1] + 1
                else:
                    dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
        return dp[n][n]
| venkatsvpr/Problems_Solved | LC_Longest_Palindromic_Subsequence.py | LC_Longest_Palindromic_Subsequence.py | py | 1,617 | python | en | code | 3 | github-code | 90 |
32664505661 | import torch
import torch.nn.parallel
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class AntiAliasDownsampleLayer(nn.Module):
    """Anti-aliased stride-2 downsampling (blur, then subsample).

    Dispatches to a TorchScript implementation by default; set
    *remove_model_jit* to use the plain nn.Module variant instead.
    """

    def __init__(self, remove_model_jit: bool = False, filt_size: int = 3, stride: int = 2,
                 channels: int = 0):
        super().__init__()
        if remove_model_jit:
            self.op = Downsample(filt_size, stride, channels)
        else:
            self.op = DownsampleJIT(filt_size, stride, channels)

    def forward(self, x):
        return self.op(x)
@torch.jit.script
class DownsampleJIT(object):
    # TorchScript-compiled blur-then-downsample: 3x3 binomial kernel,
    # stride 2, applied depthwise (one kernel per channel).
    def __init__(self, filt_size: int = 3, stride: int = 2, channels: int = 0):
        self.stride = stride
        self.filt_size = filt_size
        self.channels = channels
        # Only the 3x3 / stride-2 configuration is supported.
        assert self.filt_size == 3
        assert stride == 2
        # Separable binomial [1, 2, 1] kernel, normalized to sum to 1.
        a = torch.tensor([1., 2., 1.])
        filt = (a[:, None] * a[None, :]).clone().detach()
        filt = filt / torch.sum(filt)
        # NOTE(review): the kernel is created on CUDA in half precision, so
        # this class requires a GPU and expects fp16 inputs — confirm callers.
        self.filt = filt[None, None, :, :].repeat((self.channels, 1, 1, 1)).cuda().half()
    def __call__(self, input: torch.Tensor):
        # Fall back to fp32 kernels when the input is not half precision.
        if input.dtype != self.filt.dtype:
            self.filt = self.filt.float()
        input_pad = F.pad(input, (1, 1, 1, 1), 'reflect')
        # groups == channel count -> depthwise convolution.
        return F.conv2d(input_pad, self.filt, stride=2, padding=0, groups=input.shape[1])
class Downsample(nn.Module):
    """Blur-then-downsample module: 3x3 binomial kernel applied depthwise,
    followed by strided subsampling (anti-aliased pooling)."""

    def __init__(self, filt_size=3, stride=2, channels=None):
        super(Downsample, self).__init__()
        self.filt_size = filt_size
        self.stride = stride
        self.channels = channels
        # Only the 3x3 kernel is supported (padding below is hard-coded to 1).
        assert self.filt_size == 3
        # Separable binomial [1, 2, 1] kernel, normalized to sum to 1.
        a = torch.tensor([1., 2., 1.])
        filt = (a[:, None] * a[None, :]).clone().detach()
        filt = filt / torch.sum(filt)
        # Register as a non-persistent buffer so the kernel follows the module
        # across .to(device)/.half() calls (a plain attribute would stay on the
        # CPU in fp32 and crash on a moved module) without entering state_dict,
        # which keeps existing checkpoints loadable.
        self.register_buffer(
            'filt',
            filt[None, None, :, :].repeat((self.channels, 1, 1, 1)),
            persistent=False)

    def forward(self, input):
        input_pad = F.pad(input, (1, 1, 1, 1), 'reflect')
        # groups == channel count -> depthwise convolution.
        return F.conv2d(input_pad, self.filt, stride=self.stride, padding=0, groups=input.shape[1])
| Alibaba-MIIL/ImageNet21K | src_files/models/tresnet/layers/anti_aliasing.py | anti_aliasing.py | py | 2,035 | python | en | code | 665 | github-code | 90 |
19128525267 | import RPi.GPIO as GPIO
import time
class HR8825:
    """Driver for one HR8825 stepper-motor controller channel via RPi.GPIO
    (direction, step, enable and microstep-mode pins)."""

    # Mode-pin levels for each supported microstep resolution.
    MICROSTEP_LEVELS = {
        'fullstep': (0, 0, 0),
        'halfstep': (1, 0, 0),
        '1/4step': (0, 1, 0),
        '1/8step': (1, 1, 0),
        '1/16step': (0, 0, 1),
        '1/32step': (1, 0, 1),
    }

    def __init__(self, dir_pin, step_pin, enable_pin, mode_pins):
        self.dir_pin = dir_pin
        self.step_pin = step_pin
        self.enable_pin = enable_pin
        self.mode_pins = mode_pins
        # Every control pin is an output.
        for pin in (self.dir_pin, self.step_pin, self.enable_pin, self.mode_pins):
            GPIO.setup(pin, GPIO.OUT)

    def digital_write(self, pin, value):
        """Write *value* to *pin* (a single pin or a tuple of pins)."""
        GPIO.output(pin, value)

    def Stop(self):
        """De-assert the enable pin, releasing the driver."""
        self.digital_write(self.enable_pin, 0)

    def SetMicroStep(self, mode, stepformat):
        """
        (1) mode
            'hardware' : Use the switch on the module to control the microstep
            'software' : Use software to control microstep pin levels
                Need to put the All switch to 0
        (2) stepformat
            ('fullstep', 'halfstep', '1/4step', '1/8step', '1/16step', '1/32step')
        """
        # Hardware mode is configured on the board itself; nothing to do here.
        if mode == "software":
            self.digital_write(self.mode_pins, self.MICROSTEP_LEVELS[stepformat])

    def TurnStep(self, Dir, steps, stepdelay=0.002):
        """Pulse the step pin *steps* times in direction *Dir*, waiting
        *stepdelay* seconds after each edge."""
        self.digital_write(self.enable_pin, 1)
        self.digital_write(self.dir_pin, Dir)
        for _ in range(steps):
            self.digital_write(self.step_pin, True)
            time.sleep(stepdelay)
            self.digital_write(self.step_pin, False)
            time.sleep(stepdelay)
| RM220507/XYZ-Gantry | code/gantrycontrol/HR8825.py | HR8825.py | py | 1,734 | python | en | code | 0 | github-code | 90 |
19012317271 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 22 15:12:11 2022
@author: lidor
"""
import numpy as np
from ccg import *
import create_feat_tsc as tsc
import sort_shank as MC
from loading_data import *
from noise_classifier import get_preds
def getX(item):
    """Return column *item* of the global tensor X1 as a float batch of one."""
    sample = X1[:, item].float()
    return sample.unsqueeze(0)
def getY(item):
    """Return entry *item* of the global label vector Y as a string of its integer value."""
    return str(int(Y[item]))
# inputs: recording base path, shank number, and waveform window dimensions
filebase = '/home/lidor/data/AUSS_project/Automated_curation/test_data/mP31_04'
shank = 2
Nchannels = 10
Nsamp = 32
# Loading data: cluster labels, spike times, mean/std waveforms, CC matrix
clu,res = load_clures(filebase,shank)
mspk,sspk = make_mSPK(filebase,shank,Nchannels=Nchannels,Nsamp=Nsamp)
cc = get_CCmat(filebase,shank)
u_clu = np.unique(clu)
# generating the time matrix feature for the noise classifier (NC)
time_mat = get_time_mat1(res, clu)
# get probabilities from the noise classifier
pred = get_preds(clu, mspk, sspk, cc, time_mat, u_clu)
# generate a clu vector in which noise and multi-units are labeled as zero
cleanClu = tsc.tsc(pred,clu)
ind,Z = tsc.get_cleanClu_idx(clu,cleanClu)
# make new features for the merge classifier (MC); index [1:] drops the
# zero (noise) cluster
nspk_vec = tsc.compute_Nvec(cleanClu)[1:]
cluster_ids = np.unique(cleanClu)
time_mat = tsc.compute_timeMat(cleanClu,res,cluster_ids)[1:,:]
mean_spk,std_spk = tsc.orgnize_WF(mspk,sspk,ind,Z)
sample_rate = 20000
cc = compCCG(res,cleanClu,FS=sample_rate,window_size=0.042)[0]
cc = cc[1:-1,1:,1:]
newCLu = MC.main_loop(cleanClu, res, cc, mean_spk, std_spk, nspk_vec, time_mat)
# Compare the original and merged cluster assignments: report every new
# cluster that absorbed more than one original cluster (i.e. a merge).
clu1 = clu
clu2 = newCLu
U_id = np.unique(clu2)
reco_list = list()
for i in U_id:
    idx = np.where(clu2==i)[0]
    l = np.unique(clu1[idx])
    if len(l) > 1:
        reco_list.append(l)
print(reco_list)
| ayalab1/neurocode | spikeSorting/AutomatedCuration/Automated-curation/runig_AI_pipeline.py | runig_AI_pipeline.py | py | 1,721 | python | en | code | 8 | github-code | 90 |
S = input()
# Index of the first 'C' and of the last 'F' (-1 when absent).
c = S.find('C')
f = S.rfind('F')
# "CF" is a subsequence of S iff some 'C' appears before some 'F'.
print('Yes' if 0 <= c < f else 'No')
28528958761 | from AgentState import AgentState
from AgentAction import AgentAction
def test_agent_moves_when_action_happens():
    """A RIGHT action applied at the origin should move the agent to x == 1."""
    # Arrange
    initial_state = AgentState(0, 0)
    action = AgentAction("RIGHT")

    # Act
    moved_state = action.result(initial_state)

    # Assert
    assert moved_state.x == 1
| guillermoSb/ia_lab01 | tests/test_action.py | test_action.py | py | 276 | python | en | code | 0 | github-code | 90 |
14825138435 | from enum import Enum
from typing import List
from schemas.common import AVAILABLE_SCHEMAS
from schemas.signature import Ed25519Signature
from typedefs.datatype import UInt16, UInt8
from typedefs.field import ComplexField, Field, Schema, SimpleField
from schemas.address import MAX_MULTI_ADDRESSES, MIN_MULTI_ADDRESSES
from typedefs.subschema import AnyOf, OneOf
class UnlockType(Enum):
    """Discriminator values for the leading 'Unlock Type' byte of each unlock."""
    Signature = 0
    Reference = 1
    Account = 2
    Anchor = 3
    Nft = 4
    Multi = 5
    Empty = 6
def unlock_type_field(unlock_type: UnlockType, name: str, article="a") -> SimpleField:
    """Build the leading 'Unlock Type' field for the given unlock kind.

    *article* lets the generated description read naturally ("a" vs "an").
    """
    description = (
        f"Set to <strong>value {unlock_type.value}</strong> "
        f"to denote {article} <i>{name}</i>."
    )
    return SimpleField("Unlock Type", UInt8(), description)
# Signature Unlock
signature_unlock_name = "Signature Unlock"
signature_unlock_fields: List[Field] = [
    unlock_type_field(UnlockType.Signature, signature_unlock_name),
    ComplexField("Signature", OneOf(), [Ed25519Signature()]),
]
def SignatureUnlock(
    omitFields: bool = False,
) -> Schema:
    """Build the Signature Unlock schema; *omitFields* emits only the schema
    header (used when embedding it inside the Multi Unlock field list)."""
    return Schema(
        signature_unlock_name,
        "Unlocks the address derived from the contained Public Key in the transaction in which it is contained in.",
        signature_unlock_fields,
        tipReference=45,
        omitFields=omitFields,
    )
AVAILABLE_SCHEMAS.append(SignatureUnlock())
# Reference Unlock
reference_unlock_name = "Reference Unlock"
reference_unlock_fields: List[Field] = [
    unlock_type_field(UnlockType.Reference, reference_unlock_name),
    SimpleField("Reference", UInt16(), "Represents the index of a previous unlock."),
]
def ReferenceUnlock(
    omitFields: bool = False,
) -> Schema:
    """Build the Reference Unlock schema; *omitFields* emits only the schema
    header (used when embedding it inside the Multi Unlock field list)."""
    return Schema(
        reference_unlock_name,
        "References a previous unlock to support unlocking multiple inputs owned by the same address.",
        reference_unlock_fields,
        tipReference=45,
        omitFields=omitFields,
    )
AVAILABLE_SCHEMAS.append(ReferenceUnlock())
# Account Unlock
account_unlock_name = "Account Unlock"
account_unlock_fields: List[Field] = [
    unlock_type_field(UnlockType.Account, account_unlock_name, article="an"),
    SimpleField(
        "Account Reference Unlock Index",
        UInt16(),
        "Index of input and unlock corresponding to an Account Output.",
    ),
]
def AccountUnlock(
    omitFields: bool = False,
) -> Schema:
    """Build the Account Unlock schema; *omitFields* emits only the schema
    header (used when embedding it inside the Multi Unlock field list)."""
    return Schema(
        account_unlock_name,
        "Points to the unlock of a consumed Account Output.",
        account_unlock_fields,
        tipReference=42,
        omitFields=omitFields,
    )
AVAILABLE_SCHEMAS.append(AccountUnlock())
# Anchor Unlock
anchor_unlock_name = "Anchor Unlock"
anchor_unlock_fields: List[Field] = [
    unlock_type_field(UnlockType.Anchor, anchor_unlock_name, article="an"),
    SimpleField(
        "Anchor Reference Unlock Index",
        UInt16(),
        "Index of input and unlock corresponding to an Anchor Output.",
    ),
]
def AnchorUnlock(
    omitFields: bool = False,
) -> Schema:
    """Build the Anchor Unlock schema; *omitFields* emits only the schema
    header (used when embedding it inside the Multi Unlock field list)."""
    return Schema(
        anchor_unlock_name,
        "Points to the unlock of a consumed Anchor Output.",
        anchor_unlock_fields,
        tipReference=54,
        omitFields=omitFields,
    )
AVAILABLE_SCHEMAS.append(AnchorUnlock())
# NFT Unlock
nft_unlock_name = "NFT Unlock"
nft_unlock_fields: List[Field] = [
    unlock_type_field(UnlockType.Nft, nft_unlock_name, article="an"),
    SimpleField(
        "NFT Reference Unlock Index",
        UInt16(),
        "Index of input and unlock corresponding to an NFT Output.",
    ),
]
def NFTUnlock(
    omitFields: bool = False,
) -> Schema:
    """Build the NFT Unlock schema; *omitFields* emits only the schema
    header (used when embedding it inside the Multi Unlock field list)."""
    return Schema(
        nft_unlock_name,
        "Points to the unlock of a consumed NFT Output.",
        nft_unlock_fields,
        tipReference=43,
        omitFields=omitFields,
    )
AVAILABLE_SCHEMAS.append(NFTUnlock())
# Empty Unlock
empty_unlock_name = "Empty Unlock"
empty_unlock_fields: List[Field] = [
    unlock_type_field(UnlockType.Empty, empty_unlock_name, article="an"),
]
def EmptyUnlock(
    omitFields: bool = False,
) -> Schema:
    """Build the Empty Unlock schema; *omitFields* emits only the schema
    header (used when embedding it inside the Multi Unlock field list)."""
    return Schema(
        empty_unlock_name,
        "Used to maintain correct index relationship between addresses and signatures when unlocking a Multi Address where not all addresses are unlocked.",
        empty_unlock_fields,
        tipReference=52,
        omitFields=omitFields,
    )
AVAILABLE_SCHEMAS.append(EmptyUnlock())
# Multi Unlock
multi_unlock_name = "Multi Unlock"
multi_unlock_fields: List[Field] = [
    unlock_type_field(UnlockType.Multi, multi_unlock_name),
    SimpleField("Unlocks Count", UInt8(), "The number of unlocks following."),
    ComplexField(
        "Unlocks",
        AnyOf(MIN_MULTI_ADDRESSES, MAX_MULTI_ADDRESSES),
        [
            # Embedded unlocks are emitted header-only (omitFields=True).
            SignatureUnlock(omitFields=True),
            ReferenceUnlock(omitFields=True),
            AccountUnlock(omitFields=True),
            AnchorUnlock(omitFields=True),
            NFTUnlock(omitFields=True),
            EmptyUnlock(omitFields=True),
        ],
    ),
]
def MultiUnlock(
    omitFields: bool = False,
) -> Schema:
    """Build the Multi Unlock schema; *omitFields* emits only the schema header."""
    return Schema(
        multi_unlock_name,
        "Unlocks a Multi Address with a list of other unlocks.",
        multi_unlock_fields,
        tipReference=52,
        omitFields=omitFields,
    )
AVAILABLE_SCHEMAS.append(MultiUnlock())
| iotaledger/tip-tools | schema-tool/schemas/unlock.py | unlock.py | py | 5,338 | python | en | code | 0 | github-code | 90 |
41873825494 | #!/usr/bin/env python
# coding: utf-8
# ## Histogram plot
#
# When visualising one dimensional data without relating it to other information an option would be histograms.
# Histograms are used when describing distributions in your data, it is not the values itself you are visualising, rather the counts/frequencies of each value.
#
# We again start with importing our libraries
# In[1]:
import pandas as pd
import seaborn as sns
sns.set_theme()
sns.set(rc={'figure.figsize':(16,8)})
# For this example we will be using the prepared dataset from seaborn containing mileages of several cars.
# Information about the cars is also given.
# In[2]:
mpg_df = sns.load_dataset('mpg')
mpg_df.head()
# We start off simple by plotting the distribution of horsepower in our dataset.
# In[3]:
sns.histplot(data=mpg_df, x='horsepower')
# A first thing that is visible is that our feature is not normally distributed, we have a long tail to the higher end.
#
# For histograms we can specify the amount of bins in which we separate the counts, seaborn selects a suitable number yet we can change this.
# In[4]:
sns.histplot(data=mpg_df, x='horsepower', bins=100)
# As you can see, the previous option looks a lot better.
# Taking the right amount of bins is important.
#
# In order to add more information to our plot, we can use categorical data to split our data into multiple histograms.
# Here we used the origin of the cars to split into 3 categories, notice how each of them has their own area, japan and europe are on the lower end whilst usa is centered in higher horsepower.
# In[5]:
sns.histplot(data=mpg_df, x='horsepower', hue='origin', bins=20, multiple='stack')
# A neat feature of seaborn is that it can join histograms and scatter plots (in the next section) together.
#
# Here we see how the visualisations of 2 one dimensional histograms perfectly combine together into a scatter plot, where 2 dimensional data is shown (both mileage and horsepower).
# In[6]:
sns.jointplot(data=mpg_df, x='mpg', y='horsepower')
# Histograms are a really powerful tool when it comes to validating your data, we can easily see the distribution of each feature, see if they are normally distributed and visualise distributions of subgroups.
#
# Yet for final visualisations they are often not interesting enough.
# In[ ]:
| LorenzF/data-science-practical-approach | src/_build/jupyter_execute/c4_data_visualisation/histogram.py | histogram.py | py | 2,344 | python | en | code | 0 | github-code | 90 |
4128477251 | import sys
import logging
import warnings
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from datetime import timedelta
from itertools import groupby
from collections import OrderedDict
import numpy as np
import pandas as pd
import scipy.signal
from .iaga2hdf import read_hdf, write_hdf
from .filters import minute_interval_filter, second_interval_filter
from ..util.nan import nan_interp
from ..signal.lfilter import lp_fir_filter
from ..stats.stats import despike
logger = logging.getLogger('pyrsss.mag.process_hdf')
"""
A NOTE ON THE COORDINATE FRAME
The following describes the XYZ convention
(from https://github.com/usgs/geomag-algorithms/blob/master/docs/algorithms/XYZ.md):
- X is the magnitude of the geographic north pole component of the H vector;
- Y is the magnitude of the east component of the H vector;
- Z is the downward component of the geomagnetic field, same as before.
"""
def consecutive_nans(x, y):
    """
    Return the length of the longest contiguous run of samples for which
    *x* or *y* is NaN (0 if neither series contains NaNs).
    """
    nan = np.isnan(x) | np.isnan(y)
    # groupby collapses the boolean mask into runs; only True runs count.
    return max((sum(1 for _ in group) for key, group in groupby(nan) if key),
               default=0)
def process_timeseries(dt,
                       Bx,
                       By,
                       c1='B_X',
                       c2='B_Y',
                       despike_data=True,
                       remove_mean=True):
    """
    Process surface magnetic field measurement time series with
    indices *dt* and components *Bx* and *By*. Output a
    :class:`DataFrame` with columns *c1* and *c2* associated with the
    processed output time series. If *despike_data*, remove outliers
    prior to filtering. If *remove_mean*, remove the mean from the
    output time series.

    Raises ValueError when the sample interval is neither 1 s nor 60 s
    (no band-pass filter is synthesized for other cadences).
    """
    warnings.warn('use process_df instead',
                  PendingDeprecationWarning)
    n = consecutive_nans(Bx, By)
    # Sample interval in seconds, inferred from the first two timestamps
    # (assumes a uniform cadence — TODO confirm for all inputs).
    interval = (dt[1] - dt[0]).total_seconds()
    if n > 0:
        logger.warning('longest contiguous gap = {:.2f} minutes'.format(n * interval / 60))
    # fill data gaps via linear interpolation
    Bx = nan_interp(Bx)
    By = nan_interp(By)
    if despike_data:
        # remove outliers (despike works on a DataFrame, so wrap and unwrap)
        df = pd.DataFrame(index=dt,
                          data={'Bx': Bx,
                                'By': By})
        df = despike(df)
        dt = df.index.to_pydatetime()
        Bx = df.Bx.values
        By = df.By.values
    # apply 1 - 100 mHz bandpass filter
    if interval == 1.0:
        h = second_interval_filter()
    elif interval == 60.0:
        h = minute_interval_filter()
    else:
        raise ValueError('1 to 100 mHz filter not yet synthesized for {} s interval data'.format(interval))
    # mode='valid' shortens the series; the filtered index is returned once
    # and reused for the second component.
    Bx_filtered, dt_filtered = lp_fir_filter(h, Bx, mode='valid', index=dt)
    By_filtered = lp_fir_filter(h, By, mode='valid')
    # remove mean
    if remove_mean:
        Bx_filtered -= np.mean(Bx_filtered)
        By_filtered -= np.mean(By_filtered)
    # build DataFrame and store to disk
    return pd.DataFrame(index=dt_filtered,
                        data={c1: Bx_filtered,
                              c2: By_filtered})
def process(hdf_fname,
            source_key='B_raw',
            key='B',
            he=False,
            despike_data=True,
            remove_mean=True):
    """
    Process the magnetic field columns of *hdf_fname*, applying
    pre-processing (nan interpolation) and a band-pass filter. Look
    for input at *source_key* and store output under identifier
    *key*. If *he*, process the H and E magnetic field components. If
    *remove_mean*, remove the mean from each column.

    Returns *hdf_fname*; the HDF record is updated in place.
    """
    logger.info('processing {}'.format(hdf_fname))
    df_raw, header = read_hdf(hdf_fname, source_key)
    dt = df_raw.index.to_pydatetime()
    # Scale raw values by 1e-9 before filtering (presumably nT -> T;
    # confirm against the producer of the raw records).
    Bx_raw = df_raw['B_X'].values * 1e-9
    By_raw = df_raw['B_Y'].values * 1e-9
    df_filtered = process_timeseries(dt,
                                     Bx_raw,
                                     By_raw,
                                     despike_data=despike_data,
                                     remove_mean=remove_mean)
    if he:
        # Same pipeline for the H (horizontal) and E (east) components.
        Bh_raw = df_raw['B_H'].values * 1e-9
        Be_raw = df_raw['B_E'].values * 1e-9
        df_he_filtered = process_timeseries(dt,
                                            Bh_raw,
                                            Be_raw,
                                            c1='B_H',
                                            c2='B_E',
                                            despike_data=despike_data,
                                            remove_mean=remove_mean)
        df_filtered = df_filtered.join(df_he_filtered)
    write_hdf(hdf_fname, df_filtered, key, header)
    return hdf_fname
def fill_nans(df, delta=None):
    """Reindex *df* onto a complete, evenly spaced time index.

    When *delta* is falsy it is inferred as the smallest gap between
    consecutive index values. Missing timestamps are logged and filled
    with NaN rows. Returns ``(reindexed_df, delta)``.
    """
    if not delta:
        # Infer the sampling interval from the tightest index spacing.
        smallest_gap = min(np.diff(df.index.values))
        gap_seconds = smallest_gap / np.timedelta64(1, 's')
        delta = timedelta(seconds=gap_seconds)
        logger.info('using delta = {} (s)'.format(delta.total_seconds()))
    full_index = pd.date_range(start=df.index[0],
                               end=df.index[-1],
                               freq=delta)
    absent = sorted(set(full_index) - set(df.index))
    if absent:
        logger.warning('Missing time indices (filled by NaNs):')
        for stamp in absent:
            logger.warning(stamp)
    return df.reindex(full_index, copy=False), delta
def nan_interpolate(df):
    """
    Linearly interpolate over NaN gaps in *df* (in place), logging a
    warning for each column that contained NaNs, including the length of
    its longest consecutive NaN run.

    The longest run is found by grouping the NaN mask on the cumulative
    count of non-null values: every NaN in one gap shares a group.

    Reference:
    https://stackoverflow.com/questions/29007830/identifying-consecutive-nans-with-pandas
    """
    sum_nan = df.isnull().sum()
    for col in df.columns:
        # Only compute/log run statistics for columns that actually have NaNs
        # (the original computed max_run unconditionally and had the warning
        # commented out as broken).
        if sum_nan[col]:
            max_run = (df[col].isnull().astype(int)
                       .groupby(df[col].notnull().astype(int).cumsum())
                       .sum()
                       .max())
            logger.warning('column {} has {} NaNs ({} max consecutive run)'.format(col,
                                                                                   sum_nan[col],
                                                                                   max_run))
    df.interpolate(inplace=True)
    return df
def process_df(df,
               delta=None,
               despike_data=True,
               subtract_median=True):
    """
    Despike, gap-fill, interpolate, and 1--100 mHz band-pass filter each
    column of the time-indexed :class:`DataFrame` *df*. *delta* is the
    sample interval (inferred from the index when ``None``). Returns a
    new :class:`DataFrame` holding the filtered columns.

    Raises ValueError for sample intervals other than 1 s or 60 s (no
    band-pass filter is synthesized for other cadences).
    """
    if despike_data:
        logger.info('despike')
        df = despike(df)
    logger.info('Fill gaps')
    df, delta = fill_nans(df, delta=delta)
    logger.info('Gap interpolation')
    df = nan_interpolate(df)
    # apply 1 - 100 mHz bandpass filter
    interval = delta.total_seconds()
    if interval == 1.0:
        h = second_interval_filter()
    elif interval == 60.0:
        h = minute_interval_filter()
    else:
        raise ValueError('1 to 100 mHz filter not yet synthesized for {} s interval data'.format(interval))
    data = OrderedDict()
    dt = df.index.to_pydatetime()
    for i, col in enumerate(df.columns):
        logger.info('Band-pass filter {}'.format(col))
        # The valid-mode output index is only computed for the first column;
        # the remaining columns share the same (shortened) time base.
        if i == 0:
            col_filtered, dt_filtered = lp_fir_filter(h, df[col].values, mode='valid', index=dt)
        else:
            col_filtered = lp_fir_filter(h, df[col].values, mode='valid')
        data[col] = col_filtered
    df_filtered = pd.DataFrame(index=dt_filtered,
                               data=data)
    # remove median
    if subtract_median:
        logger.info('Subtract median')
        # NOTE(review): the row-wise median of the *unfiltered* df is
        # subtracted here, aligned on the shorter filtered index — confirm
        # this is intentional rather than the median of df_filtered.
        df_filtered = df_filtered.sub(df.median(axis=1), axis=0)
    return df_filtered
def process_new(hdf_fname,
                source_key='B_raw',
                key='B',
                despike_data=True,
                subtract_median=True):
    """
    Like :func:`process` but built on :func:`process_df`: read the raw
    B_X/B_Y columns of *hdf_fname* from *source_key*, filter them, and
    write the result back under *key*. Returns *hdf_fname*.
    """
    logger.info('processing {}'.format(hdf_fname))
    df_raw, header = read_hdf(hdf_fname, source_key)
    # Scale by 1e-9 before filtering (presumably nT -> T; confirm against
    # the producer of the raw records).
    df = df_raw[['B_X', 'B_Y']] * 1e-9
    df_filtered = process_df(df,
                             despike_data=despike_data,
                             subtract_median=subtract_median)
    write_hdf(hdf_fname, df_filtered, key, header)
    return hdf_fname
def main(argv=None):
    """CLI entry point: preprocess each named HDF magnetometer record."""
    argv = sys.argv if argv is None else argv
    parser = ArgumentParser('Apply preprocessing steps to raw magnetometer data.',
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('hdf_fnames',
                        type=str,
                        nargs='*',
                        metavar='hdf_fname',
                        help='HDF file record to process')
    parser.add_argument('--source-key', '-s',
                        type=str,
                        default='B_raw',
                        help='')
    parser.add_argument('--key', '-k',
                        type=str,
                        default='B',
                        help='key to associate with the processed records')
    parser.add_argument('--he',
                        action='store_true',
                        help='include results in HE coordinate')
    args = parser.parse_args(argv[1:])
    for hdf_fname in args.hdf_fnames:
        process(hdf_fname,
                source_key=args.source_key,
                key=args.key,
                he=args.he)
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    sys.exit(main())
| butala/pyrsss | pyrsss/mag/process_hdf.py | process_hdf.py | py | 9,491 | python | en | code | 6 | github-code | 90 |
31398021308 | from django.urls import path, include
from . import views
# Traffic-report URL routes; all views live in this app's views module.
urlpatterns = [
    # Landing page.
    path('',
         views.index, name='index'),
    path('reports/',
         views.report_index, name='report_index'),
    path('reports/pptp/',
         views.report_index_pptp, name='report_index_pptp'),
    # PPTP report for one user over an explicit fromdate_untildate range.
    path('reports/pptp/<int:fromdate>_<int:untildate>/<str:user_ip>',
         views.pptp_manual_user, name='pptp_manual_user'),
    # Top-consumer summaries.
    path('reports/top/week/',
         views.report_top_week, name='report_top_week'),
    path('reports/top/month/',
         views.report_top_month, name='report_top_month'),
    # Manual reports over an explicit date range, optionally per user IP.
    path('reports/manual/<int:fromdate>_<int:untildate>/',
         views.report_manual, name='report_manual'),
    path('reports/manual/<int:fromdate>_<int:untildate>/<str:user_ip>/',
         views.report_manual_user, name='report_manual_user'),
]
73211777898 | #
# @lc app=leetcode id=84 lang=python
#
# [84] Largest Rectangle in Histogram
#
# @lc code=start
class Solution(object):
    def largestRectangleArea(self, heights):
        """
        :type heights: List[int]
        :rtype: int

        Monotonic-stack solution, O(n) time / O(n) space.

        The stack holds (index, height) pairs with strictly increasing
        heights, seeded with a (-1, -1) sentinel so the left boundary of
        any popped bar is always stack[-1][0]. Fixes the original's
        behavior on empty input, which returned float("-inf") instead of 0,
        and drops the commented-out O(n^2) implementation.
        """
        stack = [(-1, -1)]  # sentinel: never popped
        max_area = 0  # 0 is correct for empty input
        for index, height in enumerate(heights):
            # Pop bars taller than the current one; the current index is
            # their right boundary (exclusive).
            while stack[-1] != (-1, -1) and height <= stack[-1][1]:
                _, old_height = stack.pop()
                width = index - stack[-1][0] - 1
                max_area = max(max_area, width * old_height)
            stack.append((index, height))
        # Remaining bars extend to the end of the histogram.
        while stack[-1] != (-1, -1):
            _, old_height = stack.pop()
            width = len(heights) - stack[-1][0] - 1
            max_area = max(max_area, width * old_height)
        return max_area
| ashshekhar/leetcode-problems-solutions | 84.largest-rectangle-in-histogram.py | 84.largest-rectangle-in-histogram.py | py | 1,967 | python | en | code | 0 | github-code | 90 |
5345548844 | from logging import makeLogRecord
import pytest
from cryptologging.algorithms.hash import MD5HashEncryptor
from cryptologging.formatter import CryptoFormatter
# @pytest.mark.parametrize(
# ('value', 'value_hash', 'result'),
# [
# pytest.param(
# 'string',
# 'b45cffe084dd3d20d928bee85e7b0f21',
# 'string',
# id='string_value',
# ),
# pytest.param(
# 1,
# 'c4ca4238a0b923820dcc509a6f75849b',
# '1',
# id='int_value',
# ),
# pytest.param(
# 0.5,
# 'd310cb367d993fb6fb584b198a2fd72c',
# '0.5',
# id='float_value',
# ),
# pytest.param(
# b'hello',
# '5d41402abc4b2a76b9719d911017c592',
# '"b\'hello\'"',
# id='byte_value',
# ),
# pytest.param(
# None,
# '37a6259cc0c1dae299a7866489dff0bd',
# 'null',
# id='none_value',
# ),
# pytest.param(
# [],
# 'd751713988987e9331980363e24189ce',
# '[]',
# id='empty_list',
# ),
# pytest.param(
# ['hello', 1, 3.14, b'hello'],
# 'b79f61f48f9c9a7ec910e896275d334e',
# '["hello",1,3.14,"b\'hello\'"]',
# id='list_of_primitive_types_value',
# ),
# pytest.param(
# ('hello', 1, 3.14, b'hello'),
# 'b79f61f48f9c9a7ec910e896275d334e',
# '["hello",1,3.14,"b\'hello\'"]',
# id='tuple_of_primitive_types_value',
# ),
# pytest.param(
# (),
# 'd751713988987e9331980363e24189ce',
# '[]',
# id='empty_tuple',
# ),
# pytest.param(
# {},
# '99914b932bd37a50b983c5e7c90ae93b',
# '{}',
# id='empty_dict',
# ),
# pytest.param(
# {'key': 'value'},
# 'a7353f7cddce808de0032747a0b7be50',
# '{"key":"value"}',
# id='dict_value',
# ),
# ]
# )
# def test_formatter(value, value_hash, result):
# formatter = CryptoFormatter(encryptor=MD5HashEncryptor())
# assert formatter.format(makeLogRecord({'msg': value})) == result
#
# formatter = CryptoFormatter(encryptor=MD5HashEncryptor(), encrypt_full_record=True)
# assert formatter.format(makeLogRecord({'msg': value})) == value_hash
@pytest.mark.parametrize(
('value', 'secret_keys', 'result'),
[
pytest.param(
{'key': 'value'},
{'secret_key', },
'{"key":"value"}',
id='has_not_secret_key',
),
pytest.param(
{'secret_key': 'value'},
{'secret_key', },
'{"secret_key":"2063c1608d6e0baf80249c42e2be5804"}',
id='has_secret_key',
),
pytest.param(
{'secret_key': 'value', 'secret_key1': 'value'},
{'secret_key', 'secret_key1'},
'{"secret_key":"2063c1608d6e0baf80249c42e2be5804","secret_key1":"2063c1608d6e0baf80249c42e2be5804"}',
id='has_2_secret_keys',
),
pytest.param(
{'key': {'secret_key': 'value'}},
{'secret_key', },
'{"key":{"secret_key":"2063c1608d6e0baf80249c42e2be5804"}}',
id='secret_key_in_nested_dict',
),
pytest.param(
[{'key': {'secret_key': 'value'}}, {'key': 'value'}],
{'secret_key', },
'[{"key":{"secret_key":"2063c1608d6e0baf80249c42e2be5804"}},{"key":"value"}]',
id='list_of_dicts__one_of_them_has_secret_key',
),
pytest.param(
[{'key': [{'secret_key': 'value', 'key': 'value'}]}, {'key': 'value'}],
{'secret_key', },
'[{"key":[{"secret_key":"2063c1608d6e0baf80249c42e2be5804","key":"value"}]},{"key":"value"}]',
id='list_of_dicts__one_of_them_has_list_of_dicts_with_secret_key',
),
]
)
def test_encryption_on_secret_fields(value, secret_keys, result):
    """Values stored under a key listed in secret_keys must be replaced by
    their MD5 hash in the formatted record; all other values pass through
    unchanged (cases are supplied by the parametrize decorator above)."""
    formatter = CryptoFormatter(encryptor=MD5HashEncryptor(), secret_keys=secret_keys)
    assert formatter.format(makeLogRecord({'msg': value})) == result
| NotFunnyMan/cryptologging | tests/formatters/test_hash.py | test_hash.py | py | 4,313 | python | en | code | 0 | github-code | 90 |
8998297156 | # Problem #6: Sum square difference
# https://projecteuler.net/problem=6
#
# The sum of the squares of the first ten natural numbers is:
#
# (1^2 + 2^2 + ... + 10^2) = 385
#
# The square of the sum of the first ten natural numbers is:
#
# (1 + 2 + ... + 10)^2 = 55^2 = 3025
#
# Hence the difference between the sum of the squares of the first ten natural
# numbers and the square of the sum is: 3025 - 385 = 2640
#
# Find the difference between the sum of the squares of the first one hundred
# natural numbers and the square of the sum.
from functools import reduce
LIMIT = 100
def simple(limit=None):
    """Brute-force version using explicit reduce() folds.

    Args:
        limit: upper bound (inclusive); defaults to the module-level LIMIT.

    Returns:
        (1 + 2 + ... + limit)**2 - (1**2 + 2**2 + ... + limit**2)
    """
    n_max = LIMIT if limit is None else limit
    numbers = [n + 1 for n in range(n_max)]
    sum_of_squares = reduce(lambda s, n: s+n, map(lambda n: n*n, numbers))
    square_of_sums = pow(reduce(lambda s, n: s+n, numbers), 2)
    return square_of_sums - sum_of_squares
def short(limit=None):
    """Compact version using the built-in sum().

    Args:
        limit: upper bound (inclusive); defaults to the module-level LIMIT.

    Returns:
        (1 + 2 + ... + limit)**2 - (1**2 + 2**2 + ... + limit**2)
    """
    n_max = LIMIT if limit is None else limit
    numbers = range(1, n_max + 1)
    # Generator expression avoids materializing the squared list.
    sum_of_squares = sum(n * n for n in numbers)
    square_of_sums = pow(sum(numbers), 2)
    return square_of_sums - sum_of_squares
def direct(limit=None):
    """Closed-form version using the standard summation formulas:
    sum(1..n) = n(n+1)/2 and sum(k^2, k=1..n) = n(n+1)(2n+1)/6.

    Args:
        limit: upper bound (inclusive); defaults to the module-level LIMIT.

    Returns:
        square_of_sums - sum_of_squares; a float, because true division is
        kept for backward compatibility with the original implementation.
    """
    n = LIMIT if limit is None else limit
    sum_n = n * (n+1) / 2
    sum_of_squares = n * (n+1) * (2 * n + 1) / 6
    square_of_sums = sum_n * sum_n
    return square_of_sums - sum_of_squares
# square of sums: | sum of squares:
# n^2 + n n^2 + n |
# n*(n+1) n*(n+1) n^4 + 2n^3 + n^2 | (n^2 + n) * (2n + 1) 2n^3 + 3n^2 + n
# ------- x ------- = ---------------- | -------------------- = ---------------
# 2 2 4 | 6 6
#
# square of sums - sum of squares:
# 3n^4 + 6n^3 + 3n^2 4n^3 + 6n^2 + 2n 3n^4 + 6n^3 - 4n^3 + 3n^2 - 6n^2 - 2n
# ------------------ - ---------------- = -------------------------------------
# 12 12 12
#
# 3n^4 + 2n^3 - 3n^2 - 2n n(3n+2)(n+1)(n-1)
# = ----------------------- = -----------------
# 12 12
def optimal(limit=None):
    """Single-expression closed form derived in the comment block above:
    (square of sums) - (sum of squares) = n(3n+2)(n+1)(n-1)/12.

    Args:
        limit: upper bound (inclusive); defaults to the module-level LIMIT.

    Returns:
        The difference as a float (true division kept for compatibility).
    """
    n = LIMIT if limit is None else limit
    return n * (3*n+2) * (n+1) * (n-1) / 12
if __name__ == "__main__":
    # All four implementations must agree; for LIMIT = 100 the answer is
    # 25164150 (direct/optimal print a float because they use true division).
    print("simple: " + str(simple()))
    print("short: " + str(short()))
    print("direct: " + str(direct()))
    print("optimal: " + str(optimal()))
| ravyne/projecteuler | python/src/p0006.py | p0006.py | py | 2,167 | python | en | code | 0 | github-code | 90 |
18494997989 | N,x=input().split()
# n = item count, X = budget (both tokens were read on the previous line).
X=int(x)
n=int(N)
y=0
# Second input line: the n item costs.
A = [int(i) for i in input().split()]
A.sort()
# Greedily take the cheapest items, accumulating cost in y, until X is reached.
for z in range(n):
    y+=A[z]
    if y>=X:
        if y==X:
            # Budget spent exactly: item z is fully affordable, so z+1 items.
            print(z+1)
        else:
            # Overshot: only the first z items fit within the budget.
            print(z)
        break
else:
    # Every item fits with money left over. NOTE(review): printing n-1 here
    # (not n) looks intentional for this judge problem — confirm against the
    # original problem statement before changing.
    print(n-1)
1438695218 |
import csv
# Project Euler #42: count "triangle words" in words.txt.
# A word's value is the sum of its letter positions (A=1 .. Z=26); the word
# is a triangle word if that value equals a triangle number t_n = n(n+1)/2.
dicts = {
'A':1,
'B':2,
'C':3,
'D':4,
'E':5,
'F':6,
'G':7,
'H':8,
'I':9,
'J':10,
'K':11,
'L':12,
'M':13,
'N':14,
'O':15,
'P':16,
'Q':17,
'R':18,
'S':19,
'T':20,
'U':21,
'V':22,
'W':23,
'X':24,
'Y':25,
'Z':26
}
# Read the word list; csv.reader leaves the last parsed row bound to `row`
# after the loop, and that final row is what the rest of the script uses.
file = open('words.txt','rt')
try:
    reader = csv.reader(file)
    for row in reader:
        print (row)
finally:
    file.close()
row.sort()
#print (row)
lengs=len(row)
#print(lengs)
point=0
# One slot per word, plus one unused trailing 0 (harmless for max()).
arrays = [0 for i in range(lengs+1)]
for x in range(0,lengs):
    strlengs=len(row[x])
    #print(namelengs)
    sums = 0
    #print(row[x])
    # Sum the alphabetical value of each letter in the word.
    for y in range(0,strlengs):
        slices=row[x]
        letters=slices[y:y+1]
        #print(letters)
        #print(dicts[letters])
        sums = sums + dicts[letters]
    #print(x)
    #print(sums)
    #print(point)
    arrays[x]=sums
maxa=max(arrays)
#print(maxa)
#print(arrays)
#print (row)
# Precompute triangle numbers up to the largest word value. Note these are
# floats (1/2*n*(n+1)), but int == float comparisons below still match.
tns=[]
for n in range(1,1000):
    tn=1/2*n*(n+1)
    tns.append(tn)
    #print(tn)
    if tn > maxa:
        break
# Count how many word values are triangle numbers.
count=0
for x in range(0,lengs):
    if arrays[x] in tns:
        #print(arrays[x])
        #print(count)
        count = count + 1
print(count)
#162
| okadaakihito/ProjectEuler | Problem_42.py | Problem_42.py | py | 1,343 | python | en | code | 0 | github-code | 90 |
5359553136 | import os
import sys
import glob
import json
import scipy.signal as signal
import numpy.ma as ma
import numpy as np
import matplotlib
import matplotlib.pylab as plt
import matplotlib.dates as mdates
import datetime
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.

    The Savitzky-Golay filter removes high frequency noise from data while
    preserving the shape and features of the signal better than e.g. moving
    averages. For each point it performs a least-squares polynomial fit over
    an odd-sized window centered at the point.

    Adapted from http://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html

    Parameters
    ----------
    y : array_like, shape (N,)
        the values of the time history of the signal.
    window_size : int
        the length of the window. Must be an odd integer number.
    order : int
        the order of the polynomial used in the filtering.
        Must be less then `window_size` - 1.
    deriv: int
        the order of the derivative to compute (default = 0 means only smoothing)

    Returns
    -------
    ys : ndarray, shape (N)
        the smoothed signal (or it's n-th derivative).

    References
    ----------
    .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
       Data by Simplified Least Squares Procedures. Analytical
       Chemistry, 1964, 36 (8), pp 1627-1639.
    .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
       W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
       Cambridge University Press ISBN-13: 9780521880688
    """
    from math import factorial

    try:
        # BUG FIX: np.int was removed in NumPy >= 1.24; the plain builtin
        # int() has always been the correct spelling here.
        window_size = np.abs(int(window_size))
        order = np.abs(int(order))
    except ValueError:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order+1)
    half_window = (window_size -1) // 2
    # Precompute the least-squares coefficients from the pseudo-inverse of
    # the Vandermonde matrix. np.mat / the .A matrix attribute are
    # deprecated, so build a plain ndarray instead; pinv then returns an
    # ndarray directly.
    b = np.array([[k**i for i in order_range]
                  for k in range(-half_window, half_window+1)])
    m = np.linalg.pinv(b)[deriv] * rate**deriv * factorial(deriv)
    # Pad the signal at the extremes with values mirrored from the signal
    # itself so the convolution returns a result of the original length.
    firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
    lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve( m[::-1], y, mode='valid')
matplotlib.rcParams['font.size'] = 8
def process(f, i):
    """Render a water-area time-series plot for one GeoJSON file.

    Reads the first feature's properties from GeoJSON file *f*, smooths the
    filled water-area series with a Savitzky-Golay filter, and saves a PNG
    to time_series_images/. *i* is only used for progress printing.
    Skips silently when the output PNG already exists or the series is empty.
    """
    path = 'time_series_images/' + os.path.basename(f) + '.png'
    if os.path.exists(path):
        print('Exists, skipping ...')
        return
    j = json.loads(open(f).read())
    p = j['features'][0]['properties']
    # fr = p['water_area_filled_fraction']
    t = p['water_area_time']
    v1 = p['water_area_value']
    v2 = p['water_area_filled']
    t_jrc = p['water_area_time_jrc']
    v_jrc = p['water_area_value_jrc']
    # Fraction of each observation that was gap-filled; the mask built from
    # it is currently unused (the filtering lines below are commented out).
    filled_fr = list(zip(v1, v2))
    filled_fr = [(o[1]-o[0])/o[1] for o in filled_fr]
    mask = ma.masked_greater_equal(filled_fr, 0.5)
    # t = list(ma.masked_array(t, mask).compressed())
    # v1 = list(ma.masked_array(v1, mask).compressed())
    # v2 = list(ma.masked_array(v2, mask).compressed())
    if not len(t):
        print('Empty, skipping ...')
        return
    years = mdates.YearLocator()   # every year
    v2_filtered = savitzky_golay(np.array(v2), window_size=15, order=4)
    # v2_filtered = signal.medfilt(v2, 7)
    # v2_filtered = lowess(v2, t)
    # v2_filtered = lowess(v2, t, frac=1./50)
    # Timestamps arrive in milliseconds; convert to datetime for plotting.
    t = [datetime.datetime.fromtimestamp(tt / 1000) for tt in t]
    t_jrc = [datetime.datetime.fromtimestamp(tt_jrc / 1000) for tt_jrc in t_jrc]
    s_scale = 'Scale: {:.2f}'.format(p['scale']) + '$m$'
    s_area = 'Area: {:.2f}'.format(p['area']/(1000*1000)) + '$km^2$, ' + '{:.2f}'.format(100 * p['area']/(1000*1000)) + '$ha$'
    title = s_scale + ', ' + s_area
    fig = plt.figure(figsize=(18,12))
    ax = fig.add_subplot(111)
    ax.xaxis.set_major_locator(years)
    # fig.autofmt_xdate()
    ax.set_xlim([datetime.date(1985, 1, 1), datetime.date(2019, 1, 1)])
    ax.grid(color='k', linestyle='-', linewidth=1, alpha=0.2)
    plt.title(title)
    plt.xticks(rotation=90)
    # Raw JRC (red) and raw values (blue) drawn faintly; filled values (black)
    # drawn prominently with the smoothed curve overlaid.
    ax.plot(t_jrc, v_jrc, marker='.', c='r', markersize=2, linewidth=0, alpha=0.05)
    ax.plot(t, v1, marker='.', c='b', markersize=2, linewidth=0, alpha=0.05)
    ax.plot(t, v2, marker='.', c='k', markersize=3, linewidth=0, alpha=0.8)
    # for SG
    if len(t) != len(v2_filtered):
        print('Bad, shapes are not equal, skipping line plotting ...')
    else:
        ax.plot(t, v2_filtered, marker='.', c='k', markersize=0, linewidth=2, alpha=0.1)
    # for LOWESS
    # v2_filtered_t = [datetime.datetime.fromtimestamp(t / 1000) for t in v2_filtered[:, 0]]
    # ax.plot(v2_filtered_t, v2_filtered[:, 1], marker='.', c='k', markersize=0, linewidth=2, alpha=0.1)
    path = 'time_series_images/' + os.path.basename(f) + '.png'
    print(str(i) + ' ' + path)
    plt.tight_layout()
    plt.savefig(path, dpi=150)
    plt.close()
    # ========================== JRC
    # fig = plt.figure(figsize=(11, 4))
    # ax = fig.add_subplot(111)
    # ax.xaxis.set_major_locator(years)
    # ax.set_xlim([datetime.date(1985, 1, 1), datetime.date(2019, 1, 1)])
    # ax.grid(color='k', linestyle='-', linewidth=1, alpha=0.2)
    # plt.title(title)
    # plt.xticks(rotation=90)
    # ax.plot(t_jrc, v_jrc, marker='.', c='r', markersize=2, linewidth=0, alpha=0.8)
    # ax.plot(t, v1, marker='.', c='b', markersize=2, linewidth=0, alpha=0.05)
    # ax.plot(t, v2, marker='.', c='k', markersize=3, linewidth=0, alpha=0.05)
    # for SG
    # if len(t) != len(v2_filtered):
    #     print('Bad, shapes are not equal, skipping line plotting ...')
    # else:
    #     ax.plot(t, v2_filtered, marker='.', c='k', markersize=0, linewidth=2, alpha=0.1)
    # path = 'time_series_images/' + os.path.basename(f) + '-jrc.png'
    # print(str(i) + ' ' + path)
    # plt.tight_layout()
    # plt.savefig(path, dpi=150)
    # plt.close()
# Process every GeoJSON in time_series/; bump `offset` to resume a partial run.
offset = 0
for (i, f) in enumerate(glob.glob('time_series/*.geojson')[offset:]):
    print('Processing ' + str(i) + ' ...')
    process(f, i + offset)
| openearth/eo-reservoir | time_series_scripts/tasks_generate_thumbs.py | tasks_generate_thumbs.py | py | 7,073 | python | en | code | 0 | github-code | 90 |
14276157930 |
def heart_rate_calculation(resting_hr=None, age=None):
    """Print a Karvonen target-heart-rate table for intensities 55%..95%.

    Target HR = ((220 - age) - resting_hr) * intensity + resting_hr.

    Args:
        resting_hr: resting heart rate in bpm; prompted on stdin when None
                    (preserves the original interactive behavior).
        age:        age in years; prompted on stdin when None.
    """
    if resting_hr is None:
        resting_hr = int(input('RestingHR:'))
    if age is None:
        age = int(input('Age:'))
    print("Intensity| Rate")
    print("---------|------")
    for intensity in range(55, 100, 5):
        target_heart_rate = ((220 - age) - resting_hr) * intensity / 100 + resting_hr
        print('{}% |{}bpm'.format(intensity, int(target_heart_rate)))
72571934697 | import logging
from logging.handlers import SMTPHandler, RotatingFileHandler
import os
from flask import Flask, request, current_app
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from config import Config
from flask_wtf.csrf import CSRFProtect
import sqlite3 as sql
import json
import datetime
# Flask extension singletons, created unbound and attached to the app inside
# create_app() (application-factory pattern).
db = SQLAlchemy()
migrate = Migrate()
login = LoginManager()
login.login_view = 'auth.login'
login.login_message = 'Please log in to access this page.'
mail = Mail()
bootstrap = Bootstrap()
csrf = CSRFProtect()
def create_app(config_class=Config):
    """Application factory: build and configure the Flask app.

    Initializes all extensions, registers the errors/auth/main blueprints,
    sets up email + rotating-file logging for production runs, and seeds the
    SQLite Protocols database from data/init_data.json when missing/empty.

    Args:
        config_class: configuration object to load (defaults to Config).

    Returns:
        The fully configured Flask application instance.
    """
    app = Flask(__name__)
    app.config.from_object(config_class)
    csrf.init_app(app)
    app.jinja_env.add_extension('jinja2.ext.loopcontrols')
    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    mail.init_app(app)
    bootstrap.init_app(app)
    # Blueprints are imported here (not at module top) to avoid circular
    # imports between this package and the blueprint modules.
    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)
    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix='/auth')
    from app.main import bp as main_bp
    app.register_blueprint(main_bp)
    from app.main.utils import get_json_data
    # Production-only logging: errors by email (when a mail server is
    # configured) plus a rotating file log.
    if not app.debug and not app.testing:
        if app.config['MAIL_SERVER']:
            auth = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                auth = (app.config['MAIL_USERNAME'],
                        app.config['MAIL_PASSWORD'])
            secure = None
            if app.config['MAIL_USE_TLS']:
                secure = ()
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr='no-reply@' + app.config['MAIL_SERVER'],
                toaddrs=app.config['ADMINS'], subject='Microblog Failure',
                credentials=auth, secure=secure)
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)
        if not os.path.exists('logs'):
            os.mkdir('logs')
        file_handler = RotatingFileHandler('logs/microblog.log',
                                           maxBytes=10240, backupCount=10)
        file_handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s '
            '[in %(pathname)s:%(lineno)d]'))
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)
        app.logger.setLevel(logging.INFO)
        app.logger.info('Protocols startup')
    # Create protocols database if it does not exist
    with sql.connect(app.config.get('PROTOCOLS_DB')) as con:
        cur = con.cursor()
        sql_query = "SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';".format(table_name='Protocols')
        cur.execute(sql_query)
        rows = cur.fetchall()
        # if table does not exist or is empty, (create it and) populate it with init_data.json
        if len(rows) == 0 or get_json_data(app) is None:
            cur.execute('CREATE TABLE IF NOT EXISTS Protocols (version_id INTEGER PRIMARY KEY, user, timestamp, JSON_text TEXT)')
            json_data_fn = os.path.join(app.config.get('ROOT_DIR'), 'data', 'init_data.json')
            print(json_data_fn)
            if not os.path.exists(json_data_fn):
                print('Unable to populate db with initial json...')
            else:
                with open(json_data_fn, 'r') as f:
                    json_data = json.load(f)
                json_str = json.dumps(json_data)
                now = str(datetime.datetime.now())
                user = 'Original Data'
                cur.execute("INSERT INTO Protocols (user, timestamp, JSON_text) VALUES (?,?,?)",
                            (user, now, json_str,))
                con.commit()
    return app
from app import models | eileenjwang/protocols | container/app/__init__.py | __init__.py | py | 3,902 | python | en | code | 1 | github-code | 90 |
def trans(l):
    """Transpose a 2-D list: rows become columns (each row returned as a list)."""
    return list(map(list, zip(*l)))
from itertools import product
import copy
# Read an h x w grid of '#'/'.' characters and count the ways to paint some
# subset of rows and columns so exactly k '#' cells remain unpainted.
h, w, k = map(int, input().split())
c = []
for _ in range(h):
    # NOTE(review): the comprehension variable shadows the outer list `c`;
    # harmless here, but worth renaming if this code is revisited.
    c.append([c for c in input()])
# All row-selection and column-selection bit patterns (1 = paint over).
A = [i for i in product([1,0], repeat=h)]
B = [i for i in product([1,0], repeat=w)]
ans = 0
for a in A:
    # Shallow copy is enough: painted rows are replaced, never mutated.
    temp1 = copy.copy(c)
    for i, x in enumerate(a):
        if x == 1:
            temp1[i] = ["."] * w
    for b in B:
        # Transpose so columns can be blanked the same way as rows.
        temp2 = trans(temp1)
        for i, x in enumerate(b):
            if x == 1:
                temp2[i] = ["."] * h
        cnt = 0
        for t in temp2:
            cnt += t.count("#")
        if cnt == k:
            ans += 1
print(ans)
30932963348 | from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..types import UNSET, Unset
T = TypeVar("T", bound="CPF")
@attr.s(auto_attribs=True)
class CPF:
    """
    Attributes:
        ni (Union[Unset, str]): Número de Inscrição do contribuinte Example: 99999999999.
        nome (Union[Unset, str]): Nome do contribuinte Example: PESSOA FISICA DA SILVA.
        situacao (Union[Unset, Any]):
        nascimento (Union[Unset, str]): Data de nascimento do contribuinte Example: 31011800.
        obito (Union[Unset, str]): Ano de óbito do contribuinte Example: 1800.
    """

    ni: Union[Unset, str] = UNSET
    nome: Union[Unset, str] = UNSET
    situacao: Union[Unset, Any] = UNSET
    nascimento: Union[Unset, str] = UNSET
    obito: Union[Unset, str] = UNSET
    # Catch-all for JSON keys not declared above (round-trip preservation).
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, omitting UNSET fields and merging in
        any additional (unknown) properties."""
        ni = self.ni
        nome = self.nome
        situacao = self.situacao
        nascimento = self.nascimento
        obito = self.obito

        field_dict: Dict[str, Any] = {}
        field_dict.update(self.additional_properties)
        field_dict.update({})
        if ni is not UNSET:
            field_dict["ni"] = ni
        if nome is not UNSET:
            field_dict["nome"] = nome
        if situacao is not UNSET:
            field_dict["situacao"] = situacao
        if nascimento is not UNSET:
            field_dict["nascimento"] = nascimento
        if obito is not UNSET:
            field_dict["obito"] = obito

        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build a CPF from a JSON-style dict; keys not recognized are kept
        in additional_properties so to_dict() can round-trip them."""
        d = src_dict.copy()
        ni = d.pop("ni", UNSET)

        nome = d.pop("nome", UNSET)

        situacao = d.pop("situacao", UNSET)

        nascimento = d.pop("nascimento", UNSET)

        obito = d.pop("obito", UNSET)

        cpf = cls(
            ni=ni,
            nome=nome,
            situacao=situacao,
            nascimento=nascimento,
            obito=obito,
        )

        cpf.additional_properties = d
        return cpf

    @property
    def additional_keys(self) -> List[str]:
        """Names of the extra (undeclared) properties currently stored."""
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
| paulo-raca/python-serpro | serpro/consulta_cpf/models/cpf.py | cpf.py | py | 2,603 | python | pt | code | 0 | github-code | 90 |
74763835176 | import timeit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.layers import BatchNormalization
from keras.models import Sequential
from keras.utils import plot_model
from sklearn import preprocessing
import oandapyV20
from oandapyV20 import API
import oandapyV20.endpoints.pricing as pricing
import oandapyV20.endpoints.instruments as instruments
import configparser
from matplotlib import pyplot as plt
from matplotlib.finance import candlestick_ohlc
import matplotlib.dates as mdates
from matplotlib import gridspec
from matplotlib.dates import DateFormatter
from stockstats import StockDataFrame
class Stock:
    """Tracks a cash balance plus a single holding for the backtest policy.

    Attributes:
        balance: cash on hand.
        price:   last known unit price.
        hold:    number of units held (float, because np.floor is used).
        total:   mark-to-market wealth, balance + hold * price.
    """

    def __init__(self, balance=10000, price=1.0, hold=0):
        self.balance = balance
        self.price = price
        self.hold = hold
        self.total = balance + price * hold

    def update(self, price=0.0):
        """Record a new market price and re-mark total wealth."""
        self.price = price
        self.total = self.balance + self.hold * price

    def buy(self):
        """Buy as many whole units as the cash balance allows at the current price."""
        price = self.price
        inc_hold = np.floor(self.balance / price)
        self.hold += inc_hold
        self.balance -= inc_hold * price

    def sell(self):
        """Liquidate the entire holding at the current price."""
        price = self.price
        hold = self.hold
        self.balance += hold * price
        self.hold = 0

    def __str__(self):
        # BUG FIX: the original format string had five placeholders
        # (including a "code = %s" field) but only four arguments, so
        # str(stock) raised TypeError. There is no 'code' attribute on this
        # class, so that field is dropped.
        return ('Trading:\nbalance = %d\nprice = %f\nhold = %d\ntotal = %d'
                % (self.balance, self.price, self.hold, self.total))
# Wall-clock reference used by the script footer to report total runtime.
start_time = timeit.default_timer()
def train_test_split(data, SEQ_LENGTH: int = 25, test_prop: float = 0.3):
    """Build sliding-window LSTM samples and split them into train/test sets.

    Assumes `data` is a pandas DataFrame with 'close' and 'Open' columns
    (accessed via attribute below) and that the first column is excluded
    from the predictors — TODO confirm against the CSV schema.

    Args:
        data:       time-indexed DataFrame of features.
        SEQ_LENGTH: window length per sample.
        test_prop:  fraction of samples reserved for testing (taken from the end).

    Returns:
        (xtrain, xtest, ytrain, ytest, ytest_open) where x* have shape
        (samples, SEQ_LENGTH, num_attr) and y* are next-step close/Open prices.
    """
    data = data.sort_index()
    ntrain = int(len(data) *(1-test_prop))
    # All columns except the first are used as predictors.
    predictors = data.columns[1:]
    print(predictors)
    data_pred = data[predictors] #/norms
    num_attr = data_pred.shape[1]
    result = np.empty((len(data) - SEQ_LENGTH - 1, SEQ_LENGTH, num_attr))
    y = np.empty(len(data) - SEQ_LENGTH - 1)
    yopen = np.empty(len(data) - SEQ_LENGTH - 1)
    # Each sample is a SEQ_LENGTH window; the target is the close (and Open)
    # one step beyond the window's end.
    for index in range(len(data) - SEQ_LENGTH - 1):
        result[index, :, :] = data_pred[index: index + SEQ_LENGTH]
        y[index] = data.iloc[index + SEQ_LENGTH + 1].close
        yopen[index] = data.iloc[index + SEQ_LENGTH + 1].Open
    xtrain = result[:ntrain, :, :]
    ytrain = y[:ntrain]
    xtest = result[ntrain:, :, :]
    ytest = y[ntrain:]
    ytest_open = yopen[ntrain:]
    return xtrain, xtest, ytrain, ytest, ytest_open
def train_model(xtrain, ytrain, SEQ_LENGTH: int = 25, N_HIDDEN: int = 256):
    """Build, train and return a stateful 2-layer LSTM price regressor.

    The fixed batch size of 5 in batch_input_shape and fit() must match;
    stateful LSTMs require len(xtrain) to be divisible by the batch size —
    TODO confirm the input length satisfies this.

    Args:
        xtrain:     samples of shape (n, SEQ_LENGTH, num_attr).
        ytrain:     next-step target prices.
        SEQ_LENGTH: window length (must match train_test_split).
        N_HIDDEN:   LSTM units per layer.

    Returns:
        The fitted keras Sequential model (also writes model2.png diagram).
    """
    num_attr = xtrain.shape[2]
    model = Sequential()
    model.add(LSTM(N_HIDDEN, return_sequences=True, stateful=True, activation='tanh', batch_input_shape=(5, SEQ_LENGTH, num_attr)))
    #model.add(BatchNormalization())
    #model.add(LSTM(N_HIDDEN, return_sequences=True, stateful=True, activation='hard_sigmoid'))
    model.add(LSTM(N_HIDDEN, return_sequences=False, stateful=True, activation='hard_sigmoid'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    # Single linear output: predicted next close price.
    model.add(Dense(1, activation='linear'))
    model.compile(loss="mean_squared_error", optimizer='adam', metrics=['accuracy']) ## optimizer = 'rmsprop'
    model.fit(xtrain, ytrain, batch_size=5, epochs=1, validation_split=0.1, verbose=1)
    model.summary()
    plot_model(model, to_file='model2.png', show_layer_names=True, show_shapes=True)
    return model
def predict(model, xtest):
    """Run batched inference on xtest with the trained stateful model.

    The batch size of 5 must match the model's batch_input_shape.
    """
    predictions = model.predict(xtest, batch_size=5)
    return predictions
def policy(xtest, ytest, ytest_open, model):
    """Backtest a simple trading rule on the test window and plot wealth.

    At each step the stock is marked at the open price, then bought or sold
    depending on the model's prediction, then re-marked at the close.
    NOTE(review): buying when the new prediction is *lower* than the previous
    one looks inverted for a momentum rule — confirm the intended strategy.

    Args:
        xtest:      test samples, shape (n, SEQ_LENGTH, num_attr).
        ytest:      true close prices per step.
        ytest_open: true open prices per step.
        model:      trained model exposing predict().

    Returns:
        List of total wealth after each step (also shown as a plot).
    """
    ypred = model.predict(xtest)
    xnow = xtest[0]
    # Initial price is taken from the last row, third column of the first
    # sample window — presumably the close feature; verify column order.
    price = xnow[-1,2]
    stock = Stock(price=price)
    pred_price = ypred[0,0]
    totals = [stock.total]
    for i in range(1,len(xtest)):
        price_open = ytest_open[i]
        price_close = ytest[i]
        stock.update(price=price_open)
        pred_price_now = ypred[i,0]
        if pred_price_now < pred_price:
            stock.buy()
        else:
            stock.sell()
        pred_price = pred_price_now
        stock.update(price=price_close)
        totals.append(stock.total)
    plt.figure(figsize=(18,12))
    plt.plot(totals)
    plt.title('Wealth curve')
    plt.show()
    return totals
# Script body: load the FX indicator CSV, train the LSTM, plot train/test
# fits, report runtime, and backtest the trading policy.
data = pd.read_csv('EURUSD_indicators4.csv')
data = data.set_index('time')
#scaler = preprocessing.StandardScaler()
#xdata = scaler.fit_transform(data)
df = pd.DataFrame(data)
print('Data shape:', data.shape)
#print('XData shape:', xdata.shape)
print(data.head())
xtrain, xtest, ytrain, ytest, ytest_open = train_test_split(data)
print('xtrain.shape',xtrain.shape)
print('xtest.shape', xtest.shape)
print('ytrain.shape', ytrain.shape)
print('ytest.shape', ytest.shape)
model = train_model(xtrain,ytrain)
# In-sample fit: predicted vs. true training targets.
predicted_tr = model.predict(xtrain)
plot_predicted_tr = pd.DataFrame(predicted_tr)
print(plot_predicted_tr.head())
plt.figure(figsize=(18,12))
plt.plot(ytrain, label='true values')
plt.plot(predicted_tr, label='predicted values')
plt.legend()
plt.title('train data')
plt.show()
# Out-of-sample fit: predicted vs. true test targets.
predicted_test = model.predict(xtest)
plt.figure(figsize=(18,12))
plt.plot(ytest, label='true values')
plt.plot(predicted_test, label='predicted values')
plt.legend()
plt.title('test data')
plt.show()
elapsed = np.round(timeit.default_timer() - start_time, decimals = 2)
print('Completed in: ', elapsed)
wealth = policy(xtest, ytest, ytest_open, model)
12334938187 | import RPi.GPIO as GPIO
import time
import sys
GPIO.setmode(GPIO.BCM)
class Button:
    """Polls 8 push buttons wired to Raspberry Pi GPIO (BCM numbering).

    Buttons are active-low: inputs use internal pull-ups, so a press reads
    False. Pin setup runs once at class-definition time.
    """
    # BCM pin number for each button slot 0..7.
    but = [0]*8
    but[0] = 21
    but[1] = 20
    but[2] = 16
    but[3] = 12
    but[4] = 7
    but[5] = 24
    but[6] = 23
    but[7] = 18
    # Configure every pin as a pulled-up input (runs at class creation).
    for i in range(8):
        GPIO.setup(but[i], GPIO.IN, pull_up_down=GPIO.PUD_UP)#Button to GPIO23
    def getPressedId(self):
        """Block until any button is pressed and return its index (0-7).

        Scans all 8 inputs, sleeping 0.2 s between full scans to avoid
        busy-waiting.
        """
        while True:
            for i in range(8):
                button_state = GPIO.input(self.but[i])
                if(button_state == False):
                    return i
            time.sleep(0.2)
| parvindar/E-Rickshaw | proStand/modules/button/Button.py | Button.py | py | 570 | python | en | code | 0 | github-code | 90 |
7711506066 | from django.test import TestCase
from .models import Product
# Create your tests here.
class TestProductViews(TestCase):
    """View tests for the products app (list page and detail page)."""
    def test_all_products_view(self):
        """The product list page renders with the expected template."""
        page = self.client.get('/products/')
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'allproducts.html')
    def test_single_product_view(self):
        """A product detail page renders its template and shows the title."""
        product = Product.objects.create(title='Black Tea', category='Black Tea', description='Long description', short_description='Short description')
        page = self.client.get('/products/{0}/'.format(product.id))
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'product.html')
        self.assertContains(page, '<h1 class="text-center">Black Tea</h1>')
| Sarani1612/truebrew | products/test_views.py | test_views.py | py | 755 | python | en | code | 0 | github-code | 90 |
19048193714 | import math
import pygame
import sys
import random
import socket
import threading
from ball import Ball
from player import Player
class Game:
    """Networked two-player Pong client.

    Connects to a game server over TCP; a background thread receives the
    remote paddle position, ball position and scores, while the main loop
    handles local input, sends this client's paddle position, and renders.
    """
    def __init__(self):
        """Set up the window, the TCP socket (server ip/port prompted on
        stdin), both paddles, the ball and the score state."""
        self.screen = pygame.display.set_mode((900,500))
        pygame.display.set_caption("Ultimate Pong: 2P")
        self.run = True
        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.ip= input("server ip: ")
        self.port= int(input("port: "))
        self.player1_x, self.player1_y = 20, 250
        self.player2_x, self.player2_y = 860, 250
        self.player_size = [20, 80]
        self.speed_y_1, self.speed_y_2 = 0, 0
        self.player1 = Player(self.player1_x, self.player1_y, self.player_size)
        self.player2 = Player(self.player2_x, self.player2_y, self.player_size)
        self.ball_direction = [-1, 1]
        self.ball = Ball(450, 250, 10, random.choice(self.ball_direction))
        self.score_1, self.score_2 = 0, 0
        # Latest state received from the server (set by get_data thread).
        self.ball_x, self.ball_y = None, None
        self.player1_position = 250
        self.recv_data = False
        # Playfield bounds used to clamp paddles and ball.
        self.rect = pygame.Rect(0, 0, 900, 500)
    def play(self):
        """Connect, start the receiver thread and run the 30 FPS game loop."""
        self.client.connect((self.ip, self.port))
        self.new_thread(self.get_data)
        while self.run:
            # Local input: A/Z move this client's paddle (player 2).
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    sys.exit()
                ###
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_a:
                        self.speed_y_2 = -10
                    if event.key == pygame.K_z:
                        self.speed_y_2 = 10
                ###
                if event.type == pygame.KEYUP:
                    self.speed_y_2 = 0
            ###
            self.player1.rect.clamp_ip(self.rect)
            self.player2.move(self.speed_y_2)
            self.player2.rect.clamp_ip(self.rect)
            self.ball.rect.clamp_ip(self.rect)
            # Apply the most recent server state once the first packet arrived.
            if self.recv_data:
                self.ball.rect.x = self.ball_x
                self.ball.rect.y = self.ball_y
                self.player1.rect.y = self.player1_position
                self.player1.move(self.speed_y_1)
                self.player2.move(self.speed_y_2)
            # Report this client's paddle Y to the server each frame.
            position_y_player_2 = f"{ self.player2.rect.y }"
            self.client.send(position_y_player_2.encode('utf-8'))
            self.recv_data = True
            self.screen.fill((50,50,50))
            self.message('big', f"Ultimate Pong", [320 , 50, 20 ,20], (255, 255, 255))
            self.message('big', f"{ self.score_1 }", [300 , 200, 50 ,50], (255, 255, 255))
            self.message('big', f"{ self.score_2 }", [585 , 200, 50 ,50], (255, 255, 255))
            self.player1.show(self.screen)
            self.player2.show(self.screen)
            self.ball.show(self.screen)
            pygame.display.flip()
            clock = pygame.time.Clock()
            clock.tick(30)
    def message(self, font, msg, msg_rect, color):
        """Render msg onto the screen at msg_rect using a named font size
        ('small' = 20pt, 'medium' = 30pt, 'big' = 40pt)."""
        if font == 'small':
            font = pygame.font.Font('fonts/GamePlayed-vYL7.ttf', 20)
        if font == 'medium':
            font = pygame.font.Font('fonts/GamePlayed-vYL7.ttf', 30)
        if font == 'big':
            font = pygame.font.Font('fonts/GamePlayed-vYL7.ttf', 40)
        msg = font.render(msg, True, color)
        self.screen.blit(msg, msg_rect)
    def new_thread(self, target):
        """Start target on a daemon thread (dies with the main process)."""
        thread = threading.Thread(target=target)
        thread.daemon = True
        thread.start()
    def get_data(self):
        """Receiver loop: parse comma-separated server packets of the form
        player1_y, ball_x, ball_y, score_1, score_2 and update shared state."""
        while True:
            data_received = self.client.recv(128).decode('utf-8')
            data_received = data_received.split(',')
            # print(data_received)
            self.player1_position = int(data_received[0])
            self.ball_x = int(data_received[1])
            self.ball_y = int(data_received[2])
            self.score_1, self.score_2 = int(data_received[3]), int(data_received[4])
if __name__ == "__main__":
    # Initialize pygame, run the client game loop, then shut pygame down.
    pygame.init()
    g = Game()
    g.play()
    pygame.quit()
833286354 | from os.path import dirname, join
import pytest
import mosaik_csv
DATA_FILE = join(dirname(__file__), 'data', 'test.csv')
def test_init_create():
    """init() reports the expected model meta and create() returns one
    entity dict per requested instance."""
    sim = mosaik_csv.CSV()
    meta = sim.init('sid', 1., sim_start='2014-01-01 00:00:00',
                    datafile=DATA_FILE)
    assert meta['models'] == {
        'ModelName': {
            'public': True,
            'params': [],
            'attrs': ['P', 'Q'],
        },
    }
    entities = sim.create(2, 'ModelName')
    assert entities == [
        {'eid': 'ModelName_%s' % i, 'type': 'ModelName', 'rel': []}
        for i in range(2)
    ]
def test_init_create_errors():
    """init()/create() raise on a missing data file and an unknown model name."""
    sim = mosaik_csv.CSV()
    # Profile file not found
    pytest.raises(FileNotFoundError, sim.init, 'sid', 1.,
                  sim_start='2014-01-01 00:00:00', datafile='spam')
    # Invalid model name
    sim.modelname = 'foo'
    pytest.raises(ValueError, sim.create, 1, 'bar')
@pytest.mark.parametrize('start_date', [
'2013-01-01 00:00:00',
'2015-01-01 00:00:00',
])
def test_start_date_out_of_range(start_date):
    """init() rejects sim_start dates outside the CSV's time range
    (before-the-start and after-the-end cases come from parametrize)."""
    sim = mosaik_csv.CSV()
    pytest.raises(ValueError, sim.init, 'sid', 1., sim_start=start_date,
                  datafile=DATA_FILE)
@pytest.mark.parametrize('time_resolution, next_step', [
(1., 60),
(2., 30),
(.5, 120),
])
def test_step_get_data(time_resolution, next_step):
    """step() returns the next step scaled by time_resolution and get_data()
    returns the CSV row for the current step for every entity."""
    sim = mosaik_csv.CSV()
    sim.init('sid', time_resolution, sim_start='2014-01-01 00:00:00',
             datafile=DATA_FILE)
    sim.create(2, 'ModelName')
    ret = sim.step(0, {}, 60)
    assert ret == next_step
    data = sim.get_data({'ModelName_0': ['P', 'Q'],
                         'ModelName_1': ['P', 'Q']})
    assert data == {
        'ModelName_0': {'P': 0, 'Q': 1},
        'ModelName_1': {'P': 0, 'Q': 1},
    }
    sim.step(next_step, {}, 120)
    data = sim.get_data({'ModelName_0': ['P', 'Q'],
                         'ModelName_1': ['P', 'Q']})
    assert data == {
        'ModelName_0': {'P': 1, 'Q': 2},
        'ModelName_1': {'P': 1, 'Q': 2},
    }
def test_step_with_offset():
    """A sim_start inside the CSV skips earlier rows, and stepping past the
    end of the data raises IndexError."""
    sim = mosaik_csv.CSV()
    sim.init('sid', 1., sim_start='2014-01-01 00:03:00', datafile=DATA_FILE)
    sim.create(2, 'ModelName')
    sim.step(0, {}, 60)
    data = sim.get_data({'ModelName_0': ['P', 'Q'],
                         'ModelName_1': ['P', 'Q']})
    assert data == {
        'ModelName_0': {'P': 3, 'Q': 4},
        'ModelName_1': {'P': 3, 'Q': 4},
    }
    pytest.raises(IndexError, sim.step, 60, {}, 120)
| RhysM95/SIT723-RESEARCH-PROJECT | mosaik-csv/tests/test_mosaik.py | test_mosaik.py | py | 2,605 | python | en | code | 0 | github-code | 90 |
38625319623 | #-------------------------------------------------------------------------------
# Name: NCSS_LabDatabase_Geoprocessing_Service.py
# Purpose:
#
# Author: Adolfo.Diaz
# e-mail: adolfo.diaz@usda.gov
# phone: 608.662.4422 ext. 216
#
# Author: Jerry.Monhaupt
# e-mail: jerry.monhaput@usda.gov
#
# Created: 9/16/2021
#-------------------------------------------------------------------------------
## ===================================================================================
def AddMsgAndPrint(msg, severity=0):
    """Print msg to the console, append it to the log file at textFilePath,
    and forward it to the arcpy geoprocessor message queue.

    Args:
        msg:      message text.
        severity: 0 = message, 1 = warning, 2 = error (arcpy severity).
    """
    print(msg)
    # Append to the log file; best-effort as in the original — a failure to
    # open/write the log must never interrupt the geoprocessing run.
    try:
        # BUG FIX: the original called 'f.close' without parentheses, so the
        # handle was never explicitly closed; a with-block closes it reliably.
        with open(textFilePath, 'a+') as f:
            f.write(msg + " \n")
    except Exception:
        pass
    # Forward to arcpy; also best-effort so the function still works when
    # run outside an ArcGIS session.
    try:
        if severity == 0:
            arcpy.AddMessage(msg)
        elif severity == 1:
            arcpy.AddWarning(msg)
        elif severity == 2:
            arcpy.AddError("\n" + msg)
    except Exception:
        pass
## ===================================================================================
def errorMsg():
    """Report the currently-handled exception via AddMsgAndPrint at error severity."""
    try:
        err_type, err_value, err_tb = sys.exc_info()
        formatted = traceback.format_exception(err_type, err_value, err_tb)
        # Second entry carries the offending frame; last entry the message.
        AddMsgAndPrint("\t" + formatted[1] + "\n\t" + formatted[-1], 2)
    except:
        # Never let the error reporter itself crash the tool.
        AddMsgAndPrint("Unhandled error in errorMsg method", 2)
## ================================================================================================================
def splitThousands(someNumber):
    """Return someNumber as a string with comma thousands separators.

    Works by reversing the string form, inserting a comma after every group
    of three digits that is followed by another digit, and reversing back.
    On any failure the input is returned unchanged (after logging).
    """
    try:
        reversed_digits = str(someNumber)[::-1]
        grouped = re.sub(r'(\d{3})(?=\d)', r'\1,', reversed_digits)
        return grouped[::-1]
    except:
        errorMsg()
        return someNumber
## ================================================================================================================
def tic():
    """Capture and return the current wall-clock time (seconds since epoch)."""
    now = time.time()
    return now
## ================================================================================================================
def toc(_start_time):
    """ Returns the total time by subtracting the start time - finish time"""
    try:
        elapsed = round(time.time() - _start_time)
        minutes, seconds = divmod(elapsed, 60)
        hours, minutes = divmod(minutes, 60)
        # Report only the units that are non-zero, largest first.
        if hours:
            return '{} hour(s): {} minute(s): {} second(s)'.format(int(hours), int(minutes), int(seconds))
        if minutes:
            return '{} minute(s): {} second(s)'.format(int(minutes), int(seconds))
        return '{} second(s)'.format(int(seconds))
    except:
        errorMsg()
# =========================================== Main Body ==========================================
# Import modules
import sys, re, os, traceback, arcpy, time, sqlite3
from arcpy import env
if __name__ == '__main__':

    try:
        startTime = tic()

        # Source SDE connection and output locations.
        DBpath = r'E:\Temp\10.203.23.72, 26022.sde'
        #DBpath = r'E:\Pedons\NCSS_Characterization_Database\NewSchema\NCSS_Characterization_Database_newSchema_20200114.gdb'
        outFolder = r'E:\Pedons\KSSL_for_NASIS_Morphological'
        outName = r'KSSL_Test_2'
        textFilePath = outFolder + os.sep + "KSSL_Geoprocessing_Service_logFile.txt"

        env.workspace = DBpath

        # lab_webmap is not captured by the wildcard, so append it explicitly.
        labDataTables = arcpy.ListTables("*.lab_*")
        labDataTables.append('sdmONLINE.dbo.lab_webmap')

        # os.path.join replaces the old f"{outFolder}\{outName}" pattern so the
        # path separator is always correct.
        outFGDB = os.path.join(outFolder, outName + '.gdb')
        outGPKG = os.path.join(outFolder, outName + '.gpkg')
        outSQLite = os.path.join(outFolder, outName + '.sqlite')

        # NOTE: a large block of commented-out code that created the
        # FGDB/GPKG/SQLite outputs, copied the lab tables, added attribute
        # indices and built the relationship classes was removed here.
        # Recover it from source control if that stage must be re-enabled.
        # (labDataTables above is kept because that stage consumed it.)

        # ------------------------------------------------ Adjust Column Names
        # Rename SQLite/GPKG columns so they match the field names ArcGIS
        # assigned when the tables were loaded into the file geodatabase.
        env.workspace = outFGDB
        FGDBdataTables = arcpy.ListTables("lab_*")
        FGDBdataTables.append(arcpy.ListFeatureClasses("lab_*")[0])

        sqliteConnection = sqlite3.connect(outSQLite)
        sqliteCursor = sqliteConnection.cursor()
        gpkgConnection = sqlite3.connect(outGPKG)
        gpkgCursor = gpkgConnection.cursor()

        for tbl in FGDBdataTables:

            # FGDB table fields: the reference names and ordering.
            gdbFlds = [f.name for f in arcpy.ListFields(os.path.join(outFGDB, tbl))]

            # SQLITE table fields (cursor.description carries column names).
            sqliteCursor = sqliteConnection.execute(f"select * from {tbl}")
            sqlFlds = [description[0] for description in sqliteCursor.description]

            # GPKG table fields
            gpkgCursor = gpkgConnection.execute(f"select * from {tbl}")
            gpkgFlds = [description[0] for description in gpkgCursor.description]

            for fldName in gdbFlds:
                sqliteFld = sqlFlds[gdbFlds.index(fldName)]
                try:
                    if not fldName == sqliteFld:
                        sqliteCursor.execute(f"ALTER TABLE {tbl} RENAME COLUMN {sqliteFld} TO {fldName}")
                        AddMsgAndPrint(f"Renamed SQLite - {tbl} - {sqliteFld}")

                    gpkgFld = gpkgFlds[gdbFlds.index(fldName)]
                    if not fldName == gpkgFld:
                        gpkgCursor.execute(f"ALTER TABLE {tbl} RENAME COLUMN {gpkgFld} TO {fldName}")
                        AddMsgAndPrint(f"Renamed GPKG - {tbl} - {gpkgFld}")
                except:
                    # Best effort: a failed rename should not stop the batch,
                    # but it must at least be reported (was silently passed).
                    errorMsg()

        # Defensive: commit any open transaction before closing so the
        # ALTER TABLE renames cannot be rolled back on close.
        sqliteConnection.commit()
        gpkgConnection.commit()
        sqliteConnection.close()
        gpkgConnection.close()

        stopTime = toc(startTime)
        AddMsgAndPrint(f"Total Processing Time: {stopTime}")

    except:
        errorMsg()
| ncss-tech/NCSS-Pedons | NCSS_LabDatabase_Geoprocessing_Service.py | NCSS_LabDatabase_Geoprocessing_Service.py | py | 15,261 | python | en | code | 2 | github-code | 90 |
12397596430 | import os
import utilities.api_clients.api_call as ApiCallUtil
class GoalifyClient:
    """Minimal client for the Goalify REST API (v1.0.1)."""

    # Base URL shared by all requests.
    apiUrl = "https://g2.goalifyapp.com/api/1.0.1"

    def __init__(self):
        """Build an authenticated API helper from GOALIFY_ACCESS_TOKEN.

        BUG FIX: ``headers`` used to be a mutable *class* attribute that
        every instance mutated; it is now a per-instance dict so instances
        cannot clobber each other's Authorization header.
        """
        self.headers = {
            'Authorization': "Bearer " + os.getenv("GOALIFY_ACCESS_TOKEN"),
        }
        self.apiClient = ApiCallUtil.ApiCallHelper(self.apiUrl, self.headers)

    def getGoalProgress(self, goalId):
        """Fetch the 'perf_d_1' KPI value for the given goal id.

        Raises:
            Exception: when the response body does not contain
                result.goal.kpi.perf_d_1.
        """
        uri = "/goals/" + goalId
        queryString = {
            'kpi': 'perf_d_1'
        }
        response = self.apiClient.sendGet(uri, queryString)
        responseData = response.json() if ApiCallUtil.isJson(response.content) else {}
        structure_ok = ('result' in responseData
                        and 'goal' in responseData['result']
                        and 'kpi' in responseData['result']['goal']
                        and 'perf_d_1' in responseData['result']['goal']['kpi'])
        if not structure_ok:
            raise Exception("invalid response structure: {:s}".format(str(response.content)))
        return responseData['result']['goal']['kpi']['perf_d_1']
# Process-wide singleton instance, created lazily by getGoalifyClient().
GoalifyClientInstance = None
def getGoalifyClient():
    """Return the shared GoalifyClient, constructing it on first use."""
    global GoalifyClientInstance
    if GoalifyClientInstance is None:
        GoalifyClientInstance = GoalifyClient()
    return GoalifyClientInstance
| YansenChristian/automation | utilities/api_clients/goalify_client.py | goalify_client.py | py | 1,269 | python | en | code | 0 | github-code | 90 |
33419593962 | import torch
BATCH_SIZE = 4 # samples per batch; lower this value if GPU memory is tight
RESIZE_TO = 512 # training images are resized to RESIZE_TO x RESIZE_TO
NUM_EPOCHS = 100 # total number of training epochs
# Use the GPU when available, otherwise fall back to the CPU.
DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
#DEVICE = torch.device('cpu')
# training data directory
TRAIN_DIR = '../Microcontroller Detection/train'
# validation data directory
VALID_DIR = '../Microcontroller Detection/test'
# Detection class labels; index 0 is the background class required by
# torchvision-style detection models.
CLASSES = ['background', 'Arduino_Nano', 'ESP8266', 'Raspberry_Pi_3', 'Heltec_ESP32_Lora']
NUM_CLASSES = 5
# Whether to preview images after the data transforms are applied.
VISUALIZE_TRANSFORMED_IMAGES = True
# Output directory for saved models and plots.
OUT_DIR = '../outputs'
SAVE_PLOTS_EPOCH = 2 # save loss plots after this many epochs
SAVE_MODEL_EPOCH = 2 # save a model checkpoint after this many epochs
| yolyyin/fasterrcnn_study | fine_tune_sample/src/config.py | config.py | py | 803 | python | en | code | 0 | github-code | 90 |
30494700545 | VERBOSE_LOGGING = False
import random
import os
def logv(string):
    """Print string only when the VERBOSE_LOGGING flag is enabled."""
    if not VERBOSE_LOGGING:
        return
    print(string)
def log(string):
    """Unconditionally print string to stdout."""
    print(string)
def fileadd(path, string):
    """Append string to the file at path.

    Returns True on success, False when the file could not be written.
    BUG FIX: the original referenced the file handle in ``finally`` even
    when ``open`` itself failed, raising UnboundLocalError instead of
    returning False; ``with`` removes the manual close entirely.
    """
    try:
        with open(path, "a") as handle:
            handle.write(string)
        return True
    except Exception:
        return False
def fileoverwrite(path, lines):
    """Replace the contents of path with the given lines.

    Returns True on success, False when the file could not be written.
    BUG FIX: like fileadd, the original's ``finally`` raised
    UnboundLocalError when ``open`` failed.
    """
    try:
        with open(path, "w") as handle:
            for line in lines:
                handle.write(line)
        return True
    except Exception:
        return False
def fileread(path):
    """Return the file's lines (with newlines), or None when unreadable.

    BUG FIX: on a missing file the original set ``lines = None`` but then
    crashed with UnboundLocalError in ``finally`` (``file`` was never
    bound); now None is actually returned as intended.
    """
    try:
        with open(path, "r") as handle:
            return handle.readlines()
    except Exception:
        return None
def db_add(id, title, size):
    """Append a tab-separated record (id, title, size) to the todo file."""
    record = "\t".join([id, title, str(size)]) + "\n"
    fileadd("todo", record)
def db_remove(id):
    """Rewrite the todo file, dropping every line that starts with id."""
    remaining = [line for line in fileread("todo")
                 if not line.startswith(id)]
    fileoverwrite("todo", remaining)
def db_random():
    """Pick a random not-yet-downloaded video id from the todo file.

    Returns '' when the todo file is empty or everything is downloaded.
    """
    lines = fileread("todo")
    if len(lines) == 0:
        return ""
    # Candidate ids are those without a finished .mp4 on disk.
    candidates = []
    for line in lines:
        video_id = line.split("\t")[0]
        if not fileDone(video_id):
            candidates.append(video_id)
    if len(candidates) == 0:
        return ""
    chosen = random.choice(candidates)
    log("Next ID to download: " + chosen)
    return chosen
def db_list():
    # Build a progress report for every entry in the todo file:
    # [{'id', 'title', 'size', 'loaded'}], where 'loaded' is a percentage
    # derived from the sizes of partial files found in videos/.
    loadedfilesraw = os.listdir(path="videos/")
    # loadedfiles = []
    # for f in loadedfilesraw:
    #     id = f.split(".")[0]
    #     size = 0
    #     if (f.endswith(".part")):
    #         done = False
    #         size = os.path.getsize("download/videos/" + f)
    #     elif (f.endswith(".mp4")):
    #         done = True
    #
    #     log("Found file: Done: " + str(done) + ", ID: " + id + ", size: " + str(size))
    #
    #     if (id in l['id'] for l in loadedfiles):
    #         l['size'] += size
    #
    #     else:
    #         loadedfiles.append({})
    #
    lines = fileread("todo")
    list = []
    for l in lines:
        # Each todo record is "id<TAB>title<TAB>expected_size".
        data = l.split("\t")
        id = data[0]
        title = data[1]
        size = int(data[2])
        currentsize = 0
        loaded = 0
        done = False
        for f in loadedfilesraw:
            logv("Video " + id + " checking file " + f)
            # A finished .mp4 means 100%; otherwise sum the partial chunks.
            if (f.split(".")[0] == id and f.endswith(".mp4")):
                loaded = 100
                done = True
                break
            elif (f.split(".")[0] == id):
                currentsize += os.path.getsize("videos/" + f)
        if not done:
            # Cap in-progress downloads at 99% so only a real .mp4 shows 100.
            loaded = int(currentsize * 100 / size)
            if (loaded > 99):
                loaded = 99
        list.append({'id':id,'title':title,'size':size,'loaded':loaded})
    return list
def fileDone(id):
    """Return True when videos/ holds a finished <id>.mp4 (not a temp file)."""
    for name in os.listdir(path="videos/"):
        parts = name.split(".")
        if name.endswith(".mp4") and parts[0] == id and "temp" not in parts:
            return True
    return False
def createSettingsFile():
    """Create an empty settings.ini when one does not already exist."""
    if os.path.isfile("settings.ini"):
        return
    log("Settings file not found, creating!")
    open('settings.ini', "w+").close()
def createVideoFile():
    """Create an empty todo file when one does not already exist."""
    if os.path.isfile("todo"):
        return
    log("Video file not found, creating!")
    open('todo', "w+").close()
| swipswaps/surselva | serverutil.py | serverutil.py | py | 2,749 | python | en | code | null | github-code | 90 |
class constants:
    """Namespace of constants for each component of the leaf detector."""
    class bgsub:
        """Background subtraction/segmentation settings.

        mod [str] the OpenCV segmentation model name (MOG2, KNN, GMG)
        """
        mod = 'MOG2'
    class HSV:
        """HSV inRange filtering: channel maxima and initial trackbar
        values/names.  Hue is halved (OpenCV stores H in 0-179)."""
        max_value = 255
        max_value_H = 360//2
        low_H = 40
        low_S = 30
        low_V = 30
        high_H = 75
        high_S = 255
        high_V = 255
        low_H_name = 'Low H'
        low_S_name = 'Low S'
        low_V_name = 'Low V'
        high_H_name = 'High H'
        high_S_name = 'High S'
        high_V_name = 'High V'
    class window:
        """Window control: display window titles."""
        window1 = 'Altered'
        window2 = 'Original'
    class asth:
        """Aesthetics.

        font [enum int] OpenCV font id used for the description text
        text [bool] should text be imprinted on the image?
        """
        font = 0
        text = False
    class cntr:
        """Keyboard controls for the program (key codes).

        next_k - next image
        prev_k - prev image
        save - save single image (in current mode)
        save_all - save all images (in current mode)
        exit_k - exit the program (Esc)
        dice - calculate dice value
        dice_more - show all dice values based on dataset
        m1_k etc. - mode selection keys
        modes [dict] mode id -> mode name
        """
        next_k = ord('m')
        prev_k = ord('n')
        save = ord('s')
        save_all = ord('z')
        exit_k = 27
        dice = ord('d')
        dice_more = ord('f')
        m1_k = ord('1')
        m2_k = ord('2')
        m3_k = ord('3')
        m4_k = ord('4')
        m5_k = ord('5')
        modes = {
            0: 'original',
            1: 'hsv_filter',
            2: 'ws_mask',
            3: 'ws_mask_bg',
            4: 'fgbg_segm',
            5: 'ws_fgbg_segm'
        }
    class xtra:
        """Odds and ends.

        disco [bool] random colors for masks on each loop?
        show_save_all [bool] run save-all in the foreground?
        """
        disco = False
        show_save_all = True
| julzerinos/python-opencv-leaf-detection | constants.py | constants.py | py | 2,173 | python | en | code | 19 | github-code | 90 |
8965775127 |
# -*- coding: utf-8 -*
import threading
class Account:
    """Toy bank account used to demonstrate lock-protected updates."""

    def __init__(self):
        self.balance = 0

    def add(self, lock):
        """Increment the balance 100000 times while holding lock."""
        print("获得锁add")
        # 'with' acquires the lock and guarantees release even on error.
        with lock:
            for _ in range(0, 100000):
                self.balance += 1
            print("add balance %s" % self.balance)
        # BUG FIX: this message previously repeated "获得锁add"; it marks
        # the release of the lock, mirroring delete().
        print("释放锁add")

    def delete(self, lock):
        """Decrement the balance 100000 times while holding lock."""
        print("获得锁delete")
        with lock:
            for _ in range(0, 100000):
                self.balance -= 1
            print("delete balance %s" % self.balance)
        print("释放锁delete")
if __name__ == "__main__":
    account = Account()
    lock = threading.Lock()
    # Create the worker threads: one incrementing, one decrementing.
    print("创建线程")
    thread_add = threading.Thread(target=account.add, args=(lock,), name='Add')
    thread_delete = threading.Thread(target=account.delete, args=(lock,), name='Delete')
    # Start both threads.
    print("启动线程")
    thread_add.start()
    thread_delete.start()
    # Wait for both workers to finish before reading the balance.
    print("等待子线程结束")
    thread_add.join()
    thread_delete.join()
    # +100000 and -100000 applied under the same lock must net to 0.
    print('The final balance is: {}'.format(account.balance))
8197769295 | from werkzeug.datastructures import FileStorage
from flask_restplus.reqparse import RequestParser
def setup_parser(parser: RequestParser) -> RequestParser:
    """
    Setup request arguments parser.
    :param parser: app arguments parser
    :return: customized parser
    """
    # Table-driven registration: (argument names, add_argument keyword options).
    # NOTE(review): '--type' uses type=eval, which executes arbitrary client
    # input — consider a whitelist mapping instead.
    argument_specs = [
        (('file',), dict(location='files', type=FileStorage,
                         required=True, dest='file',
                         help='File to upload into the database.')),
        (('--col',), dict(action='split', dest='col_names',
                          help='List of new column names in correct order '
                               'as a comma-separated string. The number '
                               'of names must match the number of columns '
                               'in the existing file.')),
        (('--head',), dict(type=int, dest='header',
                           help='Row number to use as the column names (header).')),
        (('--index',), dict(action='split', dest='index',
                            help='List of column names to set index on it '
                                 '(as a comma-separated string).')),
        (('--type',), dict(type=eval, dest='type',
                           help='Set data type to the column(s). Argument is '
                                'a dictionary {\'column name\': \'type\'}. '
                                'Available types: int, float, str, datetime.')),
    ]
    for names, options in argument_specs:
        parser.add_argument(*names, **options)
    return parser
| viconstel/hse_test_task | bin/parser.py | parser.py | py | 1,455 | python | en | code | 0 | github-code | 90 |
37930780453 |
'''
utility for parsing fund_code.xml
Get FundClear Fund code and Fund name
'''
from lxml import etree
import logging, pickle
from fundclear.models import dFundCodeModel
FUND_CODE_FILE = 'fundclear/fund_code.xml'
def get_name_with_fundcode_list(p_code, p_fundcode_list=None):
    """Look up the fund name for p_code in a list of [index, code, name] rows.

    When p_fundcode_list is None the table is loaded via get_fundcode_list().
    Returns '' when the code is not present.
    """
    rows = get_fundcode_list() if p_fundcode_list is None else p_fundcode_list
    for row in rows:
        if row[1] == p_code:
            return row[2]
    return ''
def get_fundcode_dictlist():
    """Return the fund code table as a list of {index, code, name} dicts."""
    return [{'index': row[0], 'code': row[1], 'name': row[2]}
            for row in get_fundcode_list()]
def get_fundcode_list():
    """Load the pickled fund code list stored under FUND_CODE_FILE."""
    entity = dFundCodeModel.get_or_insert(FUND_CODE_FILE)
    return pickle.loads(entity.content)
def save_fundcode_config():
    """Pickle the parsed fund info list into the datastore entity."""
    entity = dFundCodeModel.get_or_insert(FUND_CODE_FILE)
    entity.content = pickle.dumps(get_fund_info_list())
    entity.put()
    return 'save_fundcode_config done'
class FundInfo():
    # Plain value object describing one fund row.
    # NOTE(review): this module is Python 2 (uses unicode()); do not run
    # these __str__/__unicode__ methods under Python 3 unchanged.
    code = ''
    name = ''
    index = ''
    def __init__(self, code, name, index):
        self.code = code
        self.name = name
        self.index = index
    def __str__(self):
        # Delegates to __unicode__ (Python 2 convention).
        return self.__unicode__()
    def __unicode__(self):
        return '[' + self.code + ',' + unicode(self.name) + ',' + self.index + ']'
def get_fund_info_list():
    '''
    Parse FUND_CODE_FILE (XML) and return a list of [index, fundCode,
    fundName] rows sorted by index, or None when the qData element is
    missing.
    '''
    t_root = etree.parse(FUND_CODE_FILE)
    t_qdata = t_root.find('qData')
    if t_qdata == None:
        logging.warning(__name__ + ', get_fund_info_list: Can not find qData element')
        return None
    t_fund_info_list = []
    logging.debug(__name__ + ', get_fund_info_list: check total row ' + str(len(t_qdata)))
    for t_row in t_qdata[:]:
        t_code = t_name = None
        t_attrib = t_row.attrib
        # Row ordering comes from the 'index' attribute when present.
        # NOTE(review): when 'index' is absent, t_index keeps the previous
        # row's value — confirm that is intentional.
        if 'index' in t_attrib.keys():
            t_index = int(t_row.attrib['index'])
        else:
            logging.debug(__name__ + ', get_fund_info_list: no index attribute')
        for t_element in t_row:
            #logging.debug('check tag ' + t_element.tag)
            if t_element.tag == 'fundCode':
                #logging.debug('find fundCode')
                t_code = t_element
            if t_element.tag == 'fundName':
                #logging.debug('find fundName: ' + t_element.text)
                t_name = t_element
        if t_code is not None and t_name is not None:
            t_fund_code = t_code.text
            t_fund_name = t_name.text
            t_fund_info_list.append([t_index,t_fund_code,t_fund_name])
            logging.debug(__name__ + ', get_fund_info_list: add entry for code ' + t_fund_code + ' index ' + str(t_index))
        else:
            logging.warning(__name__ + ', get_fund_info_list: Can not find fundCode or fundName for element content:\n' + etree.tostring(t_row))
    # Sort by the numeric index column before returning.
    t_fund_info_list.sort(key=lambda x: x[0])
    logging.debug(__name__ + ', get_fund_info_list result total ' + str(len(t_fund_info_list)))
    return t_fund_info_list
def get_fund_code_name():
    '''
    Parse FUND_CODE_FILE (XML) and return a dict mapping fundCode to
    fundName, or None when the qData element is missing.
    '''
    t_root = etree.parse(FUND_CODE_FILE)
    t_qdata = t_root.find('qData')
    if t_qdata == None:
        logging.warning(__name__ + ', get_fund_code_name: Can not find qData element')
        return None
    t_fund_code_name = {}
    logging.debug(__name__ + ', get_fund_code_name: check total row ' + str(len(t_qdata)))
    for t_row in t_qdata[:]:
        t_code = t_name = None
        for t_element in t_row:
            #logging.debug('check tag ' + t_element.tag)
            if t_element.tag == 'fundCode':
                #logging.debug('find fundCode')
                t_code = t_element
            if t_element.tag == 'fundName':
                #logging.debug('find fundName: ' + t_element.text)
                t_name = t_element
        if t_name is None:
            logging.debug('t_name is None')
        if t_code is not None and t_name is not None:
            t_fund_code = t_code.text
            t_fund_name = t_name.text
            # Later rows with a duplicate code overwrite earlier ones.
            t_fund_code_name[t_fund_code] = t_fund_name
            logging.debug(__name__ + ', get_fund_code_name: add entry (' + t_fund_code + ',' + unicode(t_fund_name).encode('utf8') + ')')
        else:
            logging.warning(__name__ + ', get_fund_code_name: Can not find fundCode or fundName for element content:\n' + etree.tostring(t_row))
    logging.debug(__name__ + ', get_fund_code_name result total ' + str(len(t_fund_code_name)))
    return t_fund_code_name
| slee124565/philcop | fundclear/fcreader.py | fcreader.py | py | 5,175 | python | en | code | 1 | github-code | 90 |
18483312629 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
from copy import deepcopy
def main():
    # Read n, then n integers (one per line), sorted ascending.
    n = int(input())
    la = sorted([int(input()) for _ in range(n)])
    lo = deepcopy(la)  # NOTE(review): 'lo' and 'ind' below are never used
    ind = 0
    def f(la, ind):
        # Consume the sorted list by alternately taking from the 'ind' end
        # and the opposite end, then score the taken order by the sum of
        # absolute neighbour differences minus the smallest difference.
        lx = []
        while len(la) > 0:
            lx.append(la.pop(ind))
            ind = -1 if ind == 0 else 0
        ls = ([abs(lx[i] - lx[i-1]) for i in range(len(lx))])
        return sum(ls)-min(ls)
    # Try starting from either end of the sorted list and keep the best.
    print(max(f(deepcopy(la), 0), f(deepcopy(la), -1)))
if __name__=='__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03229/s271242172.py | s271242172.py | py | 478 | python | en | code | 0 | github-code | 90 |
19143107755 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2020/1/16 11:49
@Author : duanpy001
@File : 6.py
@Link : https://leetcode-cn.com/problems/zigzag-conversion/
"""
class Solution:
    """LeetCode 6 "ZigZag Conversion": given s written in a zigzag pattern
    over numRows rows, return the characters read row by row."""

    def convert(self, s: str, numRows: int) -> str:
        """Simulate the zigzag walk, appending each character to its row."""
        if numRows == 1:
            return s
        rows = [''] * min(numRows, len(s))
        cur_row = 0
        going_down = False
        for c in s:
            rows[cur_row] += c
            # Direction flips at the top and bottom rows.
            if cur_row == 0 or cur_row == numRows - 1:
                going_down = not going_down
            cur_row += 1 if going_down else -1
        res = ''.join(rows)
        return res

    def convert_2(self, s: str, numRows: int) -> str:
        """Index-arithmetic variant: row i holds s[k*cycle_len + i], and the
        middle rows additionally hold s[(k+1)*cycle_len - i]."""
        if numRows == 1:
            return s
        cycle_len = 2 * numRows - 2
        res = ''
        for i in range(numRows):
            for j in range(0, len(s) - i, cycle_len):
                res += s[i + j]
                # Middle rows pick up a second character per cycle.
                if i != 0 and i != numRows - 1 and j + cycle_len - i < len(s):
                    res += s[j + cycle_len - i]
        return res

    def convert_3(self, s: str, numRows: int) -> str:
        """Kept for backward compatibility: it was a verbatim copy of
        convert_2, so it now simply delegates to it."""
        return self.convert_2(s, numRows)
if __name__ == '__main__':
    s = Solution()
    test_data = [('LEETCODEISHIRING', 3), ('LEETCODEISHIRING', 4), ("PAYPALISHIRING", 4)]
    test_res = ['LCIRETOESIIGEDHN', 'LDREOEIIECIHNTSG', 'PINALSIGYAHRPI']
    # BUG FIX: test_res was defined but never compared against the results;
    # each output is now verified against its expected value.
    for (text, rows), expected in zip(test_data, test_res):
        actual = s.convert_2(text, rows)
        print(actual)
        assert actual == expected, f"{text!r}/{rows}: {actual!r} != {expected!r}"
| sevenzero/daily-study | LeetCode/6.py | 6.py | py | 2,431 | python | en | code | 0 | github-code | 90 |
70904593257 | import math
from collections import deque
progresses = [95, 95, 95, 95]
speeds = [4, 3, 2, 1]
def solution(progresses, speeds):
    """Return, per deployment, how many tasks are released together.

    Each task needs ceil((100 - progress) / speed) days to finish, and a
    task can only ship after every task before it has shipped.
    """
    days_left = deque(
        math.ceil((100 - done) / speed)
        for done, speed in zip(progresses, speeds)
    )
    result = [1]
    blocker = days_left.popleft()
    while days_left:
        if blocker >= days_left[0]:
            # Finished no later than the blocking task: ships with it.
            days_left.popleft()
            result[-1] += 1
        else:
            # This task becomes the new blocker and opens a new release.
            blocker = days_left.popleft()
            result.append(1)
    return result
return result
print(solution(progresses, speeds)) | dohun31/algorithm | 2021/week_02/210716/기능개발.py | 기능개발.py | py | 524 | python | en | code | 1 | github-code | 90 |
21554816340 | import time
from shutil import copyfile
import pandas as pd
import tracemalloc
import numpy as np
import pickle
import os
from stable_baselines3.common.monitor import Monitor
class SB_Experiment(object):

    def __init__(self, env, model, dict):
        '''
        A simple class to run a MDP Experiment with a stable baselines model.

        Args:
            env - an instance of an Environment (expected to be wrapped in a
                stable-baselines3 Monitor; get_episode_rewards/times are used)
            model - a stable baselines model
            dict - a dictionary containing the arguments to send for the experiment, including:
                seed - random seed for experiment
                recFreq - proportion of episodes to save to file
                targetPath - path to the file for saving
                deBug - boolean of whether to include
                nEps - number of episodes
                numIters - the number of iterations to run experiment
                saveTrajectory - boolean of whether to save trajectory information
        '''
        self.seed = dict['seed']
        self.epFreq = dict['recFreq']
        self.dirPath = dict['dirPath']
        # self.targetPath = dict['targetPath']
        self.deBug = dict['deBug']
        self.nEps = dict['nEps']
        self.env = env
        self.epLen = dict['epLen']
        self.num_iters = dict['numIters']
        self.save_trajectory = dict['saveTrajectory']
        self.model = model
        # print('epLen: ' + str(self.epLen))
        if self.save_trajectory:
            # NOTE(review): trajectory is allocated but never filled in run().
            self.trajectory = []
        np.random.seed(self.seed)

    # Runs the experiment
    def run(self):
        print('**************************************************')
        print('Running experiment')
        print('**************************************************')
        index = 0
        traj_index = 0
        episodes = []
        iterations = []
        rewards = []
        times = []
        memory = []
        # Running an experiment
        # TODO: Determine how to save trajectory information
        for i in range(self.num_iters):
            # tracemalloc samples the allocation footprint of each learn() call.
            tracemalloc.start()
            self.model.learn(total_timesteps=self.epLen*self.nEps)
            current, peak = tracemalloc.get_traced_memory()
            tracemalloc.stop()
            episodes = np.append(episodes,np.arange(0, self.nEps))
            iterations = np.append(iterations, [i for _ in range(self.nEps)])
            memory = np.append(memory, [current for _ in range(self.nEps)])
            # NOTE(review): Monitor's get_episode_rewards returns ALL episodes
            # so far, so for num_iters > 1 this append duplicates earlier
            # iterations' rewards and the column lengths can diverge — verify.
            rewards = np.append(rewards, self.env.get_episode_rewards())
            # Times are calculated cumulatively so need to calculate the per iteration time complexity
            orig_times = [0.] + self.env.get_episode_times()
            times = [orig_times[i] - orig_times[i-1] for i in np.arange(1, len(orig_times))]
        # Combining data in dataframe
        # NOTE(review): np.log(times) is -inf/undefined for zero-length
        # episode times — confirm upstream guarantees positive durations.
        self.data = pd.DataFrame({'episode': episodes,
                            'iteration': iterations,
                            'epReward': rewards,
                            'time': np.log(times),
                            'memory': memory})
        print('**************************************************')
        print('Experiment complete')
        print('**************************************************')

    # Saves the data to the file location provided to the algorithm
    def save_data(self):
        print('**************************************************')
        print('Saving data')
        print('**************************************************')
        print(self.data)
        dir_path = self.dirPath
        data_loc = 'data.csv'
        dt = self.data
        # Drop all-zero rows before writing.
        dt = dt[(dt.T != 0).any()]
        print('Writing to file ' + dir_path + data_loc)
        if os.path.exists(dir_path):
            dt.to_csv(os.path.join(dir_path,data_loc), index=False, float_format='%.5f', mode='w')
        else:
            os.makedirs(dir_path)
            dt.to_csv(os.path.join(dir_path, data_loc), index=False, float_format='%.5f', mode='w')
        print('**************************************************')
        print('Data save complete')
        print('**************************************************')
        return dt
return dt | maxsolberg/ORSuite | or_suite/experiment/sb_experiment.py | sb_experiment.py | py | 4,202 | python | en | code | 0 | github-code | 90 |
9879713317 | #!/usr/bin/env python
from __future__ import annotations
import os.path
from unittest import mock
import pytest
from gcsfs import GCSFileSystem
from cdp_backend.file_store import functions
###############################################################################
# Shared fixture values used across the parametrized tests below.
FILENAME = "file.txt"
BUCKET = "bucket"
FILEPATH = "fake/path/" + FILENAME
SAVE_NAME = "fakeSaveName"
EXISTING_FILE_URI = "gs://bucket/" + SAVE_NAME
GCS_FILE_URI = functions.GCS_URI.format(bucket=BUCKET, filename=FILENAME)
###############################################################################
def test_initialize_gcs_file_system() -> None:
    # Mock the credential handshake so no real GCP auth is attempted.
    with mock.patch("gcsfs.credentials.GoogleCredentials.connect"):
        fs = functions.initialize_gcs_file_system("path/to/credentials")
        assert isinstance(fs, GCSFileSystem)
@pytest.mark.parametrize(
    "filename, bucket, exists, expected",
    [
        # File present in the bucket -> full gs:// URI is returned.
        (
            FILENAME,
            BUCKET,
            True,
            functions.GCS_URI.format(bucket=BUCKET, filename=FILENAME),
        ),
        # File missing -> None.
        (FILENAME, BUCKET, False, None),
    ],
)
def test_get_file_uri(
    filename: str,
    bucket: str,
    exists: bool,
    expected: str | None,
) -> None:
    # Mock both the credential handshake and the existence check so no
    # network access happens during the test.
    with mock.patch("gcsfs.credentials.GoogleCredentials.connect"):
        with mock.patch("gcsfs.GCSFileSystem.exists") as mock_exists:
            mock_exists.return_value = exists

            assert expected == functions.get_file_uri(bucket, filename, "path/to/creds")
# Matrix: whichever combination of save_name / remove_local / overwrite /
# pre-existing file is used, upload_file must return the expected URI
# (EXISTING_FILE_URI when a save_name is given, GCS_FILE_URI otherwise).
@pytest.mark.parametrize(
    "bucket, filepath, save_name, remove_local, overwrite, existing_file_uri, expected",
    [
        (
            BUCKET,
            FILEPATH,
            SAVE_NAME,
            True,
            True,
            EXISTING_FILE_URI,
            EXISTING_FILE_URI,
        ),
        (
            BUCKET,
            FILEPATH,
            SAVE_NAME,
            True,
            True,
            None,
            EXISTING_FILE_URI,
        ),
        (
            BUCKET,
            FILEPATH,
            SAVE_NAME,
            True,
            False,
            EXISTING_FILE_URI,
            EXISTING_FILE_URI,
        ),
        (
            BUCKET,
            FILEPATH,
            SAVE_NAME,
            False,
            True,
            EXISTING_FILE_URI,
            EXISTING_FILE_URI,
        ),
        (
            BUCKET,
            FILEPATH,
            SAVE_NAME,
            False,
            True,
            None,
            EXISTING_FILE_URI,
        ),
        (
            BUCKET,
            FILEPATH,
            SAVE_NAME,
            False,
            False,
            EXISTING_FILE_URI,
            EXISTING_FILE_URI,
        ),
        (BUCKET, FILEPATH, None, False, True, GCS_FILE_URI, GCS_FILE_URI),
        (BUCKET, FILEPATH, None, False, True, None, GCS_FILE_URI),
        (BUCKET, FILEPATH, None, False, False, None, GCS_FILE_URI),
        (BUCKET, FILEPATH, None, True, True, GCS_FILE_URI, GCS_FILE_URI),
        (BUCKET, FILEPATH, None, True, True, None, GCS_FILE_URI),
        (BUCKET, FILEPATH, None, True, False, None, GCS_FILE_URI),
    ],
)
def test_upload_file(
    bucket: str,
    filepath: str,
    save_name: str | None,
    remove_local: bool,
    overwrite: bool,
    existing_file_uri: str,
    expected: str,
) -> None:
    # All collaborators (filesystem init, URI lookup, local cleanup, path
    # resolution) are mocked; only upload_file's decision logic is exercised.
    with mock.patch("cdp_backend.file_store.functions.initialize_gcs_file_system"):
        with mock.patch(
            "cdp_backend.file_store.functions.get_file_uri"
        ) as mock_file_uri:
            with mock.patch("cdp_backend.file_store.functions.remove_local_file"):
                with mock.patch("pathlib.Path.resolve") as mock_path:
                    mock_file_uri.return_value = existing_file_uri
                    mock_path.return_value.name = FILENAME

                    assert expected == functions.upload_file(
                        "path/to/creds",
                        bucket,
                        filepath,
                        save_name,
                        remove_local,
                        overwrite,
                    )
# Type ignore because changing tmpdir typing
def test_remove_local_file(tmpdir) -> None: # type: ignore
print(type(tmpdir))
p = tmpdir.mkdir("sub").join("hello.txt")
p.write("content")
file_path = str(p)
assert os.path.isfile(file_path)
functions.remove_local_file(file_path)
assert not os.path.isfile(file_path)
| CouncilDataProject/cdp-backend | cdp_backend/tests/file_store/test_functions.py | test_functions.py | py | 4,430 | python | en | code | 19 | github-code | 90 |
39013215004 | import os
from utils.enums import DeployStrategy
DEBUG = False
INSTANCE_NAME = ''
# Server primary configuration
SERVER_CONFIG = {
# Port of service
"PORT": 7722,
# Mongo Section
"MONGO_HOST": "192.168.100.1",
"MONGO_PORT": 27017,
"MONGO_USER": "superuser",
"MONGO_PWD": "******",
# Resource
"RESOURCE_DIR": "./resource",
# Log Section
"LOG_DIR": "/log/",
"LOG_FILE_NAME": "deploy_server",
# Biz Section
"TAG_LIST_SIZE": 10 # size of tag list in admin interface
}
# Configuration of Redis
REDIS = {
    "HOST": "192.168.100.5",
    "PORT": 6379,
    "DBID": 3  # logical database index to SELECT
}
# Webhook secret of github
GITHUB = {
    "SECRET": "********"  # shared secret used to verify webhook signatures
}
# SMTP to send email
EMAIL = {
    "SMTP": "smtp.exmail.qq.com",
    "USER": "zqhua@zqhua.cn",
    "PASSWORD": "********"
}
# ! Configurations of Repos. Using list if watching more than one repos
REPOSITORY = [
{
"REPO_NAME": "repo_name", # repo name
"GIT_PATH": "/home/deploy/_github/repoA/", # path where repo resides in, needed in both production/test mode
"DEPLOY_PATH": "/home/deploy/_online/", # path where deploy to, needed in production mode
"PACKAGE_PATH": "/home/deploy/_package/", # path where packages save to, need in production mode
"BACKUP_PATH": "/home/deploy/_backup/", # path where backup tar file save to, need in production mode
"STRATEGY": DeployStrategy.PRO_MODE, # mode switcher
"BRANCH": "master", # branch filter
# services should restart when files have changed, key is first child directory of repo root('*' matches anything else like finally), value is service name in supervisor, 'None' means no service need restart, also support list if multi services need restart.
"SERVICES": {
"admin": "admin:admin_3377",
"api": "api:api_2919",
"dw": None,
"config": ["mf2:mf2_3333", "poster:poster_2234", "telesales:telesales_3335"],
"*": "ts:ts_3335",
},
# services priority as restart order, Key is service name in supervisor, value is priority level, little numbers have higher priorities.
"SERVICES_PRI": {
"admin:admin_3377": 3,
"api:api_2919": 1,
"poster:poster_2234": 2,
"pyds:pyds_3355": 2,
"telesales:telesales_3335": 3,
"mf2:mf2_3333": 2,
},
# map from hostname to roles of host
"HOSTS": {
"zqhua01": ["web", "data"],
"zqhua02": ["web", "data", "weixin"]
},
# map from host role to service names
"HOST_ROLE": {
"web": [
"admin:admin_3377",
"api:api_2919",
"mf2:mf2_3333",
"telesales:telesales_3335"
],
"data": [
"pyds:pyds_3355",
],
},
# Command Strings to run after NPM or package install
"POST_ACTIONS": [
{"cmd": "npm start", "cwd": "/home/deploy/foo"},
],
# Exclude filename which contains file pattern should not rsync
"EXCLUDE_FILENAME": None,
# Pip script path live in virtualenv
"PIP_SCRIPT": ""
}
]
LOGGING = {
"version": 1,
"formatters": {
"verbose": {
"format": "[%(levelname)s][%(module)s-%(lineno)d][thread-%(thread)d]%(asctime)s %(name)s:%(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose"
},
"file": {
"level": "DEBUG",
"class": "logging.handlers.TimedRotatingFileHandler",
"when": "D",
"formatter": "verbose",
"filename": SERVER_CONFIG["LOG_DIR"] + os.sep + SERVER_CONFIG["LOG_FILE_NAME"] + '.log'
},
"err_file": {
"level": "ERROR",
"class": "logging.handlers.TimedRotatingFileHandler",
"when": "D",
"formatter": "verbose",
"filename": SERVER_CONFIG["LOG_DIR"] + os.sep + SERVER_CONFIG["LOG_FILE_NAME"] + '.err'
},
"t_access_file": {
"level": "ERROR",
"class": "logging.handlers.TimedRotatingFileHandler",
"when": "D",
"formatter": "verbose",
"filename": SERVER_CONFIG["LOG_DIR"] + os.sep + 'tornado.access'
},
"t_error_file": {
"level": "ERROR",
"class": "logging.handlers.TimedRotatingFileHandler",
"when": "D",
"formatter": "verbose",
"filename": SERVER_CONFIG["LOG_DIR"] + os.sep + 'tornado.error'
}
},
"loggers": {
"DeployServer": {
"handlers": ["console", "file", "err_file"],
"propagate": False,
"level": "DEBUG"
},
"tornado.access": {
"handlers": ["t_access_file"],
"propagate": False
},
"tornado": {
"handlers": ["t_error_file"],
"propagate": False
}
}
} | 11dimension/niner | config/example.py | example.py | py | 5,131 | python | en | code | 2 | github-code | 90 |
27552970347 | #-*- coding: utf-8 -*-
import os
import sys
import time
import ConfigParser
from Tkinter import *
class TkinterMessage():
    """Borderless Tk popup that shows a phrase and destroys itself.

    All appearance and placement settings are read from
    ``$PRODROOT/etc/message.cfg`` (Python 2 modules: ``Tkinter``,
    ``ConfigParser``).
    """
    def __init__(self):
        self._get_config()
        self.phrase_window = Tk()
        self.frame = Frame(self.phrase_window)
        self.phrase_label = Label(self.frame)
    def _get_config(self):
        """Load font, color, margin and placement settings from message.cfg."""
        self.config = ConfigParser.ConfigParser()
        self.config.read(os.path.join(os.environ['PRODROOT'],'etc/message.cfg'))
        self.font = self.config.get('text','font')
        self.font_size = int(self.config.get('text', 'font_size'))
        self.font_color = self.config.get('text', 'font_color')
        self.background_color = self.config.get('text', 'background_color')
        self.text_margin = int(self.config.get('text','margin'))
        # Window placement: corner anchor ('LEFT'/'TOP' vs opposite edge)
        # plus margins from the screen edges.
        self.window_position_x = self.config.get('window', 'position_x')
        self.window_position_y = self.config.get('window', 'position_y')
        self.window_margin_x = int(self.config.get('window', 'margin_x'))
        self.window_margin_y = int(self.config.get('window', 'margin_y'))
        self.window_max_width = int(self.config.get('window', 'max_width'))
        self.window_border_width = int(self.config.get('window', 'border_width'))
        self.window_border_color = self.config.get('window', 'border_color')
        self.window_delay_displaying = int(self.config.get('window', 'delay_displaying'))
    def show_message(self, phrase):
        """Display *phrase* in the popup and auto-close after the
        configured delay (milliseconds)."""
        self.frame.config(bd=self.window_border_width, bg=self.window_border_color)
        self.frame.pack()
        self.phrase_label.config(text=phrase,
                            font=(self.font, self.font_size),
                            bg=self.background_color, fg=self.font_color,
                            wraplength=self.window_max_width, justify=LEFT,
                            padx=self.text_margin, pady=self.text_margin)
        self.phrase_label.pack()
        # Size the window to the label's requested size, then anchor it
        # to the configured screen corner.
        window_width = self.phrase_label.winfo_reqwidth()
        window_height = self.phrase_label.winfo_reqheight()
        if self.window_position_x == 'LEFT':
            window_x = self.window_margin_x
        else:
            window_x = self.phrase_window.winfo_screenwidth() - window_width - self.window_margin_x
        if self.window_position_y == 'TOP':
            window_y = self.window_margin_y
        else:
            window_y = self.phrase_window.winfo_screenheight() - window_height - self.window_margin_y
        self.phrase_window.geometry('%dx%d+%d+%d' % (window_width, window_height, window_x, window_y))
        self.phrase_window.overrideredirect(True)
        self.phrase_window.after(self.window_delay_displaying, lambda: self.phrase_window.destroy())
        self.phrase_window.mainloop()
self.phrase_window.mainloop()
if __name__ == '__main__':
msg = sys.argv[1]
Tmsg = TkinterMessage()
Tmsg.show_message(msg)
| MasterSergius/Frazer | src/show_tkinter_message.py | show_tkinter_message.py | py | 2,856 | python | en | code | 0 | github-code | 90 |
7780873245 | # -*- coding: utf-8 -*-
if __name__ == "__main__":
    # Accumulate total score S per tag across all N problems, then print
    # up to the top ten tags ordered by score desc, ties by name asc.
    N = int(input())
    tag_dict = {}
    for _ in range(N):
        int(input())  # problem number (unused)
        M, S = map(int, input().split())  # M = number of tags (unused)
        for tag in input().split():
            tag_dict[tag] = tag_dict.get(tag, 0) + S
    # Sort by name ascending, then stably by score descending.
    tag_items = sorted(tag_dict.items(), key=lambda kv: kv[0])
    tag_items.sort(key=lambda kv: kv[1], reverse=True)
    # The original capped the output via a dead `RANGE_MAX` variable and a
    # reassigned loop bound; a slice expresses the same "at most 10" rule.
    for tag, score in tag_items[:10]:
        print("{} {}".format(tag, score))
| taketakeyyy/yukicoder | q628/main.py | main.py | py | 756 | python | en | code | 0 | github-code | 90 |
18485044989 | import math
import sys
n, m = map(int, input().split())
s = list(input())
t = list(input())
# a = lcm(n, m): length of the shortest string both s and t could tile.
a = (n*m)//math.gcd(n, m)
# 1-based positions where each character of s (resp. t) lands when the
# strings are stretched evenly over length a.
b0 = [i*(a//n)+1 for i in range(n)]
b1 = [i*(a//m)+1 for i in range(m)]
b2 = set(b0)
b3 = set(b1)
# Any position claimed by both strings must carry the same character;
# otherwise no common string exists and we print -1.
for j, i in enumerate(b0):
    if i in b3:
        if s[j] != t[b1.index(i)]:
            print(-1)
            sys.exit()
for j, i in enumerate(b1):
    if i in b2:
        if s[b0.index(i)] != t[j]:
            print(-1)
            sys.exit()
print(a)
| Aasthaengg/IBMdataset | Python_codes/p03231/s407382103.py | s407382103.py | py | 485 | python | en | code | 0 | github-code | 90 |
40189807482 | #!/usr/bin/env python
# coding: utf-8
import numpy as np
from sklearn.model_selection import train_test_split
import Preprocessing
from keras.models import Sequential
from keras.layers import Dense
def neuralNetwork(datadict, nbneurons, epochs):
    """
    Author: Karel Kedemos\n
    Build, train and evaluate a fully-connected binary classifier with
    len(nbneurons) hidden ReLU layers plus one sigmoid output neuron.

    Args:
        datadict: dictionary returned by "preprocessing_main" in Preprocessing.py
        nbneurons: list with the neuron count of each hidden layer
        epochs: number of training epochs

    Returns:
        The trained model and its accuracy on the testing data.
    """
    if not nbneurons:
        return 'nb must be greater than 1'

    model = Sequential()
    first, *hidden = nbneurons
    # First layer needs the input dimension; the rest infer it.
    model.add(Dense(first, activation="relu",
                    input_dim=datadict.get("data_train").shape[1],
                    kernel_initializer='random_normal'))
    for width in hidden:
        model.add(Dense(width, activation="relu", kernel_initializer='random_normal'))
    model.add(Dense(1, activation="sigmoid", kernel_initializer='random_normal'))

    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    model.fit(datadict.get("data_train"), datadict.get("label_train"),
              epochs=epochs, verbose=0)
    _, test_acc = model.evaluate(datadict.get("data_test"),
                                 datadict.get("label_test"), verbose=0)
    return model, test_acc
def neuralNetworkGridSearch(datadict, param_grid):
    """
    Author: Karel Kedemos\n
    Grid-search over layer layouts and epoch counts; each combination is
    trained three times and ranked by the summed test accuracy.

    Args:
        datadict: dictionary returned by "preprocessing_main" in Preprocessing.py
        param_grid: dict with keys "nbneurons" (list of layouts) and
            "epochs" (list of epoch counts)

    Returns:
        The best model, its parameters [layout, epochs], and its mean
        test accuracy over the three runs.
    """
    best_sum = 0
    best_model = Sequential()
    best_params = []
    for layout in param_grid["nbneurons"]:
        for n_epochs in param_grid["epochs"]:
            acc_sum = 0
            model = None
            for _ in range(3):
                model, acc = neuralNetwork(datadict, layout, n_epochs)
                acc_sum += acc
            # Keep the last model of the best-scoring combination.
            if acc_sum > best_sum:
                best_sum = acc_sum
                best_model = model
                best_params = [layout, n_epochs]
    return best_model, best_params, best_sum / 3
if __name__ == '__main__':
kidney, banknote, kidney_pca, banknote_pca, kidney_tsne, banknote_tsne = Preprocessing.preprocess_main()
data = kidney
model, test_acc = neuralNetwork(data, [32,64,16,8], 300)
param_grid = {"nbneurons" : [[4,12,8], [16,32,24,12], [32,64,16,8]], "epochs" : [100,200,300]}
best_model, best_params, best_test_acc = neuralNetworkGridSearch(data, param_grid)
print(best_params)
print(best_test_acc)
| imomayiz/Binary-classification-using-Python | neuralNetwork.py | neuralNetwork.py | py | 3,335 | python | en | code | 0 | github-code | 90 |
def merge(li, low, mid, high):
    """Merge the sorted runs li[low:mid+1] and li[mid+1:high+1] in place."""
    left, right = low, mid + 1
    merged = []
    while left <= mid and right <= high:
        if li[left] < li[right]:
            merged.append(li[left])
            left += 1
        else:
            merged.append(li[right])
            right += 1
    # At most one of the two runs still has elements; drain both slices.
    merged.extend(li[left:mid + 1])
    merged.extend(li[right:high + 1])
    li[low:high + 1] = merged


def merge_sort(li, low, high):
    """Sort li[low:high+1] in place by recursive halving; returns li."""
    if low < high:
        mid = (low + high) // 2
        merge_sort(li, low, mid)
        merge_sort(li, mid + 1, high)
        merge(li, low, mid, high)
    return li
| BruceStallone/Python_algorithm | merge_sort.py | merge_sort.py | py | 771 | python | en | code | 0 | github-code | 90 |
5563508860 | from __future__ import annotations
from threading import Thread
from builder import Pizzaiolo, PizzaBuilder
from pizza import Flour, Product
def _test_pizzaiolo(builder):
    # Thread target: wrap the given builder in a Pizzaiolo and print the
    # builder it sees (used below to exercise the "test Singleton" check).
    pizzaiolo = Pizzaiolo(builder)
    print(pizzaiolo.builder)
if __name__ == "__main__":
pizza_builder = PizzaBuilder()
pizza_builder2 = PizzaBuilder()
pizza_builder2.set_crust(flour=Flour.GLUTEN_FREE)
# test Singleton
print("builder1: ", pizza_builder, "builder2: ", pizza_builder2)
process1 = Thread(target=_test_pizzaiolo, args=(pizza_builder,))
process2 = Thread(target=_test_pizzaiolo, args=(pizza_builder2,))
process1.start()
process2.start()
# make pizzas
pizzaiolo = Pizzaiolo(pizza_builder)
margherita = pizzaiolo.make_margherita()
print(margherita)
# make author pizza
pizza_builder.set_ingredient(Product.TOMATO_SAUCE)
pizza_builder.set_ingredient(Product.MOZZARELLA, 2)
pizza_builder.set_ingredient(Product.BURRATA_CHEESE)
pizza_builder.set_ingredient(Product.PROSCIUTTO)
pizza_builder.set_ingredient(Product.OLIVES)
author_pizza = pizza_builder.bake
print(author_pizza)
| kristyko/SoftwareDesignPatterns | Builder + Singleton/main.py | main.py | py | 1,150 | python | en | code | 0 | github-code | 90 |
31942168027 | from dgl.nn.pytorch import GINConv
import torch.nn as nn
import torch
from models.utils import get_mask
class GTShapelet(nn.Module):
    """GIN encoder + single self-attention block over a k-mer graph.

    The node vocabulary has ``1 << 2*k`` entries (2 bits per symbol —
    presumably a 4-letter alphabet; TODO confirm).  A learned CLS-style
    embedding is appended before attention and its attended vector is
    returned as the graph summary.
    """
    def __init__(self, k, embed_dim=128, num_heads=4):
        super(GTShapelet, self).__init__()
        self.embed_dim = embed_dim
        self.num_nodes = 1 << 2 * k
        self.embed = nn.Embedding(self.num_nodes, self.embed_dim)
        self.convs = nn.ModuleList()
        self.norm_gcn = nn.LayerNorm(embed_dim)  # NOTE(review): defined but never used in forward()
        # Three GIN layers: embed -> 2*embed -> 2*embed -> embed.
        self.convs.append(GINConv(nn.Linear(embed_dim, 2 * embed_dim)))
        self.convs.append(GINConv(nn.Linear(2 * embed_dim, 2 * embed_dim)))
        self.convs.append(GINConv(nn.Linear(2 * embed_dim, embed_dim)))
        self.act = nn.GELU()
        self.MHA = nn.MultiheadAttention(embed_dim, num_heads=num_heads, batch_first=True) # MultiHeadAttention
        self.cls_embedding = nn.Parameter(torch.randn([1, 1, embed_dim], requires_grad=True))
        self.norm_after = nn.LayerNorm(embed_dim)
        self.set_parameter()
    def set_parameter(self):
        # Kaiming-init all weights; zero biases; leave LayerNorm params alone.
        for name, param in self.named_parameters():
            if 'norm' in name:
                continue
            if 'bias' in name:
                nn.init.zeros_(param)
                continue
            nn.init.kaiming_uniform_(param)
    def forward(self, g, sw):
        # sw is currently unused (see the commented-out einsum below).
        h = self.embed(g.ndata['mask'])
        for gnn in self.convs:
            h = gnn(g, h, edge_weight=g.edata['weight'].float())
            h = self.act(h)
        with g.local_scope():
            g.ndata['h'] = h
            src_padding_mask = get_mask(g)
        h = h.view(-1, self.num_nodes, self.embed_dim) # batch_size * num_node * embed
        # h = torch.einsum('bne,bn->bne', h, sw)
        expand_cls_embedding = self.cls_embedding.expand(h.size(0), 1, -1) # batchsize * 1 * embed
        h = torch.cat([h, expand_cls_embedding], dim=1) # batch * length * dim
        # The CLS position must never be masked, so append a zero column.
        zeros = src_padding_mask.data.new(src_padding_mask.size(0), 1).fill_(0)
        src_padding_mask = torch.cat([src_padding_mask, zeros], dim=1)
        attn_output, _ = self.MHA(h, h, h, key_padding_mask=src_padding_mask)
        h = self.norm_after(h + attn_output)
        # Return the attended CLS vector (last position) per graph.
        return h[:, -1, :]
| zhouxuxian/gShapeLnoc | models/GTShapelet.py | GTShapelet.py | py | 2,190 | python | en | code | 0 | github-code | 90 |
74099221418 | """
Given a start word, an end word, and a dictionary of valid words,
find the shortest transformation sequence from start to end such that
only one letter is changed at each step of the sequence, and each transformed
word exists in the dictionary.
If there is no possible transformation, return null.
Each word in the dictionary have the same length as start and end and is lowercase.
For example, given start = "dog",
end = "cat", and
dictionary = {"dot", "dop", "dat", "cat"},
return ["dog", "dot", "dat", "cat"].
Given start = "dog", end = "cat", and dictionary = {"dot", "tod", "dat", "dar"},
return null as there is no possible transformation from dog to cat.
"""
import nltk
nltk.download('words')
from nltk.corpus import words
word_list = words.words()
cleaned_word_list = [ word.strip(' ') for word in word_list if len(word.strip(' ')) == 5]
cleaned_word_list.extend(['biden', 'trump'])
start = "trump"
end = "biden"
dictionary = cleaned_word_list
start_word_set = [start]
checked_words = {start}
def list_one_off(start_word_set, dictionary, checked_words=None):
    """Return unvisited dictionary words one letter away from any start word.

    Fixes over the original:
    - ``candidate`` (the lowered word) was computed but the raw word was
      compared/appended, while ``checked_words`` only ever holds lowercase
      words; comparison and results are now consistently lowercase.
    - The def-time default bound the module-level ``checked_words`` set
      before it existed in isolation; it is now looked up at call time.
    - The length mismatch previously swallowed by ``try/except`` is an
      explicit guard, and duplicate results are suppressed.
    """
    if checked_words is None:
        # Late lookup of the module-level visited set (the original
        # evaluated it at def time).
        checked_words = globals()["checked_words"]
    one_step_words = []
    seen = set()  # several start words may reach the same candidate
    for starting_word in start_word_set:
        for possible_word in dictionary:
            candidate = possible_word.lower()
            if len(candidate) != len(starting_word):
                continue
            diffs = sum(1 for a, b in zip(starting_word, candidate) if a != b)
            if diffs == 1 and candidate not in checked_words and candidate not in seen:
                seen.add(candidate)
                one_step_words.append(candidate)
    return one_step_words
print(list_one_off(start_word_set, dictionary))
def add_contents_of(set, additions):
    # NOTE(review): the first parameter shadows the builtin ``set``, and the
    # membership test uses the original-case word while the lowered form is
    # what gets added — presumably both should use ``word.lower()``; confirm
    # against the callers before changing.
    for word in additions:
        if word not in set:
            set.add(word.lower())
def update_checked_words(newly_checked_words, checked_words_set=checked_words):
    # The default binds the module-level ``checked_words`` set at def time,
    # so mutations made through add_contents_of are globally visible.
    add_contents_of(checked_words_set, newly_checked_words)
while end not in checked_words:
new_words = list_one_off(start_word_set, dictionary)
update_checked_words(new_words)
start_word_set = new_words
print(checked_words)
# at the moment, this only terminates if there is a path to the target word
# it also would error if the dictionary contains words of different sizes | danny-hunt/Problems | doublets/doublets.py | doublets.py | py | 2,383 | python | en | code | 2 | github-code | 90 |
32408428421 | #!/usr/bin/env python3
# -*- coding: utf-8 -*
"""
Description :
Author : Cirun Zhang
Contact : cirun.zhang@envision-digital.com
Time : 2020/7/8
Software : PyCharm
"""
from PyQt5.QtWidgets import *
from src.item_window import ItemWindow
class Window(QMainWindow):
    """Main window: a list of items with NEW / MODIFY / DELETE buttons."""
    # NOTE(review): class-level list — shared across all Window instances.
    item_list = []
    def __init__(self):
        super().__init__()
        self.buttonsWidget = QWidget()
        self.buttonsWidgetLayout = QHBoxLayout(self.buttonsWidget)
        self.button_new = QPushButton("NEW")
        self.button_new.clicked.connect(self.action_new)
        # MODIFY is created but not wired to any handler yet.
        self.button_modify = QPushButton("MODIFY")
        self.button_delete = QPushButton("DELETE")
        self.button_delete.clicked.connect(self.delete_item)
        self.buttonsWidgetLayout.addWidget(self.button_new)
        self.buttonsWidgetLayout.addWidget(self.button_modify)
        self.buttonsWidgetLayout.addWidget(self.button_delete)
        self.listwidget = QListWidget(self)
        self.centralWidget = QWidget()
        self.setCentralWidget(self.centralWidget)
        self.vLayout = QVBoxLayout(self.centralWidget)
        self.vLayout.addWidget(self.listwidget)
        self.vLayout.addWidget(self.buttonsWidget)
    def action_new(self):
        """Open the dialog that collects a name for a new item."""
        itemwindow = ItemWindow(self)
        itemwindow.show()
    def new_item(self, name):
        """Append a list entry for *name* (called back by ItemWindow)."""
        item_new = QListWidgetItem(name)
        self.item_list.append({'name': name, 'item': item_new})
        self.listwidget.addItem(item_new)
    def delete_item(self):
        # Removes only the first selected row; item_list is not updated —
        # NOTE(review): looks intentional for now, but verify.
        items = self.listwidget.selectedItems()
        if len(items) > 0:
            row = self.listwidget.row(items[0])
            self.listwidget.takeItem(row)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_()) | zhangcirun/labelMe | tests/test.py | test.py | py | 1,812 | python | en | code | 0 | github-code | 90 |
def count_even_digit_sum(limit):
    """Count the integers i in [1, limit] whose decimal digit sum is even."""
    return sum(1 for i in range(1, limit + 1)
               if sum(map(int, str(i))) % 2 == 0)


# Same output as before for the hard-coded input, but the computation is
# now reusable for any limit.
num = 4
count = count_even_digit_sum(num)
print(count)
| Hotheadthing/leetcode.py | Count integers with even digit sum.py | Count integers with even digit sum.py | py | 173 | python | en | code | 2 | github-code | 90 |
18101865249 | import collections
N = int(input())
M = []
for i in range(N):
    a = list(map(int,input().strip().split()))
    # Row i of the adjacency matrix: node i+1 lists its neighbours from
    # the third input field onwards.
    b = [int(i+1 in a[2:]) for i in range(N)]
    M.append(b)
D = [-1 for _ in range(N)]
D[0] = 0 # distance to the start node is 0, every other node starts at -1
Q = collections.deque()
Q.append(0) # start node
while len(Q) > 0:
    #print("bfs", Q) # uncomment to inspect the queue at each step
    cur = Q.popleft()
    for dst in range(N):
        # If dst is reachable from cur and has not been visited yet
        if M[cur][dst] == 1 and D[dst] == -1:
            D[dst] = D[cur]+1
            Q.append(dst) # enqueue dst
for v in range(N):
    print(v+1, D[v])
| Aasthaengg/IBMdataset | Python_codes/p02239/s584859976.py | s584859976.py | py | 669 | python | ja | code | 0 | github-code | 90 |
6101347535 | # coding=utf-8
import os
import csv
import codecs
import shutil
import tkinter as tk
from tkinter import ttk
from prodtools.db import ws_journals
from prodtools.config import config
from prodtools import BIN_MARKUP_PATH
from prodtools import ICON
from prodtools import _
ROW_MSG = 9
ROW_SELECT_A_COLLECTION = 9
ROW_COMBOBOX = 10
ROW_SELECTED = 11
ROW_DOWNLOADING = 12
ROW_DOWNLOADED = 13
ROW_FINISHED = 14
ROW_DOWNLOAD_BUTTON = 21
ROW_CLOSE_BUTTON = 22
class MkpDownloadJournalListGUI(tk.Frame):
    """Tk window to pick a collection and download its journals list."""
    def __init__(self, master, collections, filename, temp_filename):
        super().__init__(master)
        self.master = master
        # collection name -> list of journal dicts (see journals_by_collection)
        self.collections = collections
        # final destination file and intermediate temp file
        self.filename = filename
        self.temp_filename = temp_filename
    def configure(self):
        """Build the widgets (combobox + buttons) and run the main loop."""
        self.master.minsize(400, 200)
        self.master.title(_('Download journals data'))
        self.master.wm_iconbitmap(ICON)
        self.pack()
        label = ttk.Label(self, text=_('Select a collection:'))
        label.grid(column=0, row=ROW_SELECT_A_COLLECTION)
        # 'All' plus every known collection, alphabetically.
        options = ['All']
        options.extend(sorted(self.collections.keys()))
        self.choice = tk.StringVar(self)
        self.choice.set(options[0])
        combobox = ttk.Combobox(
            self, width=30, textvariable=self.choice)
        combobox['values'] = tuple(options)
        combobox.grid(column=0, row=ROW_COMBOBOX)
        execute_button = ttk.Button(
            self, text=_('download'), command=self.download)
        execute_button.grid(column=0, row=ROW_DOWNLOAD_BUTTON)
        close_button = ttk.Button(
            self, text=_('close'),
            command=lambda: self.master.destroy())
        close_button.grid(column=0, row=ROW_CLOSE_BUTTON)
        self.mainloop()
    def download(self):
        """Generate the Markup journals file for the chosen collection."""
        choice = self.choice.get()
        msg = ttk.Label(self,
                        text=_("Select one collection to use its journals "
                               "data for the Markup Program"))
        msg.grid(column=0, row=ROW_MSG)
        label1 = ttk.Label(
            self, text=_("Selecionado: {}".format(choice)))
        label1.grid(column=0, row=ROW_SELECTED)
        if choice == 'All':
            choice = None  # get_journals_list falls back to every collection
        label2 = ttk.Label(self, text=_("Downloading.."))
        label2.grid(column=0, row=ROW_DOWNLOADING)
        journals = get_journals_list(self.collections, choice)
        generate_input_for_markup(journals, self.temp_filename, self.filename)
        label4 = ttk.Label(
            self, text=_("Downloaded: {} journals").format(len(journals)))
        label4.grid(column=0, row=ROW_DOWNLOADED)
        label3 = ttk.Label(self, text=_("Finished"))
        label3.grid(column=0, row=ROW_FINISHED)
def open_main_window(
        collections, destination_filename, temp_filename):
    """Create the Tk root and show the journals-download window.

    configure() already runs the main loop, hence the commented call.
    """
    root = tk.Tk()
    app = MkpDownloadJournalListGUI(
        root, collections, destination_filename, temp_filename)
    app.configure()
    #app.mainloop()
def journals_by_collection(filename):
    """Parse the downloaded tab-separated journals file.

    Returns a dict mapping collection name -> list of journal records.
    Rows with fewer than 10 columns and the header row (``ISSN`` in the
    second column) are skipped; the placeholder 'Symbol' and
    'Collection Name' groups are dropped from the result.
    """
    collections = {}
    with open(filename, 'r', encoding="utf-8") as csvfile:
        for row in csv.reader(csvfile, delimiter='\t'):
            if len(row) < 10 or row[1] == 'ISSN':
                continue
            journal = {
                'collection': row[0],
                'collection-name': row[4],
                'issn-id': row[1],
                'pissn': row[2],
                'eissn': row[3],
                'acron': row[5],
                'short-title': row[6],
                'journal-title': row[7],
                'nlm-title': row[8],
                'publisher-name': row[9],
            }
            if len(row) >= 12:
                journal['license'] = row[11]
            # Group by collection name; fall back to the code when empty.
            group = journal.get('collection-name') or journal.get('collection')
            collections.setdefault(group, []).append(journal)
    for placeholder in ('Symbol', 'Collection Name'):
        collections.pop(placeholder, None)
    return collections
def get_journals_list(collections, collection_name=None):
    """Return journal rows sorted by their lookup key.

    Tries the single named collection first (when given); falls back to
    all collections when that lookup yields nothing.
    """
    journals = {}
    if collection_name:
        journals = get_collection_journals_list(collections, collection_name)
    if not journals:
        journals = get_all_journals_list(collections)
    return [journals[key] for key in sorted(journals)]
def generate_row(item):
    """Serialize one journal record as a pipe-separated line.

    The license column is appended only when present and non-empty.
    """
    fields = [
        item['journal-title'],
        item['nlm-title'],
        item['short-title'],
        item['acron'],
        item['issn-id'],
        item['pissn'],
        item['eissn'],
        item['publisher-name'],
    ]
    license_value = item.get('license')
    if license_value:
        fields.append(license_value)
    return '|'.join(fields)
def get_collection_journals_list(collections, collection_name):
    """Map lowercase journal title -> "collection|<row>" for one collection."""
    return {
        item['journal-title'].lower(): collection_name + '|' + generate_row(item)
        for item in collections.get(collection_name, [])
    }
def get_all_journals_list(collections):
    """Map "title | collection" (lowercase) -> "key|<row>" for every journal."""
    journals = {}
    for collection_key, collection_journals in collections.items():
        for item in collection_journals:
            lookup = item['journal-title'].lower() + ' | ' + item['collection-name'].lower()
            journals[lookup] = collection_key + '|' + generate_row(item)
    return journals
def generate_input_for_markup(journals, tmp_filepath, journals_filepath):
    """Write the journal rows (CRLF-joined) as input for the Markup program.

    A UTF-8 copy is written next to the temp file, the temp file itself is
    written in cp1252 (round-tripped first so encoding errors surface), and
    on success the temp file is copied over *journals_filepath*.
    """
    if os.path.isfile(tmp_filepath):
        os.unlink(tmp_filepath)
    content = "\r\n".join(journals)
    utf8_path = tmp_filepath.replace(".csv", ".utf8.csv")
    with codecs.open(utf8_path, mode='w+', encoding="utf-8") as fp:
        fp.write(content)
    # Round-trip through cp1252 so unencodable characters fail here.
    content = content.encode("cp1252").decode("cp1252")
    with codecs.open(tmp_filepath, mode='w+', encoding="cp1252") as fp:
        fp.write(content)
    if os.path.isfile(tmp_filepath):
        shutil.copyfile(tmp_filepath, journals_filepath)
def main():
    """Download the journals list and open the selection window.

    Ensures the Markup output directories exist, refreshes the journals
    file via the web service, groups it by collection and hands the
    result to the GUI.
    """
    configuration = config.Configuration()
    markup_journals_filename = BIN_MARKUP_PATH + '/markup_journals.csv'
    tmp_mkp_journal_filepath = BIN_MARKUP_PATH + '/temp_markup_journals.csv'
    for filename in [markup_journals_filename, tmp_mkp_journal_filepath]:
        temp_path = os.path.dirname(filename)
        if not os.path.isdir(temp_path):
            os.makedirs(temp_path)
    _ws_journals = ws_journals.Journals(configuration.app_ws_requester)
    _ws_journals.update_journals_file()
    journals_collections = journals_by_collection(
        _ws_journals.downloaded_journals_filename)
    open_main_window(
        journals_collections, markup_journals_filename,
        tmp_mkp_journal_filepath)
| scieloorg/PC-Programs | src/scielo/bin/xml/prodtools/download_markup_journals.py | download_markup_journals.py | py | 7,270 | python | en | code | 7 | github-code | 90 |
29462905313 | import shutil
import os
import time
from tkinter import *
import tkinter as tk
from tkinter import messagebox
import programgui
import main
Seconds_In_Day = 24 * 60 * 60
now = time.time()
before = now - Seconds_In_Day
def last_mod_time(files):
    """Return the last-modification time (epoch seconds) of *files*."""
    return os.path.getmtime(files)
def center_window(self, w, h): #centering window
    """Center the Tk window (w x h pixels) on the screen."""
    screen_width = self.master.winfo_screenwidth()
    screen_height = self.master.winfo_screenheight()
    x = int((screen_width/2)-(w/2))
    y = int((screen_height/2) - (h/2))
    centerGeo = self.master.geometry('{}x{}+{}+{}'.format(w,h,x,y))
    return centerGeo
#function to open directory search to allow user path selection for source and destination
def sourcedirectory(self):
    # Replace the source-entry contents with a user-picked directory.
    self.sourcedir.delete(0,END)
    dir = tk.filedialog.askdirectory()
    self.sourcedir.insert(0, dir)

def destdirectory(self):
    # Replace the destination-entry contents with a user-picked directory.
    self.destdir.delete(0,END)
    dir = tk.filedialog.askdirectory()
    self.destdir.insert(0, dir)
#function to retrieve files that have been modified in the last 24 hours and automatically move them to selected destination
def update(self):
    """List files modified in the last 24h and move them to the destination."""
    src = self.sourcedir.get()
    dst = self.destdir.get()
    for files in os.listdir(src):
        srcfiles = os.path.join(src, files)
        # `before` is the module-level cutoff (now minus one day).
        if last_mod_time(srcfiles) > before:
            self.dirlist.insert(0, files)
            dstfiles = os.path.join(dst, files)
            shutil.move(srcfiles, dstfiles)
#function to check for modified files in the list directory and print the output to the listbox
def check(self):
    """Populate the listbox with files modified in the last 24h (no move)."""
    src = self.sourcedir.get()
    for files in os.listdir(src):
        srcfiles = os.path.join(src, files)
        if last_mod_time(srcfiles) > before:
            self.dirlist.insert(0, files)
#function to move items selected in listbox and move them to the destination folder
def move(self):
    """Move only the files selected in the listbox to the destination.

    Fixes the original (marked "NOT FUNCTIONAL"): it joined the
    destination path with ``self`` (a TypeError) and attempted to move
    every file in the source directory instead of the selection.
    """
    src = self.sourcedir.get()
    dst = self.destdir.get()
    for index in self.dirlist.curselection():
        name = self.dirlist.get(index)
        shutil.move(os.path.join(src, name), os.path.join(dst, name))
def ask_quit(self):
    """Confirm with the user, then tear down the window and the process."""
    if messagebox.askokcancel("Exit program","Are you sure you would like to exit?"):
        self.master.destroy()
        # os._exit skips cleanup handlers; used here to stop all threads.
        os._exit(0)
if __name__ == "__main__":
pass | taekionic/Python_Projects | Learning Files/file transfer assignment/programcontrol.py | programcontrol.py | py | 2,404 | python | en | code | 0 | github-code | 90 |
14939702918 |
print("Hello World!")
print(200)
print(3.14)
type("hello World!")
type(200)
type(3.14)
##############
# pseudocode #
##############
# non-polymorphism designing
shapes = [trl, sql, crl]
for a_shape in shapes:
if type(a_shape) == "Triangle":
a_shape.draw_triangle()
if type(a_shape) == "Square":
a_shape.draw_square()
if type(a_shape) == "Circle":
a_shape.draw_circle()
# polymorphic application
shapes = [ trl
, sql
, crl
]
for a_shape in shapes:
a_shape.draw()
#######
# EOF #
####### | ewan-zhiqing-li/PYTK | exercise/book_the_self_taught_programmer/code_20210113_object_oriented/c_polymorphism.py | c_polymorphism.py | py | 595 | python | en | code | 0 | github-code | 90 |
72977845098 | # -*- coding: utf-8 -*-
# by Elias Showk <elias@showk.me>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import json
from push_notifications.webpush import WebPushError
def send_web_push(web_push_record, device):
    '''
    Send a push notification to a subscribed device.

    The endpoint response (or failure) is recorded on the WebPushRecord:
    success=1 marks it OK, anything else marks it errored.  A "410"
    (Gone) in the push-service error means the subscription is no longer
    valid, so the device is deactivated.
    '''
    try:
        # send the request to the endpoint
        response = device.send_message(web_push_record.payload, ttl=web_push_record.ttl)
        web_push_record.response = json.dumps(response)
        if 'success' in response and response['success'] == 1:
            web_push_record.set_status_ok()
        else:
            web_push_record.set_status_err()
    except WebPushError as wperr:
        # the device subscription is invalid
        # NOTE(review): matches "410" at the start of the error text —
        # presumably the HTTP status embedded in the message; confirm.
        if re.match('410', str(wperr)) is not None:
            device.is_active = False
            device.save()
        # set the record to error
        web_push_record.set_status_err()
        print(wperr)
    return web_push_record
| elishowk/django-webpush-demo | webpush/push.py | push.py | py | 1,635 | python | en | code | 4 | github-code | 90 |
17986668689 | N = int(input())
A = list(map(int,input().split()))
c = [0] * 9
for i in A:
    if i < 3200:
        c[i//400]+=1  # bucket per 400-point rating band (fixed colors 0..7)
    else:
        c[8]+=1  # rating >= 3200: color is freely chosen
cnt = 0
for i in range(8):
    if c[i] != 0:
        cnt += 1
mini = max(1,cnt)  # at least one color even if everyone is free-choice
maxi = cnt+c[8]  # every free-choice user may pick a new distinct color
print(mini,maxi)
| Aasthaengg/IBMdataset | Python_codes/p03695/s013754541.py | s013754541.py | py | 253 | python | en | code | 0 | github-code | 90 |
21911687962 | from qaoa import *
def cost_function_den_4pts(G):
    """Count violated PreferTimes soft constraints (PreferTimes_3..6).

    Each event prefers one specific color (timeslot); every node whose
    assigned color differs adds one to the cost.
    """
    preferred = {
        "Event18": 0,  # PreferTimes_3
        "Event19": 2,  # PreferTimes_4
        "Event20": 1,  # PreferTimes_5
        "Event21": 3,  # PreferTimes_6
    }
    return sum(1 for event, color in preferred.items()
               if G.nodes[event]['color'] != color)
def main():
    """Run the Denmark small-school timetabling example end to end:
    parse the XHSTT dataset, build and pre-color the conflict graph,
    sanity-check the coloring, then launch the QAOA minimization.
    """
    print("Starting program\n")
    # --------------------------
    # School Instances
    # --------------------------
    school = "Den"
    # --------------------------
    # Parse XML file
    # --------------------------
    events = parseXML('dataset/den-smallschool.xml')
    # --------------------------
    # Preparing Conflict Graph
    # --------------------------
    G = create_graph_from_events(events)
    print("--------------------------")
    print("Graph information\n")
    print("Nodes = ", G.nodes)
    coloring = [G.nodes[node]['color'] for node in G.nodes]
    print("\nPre-coloring", coloring)
    degree = [deg for (node, deg) in G.degree()]
    print("\nDegree of each node", degree)
    # --------------------------
    # Coloring Conflict Graph
    # --------------------------
    # Greedy coloring to be used in cases where a trivial coloring cannot be
    # found
    # -----------------------------------------------------------------
    #color_graph_greedy(G)
    # If a suitable coloring can be found without the greedy method use
    # the color_graph_num method
    # -----------------------------------------------------------------
    #num_colors = 5 # Denmark colors
    #color_graph_from_num(G, num_colors)
    # If a initial state was chosen in advance use color_graph_from_coloring
    # ----------------------------------------------------------------------
    # Coloring 23 points
    coloring = [1, 0, 2, 3, 1, 2, 1, 2, 3, 0, 0, 2, 0, 3, 1, 3, 0, 1, 0, 3, 2, 2, 1, 2, 3]
    # Optimal Coloring
    #coloring = [0, 2, 3, 1, 2, 3, 3, 2, 0, 1, 0, 3, 2, 1, 0, 2, 3, 0, 2, 1, 3, 3, 0, 3, 1]
    color_graph_from_coloring(G, coloring)
    #coloring = [G.nodes[node]['color'] for node in G.nodes]
    print("\nInitial coloring", coloring)
    #num_colors = len(set(coloring))
    # NOTE(review): num_colors is fixed at 5 even though the hand-written
    # coloring above only uses colors 0-3 -- confirm this is intentional.
    num_colors = 5
    print("\nNumber of colors", num_colors)
    initial_function_value = cost_function_den_4pts(G)
    print("\nInitial Function Value Max 4", initial_function_value)
    # ---------------------------
    # Verifying Graph consistency
    #----------------------------
    print("----------------------------")
    print("Verifying Graph consistency")
    for i in G.nodes:
        print("\nNode",i,"Color", G.nodes[i]['color'])
        color_and_neighbour = [(neighbour, G.nodes[neighbour]['color']) for neighbour in G[i]]
        print("Neighbours | Color")
        for pair in color_and_neighbour:
            print(pair)
    #----------------------------
    # Starting QAOA
    #----------------------------
    print("----------------------------")
    print("Running QAOA")
    num_nodes = G.number_of_nodes()
    # one-hot color encoding per node plus one ancilla-style qubit per node
    number_of_qubits = num_nodes*num_colors+num_nodes
    print("Necessary number of qubits: ", number_of_qubits)
    # QAOA parameter
    goal_p = 8
    # Minimizing Example DEN
    minimization_process_cobyla(goal_p, G, num_colors, school, cost_function_den_4pts)
    print("Program End")
    print("----------------------------")
if __name__ == '__main__':
main()
| OttoMP/qaoa-school-timetable | den.py | den.py | py | 3,436 | python | en | code | 0 | github-code | 90 |
34132664540 | COLORS = ["red", "orange", "yellow", "green", "blue", "purple"]
NUM_CARS = 20
STARTING_MOVE_DISTANCE = 0.5
MOVE_INCREMENT = 10
from turtle import Turtle
from random import randint, choice
class CarManager:
    """Spawns and drives the oncoming traffic for the crossing game.

    Cars travel right-to-left; off-screen cars are recycled on the right
    edge and ``collides`` reports contact with the player turtle.
    """

    def __init__(self) -> None:
        self.cars = []
        self.create_cars()

    def create_cars(self):
        """Create NUM_CARS square car turtles at random on-screen spots."""
        for i in range(NUM_CARS):
            self.cars.append(Turtle())
            self.cars[i].shape("square")
            self.cars[i].color(choice(COLORS))
            self.cars[i].shapesize(1, 2)
            self.cars[i].penup()
            self.cars[i].goto(randint(-280, 280), randint(-250, 280))

    def move(self):
        """Advance every car one step to the left."""
        for i in range(NUM_CARS):
            self.cars[i].goto(self.cars[i].xcor() - STARTING_MOVE_DISTANCE, self.cars[i].ycor())

    def reset(self):
        """Recycle cars that drove off the left edge back to the right."""
        for i in range(NUM_CARS):
            if self.cars[i].xcor() < -300:
                self.cars[i].goto(300, randint(-250, 280))

    def collides(self, player):
        """Return True if *player* overlaps any car.

        Bug fix: the vertical-band check previously combined
        ``y > car_y - 20 or y < car_y + 20`` -- two conditions that
        together cover every possible y, so the test was always true
        and effectively dead.  ``and`` restores the intended band.
        """
        for i in range(NUM_CARS):
            if self.cars[i].distance(player) < 30 and (player.ycor() > self.cars[i].ycor() - 20 and player.ycor() < self.cars[i].ycor() + 20):
                return True
        return False
| ajaythumala/100-Days-of-Code | 100/day_23/car_manager.py | car_manager.py | py | 1,210 | python | en | code | 0 | github-code | 90 |
72115928618 |
def main():
    """Play one round of fixed-word hangman over stdin/stdout."""

    def reveal(secret, board, guess):
        # Fill every position of *board* where *guess* matches *secret*;
        # report whether anything was revealed.
        hit = False
        for pos, ch in enumerate(secret):
            if guess == ch:
                board[pos] = ch
                hit = True
        return hit

    secret = 'hello'.lower()
    board = ['_'] * len(secret)
    strikes = 3
    finished = False
    while not finished:
        guess = input('Guess a letter: ')
        if not reveal(secret, board, guess):
            print("You didn't guess correctly")
            strikes -= 1
            print(strikes)
            if strikes == 0:
                print('You lose')
                finished = True
        if '_' not in board:
            print(board)
            print('You win!')
            finished = True
        if not finished:
            print(board)
if __name__ == '__main__':
main() | BrandtRobert/PythonCrashCourse | Day1/TicTacToe.py | TicTacToe.py | py | 1,024 | python | en | code | 0 | github-code | 90 |
4195232288 | import unittest
from propnet.core.registry import Registry
class RegistryTest(unittest.TestCase):
    """Unit tests for propnet's named-singleton Registry."""

    def test_basic_registry(self):
        # Registries behave as keyed singletons: the same name must yield
        # the very same dict instance; a different name yields another one.
        test_reg = Registry("test")
        test_reg2 = Registry("test")
        test_reg3 = Registry("test2")
        self.assertIsInstance(test_reg, dict)
        self.assertTrue(test_reg is test_reg2)
        self.assertTrue(test_reg is not test_reg3)

    def test_clear_registries(self):
        # clear_all_registries() must drop every named instance together
        # with its stored data.
        Registry("to_clear")['entry'] = 'data'
        self.assertIn('to_clear', Registry.all_instances.keys())
        self.assertIn('entry', Registry("to_clear").keys())
        self.assertEqual(Registry("to_clear")['entry'], 'data')
        Registry.clear_all_registries()
        self.assertNotIn('to_clear', Registry.all_instances.keys())
unittest.main()
| materialsintelligence/propnet | propnet/core/tests/test_registry.py | test_registry.py | py | 823 | python | en | code | 66 | github-code | 90 |
18540405189 | #!/usr/bin/env python
import sys
from collections import Counter
from itertools import permutations, combinations
from fractions import gcd
#from math import gcd
from math import ceil, floor
import bisect
sys.setrecursionlimit(10 ** 6)
inf = float("inf")
def input():
    # Fast stdin reader shadowing the builtin; [:-1] drops the trailing
    # newline (assumes every line, including the last, ends with "\n",
    # which holds for typical judge input).
    return sys.stdin.readline()[:-1]
def main():
    """Count index pairs (i, j), i < j, whose subarray a[i..j-1] sums to 0.

    Equal prefix sums s[i] == s[j] mark zero-sum subarrays; a prefix-sum
    value seen k times contributes C(k, 2) = k*(k-1)//2 pairs.  Uses pure
    integer arithmetic (the original used a float "/ 2" plus int()), so
    the count can never lose precision.
    """
    N = int(input())
    a = tuple(map(int, input().split()))
    # s[i] = sum of the first i elements
    s = [0] * (N + 1)
    for i in range(N):
        s[i + 1] = s[i] + a[i]
    c = Counter(s)
    ans = 0
    for cnt in c.values():
        # pairs among the cnt positions sharing this prefix sum
        # (zero when cnt == 1, so no explicit guard is needed)
        ans += cnt * (cnt - 1) // 2
    print(ans)
if __name__ == '__main__':
main() | Aasthaengg/IBMdataset | Python_codes/p03363/s027965201.py | s027965201.py | py | 633 | python | en | code | 0 | github-code | 90 |
19420433153 | import re
import os
def add_discourse_start_end(discourse_df, cfg, datatype="train"):
    """Locate each discourse span inside its essay text.

    Reads the essay file named by the frame's first ``essay_id`` from
    ``cfg.data.train_txt_path`` and writes ``discourse_start`` /
    ``discourse_end`` character offsets onto *discourse_df* (-1 when the
    text is not found).  Rows are resolved in order: when a discourse
    text occurs several times, the first occurrence at or after the
    previously matched region is preferred.  *datatype* is accepted for
    API compatibility but unused.
    """
    essay_id = discourse_df.essay_id.values[0]
    essay_path = os.path.join(cfg.data.train_txt_path, essay_id + ".txt")
    with open(essay_path, "r", encoding='utf-8') as handle:
        essay_text = handle.read()

    span_starts, span_ends = [], []
    search_from = 0  # end offset of the last matched span
    for _, row in discourse_df.iterrows():
        pattern = re.escape(row["discourse_text"].strip())
        occurrences = [m.span() for m in re.finditer(pattern, essay_text)]
        if len(occurrences) == 1:
            start, end = occurrences[0]
            search_from = end
        elif occurrences:
            # Prefer the first occurrence that does not precede the
            # already-consumed region; if none qualifies, keep the last
            # occurrence without advancing search_from (mirrors the
            # original fall-through scan).
            for start, end in occurrences:
                if start >= search_from:
                    search_from = end
                    break
        else:
            start, end = -1, -1
        span_starts.append(start)
        span_ends.append(end)

    discourse_df.loc[:, "discourse_start"] = span_starts
    discourse_df.loc[:, "discourse_end"] = span_ends
    return discourse_df
13589269200 | import requests
# Fetch the winning numbers of a given Korean lotto draw from the
# official dhlottery JSON API and print them.
n = input('회차를 입력하세요: ')  # prompt (Korean): "Enter the draw number: "
url = f'https://dhlottery.co.kr/common.do?method=getLottoNumber&drwNo={n}'
response = requests.get(url)
# response.text #=> string
lotto = response.json() #=> dict
# winner = []
# for i in range(1, 7):
#     winner.append(lotto[f'drwtNo{i}'])
# The six main numbers live under keys drwtNo1..drwtNo6.
winner = [lotto[f'drwtNo{i}'] for i in range(1, 7)]
bonus = lotto['bnusNo']
# Output (Korean): "The winning numbers are {winner} + {bonus}."
print(f'당첨 번호는 {winner} + {bonus}입니다.')
39071181897 | import json
def json_for_dashboard(input_fasta, input_json, tree, output, wildcards):
    """Bundle dashboard inputs into one JSON file.

    Combines the raw FASTA text, the Newick tree string, and the CDR3 /
    FR3 index lists taken from *input_json* into a single dict written
    to *output*.  *wildcards* is accepted for snakemake-rule
    compatibility but unused.
    """
    with open(input_json, 'r') as handle:
        indices = json.load(handle)
    with open(input_fasta) as handle:
        fasta_text = handle.read()
    with open(tree) as handle:
        newick_text = handle.read()
    bundle = {
        'fasta': fasta_text,
        'newick': newick_text,
        'CDR3': indices['CDR3'],
        'FR3': indices['FR3'],
    }
    with open(output, 'w') as handle:
        json.dump(bundle, handle, indent=4)
| veg/bcell-phylo | python/output.py | output.py | py | 523 | python | en | code | 0 | github-code | 90 |
6438732561 | # 회전 방향
# Directions in counterclockwise order: north, west, south, east.
dir = [(-1,0),(0,-1),(1,0),(0,1)]
n,m = map(int,input().split()) # board size
x,y,d = map(int,input().split()) # robot cell (x, y) and facing direction d (0 = north)
arr = [list(map(int,input().split())) for _ in range(n)] # grid; cells equal to 0 are traversable (presumably 1 = wall -- confirm with problem spec)
visited = [[False] * m for _ in range(n)]
# mark the starting cell as visited
visited[x][y] = True
# count of visited (cleaned) cells
ans = 1
while True:
    # number of directions rejected this sweep
    step = 0
    # scan the four directions starting from the current heading
    # NOTE(review): there is no break after a successful move, so the
    # sweep continues from the NEW pose -- verify against the intended spec.
    for i in range(4):
        j = (i+d)%4 # candidate heading (counterclockwise from d)
        nx = x + dir[j][0]
        ny = y + dir[j][1]
        # print("이동전 %d %d %d" %(x, y, j))
        # move if the target cell is in bounds, open, and unvisited
        if nx>=0 and nx<n and ny>=0 and ny<m and arr[nx][ny]==0 and not visited[nx][ny]:
            # mark the new cell visited
            visited[nx][ny] = True
            # update the current position
            x=nx
            y=ny
            # update the current heading
            d = j
            # one more cell cleaned
            ans += 1
            # print("이동후 %d %d %d" %(x, y, d))
        else:
            step+=1
    # all four directions rejected: try to back up, else terminate
    if step==4:
        # step backwards (opposite of the current heading)
        nx = x - dir[d][0]
        ny = y - dir[d][1]
        # if the cell behind is open floor, back into it
        if arr[nx][ny] == 0:
            x = nx
            y = ny
        # otherwise the robot is stuck: stop
        else:
            break
print(ans)
def func(n):
    """One Collatz step: 3n + 1 for odd n, n // 2 for even n."""
    return 3*n + 1 if n % 2 != 0 else n//2


# Iterate the Collatz map from s and print the first step index (1-based,
# counting the initial term) at which a previously seen value repeats.
# Membership is tracked in a set: the original tested "ai in l" against a
# growing list, which is O(n) per step and O(n^2) over the 10^6 budget.
s = int(input())
inf = 1000000
seen = {s}
prev = s
for i in range(1, inf + 1):
    ai = func(prev)
    if ai in seen:
        print(i + 1)
        break
    seen.add(ai)
    prev = ai
32455888167 | import PySimpleGUI as sg
import DataBase
import random
import APIFuntion
run = True
steamKey = ""
bpKey = ""
startingSteamID = ""
maxLevel = 1 # this is the minimum amount to run once anything less will just be pointless
minValue = 0
while run:
layout = [
[sg.Text("Steam API ID:", size=(20, 1)), sg.InputText(steamKey)],
[sg.Text("BP.tf API ID Key:", size=(20, 1)), sg.InputText(bpKey)],
[sg.Text("Starting Steam Profile ID:", size=(20, 1)), sg.InputText()],
[sg.Text("Levels of Search:", size=(20, 1)), sg.InputText(maxLevel)],
[sg.Text("Minimum Value:", size=(20, 1)), sg.InputText(minValue)],
[sg.Submit()]
]
# Create the window
window = sg.Window("TF2 Backpack Finder", layout)
event, values = window.read()
window.close()
try:
maxLevel = int(values[3])
minValue = int(values[4])
except Exception as e:
print("This is supposed to be a number: ", e)
exit()
steamKey = str(values[0]) # string
bpKey = str(values[1]) # String
startingSteamID = str(values[2]) # String # this is curId in the code below
apiClass = APIFuntion.SteamAIP(steamKey, bpKey)
tempList = [] # used when breaking apart larger lists
worthWhile = [] # ids of potential people to add
urlList = [] # ids and worth without the urls yet
dataList = [] # the list that will be added to the database
# ^ id, worth, URL
layout = [
[sg.Text(size=(40, 1), key='-TASK-')],
[sg.Text(size=(45, 1), key='-OUTPUT-')],
[sg.ProgressBar(1, orientation='h', size=(35, 20), key='progress')]
]
window = sg.Window('TF2 Backpack Finder', layout).Finalize()
progress_bar = window.FindElement('progress')
# This loop works by taking a friends list then going through each friend and getting their friend list
# Then it determines if they are worthwhile then adds them to the worth while list
window['-TASK-'].update("Searching For Backpacks:")
levels = 1
curId = startingSteamID # srtarting curid
while levels <= maxLevel:
progress = 0
friendsList = apiClass.getFriendslist(curId)
curId = friendsList[random.randint(0, (len(friendsList) - 1))]
for id in friendsList:
outputTxt = str("Level: " + str(levels) + " | ID: " + str(id))
window['-OUTPUT-'].update(outputTxt)
progress_bar.UpdateBar(progress, len(friendsList))
innerFList = apiClass.getFriendslist(id)
while len(innerFList) > 100:
tempList = innerFList[:100]
innerFList = innerFList[100:]
worthWhile = worthWhile + apiClass.hasWorth(tempList, .04, minValue)
worthWhile = worthWhile + apiClass.hasWorth(innerFList, .04, minValue)
progress += 1
worthWhile = APIFuntion.delDups(worthWhile)
levels += 1
window['-TASK-'].update("Checking Playtime:")
progress = 0
for id in worthWhile:
progress_bar.UpdateBar(progress, len(worthWhile))
outputTxt = str("Profile: " + str(id[0]) + " | " + str(progress + 1) + " Out Of " + str(len(worthWhile)))
window['-OUTPUT-'].update(outputTxt)
if not apiClass.hasPlayed(id[0]):
dataList.append(id)
progress += 1
# Collect the Steam IDs that still need profile-URL lookups, updating the
# progress bar as we go.
window['-TASK-'].update("Getting Information For Database:")
progress = 0
for i in dataList:
    progress_bar.UpdateBar(progress, len(dataList))
    outputTxt = str(str(progress + 1) + " Out Of " + str(len(dataList)))
    window['-OUTPUT-'].update(outputTxt)
    urlList.append(i[0])
    # Bug fix: was "progress = + 1", which assigns the constant +1 on every
    # pass and froze the progress display; increment like the other loops.
    progress += 1
progress = 0
returnList = []
while len(urlList) > 100:
progress_bar.UpdateBar(progress, (len(urlList) / 100) + 1)
outputTxt = str(str(progress + 1) + " Out Of " + str((len(urlList) / 100) + 1))
window['-OUTPUT-'].update(outputTxt)
tempList = urlList[:100]
urlList = urlList[100:]
returnList = returnList + apiClass.getProfUrl(tempList)
progress += 1
returnList = returnList + apiClass.getProfUrl(urlList)
returnList.sort()
dataList.sort()
progress_bar.UpdateBar((len(urlList) / 100) + 1, (len(urlList) / 100) + 1)
count = 0
for i in dataList:
progress_bar.UpdateBar(count, len(dataList))
outputTxt = str(str(count + 1) + " Out Of " + str(len(dataList)))
window['-OUTPUT-'].update(outputTxt)
i.append(returnList[count][1])
count += 1
window['-TASK-'].update("Adding Information to Database:")
window['-OUTPUT-'].update("adding...")
progress_bar.UpdateBar(0, 1)
# change path
accessDB = DataBase.AccessDB("SteamDB.accdb")
accessDB.insetInto(dataList, 'STable1')
accessDB.close()
progress_bar.UpdateBar(1, 1)
window['-TASK-'].update("Done")
window['-OUTPUT-'].update("Done ")
layout = [
[sg.Text("Done!", size=(40, 1))],
[sg.ProgressBar(1, orientation='h', size=(35, 20), key='progress')],
[sg.Text("Would you like to go again", size=(40, 1))],
[sg.Button('Continue'), sg.Quit()]
]
window = sg.Window('TF2 Backpack Finder', layout).Finalize()
progress_bar = window.FindElement('progress')
progress_bar.UpdateBar(1, 1)
event, values = window.read()
if event == 'Quit':
run = False
| henryphilbrook02/TF2_Backpack_Finder | GUI.py | GUI.py | py | 5,544 | python | en | code | 0 | github-code | 90 |
39056209200 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 10 11:52:57 2023
@author: richardfremgen
"""
from sklearn.naive_bayes import ComplementNB
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from nltk.tokenize import RegexpTokenizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from sklearn.model_selection import cross_val_score
from sklearn.svm import LinearSVC
import pandas as pd
from nltk.stem import WordNetLemmatizer, PorterStemmer, SnowballStemmer, LancasterStemmer
import pickle
import os
import warnings
warnings.filterwarnings('ignore')
os.chdir('/Users/richardfremgen/Documents/Portfolio/Code/Data')
df = pd.read_pickle("./df_clean_75.pkl")
#%% Further Preprocess Text data
def lemmatize_words(text):
    """Verb-lemmatize every whitespace-separated token of *text*."""
    wnl = WordNetLemmatizer()
    lemmas = (wnl.lemmatize(tok, pos='v') for tok in text.split())
    return ' '.join(lemmas)
def stem_sentences(sentence):
    """Porter-stem every whitespace-separated token of *sentence*."""
    stemmer = PorterStemmer()
    return ' '.join(stemmer.stem(tok) for tok in sentence.split())
def clean_data2(data):
    """ Clean and process data before performing sentiment analysis """
    # Earlier preprocessing variants (stopword removal, lemmatization,
    # single-letter removal) are kept here disabled; only Porter stemming
    # of the 'sentence' column is active.
    #stop_words = stopwords.words('english')
    #data['sentence'] = data['sentence'].apply(lemmatize_words)
    #data['sentence'] = data['sentence'].apply(remove_single_letter)
    data['sentence'] = data['sentence'].apply(stem_sentences)  # mutates *data* in place
    return(data)
df = clean_data2(df)
#%% Tune Model Function
def Tune_Model(data, model_type, param_grid, ngram_range = (1,1), tf_idf = False):
    """Grid-search hyperparameters for *model_type* over the text corpus.

    Vectorizes data['sentence'] (TF-IDF or a binary document-term matrix,
    with the requested n-gram range), splits 80/20 with a fixed seed, then
    runs a 10-fold accuracy-scored GridSearchCV on the training portion.
    Prints the best parameters/score and returns the full cv_results_
    table as a DataFrame.
    """
    if tf_idf == True:
        # Preprocess data - TF-IDF Approach
        tfidf = TfidfVectorizer(ngram_range = ngram_range, binary=True)
        text_counts = tfidf.fit_transform(data['sentence'])
    else:
        # Preprocess data - DTM Matrix (alphanumeric tokens only)
        token = RegexpTokenizer(r'[a-zA-Z0-9]+')
        cv = CountVectorizer(stop_words='english', ngram_range = ngram_range,
                              tokenizer = token.tokenize, binary=True)
        text_counts = cv.fit_transform(data['sentence'])
    # Split into 80-20 train-validation-test sets
    X_train, X_test, y_train, y_test = train_test_split(text_counts, data['sentiment'],
                                                        test_size=0.2, random_state=123)
    tune_model = model_type
    clf = GridSearchCV(tune_model, param_grid = param_grid, cv = 10,
                       scoring='accuracy', n_jobs = -1)
    best_clf = clf.fit(X_train,y_train)
    print("Tuned Hyperparameters :", best_clf.best_params_)
    print("Accuracy :",best_clf.best_score_)
    # If you want to return the results from every split
    test_df = pd.DataFrame(best_clf.cv_results_)
    return(test_df)
#%% Find Optimal Hyperparameters
#Naive Bayes - HP Tuning
param_grid = [{'alpha': [0.00001, 0.0001, 0.001, 0.1, 1, 10, 100, 1000]}]
# Multinomial Naive Bayes
mnb_uni_cv = Tune_Model(df, model_type = MultinomialNB(), param_grid = param_grid, ngram_range = (1,1), tf_idf = False)
mnb_bi_cv = Tune_Model(df, model_type = MultinomialNB(), param_grid = param_grid, ngram_range = (2,2), tf_idf = False)
mnb_combo_cv = Tune_Model(df, model_type = MultinomialNB(), param_grid = param_grid, ngram_range = (1,2), tf_idf = False)
mnb_uni_tf = Tune_Model(df, model_type = MultinomialNB(), param_grid = param_grid, ngram_range = (1,1), tf_idf = True)
mnb_bi_tf = Tune_Model(df, model_type = MultinomialNB(), param_grid = param_grid, ngram_range = (2,2), tf_idf = True)
mnb_combo_tf = Tune_Model(df, model_type = MultinomialNB(), param_grid = param_grid, ngram_range = (1,2), tf_idf = True)
# Complement Naive Bayes
cnb_uni_cv = Tune_Model(df, model_type = ComplementNB(), param_grid = param_grid, ngram_range = (1,1), tf_idf = False)
cnb_bi_cv = Tune_Model(df, model_type = ComplementNB(), param_grid = param_grid, ngram_range = (2,2), tf_idf = False)
cnb_combo_cv = Tune_Model(df, model_type = ComplementNB(), param_grid = param_grid, ngram_range = (1,2), tf_idf = False)
cnb_uni_tf = Tune_Model(df, model_type = ComplementNB(), param_grid = param_grid, ngram_range = (1,1), tf_idf = True)
cnb_bi_tf = Tune_Model(df, model_type = ComplementNB(), param_grid = param_grid, ngram_range = (2,2), tf_idf = True)
cnb_combo_tf = Tune_Model(df, model_type = ComplementNB(), param_grid = param_grid, ngram_range = (1,2), tf_idf = True)
# Linear SVC - HP Tuning
param_grid = [{'C' : [0.01, 0.1, 100, 1000] + list(range(1,20,1))}] # Tune C parameter
lsvc_uni_cv = Tune_Model(df, model_type = LinearSVC(), param_grid = param_grid, ngram_range = (1,1), tf_idf = False)
lsvc_bi_cv = Tune_Model(df, model_type = LinearSVC(), param_grid = param_grid, ngram_range = (2,2), tf_idf = False)
lsvc_combo_cv = Tune_Model(df, model_type = LinearSVC(), param_grid = param_grid, ngram_range = (1,2), tf_idf = False)
lsvc_uni_tf = Tune_Model(df, model_type = LinearSVC(), param_grid = param_grid, ngram_range = (1,1), tf_idf = True)
lsvc_bi_tf = Tune_Model(df, model_type = LinearSVC(), param_grid = param_grid, ngram_range = (2,2), tf_idf = True)
lsvc_combo_tf = Tune_Model(df, model_type = LinearSVC(), param_grid = param_grid, ngram_range = (1,2), tf_idf = True)
# k-NN Parameters
param_grid = [{'n_neighbors' : list(range(1,50)),
'weights' : ['uniform','distance'],
'metric' : ['minkowski','euclidean','manhattan']}]
# k-NN - HP Tuning
knn_uni_cv = Tune_Model(df, model_type = KNeighborsClassifier(), param_grid = param_grid, ngram_range = (1,1), tf_idf = False)
knn_bi_cv = Tune_Model(df, model_type = KNeighborsClassifier(), param_grid = param_grid, ngram_range = (2,2), tf_idf = False)
knn_combo_cv = Tune_Model(df, model_type = KNeighborsClassifier(), param_grid = param_grid, ngram_range = (1,2), tf_idf = False)
knn_uni_tf = Tune_Model(df, model_type = KNeighborsClassifier(), param_grid = param_grid, ngram_range = (1,1), tf_idf = True)
knn_bi_tf = Tune_Model(df, model_type = KNeighborsClassifier(), param_grid = param_grid, ngram_range = (2,2), tf_idf = True)
knn_combo_tf = Tune_Model(df, model_type = KNeighborsClassifier(), param_grid = param_grid, ngram_range = (1,2), tf_idf = True)
#%% Print Hyperparameter Tuning Results
def top_hp(data, name, model='knn'):
    """Extract the top-ranked hyperparameter row(s) from a cv_results_ frame.

    :param data: DataFrame of GridSearchCV.cv_results_ (needs 'rank_test_score')
    :param name: label written into the output's 'model' column
    :param model: 'knn', 'nb' or 'svm' -- selects which parameter columns to keep
    :raises ValueError: for an unknown *model* (previously this fell through
        to an unbound variable and raised a confusing NameError)
    """
    # one column list per model family instead of three copy-pasted branches
    col_map = {
        'knn': ['model', 'param_n_neighbors', 'param_weights', 'mean_test_score', 'std_test_score'],
        'nb': ['model', 'param_alpha', 'mean_test_score', 'std_test_score'],
        'svm': ['model', 'param_C', 'mean_test_score', 'std_test_score'],
    }
    if model not in col_map:
        raise ValueError(f"unknown model type: {model!r}")
    # .copy() so adding the 'model' column never triggers SettingWithCopyWarning
    best_p = data[data['rank_test_score'] == 1].copy()
    best_p['model'] = name
    return best_p[col_map[model]]
cv1 = top_hp(lsvc_uni_cv, name = "lsvc_uni_cv", model = "svm")
cv2 = top_hp(lsvc_bi_cv, name = "lsvc_bi_cv", model = "svm")
cv3 = top_hp(lsvc_combo_cv, name = "lsvc_combo_cv", model = "svm")
cv4 = top_hp(lsvc_uni_tf, name = "lsvc_uni_tf", model = "svm")
cv5 = top_hp(lsvc_bi_tf, name = "lsvc_bi_tf", model = "svm")
cv6 = top_hp(lsvc_combo_tf, name = "lsvc_combo_tf", model = "svm")
cv7 = top_hp(knn_uni_cv, name = "knn_uni_cv", model = 'knn')
cv8 = top_hp(knn_bi_cv, name = "knn_bi_cv", model = 'knn')
cv9 = top_hp(knn_combo_cv, name = "knn_combo_cv", model = 'knn')
cv10 = top_hp(knn_uni_tf, name = "knn_uni_tf", model = 'knn')
cv11 = top_hp(knn_bi_tf, name = "knn_bi_tf", model = 'knn')
cv12 = top_hp(knn_combo_tf, name = "knn_combo_tf", model = 'knn')
cv13 = top_hp(mnb_uni_cv, name = "mnb_uni_cv", model = 'nb')
cv14 = top_hp(mnb_bi_cv, name = "mnb_bi_cv", model = 'nb')
cv15 = top_hp(mnb_combo_cv, name = "mnb_combo_cv", model = 'nb')
cv16 = top_hp(mnb_uni_tf, name = "mnb_uni_tf", model = 'nb')
cv17 = top_hp(mnb_bi_tf, name = "mnb_bi_tf", model = 'nb')
cv18 = top_hp(mnb_combo_tf, name = "mnb_combo_tf", model = 'nb')
cv19 = top_hp(cnb_uni_cv, name = "cnb_uni_cv", model = 'nb')
cv20 = top_hp(cnb_bi_cv, name = "cnb_bi_cv", model = 'nb')
cv21 = top_hp(cnb_combo_cv, name = "cnb_combo_cv", model = 'nb')
cv22 = top_hp(cnb_uni_tf, name = "cnb_uni_tf", model = 'nb')
cv23 = top_hp(cnb_bi_tf, name = "cnb_bi_tf", model = 'nb')
cv24 = top_hp(cnb_combo_tf, name = "cnb_combo_tf", model = 'nb')
cv_df_svm = pd.concat([cv1, cv2, cv3, cv4, cv5, cv6], ignore_index=True)
cv_df_knn = pd.concat([cv7, cv8, cv9, cv10, cv11, cv12], ignore_index=True)
cv_df_nb = pd.concat([cv13, cv14, cv15, cv16, cv17, cv18,
cv19, cv20, cv21, cv22, cv23, cv24], ignore_index=True)
del cv1, cv2, cv3, cv4, cv5, cv6, cv7, cv8, cv9, cv10, cv11, cv12
del cv13, cv14, cv15, cv16, cv17, cv18, cv19, cv20, cv21, cv22, cv23, cv24
print("\n")
print("=================== SVM HYPERPARAMTER RESULTS ===================")
print(cv_df_svm)
print("\n")
print("=================== k-NN HYPERPARAMTER RESULTS ===================")
print(cv_df_knn)
print("\n")
print("=================== NB HYPERPARAMTER RESULTS ===================")
print(cv_df_nb)
#%% Save Cross Validation Results and Export to Pickle File
#Save CV Results to one dicitonary
m1_tune = {"mnb_uni_cv" : mnb_uni_cv, "mnb_bi_cv" : mnb_bi_cv, "mnb_combo_cv" : mnb_combo_cv,
"mnb_uni_tf" : mnb_uni_tf, "mnb_bi_tf" : mnb_bi_tf, "mnb_combo_tf" : mnb_combo_tf,
"cnb_uni_cv" : cnb_uni_cv, "cnb_bi_cv" : cnb_bi_cv, "cnb_combo_cv" : cnb_combo_cv,
"cnb_uni_tf" : cnb_uni_tf, "cnb_bi_tf" : cnb_bi_tf, "cnb_combo_tf" : cnb_combo_tf,
"knn_uni_cv" : knn_uni_cv, "knn_bi_cv" : knn_bi_cv, "knn_combo_cv" : knn_combo_cv,
"knn_uni_tf" : knn_uni_tf, "knn_bi_tf" : knn_bi_tf, "knn_combo_tf" : knn_combo_tf,
"lsvc_uni_cv" : lsvc_uni_cv, "lsvc_bi_cv" : lsvc_bi_cv, "lsvc_combo_cv" : lsvc_combo_cv,
"lsvc_uni_tf" : lsvc_uni_tf, "lsvc_bi_tf" : lsvc_bi_tf, "lsvc_combo_tf" : lsvc_combo_tf}
# Export to Pickle File
# file_to_write = open("m1_tune.pkl", "wb")
# pickle.dump(m1_tune, file_to_write)
#%% Load Pickle File - CV Results
# import pickle
# import pandas as pd
# pickle_in = open("m1_tune.pkl","rb")
# df = pickle.load(pickle_in)
# del(pickle_in)
| richfremgen/Fremgen_MSS_Portfolio | Code/3a_m1_tune.py | 3a_m1_tune.py | py | 10,997 | python | en | code | 1 | github-code | 90 |
71726031976 | from flask import Flask
from flask_cors import CORS
import requests
from decouple import config
app = Flask(__name__)
CORS(app)  # allow cross-origin requests to every route
# The same proxy endpoint (PROXY entry from the environment / .env file,
# read via python-decouple) is used for both schemes.
http_proxy = config('PROXY')
https_proxy = config('PROXY')
# Booking-creation page that main() posts to.
url = "https://www.guadeloupe.gouv.fr/booking/create/12828/0"
proxyDict = {
    "http": http_proxy,
    "https": https_proxy,
}
def main():
    """POST the booking form through the configured proxy.

    Returns a dict with the raw response body under "response", or a
    fixed error message when the request fails for any reason.
    """
    try:
        # NOTE(review): the User-Agent value ends with a stray '"' -- looks
        # accidental, but it is sent as-is today, so it is left unchanged.
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36"'}
        r = requests.post(url, proxies=proxyDict, headers=headers,data={'condition':'on','nextButton':'Effectuer+une+demande+de+rendez-vous'})
        data={"response":r.text}
        return data
    except Exception:
        # was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort behaviour
        # without masking interpreter exits
        return {"response":"Error during request"}
@app.route("/")
def hello_world():
    # Root endpoint: passes straight through to main()'s fetch result.
    return main()
if __name__ == '__main__':
app.run(debug=True) | stevenfeliz/python-flask | app.py | app.py | py | 868 | python | en | code | 0 | github-code | 90 |
28437740865 | # Chapter 19. GAN, Auto-encoder
# 생성적 적대 신경망 (GAN, Generative Adversarial Networks): 가상의 이미지를 만들어내는 알고리즘
# GAN 내부에서 (적대적인) 경합을 진행
# (Ian Goodfellow said) 보다 진짜 같은 가짜를 만들고자 하는 위조지폐범과 진짜 같은 가짜를 판별하고자 하는 경찰의 경합
# 이때 위조지폐범, 즉 가짜를 만들어 내는 파트를 생성자 (Generator)
# (나머지) 경찰, 즉 진위를 가려내는 파트를 판별자 (Discriminator)
# DCGAN (Deep Convolutional GAN): Convolutional + GAN
# 초창기 GAN은 굳이 이미지를 타겟으로 하지 않아서 그랬는지? 아니면 CNN 개념이 나오기 전이라서 그랬는지 Convolutional 계층을 이용하지 않았음. 그래서 DCGAN이 등장하면서 GAN 알고리즘을 확립한 느낌
# 1. 가짜 제조 공장, 생성자
# optimizer X: GAN's Generator에는 학습 결과에 대한 판별이 필요하지 않으므로 최적화하거나 컴파일하는 과정이 없대.
# padding: 입력과 출력의 크기를 맞추기 위해서 패딩은 이용하지만, 같은 이유로 풀링은 이용하지 않음.
# batch normalization: 층이 늘어나도 안정적인 학습을 하기 위해서 다른 층의 전처리로 표준화 과정을 거침.
# activation: 연산 과정에선 relu를 이용하고, 판별자로 주기 전에 크기를 맞추는 과정에선 tanh를 이용해서 [-1, 1]로 맞추기.
from keras.models import Sequential
from keras.layers import Dense, LeakyReLU, BatchNormalization, Reshape, UpSampling2D, Conv2D, Activation
generator = Sequential()
generator.add(Dense(128 * 7 * 7, input_dim=100, activation=LeakyReLU(0.2)))
# 100개가 들어와서 (128 * 7 * 7)의 갯수로 내보내기
# GAN에서 ReLU를 이용하면 학습이 불안정(결과적으로 봤을 때 loss가 튄다든지, 최적화가 안 되고 멈춘다든지)해지는 경우가 많아, 조금 변형한 LeakyReLU를 이용
# LeakyReLU는 ReLU에서 x < 0 => 0이 되어 뉴런들이 일찍 소실되는 단점을 보완하기 위해, 0보다 작으면 들어온 인수(여기서는 0.2)를 곱해 보낸다.
generator.add(BatchNormalization())
generator.add(Reshape((7, 7, 128)))
# tensorflow에서 인식하는 차원은 n, 1D, 2D, color(3D)다.
generator.add(UpSampling2D())
# sub-sampling의 일종으로, (색채) 차원을 제외한 기본 이미지를 2배로 만드는 과정
generator.add(Conv2D(64, kernel_size=5, padding="same"))
generator.add(BatchNormalization())
generator.add(Activation(LeakyReLU(0.2)))
generator.add(UpSampling2D())
generator.add(Conv2D(1, kernel_size=5, padding="same", activation="tanh"))
# 연산으로 (색채) 차원 줄이기
# 사실 UpSmapling + Conv2D => Conv2DTranspose()로 하나로 표현할 수 있다.
# padding="same"으로 입력과 출력의 이미지 크기를 동일하게끔 합니다.
generator.summary()
# 작은 이미지를 늘려서 Convolutional 레이어를 지나치게 하는 것이 DCGAN의 특징.
# 2. 진위를 가려내는 장치, 판별자
# 판별자는 CNN 구조를 그대로 이용합니다. (이미지를 보고 클래스만 맞추면 되니까)
# 이전에 이용했던 CNN을 그대로 이용하지만, 결과적으로 학습해야 하는 건 생성자라 판별자는 학습하지 않는다.
from keras.models import Sequential
from keras.layers import Conv2D, Activation, LeakyReLU, Dropout, Flatten, Dense
discriminator = Sequential()
discriminator.add(Conv2D(64, input_shape=(28, 28, 1), kernel_size=5, strides=2, padding="same"))
# stride는 kernel window를 여러 칸 움직이게 해서 새로운 특징을 뽑아주는 효과가 생긴대.
# local적인 부분을 (약간이지만) 배제하기 때문인 것으로 파악됨.
# 맞았음. Dropout이나 Pooling처럼 새로운 필터를 적용한 효과를 내는 거래.
discriminator.add(Activation(LeakyReLU(0.2)))
discriminator.add(Dropout(0.3))
discriminator.add(Conv2D(128, kernel_size=5, strides=2, padding="same"))
discriminator.add(Activation(LeakyReLU(0.2)))
discriminator.add(Dropout(0.3))
discriminator.add(Flatten())
discriminator.add(Dense(1, activation="sigmoid"))
# 0 ~ 1의 값이어야 하고, 굳이 확률로 바꿀 필요가 없으니 sigmoid. (굳이 한다면 softmax도 가능)
discriminator.compile(loss="binary_crossentropy", optimizer="adam")
discriminator.trainable = False
# 3. 적대적 신경망 연결하기
# 실제 image -> discriminator 가중치 설정
# (->) input -> generator(input) -> discriminator(generator(input))
# 바로 위에 단계를 반복하면서 discriminator의 정답률이 0.5가 되면 학습을 종료시킴.
from keras.models import Input, Model
ginput = Input(shape=(100,))
dis_output = discriminator(generator(ginput))
gan = Model(ginput, dis_output)
gan.compile(loss="binary_crossentropy", optimizer="adam")
# 학습을 진행해줄 함수의 선언
from keras.datasets import mnist
import numpy as np
def gan_train(epoch, batch_size, saving_interval):
    """Adversarial training loop for the DCGAN on MNIST.

    batch_size is used for BOTH the real and the generated half of each
    discriminator step, so the discriminator sees 2 * batch_size images
    per iteration (batch_size of each kind), not batch_size / 2 each.
    *saving_interval* is currently unused.
    """
    # Only the training images are needed; labels and the test split are discarded.
    (X_train, _), (_, _) = mnist.load_data()
    X_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype("float32")
    # Scale pixels to [-1, 1] to match the generator's tanh output range.
    X_train = (X_train - 127.5) / 127.5
    true = np.ones((batch_size, 1))
    fake = np.zeros((batch_size, 1))
    for i in range(epoch):
        # Sample a random batch of real images.
        idx = np.random.randint(0, X_train.shape[0], batch_size)
        imgs = X_train[idx]
        # One gradient-descent step of the discriminator on real images.
        d_loss_real = discriminator.train_on_batch(imgs, true)
        # Generator input is continuous Gaussian noise (it must track the
        # tanh-scaled outputs, so integers would not do).
        noise = np.random.normal(0, 1, (batch_size, 100))
        gen_imgs = generator.predict(noise)
        d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)
        # Discriminator loss = mean of its real-batch and fake-batch losses.
        d_loss = np.add(d_loss_real, d_loss_fake) * 0.5
        # Train the stacked GAN with the discriminator frozen
        # (discriminator.trainable = False above), so only the generator's
        # weights are updated here -- the two nets learn at matched pace.
        g_loss = gan.train_on_batch(noise, true)
        print(f"epoch: {i}", "d_loss: %.4f" % d_loss, "g_loss: %.4f" % g_loss)
# 4. 이미지의 특징을 추출하는 오토인코더
# Auto-Encoder (AE): GAN이 세상에 존재하지 않는 이미지를 만들어내는 거라면, AE는 입력 데이터의 특징을 (효율적으로) 담아낸 이미지를 만들어 냅니다.
# 다시, GAN이 random input에서 가중치를 통해 입력 이미지와 비슷한 형태를 만드는 거라면, AE는 입력 데이터의 특징을 가진 이미지를 나타내는 것.
# 따라서 GAN은 좀 더 명확한 이미지를 만들고, AE는 얼굴이라는 걸 알아볼 수 있을 정도의 특징만 나타내서 해상도가 낮은 것처럼 보일 수 있음.
# 개인적으로 GAN이 있을 법한 입력 데이터를 만드는 거라면, AE는 그림(사물의 특징)을 그리는 거랄까.
# AE: 영상 의학 분야 등 아직 데이터 수가 충분하지 않은 분야에서 사용될 수 있음.
# GAN은 가상의 것이므로 실제 데이터에 의존하는 분야에는 적합하지 않음.
# 확실히 이번 장부터는 다른 머신과 결합된 형태를 보여주고 있음.
# Encoder + Decoder의 형태로 이루어지며
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D
autoencoder = Sequential()
autoencoder.add(Conv2D(16, input_shape=(28, 28, 1), kernel_size=3, padding="same", activation="relu"))
autoencoder.add(MaxPooling2D(pool_size=2, padding="same"))
# 예상이 맞았다. sub-sampling에서 padding은 큰 의미를 갖지 않을 수도 있다.
autoencoder.add(Conv2D(8, kernel_size=3, padding="same", activation="relu"))
autoencoder.add(MaxPooling2D(pool_size=2, padding="same"))
autoencoder.add(Conv2D(8, kernel_size=3, strides=2, padding="same", activation="relu"))
autoencoder.add(Conv2D(8, kernel_size=3, padding="same", activation="relu"))
autoencoder.add(UpSampling2D())
autoencoder.add(Conv2D(8, kernel_size=3, padding="same", activation="relu"))
autoencoder.add(UpSampling2D())
autoencoder.add(Conv2D(16, kernel_size=3, activation="relu"))
autoencoder.add(UpSampling2D())
autoencoder.add(Conv2D(1, kernel_size=3, padding="same", activation="sigmoid"))
autoencoder.summary()
| Myul23/Deep-Learning-for-everyone | 19. 세상에 없는 얼굴 GAN, 오토인코더/implementation.py | implementation.py | py | 8,829 | python | ko | code | 0 | github-code | 90 |
18235466789 | n=int(input())
# Split the colour string into per-colour index lists (n is read above).
s=list(input())
r=[]
g=[]
b=[]
for i in range(n):
    if s[i]=='R':
        r.append(i)
    elif s[i]=='G':
        g.append(i)
    else:
        b.append(i)
import bisect
def binary_search(a, x):
    # True iff the sorted sequence a contains an element equal to x.
    insert_at = bisect.bisect_left(a, x)
    return insert_at < len(a) and a[insert_at] == x
# For every (red, green) index pair, every blue index is valid except the
# (up to) three positions that would make the triple evenly spaced:
# beyond the larger index, at the midpoint, or beyond the smaller index.
ans = 0
for red in r:
    for green in g:
        lo, hi = (red, green) if red < green else (green, red)
        gap = hi - lo
        blocked = 0
        if binary_search(b, hi + gap):
            blocked += 1
        if binary_search(b, lo + gap / 2):
            blocked += 1
        if binary_search(b, lo - gap):
            blocked += 1
        ans += len(b) - blocked
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p02714/s193707958.py | s193707958.py | py | 966 | python | en | code | 0 | github-code | 90 |
34346616387 | import math
def bisection(a, b, f, tolerance=1e-6, max_tolerance=100):
    """Approximate a root of f on [a, b] by bisection.

    a, b          -- bracket endpoints (f should change sign between them)
    f             -- function whose root is sought
    tolerance     -- stop once |f(midpoint)| < tolerance
    max_tolerance -- maximum number of bisection steps

    Returns the approximate root, or None if it did not converge within
    max_tolerance iterations.  (The original printed the root but always
    returned None, recomputed f(c), and contained a dead
    `max_tolerance=100` reassignment inside the loop.)
    """
    for _ in range(max_tolerance):
        c = (a + b) / 2
        fc = f(c)  # evaluate once per iteration
        if abs(fc) < tolerance:
            print(f"Root found at x={c:7f}")
            return c
        if fc * f(a) < 0:
            # Sign change in [a, c]: keep the left half.
            b = c
        else:
            a = c
    return None
def f(x):
    # Quartic test polynomial for the bisection demo.
    value = 3 * x ** 4 + 3 * x ** 3 - x ** 2
    return value
bisection(-1,1,f)
import math
def newton_raphson(x0, f, df, tolerance=1e-7, max_tolerance=100):
    """Approximate a root of f by Newton-Raphson iteration.

    x0            -- starting guess
    f, df         -- function and its derivative
    tolerance     -- stop once |f(x1)| < tolerance
    max_tolerance -- maximum number of iterations

    Returns the approximate root, or None if it did not converge.  (The
    original printed the root but always returned None.)  A zero
    derivative still raises ZeroDivisionError, as before.
    """
    for _ in range(max_tolerance):
        x1 = x0 - f(x0) / df(x0)
        if abs(f(x1)) < tolerance:
            print(f"Root found at x={x1:6f}")
            return x1
        x0 = x1
    return None
def f(x: int):
    # Shifted quartic used by the Newton-Raphson demo (root of f(x) = 0).
    return 3 * x ** 4 + 3 * x ** 3 - x ** 2 - 19
def df(x: int):
    # Derivative of f(x) = 3x^4 + 3x^3 - x^2 - 19.
    return 12 * x ** 3 + 9 * x ** 2 - 2 * x
newton_raphson(1,f,df)
# Timing comparison: one run each of bisection vs. Newton-Raphson.
import timeit
from timeit import Timer
max_loops=1
bisection_timer= Timer("bisection(-1,1,f)","from __main__ import bisection,f")
bisection_time= bisection_timer.timeit(max_loops)
# NOTE(review): Timer.timeit returns seconds, so the "ms" label is misleading.
print("bisection time", bisection_time, "ms")
print()
newton_timer= Timer("newton_raphson(1,f,df)","from __main__ import newton_raphson,f,df")
newton_time= newton_timer.timeit(max_loops)
print("Newton time", newton_time, "ms")
| cmm25/SCIENTIFIC-COMPUTING | assignment.py | assignment.py | py | 1,241 | python | en | code | 0 | github-code | 90 |
34377446470 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 5 11:57:39 2017
@author: Andrei
"""
from gdcb_explore import GDCBExplorer
import pandas as pd
if __name__=="__main__":
  # Build the CarsXCodes cross-product: every car ID paired with every
  # predictor code, with a fresh sequential ID column.
  gdcb = GDCBExplorer()
  df_cars = gdcb.df_cars[["ID"]]
  df_codes = gdcb.df_predictors[["Code", "ID"]]
  df_cars.columns = ['CarID']
  df_codes.columns = ['Code', 'CodeID']
  # A constant "key" column on both sides turns the inner merge into a
  # full cross join (pandas >= 1.2 offers merge(how="cross") for this).
  df_codes["key"] = 1
  df_cars["key"] = 1
  df = pd.merge(df_cars,df_codes, on="key")
  df.drop("key", axis = 1, inplace = True)
  # Sequential surrogate key for the joined table.
  df["ID"] = list(range(len(df)))
  df = df[["ID", "CarID", "Code", "CodeID"]]
  #gdcb.sql_eng.OverwriteTable(df, "CarsXCodes")
2117616246 | from filterpy.kalman import UnscentedKalmanFilter, MerweScaledSigmaPoints
from filterpy.kalman import KalmanFilter
from scipy.spatial import distance
import numpy as np
class Box:
    """A tracked detection: corner coordinates plus tracking bookkeeping."""

    def __init__(self, positions, id=-1):
        # positions: (x1, y1, x2, y2) corners; id == -1 means "not yet assigned".
        self.positions = positions
        self.id = id
        # Number of consecutive frames this box has been tracked.
        self.time = 0
        # Frames since the box was last matched (set by the tracker).
        self.missedTime = 0
        # Optional Kalman filter attached by the tracker (currently unused).
        self.kf = None

    def increase_time(self):
        """Count one more tracked frame."""
        self.time += 1

    def update_id(self, new_id):
        """Assign the tracker-issued identity."""
        self.id = new_id

    def __str__(self):
        return f"""Box(id={self.id}, 
                    positions={self.positions}, 
                    time={self.time}, 
                    missedTime={self.missedTime}, 
                    ukf={self.kf})"""
class Tracker:
    """Frame-to-frame multi-object tracker using nearest-centre matching.

    Previous-frame boxes (preBoxes) are matched greedily to current
    detections (currBoxes) by Euclidean distance between box centres;
    unmatched current boxes receive fresh ids.  A Kalman/UKF prediction
    path exists but is entirely commented out.
    """

    # Monotonically increasing id shared by all Tracker instances.
    id_counter = 0

    def __init__(self):
        self.currBoxes = []
        self.preBoxes = []
        self.missBoxes = []

    def calculate_distance(self, pos1, pos2):
        """Euclidean distance between the centres of two (x1, y1, x2, y2) boxes."""
        center1 = self.get_center(pos1)
        center2 = self.get_center(pos2)
        return distance.euclidean(center1, center2)

    def get_center(self, positions):
        """Return [cx, cy], the centre of an (x1, y1, x2, y2) box."""
        x1, y1, x2, y2 = positions
        center_x = (x1 + x2) / 2
        center_y = (y1 + y2) / 2
        return [center_x, center_y]

    def update_box_ids(self):
        """Carry ids from preBoxes onto the closest currBoxes, then issue new ids."""
        # First frame: nothing to match against, so every detection is new.
        if not self.preBoxes:
            for i in range(len(self.currBoxes)):
                self.currBoxes[i].update_id(Tracker.id_counter)
                positions = self.get_center( self.currBoxes[i].positions)
                # self.currBoxes[i].kf = self.initialize_kalman_filter(positions)
                # self.currBoxes[i].kf.update(self.get_center(self.currBoxes[i].positions))
                Tracker.id_counter += 1
            return
        for preBox in self.preBoxes:
            matchingIdx = None
            min_distance = float('inf')
            # Initially match with plain Euclidean distance.
            # if preBox.time <= 6:
            # Greedy match: closest still-unassigned current box within 15 px.
            for i, currBox in enumerate(self.currBoxes):
                if currBox.id == -1:
                    distance = self.calculate_distance(currBox.positions, preBox.positions)
                    if distance < min_distance and distance < 15:
                        min_distance = distance
                        matchingIdx = i
            # elif preBox.time > 6:
                #use UKF
                # print("THIS ID PREBOX================>",preBox)
                # predicted_position = preBox.kf.predict()  # predict the next position
                # print("RESULT OF KF.PREDICT =======>: ", predicted_position)
                # for i, currBox in enumerate(self.currBoxes):
                # curr_position = currBox.positions  # current position of currBox
                # distance = self.calculate_distance(predicted_position, curr_position)
                # if distance < min_distance and distance < 15:
                # min_distance = distance
                # matchingIdx = i
            if matchingIdx is not None:
                self.currBoxes[matchingIdx].update_id(preBox.id)
                self.currBoxes[matchingIdx].time = preBox.time + 1
                # self.currBoxes[matchingIdx].kf = preBox.kf
                # self.currBoxes[matchingIdx].kf.update(self.get_center(self.currBoxes[matchingIdx].positions))
        # Assign a fresh id to every box identified as new.
        for i in range(len(self.currBoxes)):
            if self.currBoxes[i].id == -1:
                self.currBoxes[i].update_id(Tracker.id_counter)
                Tracker.id_counter += 1
                positions = self.get_center( self.currBoxes[i].positions)
                # self.currBoxes[i].kf = self.initialize_kalman_filter(positions)
                # self.currBoxes[i].kf.update(self.get_center(self.currBoxes[i].positions))

    def track_boxes(self, positions_list):
        """Ingest this frame's detections and return the updated Box list."""
        self.preBoxes = self.currBoxes.copy()
        self.currBoxes = []
        for positions in positions_list:
            box = Box(positions)
            self.currBoxes.append(box)
        self.update_box_ids()
        self.update_missed_boxes()
        self.remove_disappeared_boxes()
        for box in self.currBoxes:
            box.increase_time()
        return self.currBoxes

    def update_missed_boxes(self):
        """Record previous-frame boxes that did not carry into this frame.

        NOTE(review): `preBox not in self.currBoxes` compares by identity
        (Box defines no __eq__) and currBoxes are brand-new objects every
        frame, so this condition is always true -- every identified preBox
        is appended each frame; verify the intended semantics.
        """
        for preBox in self.preBoxes:
            if preBox.id != -1 and preBox not in self.currBoxes:
                preBox.missedTime = 1
                self.missBoxes.append(preBox)

    def remove_disappeared_boxes(self):
        """Drop unidentified current boxes and expire long-missed boxes.

        NOTE(review): missedTime is only ever set to 1 above, so the >= 6
        expiry never triggers and missBoxes grows without bound; removing
        from missBoxes while iterating it can also skip elements.
        """
        self.currBoxes = [box for box in self.currBoxes if box.id != -1]
        for missBox in self.missBoxes:
            if missBox.missedTime >= 6:
                self.missBoxes.remove(missBox)

    def initialize_kalman_filter(self, positions):
        """Build a constant-velocity Kalman filter seeded at (cx, cy) = positions."""
        kf = KalmanFilter(dim_x=4, dim_z=2)
        dt = 0.1
        # State [x, vx, y, vy]; F advances position by velocity * dt.
        kf.F = np.array([[1, dt, 0, 0],
                         [0, 1, 0, 0],
                         [0, 0, 1, dt],
                         [0, 0, 0, 1]])
        # Only the positions are observed.
        kf.H = np.array([[1, 0, 0, 0],
                         [0, 0, 1, 0]])
        kf.Q = np.eye(4) * 0.01
        kf.R = np.eye(2) * 1
        kf.x = np.array([positions[0], positions[1], 0, 0])
        kf.P = np.eye(4) * 10
        return kf

    def transition_function(self, x, dt):
        # State-transition function (for the unscented-filter variant).
        F = np.array([[1, dt, 0, 0],
                      [0, 1, 0, 0],
                      [0, 0, 1, dt],
                      [0, 0, 0, 1]])
        return np.dot(F, x)

    def measurement_function(self, x):
        # Measurement function: project the state onto the observed [x, y].
        return np.array([x[0], x[2]])
def calculate_iou(box1, box2):
    """Intersection-over-union of two inclusive pixel boxes (x1, y1, x2, y2)."""
    ax1, ay1, ax2, ay2 = box1
    bx1, by1, bx2, by2 = box2
    area_a = (ax2 - ax1 + 1) * (ay2 - ay1 + 1)
    area_b = (bx2 - bx1 + 1) * (by2 - by1 + 1)
    # Overlap extents are clamped at zero when the boxes are disjoint.
    overlap_w = max(0, min(ax2, bx2) - max(ax1, bx1) + 1)
    overlap_h = max(0, min(ay2, by2) - max(ay1, by1) + 1)
    overlap = overlap_w * overlap_h
    return overlap / float(area_a + area_b - overlap)
def non_max_suppression2(boxes, iou_threshold):
    """Greedy non-maximum suppression.

    Boxes are ranked by area (largest first, stable for ties); a box is
    dropped when its IoU with an already-kept box reaches the threshold.
    A None threshold defaults to 0.5.  Returns the survivors in area order.
    """
    if iou_threshold is None:
        iou_threshold = 0.5
    if not boxes:
        return []
    ordered = sorted(boxes, key=lambda bb: (bb[2] - bb[0]) * (bb[3] - bb[1]), reverse=True)
    dropped = [False] * len(ordered)
    for i, keeper in enumerate(ordered):
        if dropped[i]:
            continue
        for j in range(i + 1, len(ordered)):
            if not dropped[j] and calculate_iou(keeper, ordered[j]) >= iou_threshold:
                dropped[j] = True
    return [bb for i, bb in enumerate(ordered) if not dropped[i]]
| Tox1cCoder/Object-Tracking | tracker2.py | tracker2.py | py | 6,831 | python | en | code | 0 | github-code | 90 |
36338542553 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import time
import random
from tqdm import *
from glob import glob
from collections import defaultdict
import cPickle
from tensorflow.python.platform import gfile
# Special vocabulary symbols - we always put them at the start.
_PAD = "_PAD"
_GO = "_GO"
_UNK = "_UNK"
_START_VOCAB = [_PAD, _GO, _UNK]

# These ids mirror the positions of _PAD/_GO/_UNK inside _START_VOCAB.
PAD_ID = 0
GO_ID = 1
UNK_ID = 2
# Tokenization helpers: punctuation splitter and digit matcher.
_WORD_SPLIT = re.compile("([.,!?\"':;)(])")
_DIGIT_RE = re.compile(r"\d")


def basic_tokenizer(sentence):
  """Split *sentence* on whitespace, then split punctuation into its own tokens."""
  fragments = (_WORD_SPLIT.split(chunk) for chunk in sentence.strip().split())
  return [tok for pieces in fragments for tok in pieces if tok]
def dmqa_file_reader(dfile):
  """Read a DMQA question file and return its blank-line-separated sections."""
  with gfile.GFile(dfile, mode="r") as question_file:
    raw = question_file.read()
  return raw.split("\n\n")
def load_dataset(data_dir, dataset_name, vocab_size, max_nsteps, part="training"):
  """Load (document, question, answer) id triples for one dataset split.

  Results are cached as a pickle under the dataset directory; the cache is
  used when present, otherwise the *.question.ids<vocab_size> files are
  parsed and the cache is written.  Triples whose document + question
  length reaches max_nsteps are skipped.

  Fix: the pickle file handles were previously opened without ever being
  closed; both paths now use context managers.
  """
  data = []
  data_path = os.path.join(data_dir, dataset_name, "questions", part)
  readed_data_path = os.path.join(data_dir, dataset_name, "%s_v%d_mn%d.pkl" %(part, vocab_size, max_nsteps))
  if os.path.exists(readed_data_path):
    print("Load data from %s" %(readed_data_path))
    with open(readed_data_path) as cache_file:
      data = cPickle.load(cache_file)
  else:
    print("Load data from %s" %(data_path))
    for fname in tqdm(glob(os.path.join(data_path, "*.question.ids%s" % (vocab_size)))):
      try:
        tokens = dmqa_file_reader(fname)
        # Sections: 1 = document ids, 2 = question ids, 3 = answer id.
        d = [int(t) for t in tokens[1].strip().split(' ')]
        q = [int(t) for t in tokens[2].strip().split(' ')]
        a = [int(tokens[3])]
        # check max_nsteps
        if len(d) + len(q) < max_nsteps:
          data.append((d, q, a))
      except Exception as e:
        print(" [!] Error occured for %s: %s" % (fname, e))
    print("Save data to %s" %(readed_data_path))
    with open(readed_data_path, 'w') as cache_file:
      cPickle.dump(data, cache_file)
  return data
def batch_iter(data, batch_size, num_epochs, shuffle=True):
  """
  Generates a batch iterator for a dataset.

  Yields slices of *data* of at most *batch_size* items, making *num_epochs*
  passes over the data and optionally reshuffling it in place each epoch.

  Fix: the batch count was `int(len(data)/batch_size) + 1`, which emitted a
  trailing empty batch whenever the data size was an exact multiple of the
  batch size (and an empty batch per epoch for empty data).
  """
  data_size = len(data)
  # Ceiling division gives exactly the number of non-empty batches.
  num_batches_per_epoch = (data_size + batch_size - 1) // batch_size
  for epoch in range(num_epochs):
    # Shuffle the data at each epoch
    if shuffle:
      random.shuffle(data)
    for batch_num in range(num_batches_per_epoch):
      start_index = batch_num * batch_size
      end_index = min((batch_num + 1) * batch_size, data_size)
      yield data[start_index:end_index]
def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,
                      tokenizer=None, normalize_digits=False):
  """Create vocabulary file (if it does not exist yet) from data file.

  Data file is assumed to contain one sentence per line. Each sentence is
  tokenized and digits are normalized (if normalize_digits is set).
  Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
  We write it to vocabulary_path in a one-token-per-line format, so that later
  token in the first line gets id=0, second line gets id=1, and so on.

  Args:
    vocabulary_path: path where the vocabulary will be created.
    data_path: data file that will be used to create vocabulary.
    max_vocabulary_size: limit on the size of the created vocabulary.
    tokenizer: a function to use to tokenize each data sentence;
      if None, basic_tokenizer will be used.
    normalize_digits: Boolean; if true, all digits are replaced by 0s.
  Edit by Miao
  """
  if not gfile.Exists(vocabulary_path):
    print("Creating vocabulary %s from data %s" % (vocabulary_path, data_path))
    vocab = {}
    for fname in tqdm(glob(os.path.join(data_path, "*.question"))):
      try:
        # DMQA layout: url, document, question, answer, entity mapping.
        _, d, q, a, _ = dmqa_file_reader(fname)
        # Count tokens over the document and question text together.
        context = d + " " + q
        tokens = tokenizer(context) if tokenizer else basic_tokenizer(context)
        for w in tokens:
          word = _DIGIT_RE.sub("0", w) if normalize_digits else w
          if word in vocab:
            vocab[word] += 1
          else:
            vocab[word] = 1
      except:
        # NOTE(review): bare except hides the real failure cause (and even
        # KeyboardInterrupt); consider narrowing it.
        print(" [!] Error occured for %s" % fname)
    # Special symbols first, then tokens by descending frequency.
    vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
    if len(vocab_list) > max_vocabulary_size:
      vocab_list = vocab_list[:max_vocabulary_size]
    with gfile.GFile(vocabulary_path, mode="w") as vocab_file:
      for w in vocab_list:
        vocab_file.write(w + "\n")
def initialize_vocabulary(vocabulary_path):
  """Initialize vocabulary from file.

  We assume the vocabulary is stored one-item-per-line, so a file:
    dog
    cat
  will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
  also return the reversed-vocabulary ["dog", "cat"].

  Args:
    vocabulary_path: path to the file containing the vocabulary.

  Returns:
    a pair: the vocabulary (a dictionary mapping string to integers), and
    the reversed vocabulary (a list, which reverses the vocabulary mapping).

  Raises:
    ValueError: if the provided vocabulary_path does not exist.
  """
  if gfile.Exists(vocabulary_path):
    with gfile.GFile(vocabulary_path, mode="r") as f:
      rev_vocab = [line.strip() for line in f.readlines()]
    vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
    return vocab, rev_vocab
  else:
    # Fix: the path is now interpolated into the message; previously the
    # format string and the path were passed as two separate ValueError
    # arguments, so "%s" was never substituted.
    raise ValueError("Vocabulary file %s not found." % vocabulary_path)
def sentence_to_token_ids(sentence, vocabulary,
                          tokenizer=None, normalize_digits=False):
  """Map *sentence* to a list of token ids.

  The sentence is tokenized with *tokenizer* (or basic_tokenizer when None)
  and each token looked up in *vocabulary*; unknown tokens map to UNK_ID.
  When normalize_digits is true, every digit is rewritten to "0" before
  the lookup.
  """
  words = tokenizer(sentence) if tokenizer else basic_tokenizer(sentence)
  if normalize_digits:
    return [vocabulary.get(_DIGIT_RE.sub("0", w), UNK_ID) for w in words]
  return [vocabulary.get(w, UNK_ID) for w in words]
def data_to_token_ids(data_path, target_path, vocab,
                      tokenizer=None, normalize_digits=False):
  """Tokenize data file and turn into token-ids using given vocabulary file.

  This function loads data from data_path, calls the above
  sentence_to_token_ids, and saves the result to target_path. See comment
  for sentence_to_token_ids on the details of token-ids format.

  Args:
    data_path: path to the data file in DMQA format.
    target_path: path where the file with token-ids will be created.
    vocabulary_path: path to the vocabulary file.
    tokenizer: a function to use to tokenize each sentence;
      if None, basic_tokenizer will be used.
    normalize_digits: Boolean; if true, all digits are replaced by 0s.
  """
  # Skip files that were already converted.
  if not gfile.Exists(target_path):
    try:
      results = dmqa_file_reader(data_path)
      with gfile.GFile(target_path, mode="w") as target_file:
        # The five DMQA sections: 0 and 4 are copied verbatim, 1-3
        # (document, question, answer -- see load_dataset) become ids.
        for i in range(5):
          if i == 0 or i == 4:
            target_file.write(results[i] + "\n\n")
          else:
            ids = sentence_to_token_ids(results[i], vocab, tokenizer,
                                        normalize_digits)
            target_file.write(" ".join(str(tok) for tok in ids) + "\n\n")
    except Exception as e:
      # Best-effort conversion: log the file and keep going.
      print(" [-] %s, %s" % (data_path, e))
def questions_to_token_ids(data_path, vocab_fname, vocab_size):
  """Convert every *.question file under *data_path* into an id file."""
  vocab, _ = initialize_vocabulary(vocab_fname)
  question_files = glob(os.path.join(data_path, "*.question"))
  for fname in tqdm(question_files):
    data_to_token_ids(fname, fname + ".ids%s" % vocab_size, vocab)
def prepare_data(data_dir, dataset_name, vocab_size):
  """Build the vocabulary (from the training split only) and convert all
  three splits of *dataset_name* into token-id files."""
  train_path = os.path.join(data_dir, dataset_name, 'questions', 'training')
  validation_path = os.path.join(data_dir, dataset_name, 'questions', 'validation')
  test_path = os.path.join(data_dir, dataset_name, 'questions', 'test')

  vocab_fname = os.path.join(data_dir, dataset_name, '%s.vocab%s' % (dataset_name, vocab_size))

  # The vocabulary is created once, from the training data.
  if not os.path.exists(vocab_fname):
    print(" [*] Create vocab from %s to %s ..." % (train_path, vocab_fname))
    create_vocabulary(vocab_fname, train_path, vocab_size)
  else:
    print(" [*] Skip creating vocab")

  print(" [*] Convert data in %s into vocab indicies..." % (train_path))
  questions_to_token_ids(train_path, vocab_fname, vocab_size)
  print(" [*] Convert data in %s into vocab indicies..." % (validation_path))
  questions_to_token_ids(validation_path, vocab_fname, vocab_size)
  print(" [*] Convert data in %s into vocab indicies..." % (test_path))
  questions_to_token_ids(test_path, vocab_fname, vocab_size)
if __name__ == '__main__':
  # CLI: python data_utils.py DATA_DIR DATASET_NAME [VOCAB_SIZE]
  # (VOCAB_SIZE is optional despite the usage line; it defaults to 100000.)
  if len(sys.argv) < 3:
    print(" [*] usage: python data_utils.py DATA_DIR DATASET_NAME VOCAB_SIZE")
  else:
    data_dir = sys.argv[1]
    dataset_name = sys.argv[2]
    if len(sys.argv) > 3:
      vocab_size = sys.argv[3]
    else:
      vocab_size = 100000
    prepare_data(data_dir, dataset_name, int(vocab_size))
| limiao06/DMQA | data_utils.py | data_utils.py | py | 9,462 | python | en | code | 0 | github-code | 90 |
36443250959 | import pandas.util.testing as pdt
import pandas as pd
import common
import collections
import airbnb
import os
import glob
def test_airbnb():
    """Run airbnb.main end-to-end on the test fixtures and compare every
    produced output file line-by-line against the recorded expected
    outputs, skipping the volatile 'time:' lines."""
    # clean the outputs folder first
    output_folder = 'tests/outputs/'
    expected_output_folder = 'tests/expected_outputs/'
    for stale in glob.glob(output_folder + '/*'):
        os.remove(stale)
    try:
        # if there is no such a folder there, then create a new folder
        os.mkdir(output_folder)
    except OSError:
        # Narrowed from a bare `except:`; only "folder already exists"
        # style OS errors are expected here.
        pass
    airbnb.main(['--info_str', 'tests/info_str.txt', '--learners', 'xgb', '--session', '--submission', '--input_folder', 'tests/', '--output_folder', 'tests/outputs/'])
    expected_output_files = sorted(os.listdir(expected_output_folder))
    output_files = sorted(os.listdir(output_folder))
    for output_file, expected_output_file in zip(output_files, expected_output_files):
        # Context managers close both files (the originals were leaked open).
        with open(output_folder + output_file) as outputs, \
                open(expected_output_folder + expected_output_file) as expected_outputs:
            for output_line, expected_output_line in zip(outputs, expected_outputs):
                if output_line.startswith('time:'):
                    continue
                assert output_line == expected_output_line
| luc14/Kaggle_Airbnb | test_airbnb.py | test_airbnb.py | py | 1,234 | python | en | code | 0 | github-code | 90 |
71164332137 | from sklearn.preprocessing import MinMaxScaler
import os, sys
import json
from concurrent.futures import ThreadPoolExecutor
from functools import partial
sys.path.append(os.path.abspath(os.path.join("..", "..")))
# One MinMaxScaler per Big-Five trait; each maps the raw 0..40 score range
# onto a trait-specific interval.
# NOTE(review): the endpoint values look hand-tuned -- confirm them against
# the questionnaire's norms before changing.
O_scaler = MinMaxScaler(feature_range=(-3.2, 2.3))
C_scaler = MinMaxScaler(feature_range=(-2.5, 2.4))
E_scaler = MinMaxScaler(feature_range=(-3.6, 2.5))
A_scaler = MinMaxScaler(feature_range=(-3.0, 2.1))
N_scaler = MinMaxScaler(feature_range=(-3.0, 2.0))
def ScoreMinMaxScaler(result, type):
    """Rescale a raw Big-Five score onto its trait-specific range.

    result -- raw score, an integer index in 0..40
    type   -- trait name ("Openness", "Conscientiousness", "Extroversion",
              "Agreeableness" or "Neuroticism")

    Returns the scaled value (a one-element array row from the fitted
    scaler), or None for an unrecognised trait name -- same as before.

    Improvement: the 0..40 column vector was an 80-line hardcoded literal
    rebuilt on every call; it is now generated.  (`type` shadows the
    builtin but is kept for interface compatibility.)
    """
    # Column vector [[0], [1], ..., [40]] covering every possible raw score.
    score = [[value] for value in range(41)]

    if type == "Openness":
        return O_scaler.fit_transform(score)[result]
    if type == "Conscientiousness":
        return C_scaler.fit_transform(score)[result]
    if type == "Extroversion":
        return E_scaler.fit_transform(score)[result]
    if type == "Agreeableness":
        return A_scaler.fit_transform(score)[result]
    if type == "Neuroticism":
        return N_scaler.fit_transform(score)[result]
def BigFiveFormula(calculation):
    """Score the 50-item questionnaire into the five traits.

    calculation -- the 50 item answers, indexed 0..49.

    Returns [Extroversion, Agreeableness, Conscientiousness, Neuroticism,
    Openness], each a trait base plus/minus its keyed items.
    """
    def tally(base, plus, minus):
        # Sum the positively-keyed items, subtract the negatively-keyed ones.
        total = base
        for idx in plus:
            total += calculation[idx]
        for idx in minus:
            total -= calculation[idx]
        return total

    extroversion = tally(20, (0, 10, 20, 30, 40), (5, 15, 25, 35, 45))
    agreeableness = tally(14, (6, 16, 26, 36, 41, 46), (1, 11, 21, 31))
    conscientiousness = tally(14, (2, 12, 22, 32, 42, 47), (7, 17, 27, 37))
    neuroticism = tally(38, (8, 18), (3, 13, 23, 28, 33, 38, 43, 48))
    openness = tally(8, (4, 14, 24, 34, 39, 44, 49), (9, 19, 29))

    return [extroversion, agreeableness, conscientiousness, neuroticism, openness]
################################################################################################
def to_result_txt(Result: list, Comment: list, file_result_path: str):
    """Write the five trait scores (each "<label> Score: N/40") and their
    comments, in E/A/C/N/O order, to *file_result_path*."""
    labels = (
        "Extroversion",
        "Agreeableness",
        "Conscientiousness",
        "Neuroticism",
        "Openness to Experience",
    )
    with open(file_result_path, "w", encoding="utf-8") as out:
        for label, score, remark in zip(labels, Result, Comment):
            # Each score entry is a one-element sequence; format as an int.
            out.write(label + " Score: " + str(int(score[0])) + "/40\n")
            out.write(str(remark) + "\n")
# ########
def to_result_json(Result: list, file_result_path: str):
    """Pack the five trait scores into a JSON-serializable dict keyed
    e/a/c/n/o.  (*file_result_path* is accepted but currently unused.)"""
    keys = ("e", "a", "c", "n", "o")
    return {key: int(score) for key, score in zip(keys, Result)}
# =============================================================================
def handle_big_five_audio(session_id: str):
    """Read the qa.txt answers recorded for *session_id*, score them and
    return the Big-Five JSON payload."""
    session_dir = f"./public/interview/{session_id}/"
    with open(session_dir + "qa.txt", "r") as answers_file:
        # One integer answer per line.
        values = [int(line.strip()) for line in answers_file]
    # drawGraph(Avg_Inverse_Result(BigFiveFormula(values)), 1, session_dir)
    scores = BigFiveFormula(values)
    return to_result_json(scores, session_dir + "audio.json")
| iscv-lab/iscv-machine | tools/big_five/audio.py | audio.py | py | 6,731 | python | en | code | 0 | github-code | 90 |
24340911215 | import time
from bluejay_bonanza_slot_machine_data import\
my_paytable, my_reel_1, my_reel_2, my_reel_3,\
my_symbols_to_unicode, my_virtual_stops
from bandit import Bandit
from random_squence_generator import get_random_sequence
from utils import take_integer_on_input
# Build the three-reel slot machine from the Bluejay Bonanza data tables.
slot_machine = Bandit(my_reel_1,
                      my_reel_2,
                      my_reel_3,
                      my_paytable,
                      my_virtual_stops,
                      my_symbols_to_unicode)

# NOTE(review): "availabble" is a typo kept for consistency with its uses below.
availabble_moves = ('spin', 's','help','exit', 'autoplay', 'cheat spin')

print('WELCOME TO CASINO!')
# NOTE(review): the deposit value is read but discarded -- the return value
# is never assigned; verify whether it should seed slot_machine.cash.
take_integer_on_input('Make you Deposit(integer value): ')
slot_machine.draw()

# Main game loop: prompt until the player types a recognised command.
while True:
    move = input('Make your move:')
    while move not in availabble_moves:
        print('type \'help\' for help')
        move = input('Make your move:')
    if move == 'help':
        print("""
        type 'spin' or 's' to start the round;
        type 'exit' to take your money and leave;
        type 'autoplay', make computer spin for you every second;
        type 'help' for help.
        pssst, secret command:'cheat spin'.
        """)
    if move == 'spin' or move == 's':
        # A fresh random sequence in [0, 128) selects the virtual stops.
        sequence = get_random_sequence(128)
        slot_machine.make_move(sequence)
    if move == 'cheat spin':
        # Forces the last virtual stop on every reel.
        slot_machine.make_move([127,127,127])
    if move == 'autoplay':
        games_number = take_integer_on_input('type number of games you want to autoplay: ')
        for counter in range(games_number):
            sequence = get_random_sequence(128)
            slot_machine.make_move(sequence)
            time.sleep(1)
    if move =='exit':
        break

print(f'You Got ${slot_machine.cash}')
| Japolk/one-armed-bandit | game.py | game.py | py | 1,764 | python | en | code | 0 | github-code | 90 |
32693033108 | import os
from capytaine import *
import capytaine.post_pro
import numpy as np
import logging
import matplotlib.pyplot as plt
os.system('cls')
def plate_flexure_mode_shape(x, y, z):
    """Displacement (u, v, w) of the plate-flexure mode at a mesh point.

    The mode is a cosine half-wave across the device width (y) times a
    cosine along the device height (z, measured from the plate centre);
    only the surge component u is non-zero.  The x coordinate is accepted
    for the (x, y, z) call signature but does not enter the shape.

    Cleanup: removed the unused `sin`, `cosh`, `sinh` imports and the
    unused `device_thickness` local.
    """
    from math import pi, cos
    device_height = 10.0
    device_width = 6.0
    base_height = 0.3
    depth = -10.0
    # Vertical centre of the flap: seabed + base + half the flap height.
    z_center = depth + base_height + device_height / 2
    u = cos(pi * y / device_width) * cos(pi*(z - z_center) / device_height)
    v = 0.0
    w = 0.0
    # x_disp = (z + height) ** 2 / (height ** 2) # Initialize parabolic mode shape for estimates
    return (u, v, w)
# Set logger configuration
logging.basicConfig(level=logging.INFO, format="%(levelname)s:\t%(message)s")

# Material parameters
density = 5000
# NOTE(review): elastic_modulus and nu are defined but not referenced below
# -- presumably reserved for a flexural-stiffness computation; confirm.
elastic_modulus = 1e6
nu = 0.3

# Create OSWEC mesh (a bottom-hinged flap on a base, in 10 m of water).
device_height = 10.0
device_width = 6.0
device_thickness = 0.50
base_height = 0.3
depth = -10.0
wave_direction = 0.0
volume = device_width * device_height * device_thickness
mass = density * volume
omega_range = np.linspace(0.1, 5.0, 50)

# The box extends device_height + 2 so the flap pierces the free surface.
full_oswec = RectangularParallelepiped(size=(device_thickness, device_width, device_height + 2),
                                       resolution=(4, 40, 32),
                                       center = (0.0, 0.0, depth + base_height + device_height / 2))
dissipation_matrix = np.zeros(shape=(2, 2))
mass_matrix = mass * np.array([[1.0, 0.0], [0.0, 0.25]])
stiffness_matrix = 1e5 * np.eye(N=2)

# Add custom defined pitch axis about constrained axis (hinge at the base).
pitch_axis = Axis()
pitch_axis.point = np.array([0.0, 0.0, depth + base_height])
pitch_axis.vector = np.array([0.0, 1.0, 0.0])
full_oswec.add_rotation_dof(name='Pitch', axis = pitch_axis)
# Evaluate the flexural mode shape at every face centre.
full_oswec.dofs['Plate Flexure'] = np.array([plate_flexure_mode_shape(x, y, z) for x, y, z, in full_oswec.mesh.faces_centers])
full_oswec.mass = full_oswec.add_dofs_labels_to_matrix(mass_matrix)
full_oswec.dissipation = full_oswec.add_dofs_labels_to_matrix(dissipation_matrix)
full_oswec.hydrostatic_stiffness = full_oswec.add_dofs_labels_to_matrix(stiffness_matrix)

# The BEM problems use only the wetted part of the hull.
oswec = full_oswec.copy()
oswec.keep_immersed_part(sea_bottom=depth)

full_oswec.show()
oswec.show()

# Animate rigid body pitch DOF along with modal flexure DOF
animation = full_oswec.animate(motion={'Pitch': 0.40, 'Plate Flexure': 1.25}, loop_duration=6.0)
animation.run()
# Problem definition: one radiation problem per (dof, frequency) plus one
# diffraction problem per frequency.
oswec_problems = [RadiationProblem(body=oswec,sea_bottom=depth,
                                   radiating_dof=dof, omega=omega)
                                   for dof in oswec.dofs
                                   for omega in omega_range]
oswec_problems += [DiffractionProblem(body=oswec, sea_bottom=depth,
                                      omega=omega, wave_direction=wave_direction)
                                      for omega in omega_range]

# Solve for results and assemble data
solver = BEMSolver()
results = [solver.solve(problem) for problem in sorted(oswec_problems)]
data = assemble_dataset(results)
# RAO computed with the body's own mass/damping/stiffness matrices
# (dissipation=None and stiffness=None fall back to the body's).
rao_data = capytaine.post_pro.rao(dataset=data, wave_direction=0.0, dissipation=None, stiffness=None)
# Plot results: added mass and radiation damping vs. frequency, per DOF.
for dof in full_oswec.dofs:
    plt.figure()
    plt.plot(
        omega_range,
        data['added_mass'].sel(radiating_dof=dof, influenced_dof=dof),
        label='Added Mass',
        marker='o'
    )
    plt.plot(
        omega_range,
        data['radiation_damping'].sel(radiating_dof=dof, influenced_dof=dof),
        label='Radiation Damping',
        marker='o'
    )
    plt.xlabel('$\omega$')
    plt.legend()
    plt.title(dof)
    #plt.savefig(dof + 'results.png', bbox_inches='tight')
    plt.tight_layout()
    plt.show()

# Response amplitude operator magnitude vs. frequency, per DOF.
for dof in full_oswec.dofs:
    plt.figure()
    plt.plot(
        omega_range,
        np.abs(rao_data.sel(radiating_dof=dof))
    )
    plt.xlabel('$\omega$')
    plt.ylabel('RAO')
    plt.title(dof)
    #plt.savefig(dof + 'rao_results.png', bbox_inches='tight')
    plt.tight_layout()
    plt.show()
def Solver():
    """
    Input: None.
    Output: prints the shortest east-to-west crossing plan, step by step.
    """
    all_states = genStates()
    graph = genGraph(all_states)
    source = "EEEEEEE"  # everyone (and the boat) starts on the east bank
    goal = "WWWWWWW"    # everyone ends on the west bank
    shortest = genShortestPath(graph, source, goal)
    labels = [' blue husband', ' blue wife', ' green husband', ' green wife', ' red husband', ' red wife']
    genTrip(shortest, labels)
def genTrip(result, husband_wife):
    """
    Input: result, the shortest path (list of states); husband_wife, the
           printable name for each of the six people (by state index).
    Output: print out the solution to the problem, one crossing per line.
    """
    for i in range(1,len(result)):
        # The boat's side AFTER the move tells us the crossing direction.
        if result[i][6] == "E":
            source = "west"
            direction = "east"
        else:
            source = "east"
            direction = "west"
        # Collect everyone whose bank changed between consecutive states.
        who= []
        for j in range(6): #from 0 to 5 referring to 6 ppl
            if result[i-1][j] != result[i][j]:
                who.append(husband_wife[j])
        if len(who) > 1:
            print(str(i) +" The" + who[0] + " and" + who[1] +" go"+" from the "+ source +" to the " + direction + ".")
        else:
            print(str(i) +" The" + who[0] +" goes" +" from the "+ source +" to the " + direction + ".")
def genStates():
    """Return all 2**7 = 128 states: six people plus the boat, each of the
    seven characters being 'E' (east bank) or 'W' (west bank)."""
    side = ("E", "W")
    return [p0 + p1 + p2 + p3 + p4 + p5 + boat
            for p0 in side
            for p1 in side
            for p2 in side
            for p3 in side
            for p4 in side
            for p5 in side
            for boat in side]
def genGraph(S):
    """
    Input: S, a set of all possible states
    Output: a dict mapping each legal state in S to the list of legal
            states reachable from it in one boat crossing.
    """
    legal = [state for state in S if isLegal(state)]
    # nextStates() puts the node itself first; drop it with the [1:] slice.
    return {node: nextStates(node, legal)[1:] for node in legal}
def isLegal(S):
    """
    Input: S, a state string (indices 0-5 = three couples, 6 = boat).
    Output: True if S violates no constraint; otherwise False.
    """
    # Jealous-husbands rule: a wife (odd index) may not stand apart from
    # her husband while sharing a bank with another husband.
    for wife, husband in ((1, 0), (3, 2), (5, 4)):
        rivals = [h for h in (0, 2, 4) if h != husband]
        if S[wife] != S[husband] and (S[wife] == S[rivals[0]] or S[wife] == S[rivals[1]]):
            return False
    # The boat cannot sit on the opposite bank when all six stand together.
    if S[0] == S[1] == S[2] == S[3] == S[4] == S[5] and S[6] != S[0]:
        return False
    return True
def nextStates(n, R):
    """
    Input: n, the starting node (one entity in R); R, the set of legal states.
    Output: a list whose first entry is n itself, followed by every state in
            R reachable from n by one crossing: the boat flips sides, and
            one or two people standing on the boat's bank cross with it.
    """
    reachable = [n]
    boat_bank = n[6]
    for candidate in R:
        # Every move flips the boat's side.
        if candidate[6] == boat_bank:
            continue
        crossings = 0
        legal_move = True
        for person in range(6):
            if n[person] != candidate[person]:
                if n[person] == boat_bank:
                    crossings += 1
                else:
                    # Someone moved toward the boat's old bank: impossible.
                    legal_move = False
        if legal_move and 1 <= crossings <= 2:
            reachable.append(candidate)
    return reachable
"""
def find_all_paths(graph, start, end, path=[]):
path = path + [start]
if start == end:
return [path]
if start not in graph:
return None
paths = []
for node in graph[start]:
if node not in path:
newpaths = find_all_paths(graph, node, end, path)
for newpath in newpaths:
paths.append(newpath)
return paths
"""
def genShortestPath(graph, start, end, path=[]):
    """
    Input: graph, an adjacency dict; start, the source node; end, the
           destination node; path, the nodes already visited.
    Output: a minimum-length path from start to end, or None if unreachable.
    Source: https://www.python.org/doc/essays/graphs/
    """
    path = path + [start]
    if start == end:
        return path
    if start not in graph:
        return None
    best = None
    for neighbour in graph[start]:
        if neighbour in path:
            continue
        candidate = genShortestPath(graph, neighbour, end, path)
        if candidate is not None and (best is None or len(candidate) < len(best)):
            best = candidate
    return best
"""
def find_all_paths(graph, start, end, path=[]):
path = path + [start]
if start == end:
return [path]
if not graph.has_key(start):
return []
paths = []
for node in graph[start]:
if node not in path:
newpaths = find_all_paths(graph, node, end, path)
for newpath in newpaths:
paths.append(newpath)
return paths
def genShortestPath(graph, start, end, path=[]):
path = path + [start] #1. work on the path- a list from start to end
if start == end:
return [path]
if not (start in graph):
return None
shortest = None
paths = []
for node in graph[start]:
if node not in path:
newpath = genShortestPath(graph, node, end, path)
if newpath:
if not shortest or len(newpath) < len(shortest):
shortest = newpath
paths.append(shortest)
elif len(newpath) == len(shortest):
paths.append(newpath)
return paths
"""
Solver()
| shellysolomonwang/Couple-River-crossing-Problem | couple-river-crossing-problem.py | couple-river-crossing-problem.py | py | 6,328 | python | en | code | 0 | github-code | 90 |
27670800500 | #!/usr/bin/env python3
import os
import requests
import aiml
import irc.bot
USERNAME = "FoxyVamp" # Substitute your bot's username
TOKEN = "oauth:obejk6vxzumkae8hhgxpujowx1px3u" # OAUTH token (Get one here: https://twitchapps.com/tmi/)
CHANNEL = "#Foxy_Fury" # Twitch channel to join
STARTUP_FILE = "std-startup.xml"
BOT_PREFIX = ('?', '!')
class ChattyCathy(irc.bot.SingleServerIRCBot):
def __init__(self, username, token, channel):
self.token = token
self.channel = "#" + channel
# Load AIML kernel
self.aiml_kernel = aiml.Kernel()
self.aiml_kernel.learn(STARTUP_FILE)
self.aiml_kernel.respond("LOAD AIML B")
# Create IRC bot connection
server = 'irc.chat.twitch.tv'
port = 667
print("Connecting to {} on port {}...".format(server, port))
irc.bot.SingleServerIRCBot.__init__(self, [(server, port, token)], username, channel)
def on_welcome(self, c, e):
print("Joining " + self.channel)
c.join(self.channel)
def on_pubmsg(self, c, e):
print(e.arguments[0])
# If a chat message starts with an exclamation point, try to run it as a command
if e.arguments[0][:1] in BOT_PREFIX:
cmd = e.arguments[0].split(' ')[0][1:]
print("Received command: " + cmd)
self.do_command(e, cmd)
return
aiml_response = self.aiml_kernel.respond(e.arguments[0])
c.privmsg(self.channel, aiml_response)
def do_command(self, e, cmd):
c = self.connection
# Handle commands here
if cmd == "booty":
c.privmsg(self.channel, "(__)__)")
if __name__ == "__main__":
bot = ChattyCathy(USERNAME, TOKEN, CHANNEL)
bot.start()
| CWing22/Fail1 | cathy/cathytwitch.py | cathytwitch.py | py | 1,747 | python | en | code | 0 | github-code | 90 |
73884510697 | """
script to make postfit comparisons
"""
import ROOT
import os,sys,math
from collections import OrderedDict
import json
import re
import numpy as np
from CMSPLOTS.myFunction import DrawHistos, DrawConfig
ROOT.gROOT.SetBatch(True)
def MakePostPlot(ifilename: str, channel: str, prepost: str, bins: np.array, suffix: str, showpull: bool = False, x_label: str = "", is5TeV: bool = False, startbin: int = 1):
"""
compare the unrolled postfit of data and templates
"""
print("")
print("#"*50)
print("channel:", channel)
print("prepost:", prepost)
print("suffix: ", suffix)
print("ifile: ", ifilename)
ifile = ROOT.TFile(ifilename)
horgdata = ifile.Get("obs")
# get the list of histograms saved in the file
hkeys = ifile.GetListOfKeys()
hkeys = [hkey.GetName() for hkey in hkeys]
hnames_sig = []
hnames_sig_z = []
hnames_qcd = []
for hkey in hkeys:
hkey_str = str(hkey)
if not hkey_str.endswith(prepost):
continue
# w signal
if bool(re.match(r"expproc_\w*plus_sig_\w*fit$", hkey)) or bool(re.match(r"expproc_\w*minus_sig_\w*fit$", hkey)):
hnames_sig.append( hkey )
# z signal
elif bool(re.match(r"expproc_\w*sig_\w*fit$", hkey)):
hnames_sig_z.append( hkey )
# qcd
elif bool(re.match(r"expproc_qcd_\w*fit$", hkey)):
hnames_qcd.append( hkey )
assert len(hnames_sig)>=1, "There should be at least one sig histogram in file: {}".format(ifilename)
print(f"W signals: {hnames_sig}")
print(f"Z signals: {hnames_sig_z}")
print(f"QCD: {hnames_qcd}")
# ewk bkg includes W->tau+nu, z->ll, and diboson process (for w's)
# ewk processes for z's
hnames_ewks = [f"expproc_ewk_{prepost}"]
hnames_ttbar = [f"expproc_tt_{prepost}"]
## read the postfit plots from input file
hexpsig = None
hexpsig_z = None
hexpewk = None
hexpqcd = None
hexpttbar = None
for hkey in hkeys:
if hkey in hnames_sig:
if hexpsig is None:
hexpsig = ifile.Get(hkey)
else:
hexpsig.Add( ifile.Get(hkey) )
if hkey in hnames_sig_z:
if hexpsig_z is None:
hexpsig_z = ifile.Get(hkey)
else:
hexpsig_z.Add( ifile.Get(hkey) )
if hkey in hnames_ewks:
if hexpewk is None:
hexpewk = ifile.Get(hkey)
else:
hexpewk.Add( ifile.Get(hkey) )
if hkey in hnames_ttbar:
if hexpttbar is None:
hexpttbar = ifile.Get(hkey)
else:
hexpttbar.Add( ifile.Get(hkey) )
if hkey in hnames_qcd:
if hexpqcd is None:
hexpqcd = ifile.Get(hkey)
else:
hexpqcd.Add( ifile.Get(hkey) )
# the combined prediction of all processes,
# which should have included the correct total postfit uncertainties
hexpfull = ifile.Get(f"expfull_{prepost}")
# the histograms saved in the root file does not follow the original bining
# recover the original binning
nbins = len(bins) - 1
binnings = (nbins, bins)
bin_width = bins[1] - bins[0]
for ibin in range(nbins):
assert bins[ibin+1] - bins[ibin] == bin_width
#binnings = (newbins.shape[0]-1, newbins)
hdata = ROOT.TH1D("hdata_{}_{}".format( channel, suffix), "hdata_{}_{}".format( channel, suffix), *binnings)
hsig = ROOT.TH1D("hsig_{}_{}".format( channel, suffix), "hsig_{}_{}".format( channel, suffix), *binnings)
hsig_z = ROOT.TH1D("hsig_z_{}_{}".format(channel, suffix), "hsig_z_{}_{}".format(channel, suffix), *binnings)
hewk = ROOT.TH1D("hewk_{}_{}".format( channel, suffix), "hewk_{}_{}".format( channel, suffix), *binnings)
httbar = ROOT.TH1D("httbar_{}_{}".format(channel, suffix), "httbar_{}_{}".format(channel, suffix), *binnings)
hqcd = ROOT.TH1D("hqcd_{}_{}".format( channel, suffix), "hqcd_{}_{}".format( channel, suffix), *binnings)
hratio = ROOT.TH1D("hrato_{}_{}".format( channel, suffix), "hratio_{}_{}".format(channel, suffix), *binnings)
hpull = ROOT.TH1D("hpull_{}_{}".format( channel, suffix), "hpull_{}_{}".format( channel, suffix), *binnings)
for ibin in range(1, nbins + 1):
hdata.SetBinContent(ibin, horgdata.GetBinContent(ibin + startbin-1))
hdata.SetBinError(ibin, horgdata.GetBinError(ibin + startbin-1 ))
if hexpsig:
hsig.SetBinContent(ibin, hexpsig.GetBinContent(ibin + startbin-1))
if hexpsig_z:
hsig_z.SetBinContent(ibin, hexpsig_z.GetBinContent(ibin + startbin-1))
if hexpewk:
hewk.SetBinContent(ibin, hexpewk.GetBinContent(ibin + startbin-1))
if hexpttbar:
httbar.SetBinContent(ibin, hexpttbar.GetBinContent(ibin + startbin-1))
if hexpqcd:
hqcd.SetBinContent(ibin, hexpqcd.GetBinContent(ibin + startbin-1))
hratio.SetBinContent(ibin, hexpfull.GetBinContent(ibin + startbin - 1))
hratio.SetBinError(ibin, hexpfull.GetBinError(ibin + startbin - 1))
diff = horgdata.GetBinContent(ibin + startbin - 1) - hexpfull.GetBinContent(ibin + startbin - 1)
# take the sigma as sqrt(data**2 + templates**2)
# not 100% sure if this is the correct way to calculate pull
sig = math.sqrt(horgdata.GetBinError(ibin + startbin - 1)**2 + hexpfull.GetBinError(ibin + startbin - 1)**2)
hpull.SetBinContent(ibin, diff/(sig+1e-6))
# deal with the uncertainty bar
for ibin in range(1, hratio.GetNbinsX()+1):
val = hratio.GetBinContent(ibin)
err = hratio.GetBinError(ibin)
hratio.SetBinContent(ibin, 1.0)
if val!=0:
hratio.SetBinError(ibin, err/val)
else:
hratio.SetBinError(ibin, 0.)
hsig.SetFillColor(ROOT.TColor.GetColor(222, 90, 106))
hsig.SetLineColor(1)
hsig_z.SetFillColor(ROOT.TColor.GetColor(100, 192, 232))
hsig_z.SetLineColor(1)
hewk.SetFillColor(ROOT.TColor.GetColor("#E1F5A9"))
hewk.SetLineColor(1)
httbar.SetFillColor(ROOT.TColor.GetColor(155, 152, 204))
httbar.SetLineColor(1)
hqcd.SetFillColor(ROOT.TColor.GetColor(250, 202, 255))
hqcd.SetLineColor(1)
nevts = OrderedDict()
nevts['data'] = hdata.Integral()
nevts['sig'] = hsig.Integral()
nevts['sig_z'] = hsig_z.Integral()
nevts['ewk'] = hewk.Integral()
nevts['ttbar'] = httbar.Integral()
nevts['qcd'] = hqcd.Integral()
hdata.SetMarkerStyle(20)
hdata.SetMarkerSize(1)
hdata.SetMarkerColor(1)
hdata.SetLineColor(1)
hdata.Scale(1.0, "width")
hqcd.Scale(1.0, "width")
httbar.Scale(1.0, "width")
hewk.Scale(1.0, "width")
hsig.Scale(1.0, "width")
hsig_z.Scale(1.0, "width")
siglabels = {
"muplus": "W^{+}#rightarrow#mu^{+}#nu",
"muminus": "W^{-}#rightarrow#mu^{-}#bar{#nu}",
"mumu": "Z#rightarrow #mu^{+}#mu^{-}"
# "eplus": "W^{+}#rightarrow e^{+}#nu",
# "eminus": "W^{-}#rightarrow e^{-}#bar{#nu}",
# "ee": "Z#rightarrow e^{+}e^{-}",
}
hs_gmc = ROOT.THStack("hs_stack_{}_{}".format(channel, suffix), "hs_stack")
channel_forlabel = channel.replace("_pfmet", "").replace("_pfmt", "")
labels_mc = []
if nevts['qcd'] > 0.:
hs_gmc.Add(hqcd)
labels_mc.append("QCD")
if nevts['ttbar'] > 0:
hs_gmc.Add(httbar)
labels_mc.append("t#bar{t}")
if nevts['ewk'] > 0:
hs_gmc.Add(hewk)
labels_mc.append("EWK")
if nevts['sig_z'] > 0:
hs_gmc.Add(hsig_z)
channel_z = channel_forlabel if ("plus" not in channel and "minus" not in channel) else channel_forlabel.replace("plus", "").replace("minus", "")*2
labels_mc.append(siglabels[channel_z])
if nevts['sig'] > 0:
hs_gmc.Add(hsig)
labels_mc.append(siglabels[channel_forlabel])
labels_mc.reverse()
if "ee" not in channel and "mumu" not in channel:
# w's
xlabel = None
if "mt" in suffix:
xlabel = "m_{T} (GeV)"
elif "met" in suffix:
xlabel = "MET (GeV])"
outputname = f"{prepost}_w_{channel}_{suffix}"
else:
# z's
xlabel = "m_{ll} (GeV)"
outputname = f"{prepost}_z_{channel}_{suffix}"
if x_label != "":
xlabel = x_label
ymaxs = {
"muplus": 3.5e6,
"muminus": 3.5e6,
"mumu": 1.0e6,
}
ratiopanel_label = None
if "_syst" in suffix:
syst_name = suffix.split("_syst")[-1]
if syst_name == "All":
ratiopanel_label = "Total syst. unc."
else:
ratiopanel_label = f"Syst. unc. ({syst_name})"
yrmin = 0.95
yrmax = 1.05
ypullmin = -3.99
ypullmax = 3.99
if "prefit" in prepost:
yrmin = 0.85
yrmax = 1.15
ypullmin = -9.99
ypullmax = 9.99
drawconfigs = DrawConfig(
xmin = bins.min(),
xmax = bins.max(),
xlabel = xlabel,
ymin = 0,
ymax = ymaxs[channel_forlabel] / bin_width,
ylabel = f"Events / GeV",
outputname = outputname,
noCMS=False,
dology=False,
addOverflow=False,
addUnderflow=False,
yrmin=yrmin,
yrmax=yrmax,
yrlabel = "Obs/Pred"
)
DrawHistos(
[
hdata,
hs_gmc
],
["Observed"]+labels_mc,
drawconfigs.xmin,
drawconfigs.xmax,
drawconfigs.xlabel,
drawconfigs.ymin,
drawconfigs.ymax,
drawconfigs.ylabel,
drawconfigs.outputname,
dology=drawconfigs.dology,
dologx=drawconfigs.dologx,
showratio=drawconfigs.showratio,
yrmax = drawconfigs.yrmax,
yrmin = drawconfigs.yrmin,
yrlabel = drawconfigs.yrlabel,
ypullmin=ypullmin,
ypullmax=ypullmax,
donormalize=drawconfigs.donormalize,
ratiobase=drawconfigs.ratiobase,
legendPos = drawconfigs.legendPos,
redrawihist = drawconfigs.redrawihist,
extraText = drawconfigs.extraText,
noCMS = drawconfigs.noCMS,
addOverflow = drawconfigs.addOverflow,
addUnderflow = drawconfigs.addUnderflow,
nMaxDigits = drawconfigs.nMaxDigits,
hratiopanel=hratio,
ratiopanel_label=ratiopanel_label,
drawoptions=[
'PE X0',
'HIST same'
],
showpull=showpull,
hpulls=[hpull],
W_ref = 600,
is5TeV = False
)
ymaxs_logy = {
"muplus": 3.5e9,
"muminus": 3.5e9,
"mumu": 1.0e9,
}
ymins_logy = {
"muplus": 0.5e3,
"muminus": 0.5e3,
"mumu": 30,
}
drawconfigs = DrawConfig(
xmin = bins.min(),
xmax = bins.max(),
xlabel = xlabel,
ymin = ymins_logy[channel_forlabel],
ymax = ymaxs_logy[channel_forlabel] / bin_width,
ylabel = f"Events / GeV",
outputname = outputname+"_log",
dology=True,
addOverflow=False,
addUnderflow=False,
yrmin=yrmin,
yrmax=yrmax,
yrlabel = "Obs/Pred"
)
DrawHistos(
[
hdata,
hs_gmc
],
["Observed"]+labels_mc,
drawconfigs.xmin,
drawconfigs.xmax,
drawconfigs.xlabel,
drawconfigs.ymin,
drawconfigs.ymax,
drawconfigs.ylabel,
drawconfigs.outputname,
dology=drawconfigs.dology,
dologx=drawconfigs.dologx,
showratio=drawconfigs.showratio,
yrmax = drawconfigs.yrmax,
yrmin = drawconfigs.yrmin,
yrlabel = drawconfigs.yrlabel,
ypullmin=ypullmin,
ypullmax=ypullmax,
donormalize=drawconfigs.donormalize,
ratiobase=drawconfigs.ratiobase,
legendPos = drawconfigs.legendPos,
redrawihist = drawconfigs.redrawihist,
extraText = drawconfigs.extraText,
noCMS = drawconfigs.noCMS,
addOverflow = drawconfigs.addOverflow,
addUnderflow = drawconfigs.addUnderflow,
nMaxDigits = drawconfigs.nMaxDigits,
hratiopanel=hratio,
ratiopanel_label=ratiopanel_label,
drawoptions=[
'PE X0',
'HIST same'
],
showpull=showpull,
hpulls=[hpull],
W_ref = 600,
is5TeV = False
)
return nevts
def GetPOIValue(ifilename, poiname = ""):
"""
return the POI val and error given a postfit root file
"""
f = ROOT.TFile(ifilename)
tree = f.Get("fitresults")
tree.GetEntry(0)
val = getattr(tree, poiname)
err = abs(getattr(tree, poiname+"_err"))
return val, err
def ComparePOIs(vals_x: np.array, vals: list, errs: list, labels: list, colors: list, markers: list, output: str, is5TeV: bool):
"""
compare the POI values with different selections
"""
# print(vals_x)
graphs = []
nvals = len(vals)
width = (vals_x[1]-vals_x[0])
scale = 0.5
for idx in range(nvals):
val = vals[idx]
err = errs[idx]
color = colors[idx]
marker = markers[idx]
g = ROOT.TGraphErrors(len(vals_x), vals_x - scale*width/2. + idx*scale*width/(nvals-1.), val, np.zeros(len(vals_x)), err)
g.SetLineColor(color)
g.SetMarkerColor(color)
g.SetMarkerStyle(markers[idx])
graphs.append(g)
ymin = 0.9
ymax = 1.1
w_var = None
if "mt" in output:
w_var = "m_{T} threshold [GeV]"
elif "met" in output:
w_var = "MET threshold [GeV]"
DrawHistos(graphs, labels, vals_x[0]-width/2., vals_x[-1]+width/2., w_var, ymin, ymax, "POI / POI^{no cut}", output, dology=False, showratio=False, donormalize=False, drawoptions='EP', legendPos = [0.2, 0.7, 0.8, 0.8], noCMS = False, nMaxDigits = 3, legendNCols = 2, is5TeV = is5TeV, legendoptions=["LEP"]*nvals)
def result2json(ifilename: str, poiname: str, ofilename: str, hname: str = "nuisance_impact_mu"):
"""
script to convert the postfit POI and impacts of nuisance parameters
to json file, which will be used to make impact plots later
"""
nameMap = {
"Pol1shape": "QCD_pol1",
"mcScale": "QCD_ScaledMC"
}
def getNuisName(nuis):
result = nuis
for key, val in nameMap.items():
if nuis.endswith(key):
#result = val
result = nuis.replace(key, val)
break
if bool(re.match(r"\w*bin\d+shape", nuis)):
result = ("QCD_" + nuis).replace("shape", "")
return result.replace("lepEta_bin0_WpT_bin0_", "")
ifile = ROOT.TFile(ifilename)
himpact = ifile.Get(hname)
tree = ifile.Get("fitresults")
tree.GetEntry(0)
# find the POI bin for poiname
ibinX = -1
for binX in range(1, himpact.GetNbinsX()+1):
poi = himpact.GetXaxis().GetBinLabel(binX)
if poi == poiname:
ibinX = binX
continue
assert ibinX >=0, "Can not find the POI {} in the postfit file {}. Please check.".format(poiname, ifilename)
results = OrderedDict()
results['POIs'] = []
val = getattr(tree, poiname)
err = abs(getattr(tree, poiname+"_err"))
poi = OrderedDict()
poi['fit'] = [val-err, val, val+err]
poi['name'] = poiname
results['POIs'].append(poi)
results['method'] = 'default'
results['params'] = []
# dump impacts
impacts = OrderedDict()
for ibinY in range(1, himpact.GetNbinsY()+1):
nuis = himpact.GetYaxis().GetBinLabel(ibinY)
impacts[nuis] = himpact.GetBinContent(ibinX, ibinY)
# sort impacts, descending
impacts = OrderedDict(sorted(list(impacts.items()), key=lambda x: abs(x[1]), reverse=True))
pulls = OrderedDict()
for nuis in list(impacts.keys()):
val = getattr(tree, nuis)
err = getattr(tree, nuis+"_err")
err = abs(err)
pulls[nuis] = [val - err, val, val + err]
# save to results
for nuis in list(impacts.keys()):
systematic = OrderedDict()
systematic['fit'] = pulls[nuis]
systematic['groups'] = []
systematic['impact_' + poiname] = impacts[nuis]
systematic['name'] = getNuisName(nuis)
systematic['prefit'] = [-1.0, 0., 1.0]
systematic[poiname] = [poi['fit'][1] - impacts[nuis], poi['fit'][1], poi['fit'][1] + impacts[nuis]]
systematic['type'] = "Gaussian"
# print((getNuisName(nuis), pulls[nuis][1], pulls[nuis][1]-pulls[nuis][0], impacts[nuis]))
results['params'].append(systematic)
with open(ofilename, 'w') as fp:
json.dump(results, fp, indent=2)
def DumpGroupImpacts(ifilename: str, poiname: str, hname = "nuisance_group_impact_mu"):
"""
print out the grouped impacts
"""
val_poi, err_poi = GetPOIValue(ifilename, poiname)
ifile = ROOT.TFile(ifilename)
himpact_grouped = ifile.Get(hname)
# find the POI bin for poiname
ibinX = -1
for binX in range(1, himpact_grouped.GetNbinsX()+1):
poi = himpact_grouped.GetXaxis().GetBinLabel(binX)
if poi == poiname:
ibinX = binX
break
assert ibinX >=0, "Can not find the POI {} in the postfit file {}. Please check.".format(poiname, ifilename)
impacts = OrderedDict()
for ibinY in range(1, himpact_grouped.GetNbinsY()+1):
nuis = himpact_grouped.GetYaxis().GetBinLabel(ibinY)
impacts[nuis] = himpact_grouped.GetBinContent(ibinX, ibinY) * 100.0 / val_poi
stat_unc = impacts["stat"] * val_poi / 100.
lumi_unc = 0.00 * val_poi if "ratio" not in poiname else 0.
print("")
print("#"*50)
# adding BBB unc. to syst, not stats!
err_poi = np.sqrt((impacts["binByBinStat"] / 100)**2 + (err_poi/val_poi)**2) * val_poi
if lumi_unc > 0.:
print(f"{ifilename:50s}|{poiname:30s}| poi = {val_poi:5.5f} +/- {stat_unc:5.5f} (stat) +/- {err_poi:5.5f} (syst) +/- {lumi_unc:5.5f} (lumi)")
else:
print(f"{ifilename:50s}|{poiname:30s}| poi = {val_poi:5.5f} +/- {stat_unc:5.5f} (stat) +/- {err_poi:5.5f} (syst)")
# sort impacts, descending
impacts = OrderedDict(sorted(list(impacts.items()), key=lambda x: abs(x[1]), reverse=True))
print(f"\nPrint grouped nuisance impacts for {poiname} in {ifilename}")
for nuis in list(impacts.keys()):
print(f"{nuis:20}: {impacts[nuis]:.3f}")
print()
return impacts
| KIT-CMS/Z_early_Run3 | SignalFit/modules/postFitScripts.py | postFitScripts.py | py | 18,524 | python | en | code | 0 | github-code | 90 |
19916579515 | '''
@author: diana.kantor
Soil-specific Report functionality. Checks data for soil-specific
indicators such as spikes, jumps, frozen soil flags, and missing
volumetric calculations.
'''
from crn import *
import StandardReport
SPIKE_THRESHOLD = 5.0
stndReport = StandardReport.StandardReport()
class SoilReport:
'''Constructor. Sets global vars to be used throughout program.'''
def __init__(self):
return
'''A mapping of report column names with their descriptions.'''
def getColumnInfo(self):
columns = []
columns.append(('spike', "The number of spikes for this sensor. Values with range, frozen, or door flags are not included as spikes."))
columns.append(('jump', "The number of jumps for this sensor. Values with range, frozen, or door flags are not included as jumps."))
columns.append(('frozen', "The number of values with a frozen flag. Or 'N/A' for non-soil moisture sensors."))
columns.append(('no volumetric', "The number of values for which volumetric was not calculated. Or 'N/A' for non-soil moisture sensors."))
return columns
'''Counts facts with frozen soil bit set in the flag integer.'''
def countFrozenFlags(self, facts):
count = stndReport.countFlagsForType(facts, 16)[0]
if count==0 and len(facts)>0 and not self.isSoilMoistureElement(facts[0].element):
return "N/A"
return count
'''Counts the number of facts with no corresponding calculated
volumetric values. Ignores facts that are not for soil moisture
dielectric elements.'''
def countNoVolumetric(self, facts):
count = 0
for fa in facts:
elem = findElement(fa.elementId)
elemName = elem.name
if not self.isSoilMoistureElement(elem):
return "N/A"
stationId = fa.stationId
datetimeId = fa.datetimeId
volElemName = elemName.replace('M','MV')
volElem = list(elementDao.getElementsByName([volElemName]).values())
volFact = getData(stationId, datetimeId, volElem)
if len(volFact) is 0:
count += 1;
return count;
'''Counts the number of spikes in this collection of facts. A spike
occurs when a value goes far up/down from normal for 1 hour'''
def countSpikes(self, facts):
firstBad = 0
triplets = self.gatherFactGroups(facts, 3)
spikeCount = 0
for trip in triplets:
diff1 = trip[1].value - trip[0].value
diff2 = trip[1].value - trip[2].value
# Check that both changes are greater than the allowed "spike threshold"
if (float(abs(diff1))>SPIKE_THRESHOLD) and (float(abs(diff2))>SPIKE_THRESHOLD):
# If so, it is only a spike if one change is positive and the other is negative.
if(diff1 > 0 and diff2 > 0) or (diff1 < 0 and diff2 < 0):
spikeCount+=1
if firstBad==0: firstBad = trip[1].datetime.datetime0_23
return (spikeCount, firstBad)
'''Counts the number of "jumps" in a set of facts for a station and sensor.
A jump is similar to a spike except that it goes up/down for 2 hours
before returning to normal, rather than for just one hour.'''
def countJumps(self, facts):
firstBad = 0
quadruples = self.gatherFactGroups(facts, 4)
jumpCount = 0
for quad in quadruples:
diff1 = (quad[1].value - quad[0].value)
diff2 = (quad[1].value - quad[2].value)
diff3 = (quad[2].value - quad[3].value)
# Check that the first and last change are greater than the allowed "spike threshold"
# and that the middle change is LESS than the allowed threshold.
if (float(abs(diff1))>SPIKE_THRESHOLD) and (float(abs(diff2))<SPIKE_THRESHOLD) and (float(abs(diff3))>SPIKE_THRESHOLD):
# If so, it is only a jump if the first and last changes are in the opposite direction.
if (diff1 > 0 and diff3 > 0) or (diff1 < 0 and diff3 < 0):
jumpCount+=1
if firstBad==0: firstBad = quad[1].datetime.datetime0_23
return (jumpCount, firstBad)
'''Group facts for a sensor together in each consecutive group of X datetimes.
Used for soil spike test.'''
def gatherFactGroups(self, facts, numInGroup):
factGroupList = []
# Could work without sorting it first, but sort anyway
sorted(facts, key=attrgetter("datetimeId"))
for fa in facts:
# Get each group of size numInGroup, which will be returned as lists of 1-item lists
factGroup = [facts.forDatetime(fa.datetimeId-idx) for idx in range(numInGroup)]
if any(not fact for fact in factGroup):
continue
# Now that we only have lists with all elNum facts, turn each 1-item fact
# list into a fact
factGroup = [fact[0] for fact in factGroup]
# Remove any groups of elNum facts that have any flagged facts.
if any(self.isFlaggedOtherThanSensor(fact) for fact in factGroup):
continue
#print "range: %s-%s elems: %s %s" % (dt-1,dt+2,smel,stel)
factGroupList.append(factGroup)
return factGroupList
'''For a fact, determines if it has any range, door, frozen flags or any
other flag that is not a bad sensor flag. Since this application is intended
to determine independently whether the sensor is "bad" we do not want to
take into account any previous determination that it is bad.'''
def isFlaggedOtherThanSensor(self, fact):
return (fact.flag > 0) and (fact.flag != 32)
'''Determines if an element represents a soil moisture dielectric sensor.'''
def isSoilMoistureElement(self, elem):
elemName = elem.name
regexMatch = re.match("SM[123][(005)|(010)|(020)|(050)|(100)]",elemName)
if regexMatch is not None:
return True
return False
# END
| eggsyntax/crnscript | src/sensorreport/SoilReport.py | SoilReport.py | py | 6,260 | python | en | code | 0 | github-code | 90 |
18069458819 | # https://atcoder.jp/contests/agc002/tasks/agc002_b
n, m = map(int, input().split())
xy = []
for _ in range(m):
x, y = map(int, input().split())
xy.append((x - 1, y - 1))
box = [0] * n
box[0] = 1
num = [1] * n
for x, y in xy:
if box[x]:
box[y] |= 1
num[x] -= 1
num[y] += 1
if not num[x]:
box[x] = 0
ans = 0
for i in range(n):
if box[i] and num[i]:
ans += 1
print(ans) | Aasthaengg/IBMdataset | Python_codes/p04034/s631155548.py | s631155548.py | py | 422 | python | en | code | 0 | github-code | 90 |
17932575479 | # https://atcoder.jp/contests/abc079/tasks/abc079_d
# ワーシャルフロイド
h, w = map(int, input().split())
edge = [[] for _ in range(10)]
for i in range(10):
edge[i] = list(map(int, input().split()))
num = [list(map(int, input().split())) for _ in range(h)]
for k in range(10):
for i in range(10):
for j in range(10):
# iからjへの最短距離
# 負の辺がある場合 つっこむとかなり遅くなる
# if edge[i][k] != float('inf') and edge[k][j] != float('inf'):
edge[i][j] = min(edge[i][j], edge[i][k] + edge[k][j])
ans = 0
for i in num:
for j in i:
if j == 1 or j == -1:
continue
else:
ans += edge[j][1]
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03546/s995868052.py | s995868052.py | py | 752 | python | en | code | 0 | github-code | 90 |
16845667446 | import cv2
from pathlib import Path
cwd = Path.cwd()/'trident/scripts'
IMAGE_PATH = str(cwd/'rockfish.jpg')
mlDir = cwd/'tf_files'
def get_image():
img = cv2.imread(IMAGE_PATH)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img | jonnyk20/trident | trident/scripts/image_tools.py | image_tools.py | py | 245 | python | en | code | 0 | github-code | 90 |
34537232549 | import collections
from typing import List
'''
当合并的条件有很多的时候,注意反过来思考,不要一味的找合并条件
'''
class Solution:
def merge(self, intervals: List[List[int]]) -> List[List[int]]:
ans =[]
intervals=sorted(intervals)
for interval in intervals:
# 如果列表为空,或者当前区间与上一区间不重合,直接添加
if not ans or ans[-1][1] < interval[0]:
ans.append(interval)
else:
# 否则的话,我们就可以与上一区间进行合并
ans[-1][1] = max(ans[-1][1], interval[1])
return ans
if __name__ == '__main__':
intervals =[[1,4],[0,4]]
# intervals=sorted(intervals)
# print(intervals)
test = Solution()
print(test.merge(intervals))
| zhengyaoyaoyao/leetcodePython | leetcode/medium/56. 合并区间.py | 56. 合并区间.py | py | 839 | python | zh | code | 0 | github-code | 90 |
10950408791 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from model.queues import queues, display_name_for_queue
from model.workitems import WorkItems
from model import queuestatus
class QueueStatus(webapp.RequestHandler):
def _rows_for_work_items(self, work_items):
if not work_items:
return []
rows = []
for item_id in work_items.item_ids:
rows.append({
"attachment_id": item_id,
"bug_id": 1,
})
return rows
def get(self, queue_name):
work_items = WorkItems.all().filter("queue_name =", queue_name).get()
statuses = queuestatus.QueueStatus.all().filter("queue_name =", queue_name).order("-date").fetch(15)
status_groups_by_patch_id = {}
status_groups = []
synthetic_patch_id_counter = 0
for status in statuses:
if status.active_patch_id:
patch_id = status.active_patch_id
else:
patch_id = 'synthetic-%d' % synthetic_patch_id_counter
synthetic_patch_id_counter += 1
if patch_id not in status_groups_by_patch_id:
new_status_group = []
status_groups_by_patch_id[patch_id] = new_status_group
status_groups.append(new_status_group)
status_groups_by_patch_id[patch_id].append(status)
template_values = {
"display_queue_name": display_name_for_queue(queue_name),
"work_item_rows": self._rows_for_work_items(work_items),
"status_groups": status_groups,
}
self.response.out.write(template.render("templates/queuestatus.html", template_values))
| blackberry/WebKit-Smartphone | webkit/WebKitTools/QueueStatusServer/handlers/queuestatus.py | queuestatus.py | py | 3,344 | python | en | code | 13 | github-code | 90 |
18558288309 | def main():
N, M = (int(_) for _ in input().split())
if N > 1 and M > 1:
N = max(0, N-2)
M = max(0, M-2)
print(N * M)
else:
if N * M == 1:
print(1)
else:
print(max(max(N, M)-2, 0))
return
if __name__ == '__main__':
main() | Aasthaengg/IBMdataset | Python_codes/p03417/s243447212.py | s243447212.py | py | 264 | python | en | code | 0 | github-code | 90 |
20840306772 | # Tool = "BESCOM ElecMeter"
# HandcraftedBy : "Atharvan Technoligical Development Center (ATDC)"\
# Web : www.atharvantechsys.com
# Version = "1.4"
# LastModifiedOn : "5th April 2022"
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import Resources
import copy
import requests
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from threading import*
import os
import sys
import time
import datetime
from PyQt5.QtWidgets import *
from PyQt5 import QtGui
MMPrev = [11111, 0, 0, 0, 0]
S1Prev = [11111, 0, 0, 0, 0]
S2Prev = [11111, 0, 0, 0, 0]
def showUserInfo(message):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Information)
msgBox.setText(message)
msgBox.setWindowTitle("Status Update")
msgBox.setStandardButtons(QMessageBox.Ok)
msgBox.show()
returnValue = msgBox.exec()
if returnValue == QMessageBox.Ok: pass
else: pass
#msgBox.buttonClicked.connect(msgButtonClick)
#returnValue = msgBox.exec()
class Label():
def __init__(self , text):
self.Label = QLabel(text)
self.Label.setFixedSize(80, 40)
self.Label.setFont(QFont('Times', 14))
#self.Label.setStyleSheet("border: 1px solid dodgerblue;")
self.Label.setAlignment(Qt.AlignCenter)
class LCDDisplay():
def __init__(self , value):
self.LCD = QLCDNumber()
self.LCD.setStyleSheet("color: rgb(20, 114, 175)")
self.LCD.setStyleSheet("background-color: #C5D6D0")
self.LCD.setFont(QFont('Times', 14))
self.LCD.setFixedSize(80, 40)
self.LCD.display(str(value))
class TaskControlBtn:
def __init__(self, Cord_X, Cord_Y, Iconname):
self.button = QPushButton(MainWindowGUI)
self.button.move(Cord_X, Cord_Y)
self.button.resize(37, 37)
self.button.show()
# Acquire relative paths of files
def resource_path(relative_path):
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
#IconFilepath = resource_path(":/resources/AI_Volved.ico")
IconFilepath = ":/resources/electricity_13643.ico"
def GenerateLog():
ServerGETUrl = " https://api.thingspeak.com/channels/1664584/feeds.json"
recievedData = requests.get(url=ServerGETUrl, verify=False)
data = recievedData.json()
Log = []
for DataField in data["feeds"]:
F1 = DataField["field1"]
if F1 is not None:
field = F1
else: field = "Invalid Data"
data = "Updated: " + str(DataField["created_at"]) + " UTC, Data: " + field
Log.append(data)
LogFile = open("Log_BESCOMElecMTR.txt", "w+")
for index in range(len(Log)):
LogFile.write(Log[index] + "\n")
LogFile.close()
showUserInfo("Log File has been successfully Generated.")
def updatefields(MM,S1,S2):
    # Push the latest main-meter (MM) and slave (S1, S2) readings onto the
    # LCD widgets and refresh the live-status panel. Reads many module-level
    # widget globals created in the __main__ block.
    # Each of MM/S1/S2 is [flags_string, phaseR, phaseY, phaseB, status].
    global MOverallHealth_Label
    def ERRnoERRARb(data,LCDObj):
        # "1" marks a fault: tint the LCD red and show 1; otherwise restore
        # the normal tint and show 0.
        if data=="1":
            LCDObj.LCD.setStyleSheet("background-color: #F5C6BE")
            return "1"
        else :
            LCDObj.LCD.setStyleSheet("background-color: #C5D6D0")
            return "0"
    # Main meter: per-line fault flags then the three phase read-outs.
    MM_R.LCD.display(ERRnoERRARb(MM[0][0],MM_R))
    MM_B.LCD.display(ERRnoERRARb(MM[0][1],MM_B))
    MM_Y.LCD.display(ERRnoERRARb(MM[0][2],MM_Y))
    MM_N.LCD.display(ERRnoERRARb(MM[0][3],MM_N))
    MM_G.LCD.display(ERRnoERRARb(MM[0][4],MM_G))
    MM_Phase_R.LCD.display(MM[1])
    MM_Phase_Y.LCD.display(MM[2])
    MM_Phase_B.LCD.display(MM[3])
    # Slave 1.
    S1_R.LCD.display(ERRnoERRARb(S1[0][0],S1_R))
    S1_B.LCD.display(ERRnoERRARb(S1[0][1],S1_B))
    S1_Y.LCD.display(ERRnoERRARb(S1[0][2],S1_Y))
    S1_N.LCD.display(ERRnoERRARb(S1[0][3],S1_N))
    S1_G.LCD.display(ERRnoERRARb(S1[0][4],S1_G))
    S1_Phase_R.LCD.display(S1[1])
    S1_Phase_Y.LCD.display(S1[2])
    S1_Phase_B.LCD.display(S1[3])
    # Slave 2.
    S2_R.LCD.display(ERRnoERRARb(S2[0][0],S2_R))
    S2_B.LCD.display(ERRnoERRARb(S2[0][1],S2_B))
    S2_Y.LCD.display(ERRnoERRARb(S2[0][2],S2_Y))
    S2_N.LCD.display(ERRnoERRARb(S2[0][3],S2_N))
    S2_G.LCD.display(ERRnoERRARb(S2[0][4],S2_G))
    S2_Phase_R.LCD.display(S2[1])
    S2_Phase_Y.LCD.display(S2[2])
    S2_Phase_B.LCD.display(S2[3])
    # Overall health summary: MCCB state plus per-slave comm status.
    MCCBstatus = "NA"
    S1status = "NA"
    S2status = "NA"
    errFlag = 0
    if (MM[4] == "1") :
        MCCBstatus = "MCCB : ON"
    else :
        MCCBstatus = "MCCB : OFF"
        errFlag = 1
    if (S1[4] == "0") :
        S1status = "S1 COMM : OK"
    else:
        S1status = "S1 COMM : NOT OK"
        errFlag= 1
    if (S2[4] == "0"):
        S2status = "S2 COMM : OK"
    else:
        S2status = "S2 COMM : NOT OK"
        errFlag = 1
    # Red text when any check failed, green when all healthy.
    if errFlag :
        MOverallHealth_Label.Label.setStyleSheet("QLabel {color : red; }");
    else: MOverallHealth_Label.Label.setStyleSheet("QLabel {color : green; }");
    now = datetime.datetime.now()
    current_time = now.strftime("%H:%M:%S:%f")
    StatusUpdate = "Live Status:"+"\n\n"+MCCBstatus +"\n"+ S1status +"\n"+ S2status + "\n\n"+"Last synched Time: "+current_time
    MOverallHealth_Label.Label.setText(StatusUpdate)
def ArbitrateTask():
    """Start the background server-sync thread (Task1) exactly once.

    Any failure (including the RuntimeError a Thread raises when started
    after it has already finished) is shown to the user in a dialog.
    """
    try:
        if not Task1.is_alive():
            Task1.start()
        else:
            showUserInfo("Server Synch already initiated")
    except Exception as error:
        # Fixed: pass a string, not the raw exception object, so the
        # Qt dialog's setText receives valid text.
        showUserInfo(str(error))
DataLogTable = {}  # timestamp -> [MM, S1, S2] payload strings, rebuilt on every poll
def StartOperation():
    # Worker loop (run on the Task1 thread): marks the Connect button
    # green, then forever polls the ThingSpeak channel, refreshes the
    # LCDs when the latest frame changed, and repaints the log table.
    global StartOperationBtn,DataLogTable,MMPrev,S1Prev,S2Prev
    StartOperationBtn.setStyleSheet(
        "QPushButton {border: 1px blue;border-radius: 5px; background-color: green; color : white;}""QPushButton::hover"
        "{"
        "background-color : #228B22;"
        "}")
    StartOperationBtn.setText("Connected")
    while(1):
        ServerGETUrl = " https://api.thingspeak.com/channels/1664584/feeds.json"
        recievedData = requests.get(url=ServerGETUrl, verify=False)
        data = recievedData.json()
        #print(data)
        # field1 is a "$"-separated composite: [MM, S1, S2] segments.
        LastUpdatedData = data["feeds"][-1]["field1"].split("$")[1:]
        def LogTablUpdate():
            # Rebuild DataLogTable from the whole feed and render the
            # last 5 entries (converted UTC -> IST, +05:30) into LogTable.
            global DataLogTable,MMPrev,S1Prev,S2Prev
            DataLogTable.clear()
            for DataField in data["feeds"]:
                #print(DataField)
                MMData=str(DataField["field1"]).strip().split("$")[1]
                S1Data=str(DataField["field1"]).strip().split("$")[2]
                S2Data=str(DataField["field1"]).strip().split("$")[3]
                # Strip the fixed-width prefix/suffix wrappers from each segment.
                MMData = MMData[3:len(MMData)-1]
                S1Data = S1Data[3:len(S1Data)-1]
                S2Data = S2Data[3:]
                DataLogTable[DataField["created_at"].replace("Z","").replace("T"," ")] = [MMData,S1Data,S2Data]
            #print(DataLogTable)
            DataLogTableTimeStamps = list(DataLogTable.keys())
            if len(DataLogTableTimeStamps) > 5:
                DataLogTableTimeStamps = list(DataLogTable.keys())[-5:]
            for row in range(len(DataLogTableTimeStamps)) :
                TimeInIST = datetime.datetime.strptime(str(DataLogTableTimeStamps[row]),'%Y-%m-%d %H:%M:%S') \
                            + datetime.timedelta(hours=5,minutes=30)
                LogTable.setItem(row, 0, QTableWidgetItem(str(TimeInIST)))
                LogTable.setItem(row, 1, QTableWidgetItem(DataLogTable[DataLogTableTimeStamps[row]][0]))
                LogTable.setItem(row, 2, QTableWidgetItem(DataLogTable[DataLogTableTimeStamps[row]][1]))
                LogTable.setItem(row, 3, QTableWidgetItem(DataLogTable[DataLogTableTimeStamps[row]][2]))
            LogTable.update()
        # Parse the newest frame; fall back to the sentinel on any failure.
        try:
            if data["feeds"][-1]["field1"] is not None:
                MM = LastUpdatedData[0].split(",")[1:]
                S1 = LastUpdatedData[1].split(",")[1:]
                S2 = LastUpdatedData[2].split(",")[1:]
            else :
                MM = [11111,0,0,0,0]
                S1 = [11111,0,0,0,0]
                S2 = [11111,0,0,0,0]
        except :
            MM = [11111, 0, 0, 0, 0]
            S1 = [11111, 0, 0, 0, 0]
            S2 = [11111, 0, 0, 0, 0]
        # Only repaint the LCDs when something actually changed.
        try:
            if(MM!=MMPrev or S1!=S1Prev or S2!=S2Prev):
                updatefields(MM, S1, S2)
                MMPrev = copy.deepcopy(MM)
                S1Prev = copy.deepcopy(S1)
                S2Prev = copy.deepcopy(S2)
        except Exception as error:
            print(error)
            continue
        LogTablUpdate()
        MainWindowGUI.update()
        time.sleep(3)  # poll interval
Task1 = Thread(target=StartOperation)  # background sync worker, started on demand by ArbitrateTask
if __name__ == "__main__":
    # Application bootstrap: build the whole dashboard, then enter the Qt loop.
    Aplication = QApplication(sys.argv)
    MainWindowGUI = QWidget()
    MainWindowGUI.setFixedSize(1366, 768)
    MainWindowGUI.setWindowTitle('BESCOM ElecMeter')
    MainWindowGUI.setStyleSheet("background-color: white;")
    MainWindowGUI.setObjectName("MainMenu");
    #QString qwidgetStyle = "QWidget#MainMenu {background-image: url(background.jpg);}";
    #qwidgetStyle = "QWidget#MainMenu {background-image: url(background.jpg); border: 5px solid rgba(3, 5, 28, 1);}";
    MainWindowGUI.setStyleSheet("QWidget#MainMenu{background-image: url(:/resources/Wallpaper.jpg) no-repeat center center fixed;}")
    MainWindowGUI.setWindowIcon(QtGui.QIcon(IconFilepath))
    # Global pixel offsets applied to every widget position below.
    Xfactor = -170
    Yfactor = 40
    #MM_Data_Frame.setStyleSheet("QFrame { background-color: dodgerblue } ");
    #MM_Data_Frame.setFrameStyle(QFrame.Panel | QFrame.Raised)
    # Label_RBYNG_Frame.setStyleSheet("QFrame { background-color : rgba(255, 255, 255, 10); }")
    # --- container frames for the indicator grids ---
    Label_RBYNG_Frame = QFrame(MainWindowGUI)
    Label_RBYNG_Frame.move(500+Xfactor, 10+Yfactor)
    Label_RBYNG_Frame.setStyleSheet("background-color: darkgrey")
    MM_Data_Frame = QFrame(MainWindowGUI)
    MM_Data_Frame.move(500+Xfactor, 110+Yfactor)
    S1_Data_Frame = QFrame(MainWindowGUI)
    S1_Data_Frame.move(500+Xfactor, 190+Yfactor-30)
    S2_Data_Frame = QFrame(MainWindowGUI)
    S2_Data_Frame.move(500+Xfactor, 270+Yfactor-60)
    MM_PhaseData_Frame = QFrame(MainWindowGUI)
    MM_PhaseData_Frame.move(980+Xfactor, 110+Yfactor)
    S1_PhaseData_Frame = QFrame(MainWindowGUI)
    S1_PhaseData_Frame.move(980+Xfactor, 190+Yfactor-30)
    S2_PhaseData_Frame = QFrame(MainWindowGUI)
    S2_PhaseData_Frame.move(980+Xfactor, 270+Yfactor-60)
    MS_Frame = QFrame(MainWindowGUI)
    MS_Frame.move(370+Xfactor, 110 + Yfactor)
    Phase_Frame = QFrame(MainWindowGUI)
    Phase_Frame.move(980+Xfactor, 10 + Yfactor)
    Phase_Frame.setStyleSheet("background-color: darkgrey")
    DataLog_Frame = QFrame(MainWindowGUI)
    DataLog_Frame.move(1600+Xfactor, 50 + Yfactor)
    MOverallHealth_Frame = QFrame(MainWindowGUI)
    MOverallHealth_Frame.move(1270+Xfactor, 10 + 380)
    # Multi-line live-status panel (coloured red/green by updatefields).
    MOverallHealth_Label = Label("Live Status : NA")
    MOverallHealth_Label.Label.setFont(QFont('Times', 8))
    MOverallHealth_Label.Label.setFixedSize(400, 300)
    MOverallHealth_Label.Label.setAlignment(Qt.AlignLeft)
    MOverallHealth_Frame_DataFramelayout = QHBoxLayout(MOverallHealth_Frame)
    MOverallHealth_Frame_DataFramelayout.addWidget(MOverallHealth_Label.Label)
    MOverallHealth_Frame_DataFramelayout.setContentsMargins(0, 0, 0, 0)
    # --- column headers (R/Y/B/P/N) and row headers (MM/S1/S2) ---
    MM_R_Label = Label("R")
    MM_R_Label.Label.setFixedSize(75, 40)
    MM_R_Label.Label.setStyleSheet("color : red; ");
    MM_B_Label = Label("Y")
    MM_B_Label.Label.setFixedSize(75, 40)
    MM_B_Label.Label.setStyleSheet("color : yellow; ");
    MM_Y_Label = Label("B")
    MM_Y_Label.Label.setFixedSize(75, 40)
    MM_Y_Label.Label.setStyleSheet("color : blue; ");
    MM_N_Label = Label("P")
    MM_N_Label.Label.setFixedSize(75, 40)
    MM_G_Label = Label("N")
    MM_G_Label.Label.setFixedSize(75, 40)
    MM_G_Label.Label.setStyleSheet("color : brown; ");
    MM_Label = Label("MM")
    S1_Label = Label("S1")
    S2_Label = Label("S2")
    Phase1_Label = Label("ϕR")
    Phase1_Label.Label.setFixedSize(80, 60)
    Phase2_Label = Label("ϕY")
    Phase2_Label.Label.setFixedSize(80, 60)
    Phase3_Label = Label("ϕB")
    Phase3_Label.Label.setFixedSize(80, 60)
    # --- LCD read-outs, all initialised to "0" ---
    MM_R = LCDDisplay("0")
    MM_B = LCDDisplay("0")
    MM_Y = LCDDisplay("0")
    MM_N = LCDDisplay("0")
    MM_G = LCDDisplay("0")
    MM_Phase_R = LCDDisplay("0")
    MM_Phase_Y = LCDDisplay("0")
    MM_Phase_B = LCDDisplay("0")
    S1_R = LCDDisplay("0")
    S1_B = LCDDisplay("0")
    S1_Y = LCDDisplay("0")
    S1_N = LCDDisplay("0")
    S1_G = LCDDisplay("0")
    S1_Phase_R = LCDDisplay("0")
    S1_Phase_Y = LCDDisplay("0")
    S1_Phase_B = LCDDisplay("0")
    S2_R = LCDDisplay("0")
    S2_B = LCDDisplay("0")
    S2_Y = LCDDisplay("0")
    S2_N = LCDDisplay("0")
    S2_G = LCDDisplay("0")
    S2_Phase_R = LCDDisplay("0")
    S2_Phase_Y = LCDDisplay("0")
    S2_Phase_B = LCDDisplay("0")
    #MainWindowGUI.horizontalGroupBox = QGroupBox("MM")
    # --- lay the widgets out inside their frames ---
    MM_Data_Framelayout = QHBoxLayout(MM_Data_Frame)
    MM_Data_Framelayout.addWidget(MM_R.LCD)
    MM_Data_Framelayout.addWidget(MM_B.LCD)
    MM_Data_Framelayout.addWidget(MM_Y.LCD)
    MM_Data_Framelayout.addWidget(MM_N.LCD)
    MM_Data_Framelayout.addWidget(MM_G.LCD)
    MM_Data_Frame.setLayout(MM_Data_Framelayout)
    MM_Data_Framelayout.setContentsMargins(0, 0, 0, 0)
    S1_Data_Framelayout = QHBoxLayout(S1_Data_Frame)
    S1_Data_Framelayout.addWidget(S1_R.LCD)
    S1_Data_Framelayout.addWidget(S1_B.LCD)
    S1_Data_Framelayout.addWidget(S1_Y.LCD)
    S1_Data_Framelayout.addWidget(S1_N.LCD)
    S1_Data_Framelayout.addWidget(S1_G.LCD)
    S1_Data_Frame.setLayout(S1_Data_Framelayout)
    S1_Data_Framelayout.setContentsMargins(0, 0, 0, 0)
    S2_Data_Framelayout = QHBoxLayout(S2_Data_Frame)
    S2_Data_Framelayout.addWidget(S2_R.LCD)
    S2_Data_Framelayout.addWidget(S2_B.LCD)
    S2_Data_Framelayout.addWidget(S2_Y.LCD)
    S2_Data_Framelayout.addWidget(S2_N.LCD)
    S2_Data_Framelayout.addWidget(S2_G.LCD)
    S2_Data_Frame.setLayout(S2_Data_Framelayout)
    S2_Data_Framelayout.setContentsMargins(0, 0, 0, 0)
    MS_Framelayout = QVBoxLayout(MS_Frame)
    MS_Framelayout.addWidget(MM_Label.Label)
    MS_Framelayout.addWidget(S1_Label.Label)
    MS_Framelayout.addWidget(S2_Label.Label)
    MS_Framelayout.setContentsMargins(0, 0, 0, 0)
    layout = QHBoxLayout(Label_RBYNG_Frame)
    layout.addWidget(MM_R_Label.Label)
    layout.addWidget(MM_B_Label.Label)
    layout.addWidget(MM_Y_Label.Label)
    layout.addWidget(MM_N_Label.Label)
    layout.addWidget(MM_G_Label.Label)
    Label_RBYNG_Frame.setLayout(layout)
    MS_Framelayout = QHBoxLayout(Phase_Frame)
    MS_Framelayout.addWidget(Phase1_Label.Label)
    MS_Framelayout.addWidget(Phase2_Label.Label)
    MS_Framelayout.addWidget(Phase3_Label.Label)
    MS_Framelayout.setContentsMargins(0, 0, 0, 0)
    MS_DataFramelayout = QHBoxLayout(MM_PhaseData_Frame)
    MS_DataFramelayout.addWidget(MM_Phase_R.LCD)
    MS_DataFramelayout.addWidget(MM_Phase_Y.LCD)
    MS_DataFramelayout.addWidget(MM_Phase_B.LCD)
    MS_DataFramelayout.setContentsMargins(0, 0, 0, 0)
    MS_DataFramelayout = QHBoxLayout(S1_PhaseData_Frame)
    MS_DataFramelayout.addWidget(S1_Phase_R.LCD)
    MS_DataFramelayout.addWidget(S1_Phase_Y.LCD)
    MS_DataFramelayout.addWidget(S1_Phase_B.LCD)
    MS_DataFramelayout.setContentsMargins(0, 0, 0, 0)
    MS_DataFramelayout = QHBoxLayout(S2_PhaseData_Frame)
    MS_DataFramelayout.addWidget(S2_Phase_R.LCD)
    MS_DataFramelayout.addWidget(S2_Phase_Y.LCD)
    MS_DataFramelayout.addWidget(S2_Phase_B.LCD)
    MS_DataFramelayout.setContentsMargins(0, 0, 0, 0)
    # --- action buttons ---
    StartOperationBtn = QPushButton(MainWindowGUI)
    StartOperationBtn.setText('Connect')
    StartOperationBtn.move(1350+Xfactor, 10 + Yfactor)
    StartOperationBtn.resize(140, 50)
    StartOperationBtn.setStyleSheet(
        "QPushButton {border: 1px blue;border-radius: 5px; background-color: #075691; color : white;}""QPushButton::hover"
        "{"
        "background-color : #1a85b4;"
        "}")
    StartOperationBtn.show()
    StartOperationBtn.clicked.connect(ArbitrateTask)
    SaveLog = QPushButton(MainWindowGUI)
    SaveLog.setText('Save Log')
    SaveLog.move(1350+Xfactor, 10 + Yfactor+60)
    SaveLog.resize(140, 50)
    SaveLog.setStyleSheet(
        "QPushButton {border: 1px blue;border-radius: 5px; background-color: #075691; color : white;}""QPushButton::hover"
        "{"
        "background-color : #1a85b4;"
        "}")
    SaveLog.show()
    SaveLog.clicked.connect(GenerateLog)
    # --- log table (populated by StartOperation.LogTablUpdate) ---
    LogTable = QTableWidget(MainWindowGUI)
    LogTable.setRowCount(20)
    LogTable.setColumnCount(4)
    LogTable.setFixedSize(740,250)
    LogTable.setStyleSheet("background-color: lightgrey")
    LogTable.move(500+Xfactor, 380)
    TimeLable = QTableWidgetItem("Time stamp (IST)")
    MMDataLable = QTableWidgetItem("MM Data")
    S1DataLable = QTableWidgetItem("S1 Data")
    S2DataLable = QTableWidgetItem("S2 Data")
    LogTable.setHorizontalHeaderItem(0, TimeLable)
    LogTable.setHorizontalHeaderItem(1, MMDataLable)
    LogTable.setHorizontalHeaderItem(2, S1DataLable)
    LogTable.setHorizontalHeaderItem(3, S2DataLable)
    #MM_N.LCD.display("99")
    MainWindowGUI.showMaximized()
    sys.exit(Aplication.exec_())
from IPython.core.display import display, HTML
from IPython.core.magic import register_line_magic, register_line_cell_magic
@register_line_magic
def bokehlab(line):
"""
Magic equivalent to %load_ext bokehlab. Injects keywords like 'plot'
into global namespace.
"""
from bokehlab import CONFIG, load_config, RESOURCE_MODES
load_config()
parts = line.split()
verbose = False
if '-v' in parts:
parts.remove('-v')
verbose = True
if '--verbose' in parts:
parts.remove('-v')
verbose = True
line = ' '.join(parts)
if line in RESOURCE_MODES:
CONFIG['resources'] = {'mode': line}
elif line:
print(f'Unknown resources mode: "{line}". Available modes: {RESOURCE_MODES}')
if verbose:
print('Using', CONFIG.get('resources', {}).get('mode'), 'resources')
ip = get_ipython()
if 'bokehlab' not in ip.extension_manager.loaded:
ip.run_line_magic('load_ext', 'bokehlab')
else:
display(HTML('<div class="bk-root">BokehJS already loaded, reloading...</div>'))
ip.run_line_magic('reload_ext', 'bokehlab')
@register_line_cell_magic
def bokehlab_config(line, cell=None):
    '''
    Configure bokehlab. Syntax:
    1) %bokehlab_config [-g/--global] key=value [key1=value1 [...]]
       -g or --global saves config to ~/.bokeh/bokehlab.yaml
       For example,
       %bokehlab_config figure.width=500 figure.height=200
    2) %bokehlab_config [-g/--global] -d/--delete key [key1 [...]]
       deletes the corresponding keys
    3) %bokehlab_config without arguments displays current config
    4) %bokehlab --clear deletes ~/.bokeh/bokehlab.yaml
    '''
    # All parsing and persistence is delegated to bokehlab.config.configure.
    from bokehlab.config import configure
    configure(line, cell)
@register_line_cell_magic
def blc(line, cell=None):
    """Short alias for the %bokehlab_config / %%bokehlab_config magic."""
    return bokehlab_config(line, cell)
| axil/bokehlab | bokehlab/bokehlab_magic.py | bokehlab_magic.py | py | 1,900 | python | en | code | 1 | github-code | 90 |
# imports libraries
import pandas as pd
from sklearn.ensemble import RandomForestRegressor

# Reads & describes training data from CSV files.
X = pd.read_csv('Xdata.csv')
y = pd.read_csv('Ydata.csv')
print(X.describe())

# Drop the 'Years' column (it is an index, not a feature).
# Fixed: drop('Years', 1) used the positional `axis` argument, which was
# deprecated in pandas 1.x and removed in pandas 2.0.
Xdrop = X.drop(columns='Years')
ydrop = y.drop(columns='Years')

# Reads the feature rows we want predictions for.
PredictX = pd.read_csv('PredictXv2.csv')
PredictXdrop = PredictX.drop(columns='Years')

# Fit a random forest on the full training data (fixed seed for
# reproducibility).
rf_model_on_full_data = RandomForestRegressor(random_state=1)
rf_model_on_full_data.fit(Xdrop, ydrop)

# Use the fitted model to make predictions and save them alongside Years.
test_preds = rf_model_on_full_data.predict(PredictXdrop)
output = pd.DataFrame({'Years': PredictX.Years, 'Color' : test_preds})
print(output)
output.to_csv('Predictions.csv', index=False)
# yellow if eagles win
# orange if chiefs win
"""
This is a mock CLI for sending requests for benchmarks.
"""
import time
import pprint as pp
from digi.util import patch_spec, get_spec
# (group, version, resource-plural, name, namespace) tuples identifying
# the digis addressed by patch_spec/get_spec.
room_gvr = ("bench.digi.dev", "v1", "rooms", "room-test", "default")
measure_gvr = ("bench.digi.dev", "v1", "measures", "measure-test", "default")
# Intent/status values used by the benchmark runs.
ROOM_ORIG_INTENT = 0.8
ROOM_INTENT = 0.1
ROOM_STATUS = 0.1
LAMP_INTENT = 0.1
LAMP_STATUS = 0.1
# Module-level scratch dict of benchmark timestamps; (re)built per run.
measure = None
def send_request(auri, s: dict) -> None:
    """PATCH spec `s` onto the digi identified by `auri`.

    auri: (group, version, resource, name, namespace) tuple, splatted
    into patch_spec. On any error the failure is printed and the whole
    process exits (this is a benchmark CLI, not a library).
    """
    # Fixed: removed an unused `global measure` declaration -- this
    # function never touches the module-level measure dict.
    resp, e = patch_spec(*auri, s)
    if e is not None:
        print(f"bench: encountered error {e} \n {resp}")
        exit()
def benchmark_room_lamp(root_intent=ROOM_INTENT, skip_result=False):
    """Drive one brightness-intent request through the room digi and time it.

    root_intent: brightness intent written to the room digi.
    skip_result: when True (used for warm-up), send the request and
        return {} without waiting for the probe timestamps.
    Returns a dict of latency metrics: ttf (total), fpt (forward),
    bpt (backward), dt (device time).
    """
    global measure
    # Fixed: dropped a dead `measure = dict()` assignment that was
    # immediately overwritten by the literal below.
    measure = {
        "start": time.time(),
        # "request": None,
        # "forward_root": None,
        # "backward_root": None,
        "forward_leaf": None,
        "backward_leaf": None,
    }
    send_request(room_gvr, {
        "control": {
            "brightness": {
                "intent": root_intent
            }
        }
    })
    if skip_result:
        return {}
    # Busy-poll the measure digi until both leaf timestamps are populated
    # (reset() writes -1 sentinels, so `> 0` filters those out too).
    while True:
        if all(v is not None and v > 0 for v in measure.values()):
            break
        measure_spec, _, _ = get_spec(*measure_gvr)
        measure.update(measure_spec["obs"])
    now = time.time()
    pp.pprint(measure)
    # post proc
    return {
        "ttf": now - measure["start"],
        "fpt": measure["forward_leaf"] - measure["start"],
        "bpt": now - measure["backward_leaf"],
        "dt": measure["backward_leaf"] - measure["forward_leaf"],
    }
def reset():
    """Clear the local measurement cache and write -1 sentinels into the
    measure digi's observed leaf timestamps, so the next benchmark run
    starts from a known state."""
    global measure
    measure = None
    sentinel_obs = {
        "forward_leaf": -1,
        "backward_leaf": -1,
    }
    send_request(measure_gvr, {"obs": sentinel_obs})
if __name__ == '__main__':
    # warm-up: one throwaway request so later timings exclude cold-start cost
    benchmark_room_lamp(root_intent=0.5, skip_result=True)
    print("warmed up")
    time.sleep(5)
    reset()
    time.sleep(5)
    # measured run: time a single intent change end to end
    result = benchmark_room_lamp(root_intent=ROOM_INTENT)
    pp.pprint(result)
| digi-project/dspace | benchmarks/room_lamp.py | room_lamp.py | py | 2,122 | python | en | code | 11 | github-code | 90 |
import math
def py():
    """Print the affirmative answer "Yes"."""
    answer = "Yes"
    print(answer)
def pn():
    """Print the negative answer "No"."""
    answer = "No"
    print(answer)
def iin():
    """Read one line from stdin and return it as an integer."""
    raw = input()
    return int(raw)
# Reads board dimensions h, w and computes ceil(h*w/2), except that a
# 1-wide or 1-tall board yields 1 (contest-specific rule; the original
# problem statement is not visible here -- confirm before reuse).
neko = 0
nya = 0
nuko = 0
h,w = map(int,input().split())
neko = h%2  # 1 when the height is odd
nya = w%2   # 1 when the width is odd
nuko = h * w /2
if neko + nya == 2:
    nuko = nuko + 1  # both dimensions odd: +1 before int() yields the ceiling
if (h == 1)or(w == 1):
    nuko = 1  # degenerate single-row/column board
print(int(nuko))
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 4 05:11:04 2020
@author: donbo
"""
# %% imports
import importlib
import jax
import jax.numpy as jnp
import cyipopt as cy
from cyipopt import minimize_ipopt
# import numpy as jnp
# from scipy.optimize import minimize
# this next line is CRUCIAL or we will lose precision
from jax.config import config; config.update("jax_enable_x64", True)
from timeit import default_timer as timer
from collections import namedtuple
import src.utilities as ut
import src.functions_geoweight_poisson as fgp
# %% reimports
importlib.reload(fgp)
# %% option defaults
# User-level defaults for the poisson() driver (merged flat with the
# solver options below into options_defaults).
user_defaults = {
    'scaling': True,
    'scale_goal': 10.0, # this is an important parameter!
    'init_beta': 0.5,
    'objgoal': 100,
    'quiet': True}
# Ipopt solver options, passed through to nlp.add_option().
ipopts = {
    'print_level': 0,
    'file_print_level': 5,
    'max_iter': 100,
    'linear_solver': 'ma86',
    'print_user_options': 'yes'
}
options_defaults = {**ipopts, **user_defaults}
# options_defaults = {**solver_defaults, **user_defaults}
# %% problem class
class ipprob:
    """Callback object handed to cyipopt.Problem: wraps the jitted
    objective (f), gradient (g) and hessian (h) callables for an
    unconstrained (m=0) problem."""
    def __init__(self, f, g, h, quiet=True):
        self.f = f
        self.g = g
        self.h = h
        self.quiet = quiet
    def objective(self, x):
        """Returns the scalar value of the objective given x."""
        return self.f(x)
    def gradient(self, x):
        """Returns the gradient of the objective with respect to x."""
        return self.g(x)
    def hessian(self, x, lagrange, obj_factor):
        # With no constraints the Lagrangian hessian reduces to the
        # objective hessian scaled by obj_factor; `lagrange` is unused.
        H = self.h(x)
        return obj_factor*H
    def intermediate(
        self,
        alg_mod,
        iter_count,
        obj_value,
        inf_pr,
        inf_du,
        mu,
        d_norm,
        regularization_size,
        alpha_du,
        alpha_pr,
        ls_trials
    ):
        # Per-iteration progress hook: print a sparse trace (first 10
        # iterations, then every 10th) unless quiet.
        if(not self.quiet):
            if iter_count <= 10 or (iter_count % 10) == 0:
                print(f'{"":5} {iter_count:5d} {"":10} {obj_value:8.4e} {"":10} {inf_pr:8.4e}')
# %% poisson - the primary function
def poisson(wh, xmat, geotargets, options=None):
    """Solve the geographic-weighting Poisson problem with cyipopt.

    wh: household weights; xmat: characteristics matrix;
    geotargets: per-geography target matrix.
    options: optional dict overriding options_defaults (user options and
    ipopt solver options, flat or under an 'ipopts' sub-dict).
    Returns a named tuple with elapsed_seconds, whs_opt, geotargets_opt,
    beta_opt and the raw solver result.
    """
    a = timer()

    options_all = options_defaults.copy()
    # Bug fix: options defaults to None and dict.update(None) raises
    # TypeError; only merge when the caller actually supplied options.
    if options:
        options_all.update(options)
    opts = ut.dict_nt(options_all)  # convert dict to named tuple for ease of use

    if opts.scaling:
        xmat, geotargets, scale_factors = fgp.scale_problem(xmat, geotargets, opts.scale_goal)

    betavec0 = jnp.full(geotargets.size, opts.init_beta)  # 1e-13 or 1e-12 seems best
    dw = fgp.jax_get_diff_weights(geotargets)

    # Jitted objective, gradient and hessian closures over the data.
    ljax_sspd = lambda bvec: fgp.jax_sspd(bvec, wh, xmat, geotargets, dw)
    ljax_sspd = jax.jit(ljax_sspd)
    g = jax.grad(ljax_sspd)
    g = jax.jit(g)
    h = jax.hessian(ljax_sspd)
    h = jax.jit(h)

    nlp = cy.Problem(
        n=len(betavec0),
        m=0,
        problem_obj=ipprob(ljax_sspd, g, h, opts.quiet))

    # Bug fix: the defaults flatten the solver options into options_all,
    # so opts has no 'ipopts' attribute and `opts.ipopts` raised
    # AttributeError. Accept a nested 'ipopts' dict when supplied,
    # otherwise pull the known solver keys out of the flat options.
    solver_options = options_all.get('ipopts')
    if solver_options is None:
        solver_options = {k: options_all[k] for k in ipopts if k in options_all}
    for option, value in solver_options.items():
        nlp.add_option(option, value)

    x, result = nlp.solve(betavec0)

    # get return values
    beta_opt = x.reshape(geotargets.shape)
    whs_opt = fgp.get_whs_logs(beta_opt, wh, xmat, geotargets)
    geotargets_opt = jnp.dot(whs_opt.T, xmat)
    if opts.scaling:
        # undo the target scaling so results are in original units
        geotargets_opt = jnp.multiply(geotargets_opt, scale_factors)

    b = timer()
    print(f'\n Elapsed seconds: {b - a: 9.2f}')

    # create a named tuple of items to return
    fields = ('elapsed_seconds',
              'whs_opt',
              'geotargets_opt',
              'beta_opt',
              'result')
    Result = namedtuple('Result', fields, defaults=(None,) * len(fields))
    res = Result(elapsed_seconds=b - a,
                 whs_opt=whs_opt,
                 geotargets_opt=geotargets_opt,
                 beta_opt=beta_opt,
                 result=result)
    return res
| donboyd5/weighting | src/geoweight_poisson_ipopt.py | geoweight_poisson_ipopt.py | py | 4,354 | python | en | code | 0 | github-code | 90 |
18280153069 |
import operator
class SegmentTree:
    """Generic segment tree over a fixed-size array.

    Supports point assignment (set), point combination (add) and range
    queries (get) with the associative function fn, all in O(log n).
    """

    def __init__(self, size, fn=operator.add, default=None, initial_values=None):
        """
        :param int size: number of logical slots (rounded up to a power of 2)
        :param callable fn: binary function applied over ranges,
            e.g. operator.add, min, max, operator.xor
        :param default: identity element for fn used to pad unused slots;
            falls back to 0 when omitted
        :param list initial_values: optional initial leaf values
        """
        # Bug fix: the old `default = default or 0` discarded falsy
        # identity elements such as 0.0 or False; only substitute 0 when
        # default is actually None.
        default = 0 if default is None else default
        # size 以上である最小の 2 冪を size とする
        # (round size up to the smallest power of two)
        n = 1
        while n < size:
            n *= 2
        self._size = n
        self._fn = fn
        # 1-based-style flat heap layout: node k has children 2k+1, 2k+2;
        # leaves start at index _size - 1.
        self._tree = [default] * (self._size * 2 - 1)
        if initial_values:
            i = self._size - 1
            for v in initial_values:
                self._tree[i] = v
                i += 1
            # build internal nodes bottom-up
            i = self._size - 2
            while i >= 0:
                self._tree[i] = self._fn(self._tree[i * 2 + 1], self._tree[i * 2 + 2])
                i -= 1

    def set(self, i, value):
        """
        Assign value to slot i and recompute ancestors.
        :param int i:
        :param value:
        """
        x = self._size - 1 + i
        self._tree[x] = value
        while x > 0:
            x = (x - 1) // 2
            self._tree[x] = self._fn(self._tree[x * 2 + 1], self._tree[x * 2 + 2])

    def add(self, i, value):
        """
        Replace slot i with fn(current, value).
        :param int i:
        :param value:
        """
        x = self._size - 1 + i
        self.set(i, self._fn(self._tree[x], value))

    def get(self, from_i, to_i=None, k=0, L=None, r=None):
        """
        Return fn folded over [from_i, to_i); with to_i omitted, return
        the single value at from_i.
        :param int from_i:
        :param int to_i:
        :param int k: self._tree[k] holds fn folded over [L, r)
        :param int L:
        :param int r:
        """
        if to_i is None:
            return self._tree[self._size - 1 + from_i]
        L = 0 if L is None else L
        r = self._size if r is None else r
        if from_i <= L and r <= to_i:
            return self._tree[k]  # node range fully inside the query
        if to_i <= L or r <= from_i:
            return None  # node range disjoint from the query
        ret_L = self.get(from_i, to_i, k * 2 + 1, L, (L + r) // 2)
        ret_r = self.get(from_i, to_i, k * 2 + 2, (L + r) // 2, r)
        if ret_L is None:
            return ret_r
        if ret_r is None:
            return ret_L
        return self._fn(ret_L, ret_r)

    def __len__(self):
        # Internal (power-of-two) capacity, matching the original behavior.
        return self._size
from bisect import bisect_right
def resolve():
    # Greedy sweep over monsters sorted by position: each bomb of radius D
    # (diameter 2D after doubling) deals A damage per use; damage is tracked
    # as range additions on a segment tree used as a difference/prefix-sum
    # structure. Prints the minimum number of bombs.
    N, D, A = map(int, input().split())
    AB = [list(map(int, input().split())) for _ in range(N)]
    AB.sort()
    X, _ = zip(*AB)
    D = 2*D  # a bomb centred at its left edge covers [x, x + 2D]
    seg = SegmentTree(N+10)
    ans = 0
    for i, (x, h) in enumerate(AB):
        h = -(-h // A)  # ceil(h / A): bombs needed to kill this monster alone
        damage = seg.get(0, i+1)  # bombs already covering position i
        if h < damage:
            continue
        ans += h - damage
        # difference-array update: +delta at i, -delta past the last
        # monster still inside the bomb's reach (x + 2D)
        seg.add(i, h-damage)
        seg.add(bisect_right(X, x + D), - h + damage)
    print(ans)
if __name__ == "__main__":
resolve()
| Aasthaengg/IBMdataset | Python_codes/p02788/s554925231.py | s554925231.py | py | 3,045 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.