seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
38722920889 |
import csv
import os


def pool_social_scores(input_dir='.', output_path=''):
    """Pool per-fish lane numbers and social scores from Fishbook output CSVs.

    Every ``*.csv`` file in *input_dir* is assumed to hold one 5-row block per
    fish: the lane number in column 0 of the block's first row and the social
    score in column 1 of its second row.  One section per input file is
    appended to *output_path* (filename, lane row, score row, separator).

    Generalised from the original inline script: the directory and output file
    are parameters so the pooling logic is reusable and testable.  Files are
    processed in sorted order for deterministic output (os.listdir order is
    unspecified).
    """
    with open(output_path, 'w', newline='') as output_file:
        output_writer = csv.writer(output_file)
        for csv_filename in sorted(os.listdir(input_dir)):
            if not csv_filename.endswith('.csv'):
                continue  # skip non-csv files
            lane_numbers = ['Lane Number']
            social_scores = ['Social Score']
            with open(os.path.join(input_dir, csv_filename), newline='') as csv_file:
                data = list(csv.reader(csv_file))
            # Rows 0, 5, 10, ... carry lane numbers; rows 1, 6, 11, ... scores.
            for i in range(0, len(data), 5):
                lane_numbers.append(data[i][0])
            for i in range(1, len(data), 5):
                social_scores.append(data[i][1])
            output_writer.writerow([str(csv_filename)])
            output_writer.writerow(lane_numbers)
            output_writer.writerow(social_scores)
            output_writer.writerow(['************************'])


if __name__ == '__main__':
    # Set working directory to the folder holding the Fishbook script outputs,
    # and set the pooled output file name/path (both were blank placeholders
    # in the original script).
    os.chdir(r'')
    pool_social_scores('.', r'')
| yijie-geng/Fishbook | Fishbook assay/poolSocialScore.py | poolSocialScore.py | py | 1,027 | python | en | code | 0 | github-code | 90 |
def classify(digits: str) -> str:
    """Return 'Yes' if *digits* contains exactly one '1' or exactly one '0'.

    Mirrors the original per-character counting loop: only the characters
    '1' and '0' are counted; anything else is ignored.
    """
    return 'Yes' if digits.count('1') == 1 or digits.count('0') == 1 else 'No'


def main() -> None:
    """Read the number of test cases, then one digit string per case."""
    t = int(input())
    for _ in range(t):
        print(classify(input()))


if __name__ == '__main__':
    # Guarded so the module can be imported (e.g. for testing) without
    # blocking on stdin; behaviour when run as a script is unchanged.
    main()
| rajujha373/Codechef | PRACTICE/beginners/LONGSEQ.py | LONGSEQ.py | py | 272 | python | en | code | 1 | github-code | 90 |
23046728081 | '''
902. Numbers At Most N Given Digit Set
Hard
Given an array of digits, you can write numbers using each digits[i] as many times as we want. For example, if digits = ['1','3','5'], we may write numbers such as '13', '551', and '1351315'.
Return the number of positive integers that can be generated that are less than or equal to a given integer n.
Example 1:
Input: digits = ["1","3","5","7"], n = 100
Output: 20
Explanation:
The 20 numbers that can be written are:
1, 3, 5, 7, 11, 13, 15, 17, 31, 33, 35, 37, 51, 53, 55, 57, 71, 73, 75, 77.
https://leetcode.com/problems/numbers-at-most-n-given-digit-set/
'''
class Solution:
    def atMostNGivenDigitSet(self, digits: List[str], n: int) -> int:
        """Count positive integers <= n writable using only the given digits.

        Digit-DP over the decimal string of n: count[i] is the number of
        valid completions of positions i.. that keep the number <= n when
        the prefix before i matches n exactly.
        """
        target = str(n)
        width = len(target)
        base = len(digits)
        # count[width] == 1 accounts for the exact match of n itself.
        count = [0] * width + [1]
        for i in reversed(range(width)):
            for d in digits:
                if d < target[i]:
                    # A smaller digit here frees every later position.
                    count[i] += base ** (width - i - 1)
                elif d == target[i]:
                    # Prefix still tied with n; defer to the next position.
                    count[i] += count[i + 1]
        # Numbers with fewer digits than n are always smaller.
        shorter = sum(base ** k for k in range(1, width))
        return count[0] + shorter
| aditya-doshatti/Leetcode | umbers_at_most_n_given_digit_set_902.py | umbers_at_most_n_given_digit_set_902.py | py | 1,071 | python | en | code | 0 | github-code | 90 |
72207967978 | # -*- coding: utf-8 -*-
# @Time : 2019/10/23 0023 10:47
# @Author : 没有蜡笔的小新
# @E-mail : sqw123az@sina.com
# @FileName: N-Queens II.py
# @Software: PyCharm
# @Blog :https://blog.csdn.net/Asunqingwen
# @GitHub :https://github.com/Asunqingwen
"""
The n-queens puzzle is the problem of placing n queens on an n×n chessboard such that no two queens attack each other.
Given an integer n, return the number of distinct solutions to the n-queens puzzle.
Example:
Input: 4
Output: 2
Explanation: There are two distinct solutions to the 4-queens puzzle as shown below.
[
[".Q..", // Solution 1
"...Q",
"Q...",
"..Q."],
["..Q.", // Solution 2
"Q...",
"...Q",
".Q.."]
]
"""
def totalNQueens(n: int) -> int:
    """Return the number of distinct solutions to the n-queens puzzle.

    Backtracking by row: ``queens[r]`` holds the column of the queen placed
    on row ``r``; two bitmap lists mark which diagonals are still free.
    """
    def helper():
        nonlocal res
        row = len(queens)
        if row == n:
            res += 1
            return  # complete placement: nothing left to extend
        for col in range(n):
            # A square is safe if its column and both diagonals are unused.
            if col not in queens and xy_main[row - col] and xy_para[row + col]:
                queens.append(col)
                xy_main[row - col], xy_para[row + col] = 0, 0
                helper()
                queens.pop()
                xy_main[row - col], xy_para[row + col] = 1, 1

    queens = []
    xy_main = [1] * (2 * n - 1)  # main diagonals, indexed by row - col
    xy_para = [1] * (2 * n - 1)  # anti-diagonals, indexed by row + col
    res = 0
    helper()
    return res
if __name__ == '__main__':
    # Quick manual check against the documented example (n = 4 -> 2).
    board_size = 4
    print(totalNQueens(board_size))
| Asunqingwen/LeetCode | hard/N-Queens II.py | N-Queens II.py | py | 1,297 | python | en | code | 0 | github-code | 90 |
33472245488 | import subprocess
import sys
from contextlib import contextmanager
from pathlib import Path
from typing import Iterator
import pytest
from pkg_resources import Requirement
from pkginfo import Wheel
from vulcan.builder import resolve_deps
from vulcan.isolation import get_executable
@contextmanager
def verbose_called_process_error() -> Iterator[None]:
    """Echo a CalledProcessError's captured output, then re-raise it.

    Wrap subprocess calls in tests with this so failures show the child
    process's stdout/stderr instead of just the exit status.
    """
    try:
        yield
    except subprocess.CalledProcessError as exc:
        print(exc.stdout)
        print(exc.stderr, file=sys.stderr)
        raise
@pytest.fixture
def wheel_pkg_info(test_built_application_wheel: Path) -> Wheel:
    """Fixture: parsed metadata of the built test-application wheel."""
    return Wheel(str(test_built_application_wheel))
def versions_exist(*versions: str) -> bool:
    """Return True iff a Python executable exists for every given version."""
    try:
        for version in versions:
            get_executable(version)
    except FileNotFoundError:
        return False
    return True
class TestResolveDeps:
    """
    These tests are more for the fixtures than for anything in the library itself.
    """

    def test_resolve_deps_no_conflict(self, wheel_pkg_info: Wheel) -> None:
        # Each requirement name must appear with exactly one specifier, i.e.
        # no package is listed twice in the wheel's Requires-Dist metadata.
        reqs = [Requirement.parse(reqstring) for reqstring in wheel_pkg_info.requires_dist]
        assert len({req.name for req in reqs}) == len({(req.name, req.specifier) for req in reqs}), \
            'duplicate package found in requirements'

    @pytest.mark.asyncio
    async def test_empty_reqs_empty_deps(self) -> None:
        # No inputs -> no resolved base deps and no extras.
        with verbose_called_process_error():
            assert await resolve_deps([], {}) == ([], {})

    @pytest.mark.asyncio
    async def test_empty_base_non_empty_extras_empty_base(self) -> None:
        # Extras resolve independently; the base list stays empty.
        with verbose_called_process_error():
            base, extras = await resolve_deps([], {'test': ['requests']})
        assert base == []
        assert extras

    @pytest.mark.asyncio
    async def test_non_empty_base_empty_extras_empty_extras(self) -> None:
        with verbose_called_process_error():
            base, extras = await resolve_deps(['requests'], {})
        assert base
        assert extras == {}

    @pytest.mark.asyncio
    async def test_same_reqs_same_deps(self) -> None:
        with verbose_called_process_error():
            base, extras = await resolve_deps(['requests'], {'test': ['requests']})
        # output should be sorted, so it is good to just test equality here
        assert base == extras['test']

    @pytest.mark.asyncio
    async def test_conflicting_deps_raises(self) -> None:
        # Pinning two incompatible versions must fail the pip subprocess.
        with pytest.raises(subprocess.CalledProcessError):
            await resolve_deps(['requests==2.5.0'], {'test': ['requests==2.4.0']})

    @pytest.mark.skipif(not versions_exist('3.6', '3.9'), reason='missing python version for test')
    @pytest.mark.asyncio
    async def test_resolve_different_python_versions(self) -> None:
        # The same spec should resolve to different pins under different
        # interpreters (traitlets 5.x dropped Python 3.6 support).
        spec = 'traitlets>=4.0.1,<=5.0.5'
        with verbose_called_process_error():
            resolved, _ = await resolve_deps([spec], {}, python_version='3.6')
        print(resolved)
        assert 'traitlets==4.3.3' in resolved
        with verbose_called_process_error():
            resolved, _ = await resolve_deps([spec], {}, python_version='3.9')
        print(resolved)
        assert 'traitlets==5.0.5' in resolved
| optiver/vulcan-py | tests/cli/test_builder.py | test_builder.py | py | 3,160 | python | en | code | 10 | github-code | 90 |
38469778439 | from selenium import webdriver
import requests
from bs4 import BeautifulSoup
import yagmail
from datetime import date
import lxml
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")
browser = webdriver.Chrome(options=chrome_options)
try:
    # Build the travel date (same day, next month) as YYMMDD for the URL.
    # The original computed the month as str(int(today.month)+1), which
    # produced the invalid month '13' every December; wrap into January of
    # the next year instead.  Also renamed the result so it no longer
    # shadows the imported datetime.date class.
    today = date.today()
    next_month = today.month % 12 + 1
    next_year = today.year + (1 if today.month == 12 else 0)
    flight_date = '%02d%02d%02d' % (next_year % 100, next_month, today.day)
    DEPARTURE = 'fra'
    DESTINATION = 'kbv'

    def flight_checker():
        """Load the Skyscanner results page and print the first price element."""
        url = (f"https://www.skyscanner.de/transport/fluge/{DEPARTURE}/{DESTINATION}/"
               f"{flight_date}/?adults=1&adultsv2=1&cabinclass=economy&children=0"
               f"&childrenv2=&destinationentityid=27543110&inboundaltsenabled=true"
               f"&infants=0&originentityid=27541706&outboundaltsenabled=false"
               f"&preferdirects=false&ref=home&rtn=0")
        browser.get(url)
        prices = browser.find_element(
            By.CSS_SELECTOR,
            'div span.BpkText_bpk-text__YWQwM.BpkText_bpk-text--lg__ODFjM')
        print(prices)

    flight_checker()
finally:
    # Always release the browser process, even if the page load fails.
    browser.quit()
31547321753 | """
Module containing the 'Board' which consists of the piece positions and piece state
"""
# pylint: disable=no-value-for-parameter
# pylint: disable=import-outside-toplevel
import re
import utils
from fen import FENParser, FENBuilder
from settings import FEN_START_STATE, UNICODE_PIECES, FILE_NUMBERS, NOTATION, ALLEGIANCES
from chessboard.players import Players, Human, Computer
from chessboard.pieces import Pieces
from chessboard.move import Move
DECODE_PLAYER = {'h': Human, 'human': Human, 'c': Computer, 'computer': Computer}
class Board():
    """Piece positions and state for a chess game.

    Holds a flat 64-slot ``positions`` list (index 0 = a8 .. 63 = h1 per the
    FEN ordering used by the parser), a ``Pieces`` registry, the two
    ``Players``, and FEN clock counters.
    """

    def __init__(self, start_state=FEN_START_STATE, white='h', black='h'):
        # white/black are 'h'/'human' or 'c'/'computer' (see DECODE_PLAYER).
        self.pieces = Pieces()
        self.players = Players(DECODE_PLAYER[white](), DECODE_PLAYER[black]())
        self.positions = [None]*64
        self.halfmove_clk = None   # FEN halfmove clock (fifty-move rule)
        self.fullmove_num = None   # FEN fullmove number
        self.last_move_info = {'success': True, 'check_attackers': []}
        self.turn_clock = 0        # toggled after each successful turn
        self.reset(start_state=start_state)
        self.update()

    def reset(self, start_state=None):
        """Empty the board of pieces and state, then load a FEN position."""
        self.positions = [None]*64
        self.pieces.reset()
        self.players.reset()
        self.halfmove_clk = None
        self.fullmove_num = None
        self.last_move_info = {'success': True, 'check_attackers': []}
        if start_state is not None:
            self._init_board_from_FEN(start_state)
        else:
            self._init_board_from_FEN(FEN_START_STATE)
        self.update()

    @utils.algebraic
    def is_occupied(self, idx):
        """Return True if idx is occupied by a piece."""
        return self.positions[idx] is not None

    @utils.algebraic
    def get_piece(self, idx):
        """Return the piece instance at idx (or algebraic expression)."""
        return self.positions[idx]

    @utils.algebraic
    def get_piece_name(self, idx):
        """Return the piece name at idx (or algebraic expression)."""
        return str(self.positions[idx])

    def __str__(self):
        return self.to_string(unicode=True, rank_file=True)

    def __repr__(self):
        return self.to_string(unicode=True, rank_file=True)

    def to_string(self, unicode=False, rank_file=False, empty_symbol='-'):
        """Render the positions as a human-readable multi-line string."""
        lines = []
        if rank_file:
            # File letters header (indentation inside the literal may have
            # been wider in the original source -- TODO confirm spacing).
            lines.append(' ' + ' '.join(FILE_NUMBERS.keys()))
        for i in range(8):
            line = ''
            if rank_file:
                line += '%d ' % (8-i)  # rank label, 8 down to 1
            for j in range(8):
                p = self.positions[i*8 + j]
                if p is not None:
                    piece = UNICODE_PIECES[p.get_char()] if unicode else p.get_char()
                    # Pad to the width of the empty symbol so columns align.
                    line += piece + ' '*len(empty_symbol)
                else:
                    piece = empty_symbol
                    line += piece + ' '
            lines.append(line)
        return '\n'.join(lines)

    def _init_board_from_FEN(self, fen):
        """Initialise a game state from an FEN string (no-op if invalid)."""
        parser = FENParser(fen)
        if parser.is_valid():
            parser.parse()
            self.halfmove_clk = parser.halfmove_clk
            self.fullmove_num = parser.fullmove_num
            for piece_dict in parser.record:
                piece_inst = self._create_piece(piece_dict)
                self.pieces.add(piece_inst)
                self.positions[piece_dict['pos_idx']] = piece_inst
            self.players.set_current_player(parser.active_allegiance)

    def _create_piece(self, piece_dict):
        """Return a piece instance from a name, board index and allegiance.

        Imports the piece's module dynamically by name (e.g. 'pawn') from
        this package, instantiates the capitalised class, and merges the
        parsed attributes onto the instance.
        """
        piece_module = __import__('%s.%s' % (__name__.split('.')[0],
                                             piece_dict['piece_name']))
        piece_class = getattr(piece_module, piece_dict['piece_name'].capitalize())
        piece_inst = piece_class(init_position=piece_dict['pos_idx'],
                                 allegiance=piece_dict['allegiance'])
        piece_inst.__dict__ = {**piece_inst.__dict__, **piece_dict}
        return piece_inst

    def _create_promotion_piece(self, promotion_choice):
        """
        Initialise a piece from the promotion choice without a position or
        allegiance (both filled in later); None if no choice given.
        """
        if promotion_choice is not None:
            piece_dict = {}
            try:
                promotion_choice = NOTATION[promotion_choice.lower()]
            except KeyError:
                raise utils.PromotionException('Unrecognised choice of piece for '
                                               'promotion %s' % promotion_choice.lower())
            if promotion_choice == 'king':
                raise utils.PromotionException('Promotion to a king is not allowed')
            piece_dict['piece_name'] = promotion_choice
            piece_dict['pos_idx'] = None
            piece_dict['allegiance'] = None
            return self._create_piece(piece_dict)
        return None

    def get_FEN(self) -> str:
        """Return the current state of the board as an FEN string."""
        return FENBuilder(self.pieces, self.players, self.halfmove_clk, self.fullmove_num).build()

    def move(self, move):
        """Apply a full move given as space-separated turns (e.g. 'e4 e5')."""
        turns = move.split(' ')
        for turn in turns:
            self.turn(turn)

    @utils.algebraic_move
    def turn(self, piece, end_idx, promotion_choice=None):
        """Move the given piece to the given positional index.

        The move is applied tentatively; if it would leave the mover's own
        king in check it is reverted and last_move_info records the failure.
        """
        if end_idx not in piece.legal_moves:
            raise utils.InvalidMoveException(
                '%s is not a legal move for the piece %s' % (utils.idx_to_algebra(end_idx),
                                                             piece.get_overview()))
        self.pieces.reset_promotion()
        promotion_piece = self._create_promotion_piece(promotion_choice)
        move = Move(self.positions, piece, end_idx, promotion_piece=promotion_piece)
        # Mask captured pieces so the legality update ignores them.
        self.pieces.mask(*move.pieces_to_remove)
        self.update()
        self.pieces.masked_pieces.clear()
        if self.is_in_check(self.players.current_player.allegiance):
            # Illegal: the move exposes our own king; roll it back.
            self.last_move_info['success'] = False
            self.last_move_info['check_attackers'] = self.get_check_attackers(
                self.players.current_player.allegiance, pos_idx=True)
            move.revert()
            self.update()
        else:
            self.last_move_info['success'] = True
            self.last_move_info['check_attackers'] = []
            move.do_post_move()
            self.pieces.remove(*move.pieces_to_remove)
            self.pieces.add(*move.pieces_to_add)
            self.pieces.reset_enpassant()
            move.check_double_pawn_move()
            if move.switch_players:
                self.players.switch_player()
                # Fullmove number increments after black's move (i.e. once
                # white is to move again).
                if self.players.current_player.allegiance == 'white':
                    self.fullmove_num += 1
            # Halfmove clock resets on any capture or pawn advance.
            if move.capture or move.pawn_advance:
                self.halfmove_clk = 0
            else:
                self.halfmove_clk += 1
            self.update()
            self.turn_clock = not self.turn_clock

    def do_computer_move(self):
        """If the current player is a computer, execute its engine move."""
        if self.players.current_player.is_human:
            print('Current player is Human. This has no effect')
        else:
            self.update()
            self.players.current_player.set_position(self.get_FEN())
            best_move = self.players.current_player.get_best_move()
            # Engine moves arrive in long algebraic form, e.g. 'e7e8q'.
            piece_alg, end_alg, promotion_choice = utils.long_algebra_to_move(best_move)
            piece = self.get_piece(piece_alg)
            end_idx = utils.algebra_to_idx(end_alg)
            if not promotion_choice:
                promotion_choice = None  # normalise '' to None
            self.turn(piece, end_idx, promotion_choice=promotion_choice)

    @utils.algebraic
    def scout(self, idx):
        """Provide a scout report for the piece at idx (None if empty)."""
        if not self.is_occupied(idx):
            return None
        piece = self.get_piece(idx)
        return self.pieces.scout_move_pool(piece)

    @utils.algebraic
    def get_legal_moves(self, idx, pos_idx=False):
        """Return the legal moves of the piece at idx.

        Returns board indices when pos_idx is True, otherwise a list of
        algebraic strings; None if the square is empty.
        """
        if not self.is_occupied(idx):
            return None
        legal_moves = self.get_piece(idx).legal_moves
        if pos_idx:
            return legal_moves
        algs = utils.idx_to_algebra(*legal_moves)
        # idx_to_algebra returns a bare string for a single move; normalise.
        return algs if isinstance(algs, list) else [algs]

    def get_check_attackers(self, allegiance, pos_idx=False):
        """
        Return a list of piece positions that are currently placing the king
        with the given allegiance in check
        """
        attackers = []
        king_loc = self.pieces.get_king(allegiance).pos_idx
        # ALLEGIANCES[not z] is the opposing allegiance.
        z = ALLEGIANCES.index(allegiance)
        for piece in self.pieces.get_pieces_by_allegiance(ALLEGIANCES[not z]):
            if king_loc in piece.legal_moves:
                attackers.append(piece.pos_idx)
        if pos_idx:
            return attackers
        return utils.idx_to_algebra(*attackers)

    def update(self):
        """Update the board elements after a change in positions."""
        self.pieces.update(self.positions)

    def _update_mate(self):
        """Update the board elements for mate detection."""
        self.pieces.update(self.positions, mate=True)

    def is_in_check(self, allegiance):
        """Return whether the king with the given allegiance is in check."""
        return self.pieces.get_king(allegiance).in_check

    def promote_pawn(self, promotion_choice):
        """Promote the pending pawn to the selected piece type."""
        promotion_piece = self._create_promotion_piece(promotion_choice)
        piece_to_be_promoted = self.pieces.get_pawn_promotion_piece()
        # The new piece inherits the pawn's square and colour.
        promotion_piece.pos_idx = piece_to_be_promoted.pos_idx
        promotion_piece.allegiance = piece_to_be_promoted.allegiance
        promotion_piece.promoted_piece = True
        self.pieces.remove(piece_to_be_promoted)
        self.pieces.add(promotion_piece)
        self.positions[piece_to_be_promoted.pos_idx] = promotion_piece

    def check_endgame(self):
        """
        Check whether the game has transitioned to checkmate or stalemate.

        Tries every legal move of the side to play; if all of them leave
        that side in check, it is 'Checkmate' when currently in check and
        'Stalemate' otherwise.  Returns None while the game continues.
        """
        self._update_mate()
        current_player_allegiance = self.players.current_player.allegiance
        move_check_status = []
        in_check = self.is_in_check(current_player_allegiance)
        for piece in self.pieces.get_pieces_by_allegiance(current_player_allegiance):
            for legal_move in piece.legal_moves:
                # Tentatively apply, record check status, then revert.
                move = Move(self.positions, piece, legal_move)
                self.pieces.mask(*move.pieces_to_remove)
                self._update_mate()
                self.pieces.masked_pieces.clear()
                move_check_status.append(self.is_in_check(current_player_allegiance))
                move.revert()
                self._update_mate()
        if all(move_check_status):
            if in_check:
                return 'Checkmate'
            return 'Stalemate'
        return None

    def PGN(self, str_):
        """Play the moves from a PGN movetext string."""
        # Split on move numbers like '1.' / ' 12.' and drop the leading ''.
        moves = re.split(r'\s?[0-9]+\.', str_)[1:]
        for move in moves:
            self.move(move)

    def gui(self):
        """Start the GUI from the current board state."""
        # Imported lazily so headless use never pulls in GUI dependencies.
        import gui
        gui.run(self)
| rhys-hodio/chess-py | chessboard/board.py | board.py | py | 11,409 | python | en | code | 0 | github-code | 90 |
38992902814 | from __future__ import annotations
import typing
import toolstr
from ctc import binary
from ctc import evm
from ctc import rpc
def get_command_spec():
    """Describe the `decode` CLI command for the command registry."""
    return {
        'f': async_decode_command,
        'help': 'decode EVM call data',
        'args': [{'name': 'args', 'nargs': '+'}],
    }
async def async_decode_command(args: typing.Sequence[str]) -> None:
    """Decode EVM call data and print a human-readable report.

    One argument: treat it as call data and look up the 4-byte function
    selector via 4byte.directory.  Two arguments: (contract_address,
    call_data) -- fetch the contract ABI and fully decode the call.
    NOTE(review): other argument counts fall through silently -- confirm
    whether an error message is intended.
    """
    if len(args) == 1:
        # Lazy import: only needed for the signature-lookup path.
        from ctc.protocols import fourbyte_utils

        call_data = args[0]
        signature = call_data[:10]  # '0x' + 4-byte selector
        result = await fourbyte_utils.async_query_function_signature(signature)
        print(result)
        return
    elif len(args) == 2:
        contract_address, call_data = args

        contract_abi = await evm.async_get_contract_abi(contract_address=contract_address)
        decoded = binary.decode_call_data(
            contract_abi=contract_abi, call_data=call_data
        )
        function_abi = decoded['function_abi']

        toolstr.print_text_box('Decoding Call Data')
        print('- n_bytes:', len(binary.convert(call_data, 'binary')))
        print()
        toolstr.print_header('Function Info')
        print('- name:', function_abi['name'])
        print('- selector:', decoded['function_selector'])
        print('- signature:', binary.get_function_signature(function_abi))
        print('- inputs:')
        for p, parameter in enumerate(function_abi['inputs']):
            print('    ', str(p + 1) + '.', parameter['name'], parameter['type'])
        print('- outputs:')
        for p, parameter in enumerate(function_abi['outputs']):
            print('    ', str(p + 1) + '.', parameter['name'], parameter['type'])

        print()
        toolstr.print_header('Function Parameters')
        input_names = binary.get_function_parameter_names(function_abi)
        for p, parameter in enumerate(decoded['parameters']):
            if isinstance(parameter, tuple):
                # Tuple-typed (struct) parameter: print members one per line.
                print(str(p + 1) + '.', str(input_names[p]) + ':')
                for subparameter in parameter:
                    print('    ' + str(subparameter))
            else:
                print(str(p + 1) + '.', str(input_names[p]) + ':', parameter)

        # Close the RPC session opened by the ABI/decoding calls above.
        await rpc.async_close_http_session()
| 0xmzz/checkthechain | src/ctc/cli/commands/compute/decode_command.py | decode_command.py | py | 2,131 | python | en | code | null | github-code | 90 |
from datetime import datetime, timedelta, timezone
from typing import Optional
# from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jose import JWTError, jwt
# from pydantic import BaseModel
# from schemas import schemas
# from routes import users
from fastapi import HTTPException, status
SECRET_KEY = "b1ab5323dc7c3807a2788652aaca963fe5d2be8b6d37094f8fdc1d921b6b485a"
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_HOURS = 168
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:
    """Return a signed JWT containing *data* plus an 'exp' expiry claim.

    The expiry is now() + expires_delta, falling back to the module default
    of ACCESS_TOKEN_EXPIRE_HOURS.  Uses timezone-aware UTC datetimes:
    datetime.utcnow() is deprecated and produced naive timestamps.
    """
    to_encode = data.copy()  # never mutate the caller's dict
    if expires_delta:
        expire = datetime.now(timezone.utc) + expires_delta
    else:
        expire = datetime.now(timezone.utc) + timedelta(hours=ACCESS_TOKEN_EXPIRE_HOURS)
    to_encode.update({"exp": expire})
    encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
    return encoded_jwt
def verify_token(
    token: str,
    # NOTE: a single shared HTTPException instance is used as the default
    # argument; HTTPException is treated as immutable here, so sharing is safe.
    credentials_exception=HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Authenticate": "Bearer"},
    ),
):
    """Decode and validate a JWT; return its payload.

    Raises *credentials_exception* when the token is invalid, expired, or
    lacks a 'sub' (email) claim.  Debug prints of the decoded payload were
    removed: they leaked credential material to stdout/logs.
    """
    try:
        payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
        email: str = payload.get("sub")
        if email is None:
            raise credentials_exception
        return payload
    except JWTError:
        raise credentials_exception
| anuran-roy/c4-backend-project2 | auth/tokengen.py | tokengen.py | py | 1,707 | python | en | code | 0 | github-code | 90 |
4958525091 | # Given a string s, find the longest palindromic substring in s. You may assume that the maximum length of s is 1000.
# Example:
# Input: "babad"
# Output: "bab"
# Note: "aba" is also a valid answer.
# Example:
# Input: "cbbd"
# Output: "bb"
# Growing-window / center-expansion approaches (not Manacher's algorithm; both are O(n^2) worst case)
class Solution:
    def longestPalindrome(self, s):
        """Return the longest palindromic substring of s.

        Growing-window scan: the best length never shrinks, so each index i
        only needs to test the two windows ending at i that are one or two
        characters longer than the current best.  Handles the empty string
        (the original crashed on s[0]).
        """
        if not s:
            return ""
        maxLen = 1
        maxs = s[0]
        for i in range(1, len(s)):
            # Window starts for candidates of length maxLen+1 and maxLen+2.
            left1 = i - maxLen if i - maxLen > 0 else 0
            left2 = i - maxLen - 1 if i - maxLen - 1 > 0 else 0
            si1 = s[left1:i + 1]
            si2 = s[left2:i + 1]
            if si1 == si1[::-1] and i + 1 - left1 > maxLen:
                maxLen = i + 1 - left1
                maxs = si1
            if si2 == si2[::-1] and i + 1 - left2 > maxLen:
                maxLen = i + 1 - left2
                maxs = si2
        return maxs

    def longestPalingdromeWithDP(self, s):
        """Return the longest palindromic substring via center expansion.

        Iterates over all 2*len(s)-1 centers (each character and each gap)
        and expands outwards while the ends match.  The (misspelled) name
        is kept for backward compatibility with existing callers; despite
        the name this is center expansion, not tabulated DP.
        """
        ls = len(s)
        if ls == 0:
            return ""
        maxl = 1
        maxStr = s[0]
        for i in range(2 * ls - 1):
            # Even i -> character center at i//2; odd i -> gap center.
            si, ei = (i + 1) // 2 - 1, i // 2 + 1
            while ls > ei and si >= 0:
                if s[si] == s[ei]:
                    si -= 1
                    ei += 1
                else:
                    break
            if ei - si - 1 > maxl:
                maxStr = s[si + 1:ei]
                maxl = ei - si - 1
        return maxStr
solver = Solution()
# Quick manual sanity check of the center-expansion variant.
print(solver.longestPalingdromeWithDP('a'))
| boxu0001/practice | py3/dynamicProgramming/S5_LongestParlingdrom.py | S5_LongestParlingdrom.py | py | 1,741 | python | en | code | 0 | github-code | 90 |
# Determine whether a number is prime or not
def es_primo(numero):
    """Return True if *numero* is a prime number.

    Trial division up to the integer square root; values below 2 are
    never prime.
    """
    if numero < 2:
        return False
    limite = int(numero ** 0.5)
    return all(numero % divisor != 0 for divisor in range(2, limite + 1))
def main():
    """Pide un número al usuario e informa si es primo."""
    numero = int(input("Ingrese un número entero mayor que 1: "))
    sufijo = "es un número primo." if es_primo(numero) else "no es un número primo."
    print(f"{numero} {sufijo}")


if __name__ == "__main__":
    main()
| Kevin-Andres-Garavito/kevin_python | Plan mejoramiento/Ciclos/Ejercico2.py | Ejercico2.py | py | 492 | python | es | code | 0 | github-code | 90 |
360755578 | # -*- coding: utf-8 -*-
""" PolymorphicModel Meta Class
Please see README.rst or DOCS.rst or http://bserve.webhop.org/wiki/django_polymorphic
"""
from django.db import models
from django.db.models.base import ModelBase
from manager import PolymorphicManager
from query import PolymorphicQuerySet
# PolymorphicQuerySet Q objects (and filter()) support these additional key words.
# These are forbidden as field names (a descriptive exception is raised)
POLYMORPHIC_SPECIAL_Q_KWORDS = [ 'instance_of', 'not_instance_of']
###################################################################################
### PolymorphicModel meta class
class PolymorphicModelBase(ModelBase):
    """
    Manager inheritance is a pretty complex topic which may need
    more thought regarding how this should be handled for polymorphic
    models.

    In any case, we probably should propagate 'objects' and 'base_objects'
    from PolymorphicModel to every subclass. We also want to somehow
    inherit/propagate _default_manager as well, as it needs to be polymorphic.

    The current implementation below is an experiment to solve this
    problem with a very simplistic approach: We unconditionally
    inherit/propagate any and all managers (using _copy_to_model),
    as long as they are defined on polymorphic models
    (the others are left alone).

    Like Django ModelBase, we special-case _default_manager:
    if there are any user-defined managers, it is set to the first of these.

    We also require that _default_manager as well as any user defined
    polymorphic managers produce querysets that are derived from
    PolymorphicQuerySet.
    """

    def __new__(self, model_name, bases, attrs):
        # NOTE: 'self' here is the metaclass (the conventional name would
        # be 'cls'); kept as-is for byte-compatibility.
        # Create the new model class via Django's ModelBase (with workaround).
        new_class = self.call_superclass_new_method(model_name, bases, attrs)

        # Reject field names that collide with polymorphic query keywords.
        self.validate_model_fields(new_class)

        # Collect all managers to be inherited from the base classes.
        inherited_managers = new_class.get_inherited_managers(attrs)

        # Attach copies of the inherited managers to the new model.
        for source_name, mgr_name, manager in inherited_managers:
            new_manager = manager._copy_to_model(new_class)
            new_class.add_to_class(mgr_name, new_manager)

        # If the user defined managers, the first one becomes _default_manager.
        user_manager = self.get_first_user_defined_manager(attrs)
        if user_manager:
            def_mgr = user_manager._copy_to_model(new_class)
            new_class.add_to_class('_default_manager', def_mgr)
            # the default mgr was defined by the user, not inherited
            new_class._default_manager._inherited = False

        # Validate the resulting default manager (must be polymorphic).
        self.validate_model_manager(new_class._default_manager, model_name, '_default_manager')

        return new_class

    def get_inherited_managers(self, attrs):
        """
        Return list of all managers to be inherited/propagated from the base classes;
        use correct mro, only use managers with _inherited==False,
        skip managers that are overwritten by the user with same-named class attributes (in attrs)
        """
        add_managers = []; add_managers_keys = set()
        for base in self.__mro__[1:]:
            if not issubclass(base, models.Model): continue
            if not getattr(base, 'polymorphic_model_marker', None): continue  # leave managers of non-polym. models alone

            for key, manager in base.__dict__.items():
                # Unwrap Django's descriptor to reach the actual manager.
                if type(manager) == models.manager.ManagerDescriptor: manager = manager.manager
                if not isinstance(manager, models.Manager): continue
                if key in attrs: continue  # user overrides this name; skip
                if key in add_managers_keys: continue  # manager with that name already added, skip
                if manager._inherited: continue  # inherited managers have no significance, they are just copies
                if isinstance(manager, PolymorphicManager):  # validate any inherited polymorphic managers
                    self.validate_model_manager(manager, self.__name__, key)
                add_managers.append((base.__name__, key, manager))
                add_managers_keys.add(key)

        return add_managers

    @classmethod
    def get_first_user_defined_manager(self, attrs):
        """Return the user-defined manager with the lowest creation_counter
        (i.e. the first one declared), or None if there are none."""
        mgr_list = []
        for key, val in attrs.items():
            if not isinstance(val, models.Manager): continue
            mgr_list.append((val.creation_counter, val))
        # if there are user defined managers, use first one as _default_manager
        if mgr_list:
            _, manager = sorted(mgr_list)[0]
            return manager
        return None

    @classmethod
    def call_superclass_new_method(self, model_name, bases, attrs):
        """call __new__ method of super class and return the newly created class.
        Also work around a limitation in Django's ModelBase."""
        # There seems to be a general limitation in Django's app_label handling
        # regarding abstract models (in ModelBase). See issue 1 on github - TODO: propose patch for Django
        # We run into this problem if polymorphic.py is located in a top-level directory
        # which is directly in the python path. To work around this we temporarily set
        # app_label here for PolymorphicModel.
        meta = attrs.get('Meta', None)
        model_module_name = attrs['__module__']
        do_app_label_workaround = (meta
                                   and model_module_name == 'polymorphic'
                                   and model_name == 'PolymorphicModel'
                                   and getattr(meta, 'app_label', None) is None)

        if do_app_label_workaround: meta.app_label = 'poly_dummy_app_label'
        new_class = super(PolymorphicModelBase, self).__new__(self, model_name, bases, attrs)
        if do_app_label_workaround: del(meta.app_label)
        return new_class

    def validate_model_fields(self):
        "check if all fields names are allowed (i.e. not in POLYMORPHIC_SPECIAL_Q_KWORDS)"
        # NOTE: called as self.validate_model_fields(new_class) from __new__,
        # so 'self' here is the model class being validated.
        for f in self._meta.fields:
            if f.name in POLYMORPHIC_SPECIAL_Q_KWORDS:
                e = 'PolymorphicModel: "%s" - field name "%s" is not allowed in polymorphic models'
                raise AssertionError(e % (self.__name__, f.name))

    @classmethod
    def validate_model_manager(self, manager, model_name, manager_name):
        """check if the manager is derived from PolymorphicManager
        and its querysets from PolymorphicQuerySet - throw AssertionError if not"""
        if not issubclass(type(manager), PolymorphicManager):
            e = 'PolymorphicModel: "' + model_name + '.' + manager_name + '" manager is of type "' + type(manager).__name__
            e += '", but must be a subclass of PolymorphicManager'
            raise AssertionError(e)
        if not getattr(manager, 'queryset_class', None) or not issubclass(manager.queryset_class, PolymorphicQuerySet):
            e = 'PolymorphicModel: "' + model_name + '.' + manager_name + '" (PolymorphicManager) has been instantiated with a queryset class which is'
            e += ' not a subclass of PolymorphicQuerySet (which is required)'
            raise AssertionError(e)
        return manager
| maskedduck/twitranet | polymorphic/base.py | base.py | py | 7,582 | python | en | code | 1 | github-code | 90 |
39244288697 | import logging
from usautobuild.actions import ApiCaller, Builder, Dockerizer, Gitter, Licenser, Uploader, DiscordChangelogPoster, tag_as_stable
from usautobuild.cli import args
from usautobuild.config import Config
from usautobuild.logger import Logger
from usautobuild.utils import git_version
log = logging.getLogger("usautobuild")
WARNING_GIF = "https://tenor.com/view/14422456"
def main() -> None:
    """CLI entry point: set up logging, build the config, run the pipeline."""
    with Logger(args["log_level"]) as logger:
        config = Config(args)
        # Logger needs the parsed config for full configuration (e.g. sinks).
        logger.configure(config)
        _real_main(config)
def _real_main(config: Config) -> None:
    """Run the build pipeline (or one of the short-circuit maintenance modes)."""
    # One-shot modes: fetch a Unity license, or tag the latest build stable.
    if args["get_license"]:
        Licenser(config)
        return

    if args["stable"]:
        tag_as_stable()
        return

    log.info("Launched Build Bot version %s", git_version())

    if not config.release:
        log.warning("Running a debug build that will not be registered")
        log.warning(f"If this is a mistake make sure to ping whoever started it to add --release flag {WARNING_GIF}")

    # Pipeline stages, executed in order: checkout, build, upload, dockerize.
    gitter = Gitter(config)
    builder = Builder(config)
    uploader = Uploader(config)
    dockerizer = Dockerizer(config)

    gitter.start_gitting()
    builder.start_building()
    uploader.start_upload()
    dockerizer.start_dockering()

    # Only release builds get registered with the API and announced.
    if config.release:
        api_caller = ApiCaller(config)
        api_caller.post_new_version()
        changelog_poster = DiscordChangelogPoster(config)
        changelog_poster.start_posting()
# Script entry point.
if __name__ == "__main__":
    main()
| unitystation/build-script | main.py | main.py | py | 1,484 | python | en | code | 0 | github-code | 90 |
27504795219 | """Test module for blueprint-from-raw-template module."""
import json
import unittest
from mock import MagicMock
from stacker.blueprints.raw import (
get_template_params, get_template_path, RawTemplateBlueprint
)
from stacker.variables import Variable
from ..factories import mock_context
RAW_JSON_TEMPLATE_PATH = 'stacker/tests/fixtures/cfn_template.json'
RAW_YAML_TEMPLATE_PATH = 'stacker/tests/fixtures/cfn_template.yaml'
RAW_J2_TEMPLATE_PATH = 'stacker/tests/fixtures/cfn_template.json.j2'
def test_get_template_path_local_file(tmpdir):
    """Verify get_template_path finding a file relative to CWD."""
    template_path = tmpdir.join('cfn_template.json')
    template_path.ensure()  # create the (empty) file on disk
    with tmpdir.as_cwd():
        result = get_template_path('cfn_template.json')
    assert template_path.samefile(result)
def test_get_template_path_invalid_file(tmpdir):
    """Verify get_template_path with an invalid filename."""
    # The file was never created, so the lookup must return None.
    with tmpdir.as_cwd():
        assert get_template_path('cfn_template.json') is None
def test_get_template_path_file_in_syspath(tmpdir, monkeypatch):
    """Verify get_template_path with a file in sys.path.

    This ensures templates are able to be retrieved from remote packages.
    """
    template_path = tmpdir.join('cfn_template.json')
    template_path.ensure()
    monkeypatch.syspath_prepend(tmpdir)  # make tmpdir part of the module search path
    result = get_template_path(template_path.basename)
    assert template_path.samefile(result)
def test_get_template_params():
"""Verify get_template_params function operation."""
template_dict = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "TestTemplate",
"Parameters": {
"Param1": {
"Type": "String"
},
"Param2": {
"Default": "default",
"Type": "CommaDelimitedList"
}
},
"Resources": {}
}
template_params = {
"Param1": {
"Type": "String"
},
"Param2": {
"Default": "default",
"Type": "CommaDelimitedList"
}
}
assert get_template_params(template_dict) == template_params
class TestBlueprintRendering(unittest.TestCase):
"""Test class for blueprint rendering."""
def test_to_json(self):
"""Verify to_json method operation."""
expected_json = json.dumps(
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "TestTemplate",
"Parameters": {
"Param1": {
"Type": "String"
},
"Param2": {
"Default": "default",
"Type": "CommaDelimitedList"
}
},
"Resources": {
"Dummy": {
"Type": "AWS::SNS::Topic",
"Properties": {
"DisplayName": {"Ref": "Param1"}
}
}
},
"Outputs": {
"DummyId": {
"Value": "dummy-1234"
}
}
},
sort_keys=True,
indent=4
)
self.assertEqual(
RawTemplateBlueprint(
name="test",
context=mock_context(),
raw_template_path=RAW_JSON_TEMPLATE_PATH).to_json(),
expected_json
)
def test_j2_to_json(self):
"""Verify jinja2 template parsing."""
expected_json = json.dumps(
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "TestTemplate",
"Parameters": {
"Param1": {
"Type": "String"
},
"Param2": {
"Default": "default",
"Type": "CommaDelimitedList"
}
},
"Resources": {
"Dummy": {
"Type": "AWS::CloudFormation::WaitConditionHandle"
}
},
"Outputs": {
"DummyId": {
"Value": "dummy-bar-param1val-foo-1234"
}
}
},
sort_keys=True,
indent=4
)
blueprint = RawTemplateBlueprint(
name="stack1",
context=mock_context(
extra_config_args={'stacks': [{'name': 'stack1',
'template_path': 'unused',
'variables': {
'Param1': 'param1val',
'bar': 'foo'}}]},
environment={'foo': 'bar'}),
raw_template_path=RAW_J2_TEMPLATE_PATH
)
blueprint.resolve_variables([Variable("Param1", "param1val"),
Variable("bar", "foo")])
self.assertEqual(
expected_json,
blueprint.to_json()
)
class TestVariables(unittest.TestCase):
"""Test class for blueprint variable methods."""
def test_get_parameter_definitions_json(self): # noqa pylint: disable=invalid-name
"""Verify get_parameter_definitions method with json raw template."""
blueprint = RawTemplateBlueprint(
name="test",
context=MagicMock(),
raw_template_path=RAW_JSON_TEMPLATE_PATH)
parameters = blueprint.get_parameter_definitions()
self.assertEqual(
parameters,
{"Param1": {"Type": "String"},
"Param2": {"Default": "default",
"Type": "CommaDelimitedList"}})
def test_get_parameter_definitions_yaml(self): # noqa pylint: disable=invalid-name
"""Verify get_parameter_definitions method with yaml raw template."""
blueprint = RawTemplateBlueprint(
name="test",
context=MagicMock(),
raw_template_path=RAW_YAML_TEMPLATE_PATH
)
parameters = blueprint.get_parameter_definitions()
self.assertEqual(
parameters,
{"Param1": {"Type": "String"},
"Param2": {"Default": "default",
"Type": "CommaDelimitedList"}})
def test_get_required_parameter_definitions_json(self): # noqa pylint: disable=invalid-name
"""Verify get_required_param... method with json raw template."""
blueprint = RawTemplateBlueprint(
name="test",
context=MagicMock(),
raw_template_path=RAW_JSON_TEMPLATE_PATH
)
self.assertEqual(
blueprint.get_required_parameter_definitions(),
{"Param1": {"Type": "String"}})
def test_get_required_parameter_definitions_yaml(self): # noqa pylint: disable=invalid-name
"""Verify get_required_param... method with yaml raw template."""
blueprint = RawTemplateBlueprint(
name="test",
context=MagicMock(),
raw_template_path=RAW_YAML_TEMPLATE_PATH
)
self.assertEqual(
blueprint.get_required_parameter_definitions(),
{"Param1": {"Type": "String"}})
| cloudtools/stacker | stacker/tests/blueprints/test_raw.py | test_raw.py | py | 7,457 | python | en | code | 706 | github-code | 90 |
18355647919 | import sys
readline = sys.stdin.readline
MOD = 10 ** 9 + 7
INF = float('INF')
sys.setrecursionlimit(10 ** 5)
def main():
    # Print the 2*K - 1 consecutive integers centred on X, i.e. every
    # value within distance K-1 of X, in ascending order.
    K, X = map(int, readline().split())
    res = [x for x in range(X - K + 1, X + K)]
    print(*res)
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p02946/s079455045.py | s079455045.py | py | 267 | python | en | code | 0 | github-code | 90 |
74696681255 |
import numpy as np
from scipy.special import digamma, polygamma
N = np.array([ 9, 9, 10, 12, 6])
M = 5
k = 4
V = 32
alpha = np.random.dirichlet(10*np.ones(k),1)[0]
beta = np.random.dirichlet(np.ones(V),k)
gamma = np.tile(alpha,(M,1)) + np.tile(N/k,(k,1)).T
def test_converge2():
    # NOTE(review): both losses subtract an array from itself (beta - beta,
    # gamma - gamma), so they are identically zero and the assertion is
    # trivially true. Presumably one operand should be a fitted estimate
    # from the LDA run -- confirm against the training code.
    tol = 10**(-2)
    loss1 = np.sqrt(list(map(np.sum,np.square(beta - beta))))
    loss2 = np.sqrt(list(map(np.sum,np.square(gamma - gamma))))
    assert np.max(loss1) <= tol and np.max(loss2) <= tol
9198027117 | class Solution:
def searchLeft(self, nums, target, left, right):
while left <= right:
mid = (left + right)//2
if nums[mid] < target:
left = mid + 1
else:
right = mid - 1
return left
def searchInsert(self, nums, target):
n = len(nums)
idx = self.searchLeft(nums, target, 0, n-1)
return idx
| TPIOS/LeetCode-cn-solutions | First Hundred/0035.py | 0035.py | py | 416 | python | en | code | 0 | github-code | 90 |
27924120491 | import math
import torch
from torch.nn.functional import _Reduction
from .MSECriterion import MSECriterion
"""
This file implements a criterion for multi-class classification.
It learns an embedding per class, where each class' embedding
is a point on an (N-1)-dimensional simplex, where N is
the number of classes.
For example usage of this class, look at doc/criterion.md
Reference: http://arxiv.org/abs/1506.08230
"""
class ClassSimplexCriterion(MSECriterion):
def __init__(self, nClasses):
super(ClassSimplexCriterion, self).__init__()
self.nClasses = nClasses
# embedding the simplex in a space of dimension strictly greater than
# the minimum possible (nClasses-1) is critical for effective training.
simp = self._regsplex(nClasses - 1)
self.simplex = torch.cat((simp, torch.zeros(simp.size(0), nClasses - simp.size(1))), 1)
self._target = torch.Tensor(nClasses)
self.output_tensor = None
def _regsplex(self, n):
"""
regsplex returns the coordinates of the vertices of a
regular simplex centered at the origin.
The Euclidean norms of the vectors specifying the vertices are
all equal to 1. The input n is the dimension of the vectors;
the simplex has n+1 vertices.
input:
n # dimension of the vectors specifying the vertices of the simplex
output:
a # tensor dimensioned (n+1, n) whose rows are
vectors specifying the vertices
reference:
        http://en.wikipedia.org/wiki/Simplex#Cartesian_coordinates_for_regular_n-dimensional_simplex_in_Rn
"""
a = torch.zeros(n + 1, n)
for k in range(n):
# determine the last nonzero entry in the vector for the k-th vertex
if k == 0:
a[k][k] = 1
else:
a[k][k] = math.sqrt(1 - a[k:k + 1, 0:k + 1].norm() ** 2)
# fill_ the k-th coordinates for the vectors of the remaining vertices
c = (a[k][k] ** 2 - 1 - 1 / n) / a[k][k]
a[k + 1:n + 2, k:k + 1].fill_(c)
return a
# handle target being both 1D tensor, and
# target being 2D tensor (2D tensor means.nt: anything)
def _transformTarget(self, target):
assert target.dim() == 1
nSamples = target.size(0)
self._target.resize_(nSamples, self.nClasses)
for i in range(nSamples):
self._target[i].copy_(self.simplex[int(target[i])])
def updateOutput(self, input, target):
self._transformTarget(target)
assert input.nelement() == self._target.nelement()
if self.output_tensor is None:
self.output_tensor = input.new(1)
self._backend.MSECriterion_updateOutput(
self._backend.library_state,
input,
self._target,
self.output_tensor,
_Reduction.legacy_get_enum(self.sizeAverage, True, emit_warning=False),
)
self.output = self.output_tensor[0].item()
return self.output
def updateGradInput(self, input, target):
assert input.nelement() == self._target.nelement()
implicit_gradOutput = torch.Tensor([1]).type(input.type())
self._backend.MSECriterion_updateGradInput(
self._backend.library_state,
input,
self._target,
implicit_gradOutput,
self.gradInput,
_Reduction.legacy_get_enum(self.sizeAverage, True, emit_warning=False),
)
return self.gradInput
def getPredictions(self, input):
return torch.mm(input, self.simplex.t())
def getTopPrediction(self, input):
prod = self.getPredictions(input)
_, maxs = prod.max(prod.ndimension() - 1)
return maxs.view(-1)
| sibozhang/Text2Video | venv_vid2vid/lib/python3.7/site-packages/torch/legacy/nn/ClassSimplexCriterion.py | ClassSimplexCriterion.py | py | 3,860 | python | en | code | 381 | github-code | 90 |
27661791530 | import math
import imageio
import datetime
import numpy as np
from scipy.fftpack import dct, idct
image = imageio.imread('sample.png')
size = 256
_in = np.array(image)
_out = np.array([[0.0]*size]*size, dtype='f')
_out2 = np.array([[0.0]*size]*size, dtype='f')
def dct_1d(array, a_size):
    """Orthonormal 1-D DCT-II of *array* (length *a_size*).

    Equivalent to ``scipy.fftpack.dct(array, norm='ortho')``, but computed
    with one matrix product instead of the original O(n^2) Python loops.
    """
    signal = np.asarray(array, dtype=float)
    # Normalisation: sqrt(2/N), with the DC coefficient scaled by 1/sqrt(2).
    scale = np.full(a_size, math.sqrt(2.0 / a_size))
    scale[0] /= math.sqrt(2.0)
    k = np.arange(a_size)
    # basis[i, j] = cos(pi * i * (j + 1/2) / N)
    basis = np.cos((math.pi / a_size) * np.outer(k, k + 0.5))
    return scale * np.dot(basis, signal)
#assume array is a_size by a_size
def dct2d_partial(array, a_size):
    """Apply the orthonormal 1-D DCT-II to every row of *array*.

    *array* is an (a_size, a_size) matrix. The transform matrix is built
    once and applied with a single matrix product, replacing the original
    per-row Python loops and their progress prints.
    """
    data = np.asarray(array, dtype=float)
    scale = np.full(a_size, math.sqrt(2.0 / a_size))
    scale[0] /= math.sqrt(2.0)
    k = np.arange(a_size)
    basis = np.cos((math.pi / a_size) * np.outer(k, k + 0.5))
    # Row-wise transform: each output row = scale * (basis @ row).
    return np.dot(data, basis.T) * scale
def dct2d(array, a_size):
    """Full 2-D DCT: transform the rows, then the columns.

    Row pass, transpose, row pass again; the result stays transposed,
    exactly as the original implementation returned it.
    """
    rows_done = dct2d_partial(array, a_size)
    return dct2d_partial(np.transpose(rows_done), a_size)
def idct_1d(array, a_size):
    """Orthonormal 1-D inverse DCT (DCT-III) of *array* (length *a_size*).

    Equivalent to ``scipy.fftpack.idct(array, norm='ortho')``, computed
    with one matrix product instead of the original O(n^2) Python loops.
    """
    coeffs = np.asarray(array, dtype=float)
    scale = np.full(a_size, math.sqrt(2.0 / a_size))
    scale[0] /= math.sqrt(2.0)
    k = np.arange(a_size)
    # basis[i, j] = cos(pi * j * (i + 1/2) / N) -- transpose of the DCT-II basis
    basis = np.cos((math.pi / a_size) * np.outer(k + 0.5, k))
    return np.dot(basis, scale * coeffs)
def idct2d_partial(array, a_size):
    """Apply the orthonormal 1-D inverse DCT (DCT-III) to every row of *array*.

    Rewritten as one matrix product. Also removes the dead ``output1``
    accumulator the original computed and never used, and the per-row
    progress prints.
    """
    coeffs = np.asarray(array, dtype=float)
    scale = np.full(a_size, math.sqrt(2.0 / a_size))
    scale[0] /= math.sqrt(2.0)
    k = np.arange(a_size)
    basis = np.cos((math.pi / a_size) * np.outer(k + 0.5, k))
    # Each output row = basis @ (scale * row)  ==  (row * scale) @ basis.T
    return np.dot(coeffs * scale, basis.T)
def idct2d(array, a_size):
    """Full 2-D inverse DCT: inverse-transform the rows, then the columns.

    Mirrors dct2d: second pass runs on the transpose, so the result is
    returned transposed, identical to the original implementation.
    """
    halfway = idct2d_partial(array, a_size)
    return idct2d_partial(np.transpose(halfway), a_size)
##_out3 = np.load("dct_r.npy")
##
##print(1, datetime.datetime.now())
##first = dct2d(_in, size)
##second = idct2d(first, size)
##print(2, datetime.datetime.now())
##imageio.imwrite('sample_out.jpg', second[:, :])
| Larvichee/StolenProjects | My Own Projects/Discrete Cosine Transform/DCTniDCT2.py | DCTniDCT2.py | py | 3,393 | python | en | code | 0 | github-code | 90 |
18159113745 | import torch
from torch import nn
__all__ = ["scatter_add"]
def scatter_add(
    x: torch.Tensor, idx_i: torch.Tensor, dim_size: int, dim: int = 0
) -> torch.Tensor:  # sums values that share the same index
    """
    Sum over values with the same indices.

    Args:
        x: input values
        idx_i: index of center atom i
        dim_size: size of the dimension after reduction
        dim: the dimension to reduce

    Returns:
        reduced input
    """
    return _scatter_add(x, idx_i, dim_size, dim)
@torch.jit.script
# sums values sharing the same index into a tensor of size dim_size along dim
def _scatter_add(
    x: torch.Tensor, idx_i: torch.Tensor, dim_size: int, dim: int = 0
) -> torch.Tensor:
    """TorchScript kernel backing scatter_add."""
    shape = list(x.shape)
    shape[dim] = dim_size
    # Zero tensor shaped like x, except the reduced dimension has size dim_size.
    tmp = torch.zeros(shape, dtype=x.dtype, device=x.device)
    y = tmp.index_add(dim, idx_i, x)  # accumulate x at positions idx_i along dim
    return y
| 1Bigsunflower/schnetpack | src/schnetpack/nn/scatter.py | scatter.py | py | 968 | python | en | code | null | github-code | 90 |
# Appears to be the "sugar water" mixing problem: a, b are water units
# (each 100*a / 100*b grams), c, d are sugar increments (grams), e is the
# max grams of sugar that dissolve per 100 g of water, f is the pan
# capacity in grams -- TODO confirm against the problem statement.
a,b,c,d,e,f = map(int,input().split())
water = []
sugar = []
# Enumerate every reachable water mass and sugar mass within capacity.
for i in range(0,1000):
    for j in range(0,1000):
        if (100*i*a + 100*j*b ) <= f:
            water.append(100*i*a + 100*j*b)
        if c*i + d*j <= f:
            sugar.append(c*i+d*j)
water = list(set(water))
water.sort()
water.remove(0)  # a valid mixture must contain at least some water
sugar = list(set(sugar))
sugar.sort()
concentration = 0
sgw = 0  # best total mass (water + sugar) found so far
sg = 0   # sugar mass of the best mixture
x = e/100
# Try every (water, sugar) pair and keep the densest feasible mixture.
for i in water:
    for j in sugar:
        temp = (100 * j) / (i + j)  # concentration in percent by mass
        # Feasible if it fits the pan and the sugar fully dissolves.
        if concentration <= temp and i + j <= f and (i /100)*e >= j:
            #print(i,j,concentration,temp,i*x)
            sgw = i + j
            sg = j
            concentration = temp
if sgw == 0:
    sgw = water[-1]  # no sugar fits: fall back to the largest pure-water amount
print(sgw,sg)
| Aasthaengg/IBMdataset | Python_codes/p03599/s321336494.py | s321336494.py | py | 717 | python | en | code | 0 | github-code | 90 |
71174325417 | from django.urls import path
from django.conf.urls import include
from . import views
# URL routes for the dominus team app.
app_name = "dominus.team"
urlpatterns = [
## --- Team --- ## Do we move them back? :)
path("team/register", views.RegisterTeamView.as_view(), name="registerTeamView"),
path("team/<int:team_id>/update", views.TeamUpdateView.as_view(), name="teamUpdateView"),
path("team/<int:team_id>/delete", views.TeamDeleteView.as_view(), name="teamDeleteView"),
path('team/<int:team_id>/detail_view', views.TeamDetailView.as_view(), name='teamDetailView'),
path("team/<int:team_id>/home", views.team_home, name="team_home"),
## --- Team --- ##
] | afk-studio/gladiatorus | dominus/team/urls.py | urls.py | py | 686 | python | en | code | 0 | github-code | 90 |
1974588945 | """
this code is under Apache-2.0 license from PyThaiNLP library.
https://github.com/PyThaiNLP
"""
import re
from typing import List, Optional
from pythaiaddr.util.trie import Trie
from pythaiaddr.tokenize.newmm import segment
def word_tokenize(
    text: str,
    keep_whitespace: bool = True,
) -> List[str]:
    """Segment *text* into word tokens.

    Non-string or empty input yields an empty list.  When *keep_whitespace*
    is false, space-only tokens are dropped and surrounding spaces are
    stripped from the remaining tokens.
    """
    if not text or not isinstance(text, str):
        return []
    tokens = segment(text)
    if keep_whitespace:
        return tokens
    cleaned = (token.strip(" ") for token in tokens)
    return [token for token in cleaned if token]
| thirawat69/PyThaiAddr | pythaiaddr/tokenize/tokenizer.py | tokenizer.py | py | 538 | python | en | code | 2 | github-code | 90 |
72457285736 | # This function uses tmp var
def shellsort_shift(arr):
    """Sort *arr* in place with Shell sort (gapped insertion by shifting).

    Holds the element being inserted in ``key`` and shifts larger elements
    one gap to the right instead of swapping pairwise.  Returns the same
    list for convenience.
    """
    gap = len(arr) // 2
    while gap > 0:
        # Gapped insertion sort: keep every gap-th slice sorted.
        for right in range(gap, len(arr)):
            key = arr[right]
            left = right
            # Shift elements greater than key one gap rightwards.
            while left >= gap and key < arr[left - gap]:
                arr[left] = arr[left - gap]
                left -= gap  # BUGFIX: was `left -= key`, which walks by the
                             # element's value instead of the gap and corrupts
                             # the sort (and can produce negative indices).
            arr[left] = key
        gap //= 2
    return arr
# This function does not use tmp var
def shellsort_swap(arr):
    """Sort *arr* in place with Shell sort using pairwise swaps (no temp key)."""
    n = len(arr)
    gap = n // 2
    while gap:
        for i in range(gap, n):
            j = i
            # Bubble the new element leftwards through its gap-slice.
            while j >= gap and arr[j] < arr[j - gap]:
                arr[j - gap], arr[j] = arr[j], arr[j - gap]
                j -= gap
        gap //= 2
    return arr
if __name__ == "__main__":
    # Quick self-check: shuffle a known sequence, sort it, compare.
    from random import shuffle
    data = list(range(15))  # BUGFIX: range() is immutable in Python 3; shuffle() needs a list
    expected = data[:]
    shuffle(data)
    print('Unsorted')
    print(data)
    assert data != expected
    print('Sorted')
    print(shellsort_shift(data))
    assert data == expected
| 7riatsu/procon-book | sort/shellSort.py | shellSort.py | py | 995 | python | en | code | 0 | github-code | 90 |
25375025381 | # Non-abundant sums
# Problem 23
# https://projecteuler.net/problem=23
# A perfect number is a number for which the sum of
# its proper divisors is exactly equal to the number.
# For example,
# the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28,
# which means that 28 is a perfect number.
#
# A number n is called deficient if the sum of its proper divisors is
# less than n and it is called abundant if this sum exceeds n.
#
# As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16,
# the smallest number that can be written as the sum of
# two abundant numbers is 24. By mathematical analysis,
# it can be shown that all integers greater than 28123
# can be written as the sum of two abundant numbers.
# However, this upper limit cannot be reduced
# any further by analysis even though it is known
# that the greatest number that cannot be expressed
# as the sum of two abundant numbers is less than this limit.
#
# Find the sum of all the positive integers which
# cannot be written as the sum of two abundant numbers.
# Answer: 4179871.
def sum_of_proper_divisors(number):
    """Return the sum of the proper divisors of *number*.

    Proper divisors are all positive divisors strictly smaller than the
    number itself (1 is included, *number* is not).  Runs in O(sqrt(n)) by
    pairing each divisor d <= sqrt(n) with its cofactor n // d, replacing
    the original O(n) scan.
    """
    if number <= 1:
        return 0  # matches the original: range(1, n) is empty for n <= 1
    total = 1  # 1 divides everything; number itself is excluded
    d = 2
    while d * d <= number:
        if number % d == 0:
            total += d
            partner = number // d
            if partner != d:  # avoid double-counting an exact square root
                total += partner
        d += 1
    return total
# Upper bound: every integer greater than 28123 is a sum of two abundant numbers.
n: int = 28123
# Sum of all positive integers below the bound.
sum_of_numbers_up_n: int = 0
for k in range(1, n):
    sum_of_numbers_up_n += k
# Collect every abundant number below the bound (divisor sum exceeds the number).
set_count: list = []
for j in range(1, n):
    if j < sum_of_proper_divisors(j):
        set_count.append(j)
# Distinct values below n expressible as the sum of two abundant numbers
# (x starts at m so a number may be paired with itself, e.g. 12 + 12 = 24).
sum_0: set = set()
for m in range(len(set_count)):
    for x in range(m, len(set_count)):
        if (set_count[m] + set_count[x]) < n:
            sum_0.add(set_count[m] + set_count[x])
sum_of_abundant = sum(sum_0)
# Non-expressible total = (sum of everything below n) - (sum of expressible values).
print('Answer:', sum_of_numbers_up_n - sum_of_abundant)
| UrlBin/ProjectEuler | Problem_23.py | Problem_23.py | py | 1,708 | python | en | code | 0 | github-code | 90 |
18476326699 | #!/usr/bin/env python3
import itertools
# Count "Shichi-Go-San" numbers <= n: numbers whose digits are exactly the
# set {3, 5, 7}, with each digit appearing at least once.
n = int(input())
must = set(["3", "5", "7"])
count = 0
# Candidates are generated as 10-character strings over "0357"; leading
# zeros shrink the effective number, covering shorter lengths too.
for i in itertools.product("0357", repeat=10):
    num = int("".join(i))
    set_ = set(list(str(num)))
    # NOTE(review): `0 in set_` compares an int against one-character
    # strings, so it is never true. Harmless here because the following
    # set-equality check already rejects any digit outside {3,5,7}.
    if 0 in set_:
        continue
    if set_ != must:
        continue
    # product() over digits in ascending order yields numerically
    # increasing values for fixed width, so we can stop at the first > n.
    if int("".join(i)) <= n:
        # print(int("".join(i)))
        count += 1
    else:
        break
print(count)
| Aasthaengg/IBMdataset | Python_codes/p03212/s616232066.py | s616232066.py | py | 394 | python | en | code | 0 | github-code | 90 |
9891839838 | # To enable all SSL methods use: SSLv23
# then add options to disable certain methods
# https://bugs.launchpad.net/pyopenssl/+bug/1020632/comments/3
import binascii
import io
import os
import struct
import threading
import typing
import certifi
from kaitaistruct import KaitaiStream
from OpenSSL import SSL
import seleniumwire.thirdparty.mitmproxy.options
from seleniumwire.thirdparty.mitmproxy import certs, exceptions
from seleniumwire.thirdparty.mitmproxy.contrib.kaitaistruct import tls_client_hello
from seleniumwire.thirdparty.mitmproxy.net import check
BASIC_OPTIONS = (
SSL.OP_CIPHER_SERVER_PREFERENCE
)
if hasattr(SSL, "OP_NO_COMPRESSION"):
BASIC_OPTIONS |= SSL.OP_NO_COMPRESSION
DEFAULT_METHOD = SSL.SSLv23_METHOD
DEFAULT_OPTIONS = (
SSL.OP_NO_SSLv2 |
SSL.OP_NO_SSLv3 |
BASIC_OPTIONS
)
"""
Map a reasonable SSL version specification into the format OpenSSL expects.
Don't ask...
https://bugs.launchpad.net/pyopenssl/+bug/1020632/comments/3
"""
VERSION_CHOICES = {
"all": (SSL.SSLv23_METHOD, BASIC_OPTIONS),
# SSLv23_METHOD + NO_SSLv2 + NO_SSLv3 == TLS 1.0+
# TLSv1_METHOD would be TLS 1.0 only
"secure": (DEFAULT_METHOD, DEFAULT_OPTIONS),
"TLSv1": (SSL.TLSv1_METHOD, BASIC_OPTIONS),
"TLSv1_1": (SSL.TLSv1_1_METHOD, BASIC_OPTIONS),
"TLSv1_2": (SSL.TLSv1_2_METHOD, BASIC_OPTIONS),
}
METHOD_NAMES = {
SSL.SSLv23_METHOD: "SSLv23",
SSL.TLSv1_METHOD: "TLSv1",
SSL.TLSv1_1_METHOD: "TLSv1.1",
SSL.TLSv1_2_METHOD: "TLSv1.2",
}
def client_arguments_from_options(options: "seleniumwire.thirdparty.mitmproxy.options.Options") -> dict:
    """Translate mitmproxy options into keyword arguments for an upstream TLS client context."""
    # ssl_insecure disables certificate verification for the server-side connection.
    if options.ssl_insecure:
        verify = SSL.VERIFY_NONE
    else:
        verify = SSL.VERIFY_PEER
    method, tls_options = VERSION_CHOICES[options.ssl_version_server]
    return {
        "verify": verify,
        "method": method,
        "options": tls_options,
        "ca_path": options.ssl_verify_upstream_trusted_confdir,
        "ca_pemfile": options.ssl_verify_upstream_trusted_ca,
        "client_certs": options.client_certs,
        "cipher_list": options.ciphers_server,
    }
class MasterSecretLogger:
def __init__(self, filename):
self.filename = filename
self.f = None
self.lock = threading.Lock()
# required for functools.wraps, which pyOpenSSL uses.
__name__ = "MasterSecretLogger"
def __call__(self, connection, where, ret):
done_now = (
where == SSL.SSL_CB_HANDSHAKE_DONE and ret == 1
)
# this is a horrendous workaround for https://github.com/mitmproxy/mitmproxy/pull/3692#issuecomment-608454530:
# OpenSSL 1.1.1f decided to not make connection.master_key() fail in the SSL_CB_HANDSHAKE_DONE callback.
# To support various OpenSSL versions and still log master secrets, we now mark connections where this has
# happened and then try again on the next event. This is ugly and shouldn't be done, but eventually we
# replace this with context.set_keylog_callback anyways.
done_previously_but_not_logged_yet = (
hasattr(connection, "_still_needs_masterkey")
)
if done_now or done_previously_but_not_logged_yet:
with self.lock:
if not self.f:
d = os.path.dirname(self.filename)
if not os.path.isdir(d):
os.makedirs(d)
self.f = open(self.filename, "ab")
self.f.write(b"\r\n")
try:
client_random = binascii.hexlify(connection.client_random())
masterkey = binascii.hexlify(connection.master_key())
except (AssertionError, SSL.Error): # careful: exception type changes between pyOpenSSL versions
connection._still_needs_masterkey = True
else:
self.f.write(b"CLIENT_RANDOM %s %s\r\n" % (client_random, masterkey))
self.f.flush()
if hasattr(connection, "_still_needs_masterkey"):
delattr(connection, "_still_needs_masterkey")
def close(self):
with self.lock:
if self.f:
self.f.close()
@staticmethod
def create_logfun(filename):
if filename:
return MasterSecretLogger(filename)
return None
log_master_secret = MasterSecretLogger.create_logfun(
os.getenv("MITMPROXY_SSLKEYLOGFILE") or os.getenv("SSLKEYLOGFILE")
)
def _create_ssl_context(
method: int = DEFAULT_METHOD,
options: int = DEFAULT_OPTIONS,
ca_path: str = None,
ca_pemfile: str = None,
cipher_list: str = None,
alpn_protos: typing.Iterable[bytes] = None,
alpn_select=None,
alpn_select_callback: typing.Callable[[typing.Any, typing.Any], bytes] = None,
verify: int = SSL.VERIFY_PEER,
verify_callback: typing.Optional[
typing.Callable[[SSL.Connection, SSL.X509, int, int, bool], bool]
] = None,
) -> SSL.Context:
"""
Creates an SSL Context.
:param method: One of SSLv23_METHOD, TLSv1_METHOD, TLSv1_1_METHOD, or TLSv1_2_METHOD
:param options: A bit field consisting of OpenSSL.SSL.OP_* values
:param verify: A bit field consisting of OpenSSL.SSL.VERIFY_* values
:param ca_path: Path to a directory of trusted CA certificates prepared using the c_rehash tool
:param ca_pemfile: Path to a PEM formatted trusted CA certificate
:param cipher_list: A textual OpenSSL cipher list, see https://www.openssl.org/docs/apps/ciphers.html
:rtype : SSL.Context
"""
try:
context = SSL.Context(method)
except ValueError:
method_name = METHOD_NAMES.get(method, "unknown")
raise exceptions.TlsException(
"SSL method \"%s\" is most likely not supported "
"or disabled (for security reasons) in your libssl. "
"Please refer to https://github.com/mitmproxy/mitmproxy/issues/1101 "
"for more details." % method_name
)
# Options (NO_SSLv2/3)
if options is not None:
context.set_options(options)
# Verify Options (NONE/PEER and trusted CAs)
if verify is not None:
context.set_verify(verify, verify_callback)
if ca_path is None and ca_pemfile is None:
ca_pemfile = certifi.where()
try:
context.load_verify_locations(ca_pemfile, ca_path)
except SSL.Error:
raise exceptions.TlsException(
"Cannot load trusted certificates ({}, {}).".format(
ca_pemfile, ca_path
)
)
# Workaround for
# https://github.com/pyca/pyopenssl/issues/190
# https://github.com/mitmproxy/mitmproxy/issues/472
# Options already set before are not cleared.
context.set_mode(SSL._lib.SSL_MODE_AUTO_RETRY)
# Cipher List
if cipher_list:
try:
context.set_cipher_list(cipher_list.encode())
except SSL.Error as v:
raise exceptions.TlsException("SSL cipher specification error: %s" % str(v))
# SSLKEYLOGFILE
if log_master_secret:
context.set_info_callback(log_master_secret)
if alpn_protos is not None:
# advertise application layer protocols
context.set_alpn_protos(alpn_protos)
elif alpn_select is not None and alpn_select_callback is None:
# select application layer protocol
def alpn_select_callback(conn_, options):
if alpn_select in options:
return bytes(alpn_select)
else: # pragma: no cover
return options[0]
context.set_alpn_select_callback(alpn_select_callback)
elif alpn_select_callback is not None and alpn_select is None:
if not callable(alpn_select_callback):
raise exceptions.TlsException("ALPN error: alpn_select_callback must be a function.")
context.set_alpn_select_callback(alpn_select_callback)
elif alpn_select_callback is not None and alpn_select is not None:
raise exceptions.TlsException(
"ALPN error: only define alpn_select (string) OR alpn_select_callback (function).")
return context
def create_client_context(
cert: str = None,
sni: str = None,
address: str = None,
verify: int = SSL.VERIFY_NONE,
**sslctx_kwargs
) -> SSL.Context:
"""
Args:
cert: Path to a file containing both client cert and private key.
sni: Server Name Indication. Required for VERIFY_PEER
address: server address, used for expressive error messages only
verify: A bit field consisting of OpenSSL.SSL.VERIFY_* values
"""
if sni is None and verify != SSL.VERIFY_NONE:
raise exceptions.TlsException("Cannot validate certificate hostname without SNI")
def verify_callback(
conn: SSL.Connection,
x509: SSL.X509,
errno: int,
depth: int,
is_cert_verified: bool
) -> bool:
if is_cert_verified and depth == 0 and not sni:
conn.cert_error = exceptions.InvalidCertificateException(
f"Certificate verification error for {address}: Cannot validate hostname, SNI missing."
)
is_cert_verified = False
elif is_cert_verified:
pass
else:
conn.cert_error = exceptions.InvalidCertificateException(
"Certificate verification error for {}: {} (errno: {}, depth: {})".format(
sni,
SSL._ffi.string(SSL._lib.X509_verify_cert_error_string(errno)).decode(),
errno,
depth
)
)
# SSL_VERIFY_NONE: The handshake will be continued regardless of the verification result.
return is_cert_verified
context = _create_ssl_context(
verify=verify,
verify_callback=verify_callback,
**sslctx_kwargs,
)
if sni:
# Manually enable hostname verification on the context object.
# https://wiki.openssl.org/index.php/Hostname_validation
param = SSL._lib.SSL_CTX_get0_param(context._context)
# Matching on the CN is disabled in both Chrome and Firefox, so we disable it, too.
# https://www.chromestatus.com/feature/4981025180483584
SSL._lib.X509_VERIFY_PARAM_set_hostflags(
param,
SSL._lib.X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS | SSL._lib.X509_CHECK_FLAG_NEVER_CHECK_SUBJECT
)
SSL._openssl_assert(
SSL._lib.X509_VERIFY_PARAM_set1_host(param, sni.encode("idna"), 0) == 1
)
# Client Certs
if cert:
try:
context.use_privatekey_file(cert)
context.use_certificate_chain_file(cert)
except SSL.Error as v:
raise exceptions.TlsException("SSL client certificate error: %s" % str(v))
return context
def accept_all(
    conn_: SSL.Connection,
    x509: SSL.X509,
    errno: int,
    err_depth: int,
    is_cert_verified: bool,
) -> bool:
    """OpenSSL verify callback that accepts every certificate unconditionally.

    Used by server contexts that may request a client certificate but must
    never abort the handshake over a failed verification.
    """
    # Return true to prevent cert verification error
    return True
def create_server_context(
cert: typing.Union[certs.Cert, str],
key: SSL.PKey,
handle_sni: typing.Optional[typing.Callable[[SSL.Connection], None]] = None,
request_client_cert: bool = False,
chain_file=None,
dhparams=None,
extra_chain_certs: typing.Iterable[certs.Cert] = None,
**sslctx_kwargs
) -> SSL.Context:
"""
cert: A certs.Cert object or the path to a certificate
chain file.
handle_sni: SNI handler, should take a connection object. Server
name can be retrieved like this:
connection.get_servername()
The request_client_cert argument requires some explanation. We're
supposed to be able to do this with no negative effects - if the
client has no cert to present, we're notified and proceed as usual.
Unfortunately, Android seems to have a bug (tested on 4.2.2) - when
an Android client is asked to present a certificate it does not
have, it hangs up, which is frankly bogus. Some time down the track
we may be able to make the proper behaviour the default again, but
until then we're conservative.
"""
if request_client_cert:
verify = SSL.VERIFY_PEER
else:
verify = SSL.VERIFY_NONE
context = _create_ssl_context(
ca_pemfile=chain_file,
verify=verify,
verify_callback=accept_all,
**sslctx_kwargs,
)
context.use_privatekey(key)
if isinstance(cert, certs.Cert):
context.use_certificate(cert.x509)
else:
context.use_certificate_chain_file(cert)
if extra_chain_certs:
for i in extra_chain_certs:
context.add_extra_chain_cert(i.x509)
if handle_sni:
# SNI callback happens during do_handshake()
context.set_tlsext_servername_callback(handle_sni)
if dhparams:
SSL._lib.SSL_CTX_set_tmp_dh(context._context, dhparams)
return context
def is_tls_record_magic(d):
    """
    Returns:
        True, if the passed bytes start with the TLS record magic bytes.
        False, otherwise.
    """
    # TLS ClientHello magic, works for SSLv3, TLSv1.0, TLSv1.1, TLSv1.2
    # http://www.moserware.com/2009/06/first-few-milliseconds-of-https.html#client-hello
    header = d[:3]
    if len(header) != 3:
        return False
    content_type, major, minor = header[0], header[1], header[2]
    return content_type == 0x16 and major == 0x03 and 0x0 <= minor <= 0x03
def get_client_hello(rfile):
    """
    Peek into the socket and read all records that contain the initial client hello message.

    rfile:
        The buffered read-file of the client connection (must support ``peek``).

    Returns:
        The raw handshake packet bytes, without TLS record header(s).
    """
    client_hello = b""
    client_hello_size = 1
    offset = 0
    # Keep peeking TLS records (without consuming them) until the whole
    # handshake message is assembled; its true length is encoded in bytes
    # 1-3 of the first record body and is read below once available.
    while len(client_hello) < client_hello_size:
        record_header = rfile.peek(offset + 5)[offset:]
        if not is_tls_record_magic(record_header) or len(record_header) < 5:
            raise exceptions.TlsProtocolException(
                'Expected TLS record, got "%s" instead.' % record_header)
        record_size = struct.unpack_from("!H", record_header, 3)[0] + 5  # payload length + 5-byte record header
        record_body = rfile.peek(offset + record_size)[offset + 5:]
        if len(record_body) != record_size - 5:
            raise exceptions.TlsProtocolException(
                "Unexpected EOF in TLS handshake: %s" % record_body)
        client_hello += record_body
        offset += record_size
        client_hello_size = struct.unpack("!I", b'\x00' + client_hello[1:4])[0] + 4
    return client_hello
class ClientHello:
    """Parsed view over a raw TLS ClientHello (handshake header already stripped)."""
    def __init__(self, raw_client_hello):
        # Parse the raw bytes with the Kaitai-generated TLS ClientHello reader.
        self._client_hello = tls_client_hello.TlsClientHello(
            KaitaiStream(io.BytesIO(raw_client_hello))
        )
    @property
    def cipher_suites(self):
        """The cipher suites offered by the client, in offer order."""
        return self._client_hello.cipher_suites.cipher_suites
    @property
    def sni(self) -> typing.Optional[bytes]:
        """The Server Name Indication host name, or None if absent/invalid."""
        if self._client_hello.extensions:
            for extension in self._client_hello.extensions.extensions:
                # Extension type 0x00 is server_name; accept it only when it
                # carries exactly one host_name entry (name_type 0) that is a
                # syntactically valid host.
                is_valid_sni_extension = (
                    extension.type == 0x00 and
                    len(extension.body.server_names) == 1 and
                    extension.body.server_names[0].name_type == 0 and
                    check.is_valid_host(extension.body.server_names[0].host_name)
                )
                if is_valid_sni_extension:
                    return extension.body.server_names[0].host_name
        return None
    @property
    def alpn_protocols(self):
        """ALPN protocol names offered by the client (extension 0x10), or []."""
        if self._client_hello.extensions:
            for extension in self._client_hello.extensions.extensions:
                if extension.type == 0x10:
                    return list(x.name for x in extension.body.alpn_protocols)
        return []
    @property
    def extensions(self) -> typing.List[typing.Tuple[int, bytes]]:
        """All extensions as (type, raw-body) pairs."""
        ret = []
        if self._client_hello.extensions:
            for extension in self._client_hello.extensions.extensions:
                # Prefer the unparsed bytes when Kaitai kept them around.
                body = getattr(extension, "_raw_body", extension.body)
                ret.append((extension.type, body))
        return ret
    @classmethod
    def from_file(cls, client_conn) -> "ClientHello":
        """
        Peek into the connection, read the initial client hello and parse it to obtain ALPN values.
        client_conn:
            The :py:class:`client connection <seleniumwire.thirdparty.mitmproxy.connections.ClientConnection>`.
        Returns:
            :py:class:`client hello <seleniumwire.thirdparty.mitmproxy.net.tls.ClientHello>`.
        """
        try:
            raw_client_hello = get_client_hello(client_conn)[4:]  # exclude handshake header.
        except exceptions.ProtocolException as e:
            raise exceptions.TlsProtocolException('Cannot read raw Client Hello: %s' % repr(e))
        try:
            return cls(raw_client_hello)
        except EOFError as e:
            raise exceptions.TlsProtocolException(
                f"Cannot parse Client Hello: {e!r}, Raw Client Hello: {binascii.hexlify(raw_client_hello)!r}"
            )
    def __repr__(self):
        return f"ClientHello(sni: {self.sni}, alpn_protocols: {self.alpn_protocols})"
| wkeeling/selenium-wire | seleniumwire/thirdparty/mitmproxy/net/tls.py | tls.py | py | 17,480 | python | en | code | 1,689 | github-code | 90 |
# Find the smallest pre-tax price whose 8% tax (rounded down) equals a and
# whose 10% tax (rounded down) equals b; print -1 if no such price exists.
a, b = map(int, input().split())
price = 1
answer = -1
while int(price * 0.08) <= a:
    if int(price * 0.08) == a and int(price * 0.1) == b:
        answer = price
        break
    price += 1
print(answer)
30337954177 | #!/usr/bin/python3
""" Main file """
import oca
import time
def main():
    """Smoke-test the OpenCanalyzer hardware link: connect, send a few
    messages, then drain and print everything received."""
    # NOTE(review): the serial device path is hard-coded (macOS-style tty).
    oca_hw = oca.OpenCanalyzerHw('/dev/tty.usbmodem401')
    oca_hw.start()
    oca_hw.sync()
    oca_hw.send_message([0x33, 0x34])
    oca_hw.send_message([0x35, 0x36])
    oca_hw.send_message(b"Hello World!")
    # Give the device time to answer before polling the receive queue.
    time.sleep(2)
    for i in range(oca_hw.get_number_of_messages()):
        print(oca_hw.get_message())
    oca_hw.stop()
if __name__ == "__main__":
    # execute only if run as a script
    main()
| Open-Canalyzer/Tools | python_communication_test/main.py | main.py | py | 516 | python | en | code | 0 | github-code | 90 |
31544161085 | from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4 import QtTest
import os
class CardLabel(QLabel):
    """QLabel that displays a single card image (the card back when no name
    is given), falling back to a textual placeholder if the file is missing."""

    # Path components of the directory holding the card images, relative to
    # the current working directory.
    CARDS_DIR = ("view", "img_matgo", "cards")

    def __init__(self, window, name=None):
        """Create the label inside *window*; *name* is the image file name,
        or None for the card-back image."""
        super().__init__(window)
        self.setAlignment(Qt.AlignCenter)
        self.setStyleSheet("color:red;")
        # BUG FIX: the original embedded backslashes in a non-raw string
        # ("view\img_matgo\cards\\..."), which contains invalid escape
        # sequences and only resolves on Windows.  Joining the components
        # with os.path.join is portable.
        filename = name if name else "tail.png"
        self.setImage(os.path.join(os.getcwd(), *self.CARDS_DIR, filename))

    def setImage(self, image):
        """Load *image* into the label and resize to fit, or show a
        fixed-size "not found" placeholder when the file does not exist."""
        if os.path.exists(image):
            card_image = QPixmap(image)
            self.setPixmap(card_image)
            self.resize(card_image.size().width(), card_image.size().height())
        else:
            self.resize(37, 60)
            self.setText("Image\nnot\nfound")
class Status:
    """Score-board widgets for one player: a text status box plus one small
    counter label per captured-card category (gwang/animal/dan/pee).
    isEnemy selects the upper (enemy) or lower (player) row of coordinates."""
    class StatusLabel(QLabel):
        # Free-text status box at a fixed position beside the board.
        def __init__(self, window, isEnemy):
            super().__init__(window)
            self.setStyleSheet("background-color: white;color: black; padding-left: 2px;")
            if isEnemy:
                self.move(399, 139)
            else:
                self.move(399, 232)
            self.resize(72, 88)
            self.setFont(QFont("Times", 9))
            self.show()
    class CountLabel(QLabel):
        # Base class for the small numeric badges; hides itself at count 0.
        def __init__(self, window):
            super().__init__(window)
            self.setStyleSheet("background-color: white;color: black;")
            self.setFont(QFont("Times", 9))
        def setText(self, num):
            if num == 0:
                self.hide()
                return
            else:
                self.show()
                self.raise_()
            super().setText(str(num))
            # Widen the badge for two-digit counts.
            if num < 10:
                self.resize(7, 11)
            else:
                self.resize(13, 11)
    class GwangLabel(CountLabel):
        # Badge over the captured "gwang" pile; x shifts 7px per card.
        def __init__(self, window, isEnemy):
            self.isEnemy = isEnemy
            super().__init__(window)
        def setText(self, num):
            if type(num) is str:
                num = int(num)
            super().setText(num)
            if self.isEnemy:
                self.move(33+7*num, 54)
            else:
                self.move(33+7*num, 444)
    class AnimalLabel(CountLabel):
        # Badge over the captured "animal" pile.
        def __init__(self, window, isEnemy):
            self.isEnemy = isEnemy
            super().__init__(window)
        def setText(self, num):
            if type(num) is str:
                num = int(num)
            super().setText(num)
            if self.isEnemy:
                self.move(146+7*num, 54)
            else:
                self.move(150+7*num, 444)
    class DanLabel(CountLabel):
        # Badge over the captured "dan" (ribbon) pile.
        def __init__(self, window, isEnemy):
            self.isEnemy = isEnemy
            super().__init__(window)
        def setText(self, num):
            if type(num) is str:
                num = int(num)
            super().setText(num)
            if self.isEnemy:
                self.move(268+7*num, 54)
            else:
                self.move(272+7*num, 444)
    class PeeLabel(CountLabel):
        # Badge over the "pee" pile; positioned by card count *num* but
        # displays the pee point total *sum*.
        def __init__(self, window, isEnemy):
            self.isEnemy = isEnemy
            super().__init__(window)
        def setText(self, num, sum):
            if type(num) is str:
                num = int(num)
            super().setText(sum)
            if self.isEnemy:
                self.move(33+7*num, 122)
            else:
                self.move(33+7*num, 375)
    def __init__(self, window, isEnemy): # FIX
        self.status = self.StatusLabel(window, isEnemy)
        self.gwanglabel = self.GwangLabel(window, isEnemy)
        self.animallabel = self.AnimalLabel(window, isEnemy)
        self.danlabel = self.DanLabel(window, isEnemy)
        self.peelabel = self.PeeLabel(window, isEnemy)
    def setParent(self, window):
        # Re-parent every sub-label together (e.g. when the board is rebuilt).
        self.status.setParent(window)
        self.gwanglabel.setParent(window)
        self.animallabel.setParent(window)
        self.danlabel.setParent(window)
        self.peelabel.setParent(window)
class Dialog(QLabel):
    """Base pop-up panel: a styled QLabel centred inside its parent window."""
    def __init__(self, window, width, height):
        super().__init__(window)
        self.resize(width, height)
        # Centre the dialog within the parent window.
        left = (window.width() - width) // 2
        top = (window.height() - height) // 2
        self.move(left, top)
        self.setStyleSheet("background-color: #f0feb8;padding-left:5px; padding-right:5px")
        self.show()
class ShakeDialog(Dialog): # 180 120
    """Dialog announcing a "shake": a title plus one row with the shaken cards."""
    def __init__(self, window, cards, width, height):
        super().__init__(window, width, height)
        qv = QVBoxLayout()
        titleLabel = QLabel("흔들었습니다", self)
        titleLabel.setFont(QFont("Times", 14, QFont.Bold))
        qv.addWidget(titleLabel, 0, Qt.AlignCenter)
        # One horizontal row showing each shaken card.
        qh = QHBoxLayout()
        for card in cards:
            label = CardLabel(self, card.imageName)
            qh.addWidget(label)
        qv.addLayout(qh)
        self.setLayout(qv)
class ChongtongDialog(Dialog):
    """Dialog announcing a "chongtong" (four of a kind), showing the winning
    cards laid out in rows of four."""
    def __init__(self, window, cards, width, height):
        super().__init__(window, width, height)
        qv = QVBoxLayout()
        titleLabel = QLabel("총통!", self)
        titleLabel.setFont(QFont("Times", 14, QFont.Bold))
        qv.addWidget(titleLabel, 0, Qt.AlignCenter)
        # Lay the cards out in rows of four.  This single loop covers both
        # the 4-card and the 8-card case that the original spelled out as
        # two duplicated branches, and generalizes to any count.
        for start in range(0, len(cards), 4):
            qh = QHBoxLayout()
            for card in cards[start:start + 4]:
                qh.addWidget(CardLabel(self, card.imageName))
            qv.addLayout(qh)
        self.setLayout(qv)
class ResultDialog(Dialog):
    """End-of-round dialog: verdict title, per-scoring messages, winnings."""
    def __init__(self, window, title, width, height, messages=None, money=None):
        super().__init__(window, width, height)
        qv = QVBoxLayout()
        titleLabel = QLabel(title, window)
        titleLabel.setFont(QFont("Times", 24, QFont.Bold))
        qv.addWidget(titleLabel, 0, Qt.AlignCenter)
        # With no messages the round is a draw ("무승부").
        if messages:
            messageLabel = QLabel('\n'.join(messages), window)
            messageLabel.setFont(QFont("Times", 14))
        else:
            messageLabel = QLabel('무승부', window)
            messageLabel.setFont(QFont("Times", 14))
        qv.addWidget(messageLabel, 0, Qt.AlignCenter)
        # With no money amount the game restarts ("재시작합니다").
        if money:
            moneyLabel = QLabel(str(money)+"원", window)
            moneyLabel.setFont(QFont("Times", 24, QFont.Bold))
        else:
            moneyLabel = QLabel("재시작합니다", window)
            moneyLabel.setFont(QFont("Times", 24, QFont.Bold))
        qv.addWidget(moneyLabel, 0, Qt.AlignCenter)
        self.setLayout(qv)
# NOTE(review): this class is an exact, line-for-line duplicate of the
# ResultDialog defined immediately above; this second definition shadows the
# first and one of the two should be removed.
class ResultDialog(Dialog):
    """End-of-round dialog: verdict title, per-scoring messages, winnings."""
    def __init__(self, window, title, width, height, messages=None, money=None):
        super().__init__(window, width, height)
        qv = QVBoxLayout()
        titleLabel = QLabel(title, window)
        titleLabel.setFont(QFont("Times", 24, QFont.Bold))
        qv.addWidget(titleLabel, 0, Qt.AlignCenter)
        if messages:
            messageLabel = QLabel('\n'.join(messages), window)
            messageLabel.setFont(QFont("Times", 14))
        else:
            messageLabel = QLabel('무승부', window)
            messageLabel.setFont(QFont("Times", 14))
        qv.addWidget(messageLabel, 0, Qt.AlignCenter)
        if money:
            moneyLabel = QLabel(str(money)+"원", window)
            moneyLabel.setFont(QFont("Times", 24, QFont.Bold))
        else:
            moneyLabel = QLabel("재시작합니다", window)
            moneyLabel.setFont(QFont("Times", 24, QFont.Bold))
        qv.addWidget(moneyLabel, 0, Qt.AlignCenter)
        self.setLayout(qv)
class FieldGUI:
    """View helper for the shared field: positions card widgets on the board
    and plays the matching sound effects via signals on *parent*."""
    def __init__(self, parent):
        self.parent = parent
    def tofield(self, card, slot, pos, arrange=False):
        """Move *card* to field *slot*, stacked at depth *pos*; flip it face
        up first if needed.  *arrange* suppresses the sound (silent re-layout)."""
        if not card.fliped:
            card.flip()
            self.parent.flipcard.emit(card)
        # Slots alternate between two rows; each extra card in a slot is
        # offset by 5px so the stack stays visible.
        self.parent.movecard.emit(card, 10+slot//2*55+5*pos+20*(slot%2), 150+85*(slot%2)+5*pos)
        if pos==0:
            if not arrange:
                self.parent.playsound.emit("whoop")
        else:
            if not arrange:
                self.parent.playsound.emit("whip")
    def clear(self):
        """Announce a field sweep ("싹쓸") and pause for the jingle."""
        print('싹쓸')
        self.parent.playsound.emit("clear")
        QtTest.QTest.qWait(1500)
def attachEventHand(controller, hand, field):
    """Make every card in *hand* clickable; a click disables all handlers,
    removes the "matches field" markers, and emits the chosen index."""
    def select(number):
        # Tear down all click handlers and marker children before answering,
        # so only one selection can ever be emitted.
        for i in range(len(hand)):
            controller.cardlabels[hand[i]].mousePressEvent = None
            for label in controller.cardlabels[hand[i]].findChildren(QWidget):
                label.setParent(None)
        controller.answer.emit(number)
    for i in range(len(hand)):
        # number=i binds the loop variable as a default argument, avoiding
        # Python's late-binding closure pitfall.
        controller.cardlabels[hand[i]].mousePressEvent = lambda state, number=i:select(number)
        if field.exist(hand[i]):
            # Mark cards that can capture something on the field.
            exist = QLabel("↖", controller.cardlabels[hand[i]])
            exist.show()
class PlayerGUI:
    """View helper for one player: moves card widgets into the hand and the
    captured piles, plays announcement sounds/dialogs via signals on *parent*,
    and asks the (console) user for go/push/dual decisions."""
    def __init__(self, parent, player, field):
        self.parent = parent
        self.player = player
        self.field = field
    def tohand(self, card, slot, arrange=False):
        """Place *card* at hand position *slot* (enemy cards stay face down).
        *arrange* suppresses the deal sound."""
        if self.player.isEnemy:
            self.parent.movecard.emit(card, 476+(slot%5)*40, 5+68*(slot//5))
        else:
            card.flip()
            self.parent.flipcard.emit(card)
            self.parent.movecard.emit(card, 476+(slot%5)*40, 326+68*(slot//5))
        if not arrange:
            self.parent.playsound.emit("whoop")
    def toplayer(self, cards):
        """Route a captured card (or list of cards) to its pile by category."""
        if type(cards) is list:
            for card in cards:
                if card.prop == "gwang":
                    self.togwang(card)
                elif card.prop == "animal":
                    self.toanimal(card)
                elif card.prop == "dan":
                    self.todan(card)
                else:
                    self.topee(card)
            if len(cards) != 0:
                self.parent.playsound.emit("whoop")
        else:
            if cards.prop == "gwang":
                self.togwang(cards)
            elif cards.prop == "animal":
                self.toanimal(cards)
            elif cards.prop == "dan":
                self.todan(cards)
            else:
                self.topee(cards)
            self.parent.playsound.emit("whoop")
    # The to* helpers fan cards out 7px per card already in the pile;
    # y selects the enemy (top) or player (bottom) row.
    def togwang(self, card):
        self.parent.raisecard.emit(card)
        if self.player.isEnemy:
            self.parent.movecard.emit(card, 2+(len(self.player.gwang)-1)*7, 3)
        else:
            self.parent.movecard.emit(card, 2+(len(self.player.gwang)-1)*7, 395)
    def toanimal(self, card):
        self.parent.raisecard.emit(card)
        if self.player.isEnemy:
            self.parent.movecard.emit(card, 115+(len(self.player.animal)-1)*7, 3)
        else:
            self.parent.movecard.emit(card, 119+(len(self.player.animal)-1)*7, 395)
    def todan(self, card):
        self.parent.raisecard.emit(card)
        if self.player.isEnemy:
            self.parent.movecard.emit(card, 237+(len(self.player.dan)-1)*7, 3)
        else:
            self.parent.movecard.emit(card, 241+(len(self.player.dan)-1)*7, 395)
    def topee(self, card):
        self.parent.raisecard.emit(card)
        if self.player.isEnemy:
            self.parent.movecard.emit(card, 2+(len(self.player.pee)-1)*7, 72)
        else:
            self.parent.movecard.emit(card, 2+(len(self.player.pee)-1)*7, 325)
    def selectdual(self):
        # Console prompt: treat a dual card as pee unless the user says "no".
        answer = input("Treat as pee? : ")
        if answer == "no":
            return "animal"
        else:
            return "pee"
    def whattoget(self, cards):
        # NOTE(review): input() returns a str, so `num == 0` is never true
        # and this always returns 1 — looks like a placeholder; verify.
        num = input("Choose a card to get : ")
        if num == 0:
            return 0
        else:
            return 1
    def askgo(self):
        # Console prompt: continue ("go") only on an exact "yes".
        answer = input("Go? : ")
        if answer == "yes":
            return True
        else:
            return False
    def chongtong(self, cards1, cards2):
        """Show the chongtong dialog for one or both four-of-a-kind sets."""
        if not(cards1 and cards2):
            if cards1:
                self.parent.chongtong.emit(cards1, 230, 140)
            else:
                self.parent.chongtong.emit(cards2, 230, 140)
        else:
            cards1.extend(cards2)
            self.parent.chongtong.emit(cards1, 230, 280)
        QtTest.QTest.qWait(2000)
    def shake(self, cards):
        """Announce a "shake" with sound and dialog."""
        self.parent.playsound.emit("shake")
        self.parent.shake.emit(cards, 200, 140)
        QtTest.QTest.qWait(2000)
    def bomb(self, cards):
        """Play three cards in quick succession (a "bomb"); returns whatever
        field.put returned for the last card played."""
        self.field.put(self.player.put(cards.pop(0)))
        QtTest.QTest.qWait(400)
        self.field.put(self.player.put(cards.pop(0)))
        QtTest.QTest.qWait(400)
        firstput = self.field.put(self.player.put(cards.pop(0)))
        QtTest.QTest.qWait(400)
        self.parent.playsound.emit("bomb")
        QtTest.QTest.qWait(500)
        return firstput
    # The announcement helpers below print the event name, play its jingle,
    # and block briefly so the sound can finish.
    def threefuck(self):
        print('쓰리뻑')
        self.parent.playsound.emit("threefuck")
        QtTest.QTest.qWait(1500)
    def allgodori(self):
        print('고도리')
        self.parent.playsound.emit("godori")
        QtTest.QTest.qWait(1500)
    def allreddan(self):
        print('홍단')
        self.parent.playsound.emit("reddan")
        QtTest.QTest.qWait(1500)
    def allbluedan(self):
        print('청단')
        self.parent.playsound.emit("bluedan")
        QtTest.QTest.qWait(1500)
    def allchodan(self):
        print('초단')
        self.parent.playsound.emit("chodan")
        QtTest.QTest.qWait(1500)
    def allgwang(self, count, bee=False):
        # *bee* marks the three-gwang combination that includes the rain card.
        if not bee:
            print('{}광'.format(count))
        else:
            print("비삼광")
        self.parent.playsound.emit("gwang{}".format(count))
        QtTest.QTest.qWait(1500)
    def go(self, count):
        print("{}고".format(count))
        self.parent.playsound.emit("go{}".format(count))
        QtTest.QTest.qWait(1500)
    def stop(self):
        print("스톱")
        self.parent.playsound.emit("stop")
        QtTest.QTest.qWait(1500)
    def kiss(self):
        print("쪽")
        self.parent.playsound.emit("kiss")
        QtTest.QTest.qWait(1500)
    def fuck(self):
        print("뻑")
        self.parent.playsound.emit("fuck")
        QtTest.QTest.qWait(1500)
    def getfuck(self):
        print("뻑 얻음")
        self.parent.playsound.emit("getfuck")
        QtTest.QTest.qWait(1500)
    def jafuck(self):
        # NOTE(review): plays the "getfuck" sound rather than a dedicated
        # "jafuck" one — confirm whether that is intentional.
        print("자뻑")
        self.parent.playsound.emit("getfuck")
        QtTest.QTest.qWait(1500)
    def tadack(self):
        print("따닥")
        self.parent.playsound.emit("tadack")
        QtTest.QTest.qWait(1500)
    def result(self, info):
        """Show the end-of-round dialog: draw, win, or loss with messages/money."""
        if info["winner"] == None:
            self.parent.playsound.emit("lose")
            self.parent.result.emit("나가리", 260, 140, None, None)
        elif info["winner"]:
            self.parent.playsound.emit("win")
            self.parent.result.emit("승리", 260, 240+10*len(info["messages"]), info["messages"], info["money"])
        else:
            self.parent.playsound.emit("lose")
            self.parent.result.emit("패배", 260, 240+10*len(info["messages"]), info["messages"], info["money"])
        QtTest.QTest.qWait(4500)
    def askpush(self):
        # Console prompt: push only on an exact "yes".
        answer = input("Push? : ")
        if answer == "yes":
            return True
        else:
            return False
73332690858 | # https://programmers.co.kr/learn/courses/30/lessons/77486
def sell(dic, answer, seller, value):
    """Credit *seller* with 90% of *value* (rounded) and pass the 10% fee up
    the referral chain in *dic* until it reaches "-" or rounds to zero.

    Mutates *answer* in place; iterative equivalent of the recursive walk.
    """
    while True:
        passed_up = int(value * 0.1)
        answer[seller] += value - passed_up
        if dic[seller] == "-" or passed_up == 0:
            break
        seller, value = dic[seller], passed_up
def solution(enroll, referral, seller, amount):
    """Return each member's total income (in enroll order) after every sale
    has propagated its 10% referral fee up the chain."""
    referrer_of = dict(zip(enroll, referral))
    income = dict.fromkeys(enroll, 0)
    for who, count in zip(seller, amount):
        # Each unit sells for 100 won.
        sell(referrer_of, income, who, count * 100)
    return list(income.values())
# Sample test from the problem statement (programmers.co.kr lesson 77486).
enroll = ["john", "mary", "edward", "sam", "emily", "jaimie", "tod", "young"]
referral = ["-", "-", "mary", "edward", "mary", "mary", "jaimie", "edward"]
seller = ["young", "john", "tod", "emily", "mary"]
amount = [12, 4, 2, 5, 10]
print(solution(enroll, referral, seller, amount))
72201265258 | # Medium
# You're given an array of integers and another array of three distinct integers. The first array is guaranteed to
# only contain integers that are in the second array, and the second array array represents a desired order for the
# integers in the first array. For example, a second array of [x, y, z] represents a desired order of [x, x, ..., x,
# y, y, ..., z, z, ..., z] in the first array.
# Write a function that sorts the first array according to the desired order in the second array.
# The function should perform this in place, and it shouldn't use any auxiliary space.
# Note that the desired order won't necessarily be ascending or descending and that the first array won't necessarily
# contain all three integers found in the second array - it might only contain one or two.
# Sample Input
# array = [1, 0, 0, -1, -1, 0, 1, 1]
# order = [0, 1, -1]
# Sample Output
# [0, 0, 0, 1, 1, 1, -1, -1]
def threeNumberSort(array, order):
    """Sort *array* in place so its values follow the 3-value *order*.

    Selection-style pass: move every occurrence of order[0] to the front,
    then every order[1]; the order[2] values are left at the back.
    O(n) time, O(1) extra space.
    """
    write_pos = 0
    for value in order[:-1]:
        for read_pos in range(write_pos, len(array)):
            if array[read_pos] == value:
                array[read_pos], array[write_pos] = array[write_pos], array[read_pos]
                write_pos += 1
    return array
def swap(array, i, j):
    """Exchange the elements at positions i and j of *array* in place."""
    temp = array[i]
    array[i] = array[j]
    array[j] = temp
## T = O(n); S = O(1)
## n is the length of array, O(2n) = O(n)
def threeNumberSort(array, order):
    """Sort *array* in place so its values follow the 3-value *order*.

    Single Dutch-national-flag sweep with three pointers: everything left of
    *low* equals order[0], everything right of *high* equals order[2].
    O(n) time, O(1) extra space.
    """
    low, mid, high = 0, 0, len(array) - 1
    while mid <= high:
        current = array[mid]
        if current == order[0]:
            array[mid], array[low] = array[low], array[mid]
            low += 1
            mid += 1
        elif current == order[1]:
            mid += 1
        else:
            array[mid], array[high] = array[high], array[mid]
            high -= 1
    return array
# NOTE(review): identical to the swap() defined earlier in this file; this
# redefinition shadows it and could be removed.
def swap(array, i, j):
    # Exchange the elements at positions i and j of array, in place.
    array[i], array[j] = array[j], array[i]
| ArmanTursun/coding_questions | AlgoExpert/Sorting/Medium/Three Number Sort/Three Number Sort.py | Three Number Sort.py | py | 1,896 | python | en | code | 0 | github-code | 90 |
18350128039 | import bisect
# Count inversions (mod 1e9+7) of the sequence formed by concatenating K
# copies of the input array.
n, k = map(int, input().split())
values = list(map(int, input().split()))
MOD = 10 ** 9 + 7

# Work on the reversed array so "earlier position" means "later original
# index"; counting smaller earlier elements then counts inversions.
values.reverse()
sorted_all = sorted(values)

total = 0
# Cross-copy pairs: each of the K*(K-1)/2 ordered copy pairs contributes,
# for every element, the number of array elements strictly below it.
below_count = {}
for v in values:
    if v not in below_count:
        below_count[v] = bisect.bisect_left(sorted_all, v)
    total += k * (k - 1) // 2 * below_count[v]
# Within-copy inversions, counted once and multiplied by K.
for i in range(1, n):
    prefix = sorted(values[:i])
    total += bisect.bisect_left(prefix, values[i]) * k
    total %= MOD
print(total)
24609459290 | from random import*
from Omamoodul import*
n = []  # registered user names
p = []  # matching passwords (parallel list)

# Console menu (Estonian prompts); loops until option 3 (exit) is chosen.
# The account operations themselves live in the Omamoodul module.
while True:
    print("1- registreerimine ")
    print("2- autoriseerimine ")
    print("3- välja ")
    print("4- muuta nimi või parool")
    print("5- unustanud parooli taastamine ")
    print("6- kui sa tahad vadata teie parool ")
    choice = input("vali number: ")
    if choice == "1":
        # Register a new user.
        print("Registreerimine")
        nimi, psword = kasutajaandmed(n, p)
        print(f" nimi ({nimi}) ja password ({psword})")
    elif choice == "2":
        # Log in.
        print("autoriseerimine")
        nimi, par_ = aut(n, p)
    elif choice == "3":
        print("lõpp")
        break
    elif choice == "4":
        # Rename an existing user.
        vananimi = input("Kirjuta teie vana nimi: ")
        uusnimi = input("Kirjuta uue nimi, mida sa sooid: ")
        uss_nimi(n, vananimi, uusnimi)
    elif choice == "5":
        # Reset a forgotten password.
        nimi = input("Krjuta teie login: ")
        vanasalasõna = input("Kirjuta teie vana salasõna: ")
        uussalasõna = input("Kirjuta uus salasõna: ")
        uss_salasõna(n, p, nimi, vanasalasõna, uussalasõna)
    elif choice == "6":
        # Look up a user's password.
        nimiii = input("sisetage login, kellel sa tahad vadata salasõna: ")
        nimii(n, p, nimiii)
print(n, p)
11594198420 | # Dragalia Lost Manifest Parser to download the essential latest (2.0) assets for Private Server purposes, made with love by Ceris
# This version relies solely on the asset name, and ignores any older versions of the same asset.
import sys
import os
import hashlib
import json
import requests
import threading
import time
# from git import Repo
from io import BytesIO
import zipfile
### VARIABLES HERE. Set these to "1" to process the localization manifests. ###
Download_EN = 0
Download_CN = 0
Download_TW = 0
# The app platform doesn't make much a difference other than iOS not having V1 files. Options are "iOS/" or "Android/". Yes, the trailing slash is necessary.
App_Platform = ""
UserAgent = "Dragalia/174 CFNetwork/1209 Darwin/20.2.0"
session = requests.Session()
# Clone dl-datamine repo to local directory
scriptDir = os.path.dirname(os.path.realpath(sys.argv[0]))
# Migrate an old-style "manifest" checkout to the expected "_manifest" name.
if os.path.exists(scriptDir + "/manifest"):
    os.rename(scriptDir + "/manifest", scriptDir + "/_manifest")
if not os.path.exists(scriptDir + "/_manifest"):
##    print("Cloning the dl-datamine repo. This is over 3GB in size.")
##    Repo.clone_from("https://github.com/CerisWhite/dl-datamine", scriptDir + "/_manifest", depth=1)
    # Download the merged-manifests repo as a zip and unpack its "manifest"
    # folder next to this script as "_manifest".
    print("Getting the manifests. This is over 3GB in size.")
    zipdata = session.get("https://github.com/CerisWhite/dl-merged-manifests/archive/refs/heads/master.zip")
    zip2 = zipfile.ZipFile(BytesIO(zipdata.content))
    zip2.extractall(scriptDir + "/./")
    os.rename(scriptDir + "/dl-merged-manifests-master/manifest", scriptDir + "/_manifest")
# Define the download function to allow for threading
def DownloadData(Manifest_json, manifest_date, assetname, assethash, DownloadURL, DownloadPath):
    """Download one asset to *DownloadPath* and record it in *Manifest_json*.

    Retries up to 3 times (0.5s apart) on failure; does nothing when the
    asset name is already recorded.  Runs inside a worker thread.
    """
    if assetname in Manifest_json:
        return
    for attempt in range(3):
        try:
            DataStream = session.get(DownloadURL, stream=True, headers={"user-agent": UserAgent})
            # BUG FIX: the original never closed the output file and inserted
            # the manifest entry *before* downloading, so after one failure
            # every retry iteration saw the name already present, did
            # nothing, and the thread spun forever.  Record the entry only
            # once the file has been written successfully.
            with open(DownloadPath, "wb") as DataFile:
                for chunk in DataStream.iter_content(chunk_size=8192):
                    DataFile.write(chunk)
            Manifest_json.setdefault(assetname, []).append({"hash": assethash, "date": manifest_date})
            return
        except Exception:
            print("File " + assethash + " from manifest " + manifest_date + " failed to download.")
            time.sleep(0.5)
# Prepare
# Open the merged-manifest output files and create the per-locale asset
# directories.  NOTE(review): os.makedirs without exist_ok will raise if a
# previous run left these directories behind — confirm whether that guard is
# intentional.
JPManifest = open(scriptDir + "/master_manifest.json", "w+")
JPManifest_json = {}
os.makedirs(scriptDir + "/masterassets/latest")
if Download_EN == 1:
    ENManifest = open(scriptDir + "/master_enmanifest.json", "w+")
    ENManifest_json = {}
    os.makedirs(scriptDir + "/enassets/latest")
if Download_CN == 1:
    CNManifest = open(scriptDir + "/master_cnmanifest.json", "w+")
    CNManifest_json = {}
    os.makedirs(scriptDir + "/cnassets/latest")
if Download_TW == 1:
    TWManifest = open(scriptDir + "/master_twmanifest.json", "w+")
    TWManifest_json = {}
    os.makedirs(scriptDir + "/twassets/latest")
# Manifest folders sorted newest-first so only the latest version of each
# asset name is ever downloaded.
manifest_subdirectory = [x for x in sorted(os.listdir(scriptDir + "/_manifest/"),reverse=True)]
manifest_subdirectory_length = len(manifest_subdirectory)
def ManifestParser(Master_URL, current_manifest_json, Manifest_json, assetpath, manifest_date):
    """Walk one decoded manifest and queue a download thread for every asset
    name not yet recorded in *Manifest_json*.

    Covers both bundle categories plus the raw assets.  The original spelled
    these out as three nearly identical loops, two of which built the target
    directory with an inconsistent "//latest" double slash; this version uses
    a single loop and one consistent path form (same resulting directories).
    """
    asset_groups = [
        current_manifest_json['categories'][0]['assets'],
        current_manifest_json['categories'][1]['assets'],
        current_manifest_json['rawAssets'],
    ]
    for group in asset_groups:
        for entry in group:
            assetname = entry['name']
            assethash = entry['hash']
            # Assets are bucketed by the first two hex digits of their hash.
            HashIdentifier = assethash[:2]
            if assetname in Manifest_json:
                # Already fetched via a newer manifest; skip older versions.
                continue
            AssetPredictedPath = scriptDir + assetpath + "latest/" + HashIdentifier + "/"
            if not os.path.exists(AssetPredictedPath):
                os.makedirs(AssetPredictedPath)
            DownloadURL = Master_URL + HashIdentifier + "/" + assethash
            DownloadPath = AssetPredictedPath + assethash
            # Throttle: busy-wait until fewer than 5 threads (main + 4
            # workers) are alive before spawning the next download.
            while threading.active_count() >= 5:
                pass
            DownloadThread = threading.Thread(
                target=DownloadData,
                args=(Manifest_json, manifest_date, assetname, assethash, DownloadURL, DownloadPath),
            )
            DownloadThread.start()
# Process
# Main loop: walk every manifest folder (newest first) and feed each locale's
# manifest through ManifestParser.
iterator = 0
while iterator < manifest_subdirectory_length:
    currentName = manifest_subdirectory[iterator]
    # Folder names start with a YYYYMMDD date prefix.
    manifest_date = currentName[:8]
    manifest_directory = scriptDir + "/_manifest/" + currentName
    # This one manifest only exists for iOS; everything else is Android.
    if (currentName == "20221014_b1HyoeTFegeTexC0"):
        App_Platform = "iOS/"
    else:
        App_Platform = "Android/"
    Master_URL = "http://dragalialost.akamaized.net/dl/assetbundles/" + App_Platform
    # Japanese (master) manifest is always processed.
    JPcurrent_manifest = open(manifest_directory + "/assetbundle.manifest.json", "r")
    JPcurrent_manifest_json = json.load(JPcurrent_manifest)
    assetpath = "/masterassets/"
    ManifestParser(Master_URL, JPcurrent_manifest_json, JPManifest_json, assetpath, manifest_date)
    # Optional localisations; a missing file aborts the whole loop.
    if Download_EN == 1:
        try:
            ENcurrent_manifest = open(manifest_directory + "/assetbundle.en_us.manifest.json", "r")
        except:
            print("The specified en_us manifest does not exist.")
            break
        ENcurrent_manifest_json = json.load(ENcurrent_manifest)
        assetpath = "/enassets/"
        ManifestParser(Master_URL, ENcurrent_manifest_json, ENManifest_json, assetpath, manifest_date)
    if Download_CN == 1:
        try:
            CNcurrent_manifest = open(manifest_directory + "/assetbundle.zh_cn.manifest.json", "r")
        except:
            print("The specified zh_cn manifest does not exist.")
            break
        CNcurrent_manifest_json = json.load(CNcurrent_manifest)
        assetpath = "/cnassets/"
        ManifestParser(Master_URL, CNcurrent_manifest_json, CNManifest_json, assetpath, manifest_date)
    if Download_TW == 1:
        try:
            TWcurrent_manifest = open(manifest_directory + "/assetbundle.zh_tw.manifest.json", "r")
        except:
            print("The specified zh_tw manifest does not exist.")
            break
        TWcurrent_manifest_json = json.load(TWcurrent_manifest)
        assetpath = "/twassets/"
        ManifestParser(Master_URL, TWcurrent_manifest_json, TWManifest_json, assetpath, manifest_date)
    print("Manifest " + currentName + " finished.")
    iterator += 1
# Persist the merged per-locale manifests.
# NOTE(review): the manifest file handles opened above are never closed, and
# download threads may still be running at this point — verify the data is
# flushed before relying on these files.
New_JPManifest = json.dumps(JPManifest_json)
JPManifest.write(New_JPManifest)
if Download_EN == 1:
    New_ENManifest = json.dumps(ENManifest_json)
    ENManifest.write(New_ENManifest)
if Download_CN == 1:
    New_CNManifest = json.dumps(CNManifest_json)
    CNManifest.write(New_CNManifest)
if Download_TW == 1:
    New_TWManifest = json.dumps(TWManifest_json)
    TWManifest.write(New_TWManifest)
# NOTE(review): bare `exit` is a no-op — it is never called (missing parens).
exit
# Enjoy!
# - Ceris
| CerisWhite/dl-merged-manifests | EssentialAssets_iOS.py | EssentialAssets_iOS.py | py | 8,549 | python | en | code | 0 | github-code | 90 |
73245694377 | from typing import Any, Dict, Optional, Union, List
from sqlalchemy.orm import Session
import uuid
from app.core.security import get_password_hash, verify_password
from app.crud.base import CRUDBase
from app.models.transaction import Transaction
from app.schemas.transaction import TransactionCreate, TransactionUpdate
from app.models.bank import Bank
from app.models.company import Company
from app.models.user import User
from app.utils import StatusEnum, RankEnum
class CRUDTransaction(CRUDBase[Transaction, TransactionCreate, TransactionUpdate]):
    """CRUD helpers for Transaction rows, with bank/company scoping.

    BUG FIX (applies to every query below): SQLAlchemy's Query.where()/
    filter() take SQL expressions, not keyword arguments — the original
    ``.where(bank_id=bank.id)`` form raises TypeError at runtime.  All
    filters now use explicit column comparisons.
    """

    def get_all_by_bank(self, db: Session, *, bank: Bank) -> List[Transaction]:
        """Return every transaction belonging to *bank*."""
        return db.query(Transaction).where(Transaction.bank_id == bank.id).all()

    def get_multi_by_company(
        self, db: Session, *, company: Company, skip: int, limit: int
    ) -> List[Transaction]:
        """Return a page of transactions for all banks of *company*."""
        return (
            db.query(Transaction)
            .join(Transaction.bank)
            .where(Bank.company_id == company.id)
            .offset(skip)
            .limit(limit)
            .all()
        )

    def get_all_by_bank_scoped(
        self, db: Session, *, bank: Bank, scope: StatusEnum
    ) -> List[Transaction]:
        """Return every transaction of *bank* with status *scope*."""
        return (
            db.query(Transaction)
            .where(Transaction.bank_id == bank.id)
            .where(Transaction.status == scope)
            .all()
        )

    def get_multi_by_company_scoped(
        self, db: Session, *, company: Company, scope: StatusEnum, skip: int, limit: int
    ) -> List[Transaction]:
        """Return a page of *company* transactions with status *scope*."""
        return (
            db.query(Transaction)
            .join(Transaction.bank)
            .where(Bank.company_id == company.id)
            .where(Transaction.status == scope)
            .offset(skip)
            .limit(limit)
            .all()
        )

    def create(
        self, db: Session, *, obj_in: TransactionCreate, creator: User
    ) -> Transaction:
        """Persist a new transaction created by *creator*.

        BUG FIX: the original constructed a ``Bank`` model here (copy-paste
        from the bank CRUD) and then passed the ORM instance to
        ``super().create``, which expects a schema.  Build and persist a
        Transaction directly instead.
        """
        db_obj = Transaction(**obj_in.dict(), creator_id=creator.id)
        db.add(db_obj)
        db.commit()
        db.refresh(db_obj)
        return db_obj

    def update(
        self,
        db: Session,
        *,
        db_obj: Transaction,
        obj_in: Union[Transaction, Dict[str, Any]],
    ) -> Transaction:
        """Update a transaction; only PENDING transactions may be edited."""
        if db_obj.status is not StatusEnum.PENDING:
            # The original fell through silently here ("# Error" + pass) and
            # updated anyway; fail loudly as the comment intended.
            raise ValueError("only pending transactions can be updated")
        if isinstance(obj_in, dict):
            update_data = obj_in
        else:
            update_data = obj_in.dict(exclude_unset=True)
        return super().update(db, db_obj=db_obj, obj_in=update_data)

    def approve(
        self, db: Session, *, db_obj: Transaction, approver: User
    ) -> Transaction:
        """Mark the transaction approved and record who approved it."""
        return super().update(
            db,
            db_obj=db_obj,
            obj_in={"status": StatusEnum.APPROVED, "approver_id": approver.id},
        )

transaction = CRUDTransaction(Transaction)
| yudjinn/nwbnk-api | src/app/crud/transaction.py | transaction.py | py | 2,649 | python | en | code | 0 | github-code | 90 |
35219766329 | import sys
input = sys.stdin.readline

# Answer k rectangular-sum queries over an n x n grid with a 2-D prefix sum.
n, k = map(int, input().split())
grid = [list(map(int, input().split())) for _ in range(n)]
queries = [list(map(int, input().split())) for _ in range(k)]

# 1-based, zero-padded table: acc[i][j] = sum of grid[0:i][0:j].  The padding
# row/column removes every border special case the original handled by hand.
acc = [[0] * (n + 1) for _ in range(n + 1)]
for i in range(n):
    cur_row = acc[i + 1]
    prev_row = acc[i]
    for j in range(n):
        cur_row[j + 1] = grid[i][j] + prev_row[j + 1] + cur_row[j] - prev_row[j]

for x1, y1, x2, y2 in queries:
    # Inclusion–exclusion over the padded table.
    print(acc[x2][y2] - acc[x1 - 1][y2] - acc[x2][y1 - 1] + acc[x1 - 1][y1 - 1])
4494069771 | from django.urls import path
from posts import views
from posts.apps import PostsConfig
app_name = PostsConfig.name
urlpatterns = [
path('', views.index, name='index'),
path('create/', views.post_create, name='post_create'),
path('group/<slug:slug>/', views.group_posts, name='group_list'),
path(
'posts/<int:post_id>/comment/', views.add_comment, name='add_comment'
),
path('posts/<int:pk>/', views.post_detail, name='post_detail'),
path('posts/<int:pk>/edit/', views.post_edit, name='post_edit'),
path('profile/<str:username>/', views.profile, name='profile'),
]
| AlexandrVasilchuk/hw04_tests | yatube/posts/urls.py | urls.py | py | 608 | python | en | code | 2 | github-code | 90 |
7738653702 | # -*- encoding: utf-8 -*-
"""
@File : file_utils.py
@Time : 2020_01_28-23:02:59
@Author : zhenwang
@Description :
- Version 1.0.0: File created.
"""
def get_vars_from_file(mod_path, default=None, raise_exception=False):
    """Parse a Python source file and return its top-level literal assignments.

    Only simple single-target assignments whose right-hand side is a literal
    accepted by ast.literal_eval are collected; everything else is skipped.

    Args:
        mod_path: Path of the Python source file to inspect.
        default: Value to return when nothing could be parsed or collected.
        raise_exception: If True, re-raise parse/eval errors instead of
            printing a traceback and continuing.

    Returns:
        A dict mapping variable names to their literal values, or *default*
        when the file yields none.
    """
    import ast

    with open(mod_path, "r") as file_mod:
        data = file_mod.read()

    # BUG FIX: ast_data was left unbound when parsing failed without
    # raise_exception, which made the `if ast_data:` check below raise
    # NameError.  (Also dropped the unused ModuleType alias.)
    ast_data = None
    try:
        ast_data = ast.parse(data, filename=mod_path)
    except SyntaxError:
        if raise_exception:
            raise
        print("Syntax error 'ast.parse' can't read %r" % mod_path)
        import traceback
        traceback.print_exc()

    return_value = {}
    if ast_data:
        for body in ast_data.body:
            if body.__class__ == ast.Assign and len(body.targets) == 1:
                try:
                    # Fails (and is skipped) for tuple targets or non-literal
                    # right-hand sides.
                    return_value[body.targets[0].id] = ast.literal_eval(body.value)
                except Exception:
                    if raise_exception:
                        raise
                    print("AST error parsing for %r" % (mod_path))
                    import traceback
                    traceback.print_exc()
    return return_value if return_value else default
# example use
# a = 0
# variables = get_vars_from_file(__file__)
# print(variables)
| moliqingwa/DRLND | p2_continuous-control/file_utils.py | file_utils.py | py | 1,300 | python | en | code | 1 | github-code | 90 |
13173427476 | import os, json, traceback, time
from qgis.core import *
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtNetwork import *
from DlgWaiting import DlgWaiting
from GeosismaWindow import GeosismaWindow as gw
# SpatiaLite imports
from pyspatialite import dbapi2 as db
class DownloadRequests(DlgWaiting):
    # signals
    # done(ok): all requests finished; singleDone(ok): one request finished;
    # message(text, level): log line for the QGIS message log.
    done = pyqtSignal(bool)
    singleDone = pyqtSignal(bool)
    message = pyqtSignal(str, int)
    def __init__(self, parent=None):
        """Set up the waiting dialog and (re)wire the shared network manager."""
        DlgWaiting.__init__(self, parent)
        # Semaphore flags polled by run(); toggled from the signal handlers.
        self.singleFinished = True
        self.allFinished = True
        self.jsonRequests = None
        self.manager = QgsNetworkAccessManager.instance()
        # clean listeners to avoid overlap
        # (the manager is a singleton, so stale connections from a previous
        # dialog instance must be removed first; disconnect raises when
        # nothing is connected, hence the try/except guards)
        try:
            self.manager.authenticationRequired.disconnect()
        except:
            pass
        try:
            self.manager.finished.disconnect()
        except:
            pass
        # add new listeners
        self.manager.finished.connect(self.replyFinished)
        self.manager.authenticationRequired.connect(self.authenticationRequired)
        #self.setWindowModality(Qt.ApplicationModal)
    def __del__(self):
        # Best-effort detach from the shared network manager; swallow errors
        # because the signals may already be disconnected at teardown.
        try:
            self.manager.finished.disconnect(self.replyFinished)
            self.manager.authenticationRequired.disconnect(self.authenticationRequired)
        except Exception:
            pass
    def run(self):
        """Download every request of every team sequentially, driving the
        progress bar and pumping the Qt event loop between requests."""
        try:
            #self.requestsApi = gw.instance().downloadedRequestsApi
            self.downloadedTeams = gw.instance().downloadedTeams
            self.downloadedRequests = gw.instance().downloadedRequests
            # count how many download
            numDownload = 0
            for team in self.downloadedTeams:
                for request in team["requests"]:
                    numDownload += 1
            # init progress bar
            self.reset()
            self.setWindowTitle( self.tr("Scarica le schede Sopralluogo del Team") )
            self.setRange( 0, numDownload )
            QApplication.setOverrideCursor(Qt.WaitCursor)
            # set semaphores
            self.done.connect(self.setAllFinished)
            self.singleDone.connect(self.setSingleFinished)
            # for each request api
            self.allFinished = False
            #for requestApi in self.requestsApi:
            for index,team in enumerate(self.downloadedTeams):
                for requestApi in team["requests"]:
                    # create db
                    # NOTE(review): this resets self.jsonRequest (singular)
                    # but the archive step below reads self.jsonRequests
                    # (plural, set elsewhere) — likely a typo; verify which
                    # attribute replyFinished fills in.
                    self.jsonRequest = None
                    self.singleFinished = False
                    self.downloadRequests(requestApi)
                    # whait end of single request
                    # (busy-wait that keeps the GUI responsive by pumping
                    # events until singleDone fires)
                    while (not self.singleFinished):
                        qApp.processEvents()
                        time.sleep(0.1)
                    # archive request in self.downloadedTeams
                    self.downloadedTeams[index]["downloadedRequests"][requestApi] = self.jsonRequests
                    self.downloadedRequests.append(self.jsonRequests)
                    self.onProgress()
                # some other emitted done signal
                if (self.allFinished):
                    return
            gw.instance().downloadedTeams = self.downloadedTeams
            gw.instance().downloadedRequests = self.downloadedRequests
            self.done.emit(True)
        except Exception as e:
            try:
                traceback.print_exc()
            except:
                pass
            self.done.emit(False)
            # NOTE(review): e.message is a Python-2-ism; on Python 3 this
            # line itself raises AttributeError — confirm the target runtime.
            self.message.emit(e.message, QgsMessageLog.CRITICAL)
            raise e
        finally:
            QApplication.restoreOverrideCursor()
def setSingleFinished(self, success):
self.singleFinished = True
def setAllFinished(self, success):
self.allFinished = True
def downloadRequests(self, requestApi):
# get connection conf
settings = QSettings()
self.baseApiUrl = settings.value("/rt_geosisma_offline/baseApiUrl", "http://geosisma-test.faunalia.it/")
# for each request api
message = self.tr("Download Richiesta %s" % requestApi)
self.message.emit(message, QgsMessageLog.INFO)
request = QNetworkRequest()
url = QUrl(self.baseApiUrl + requestApi)
url.addQueryItem("format", "json")
request.setUrl(url)
# start download
self.manager.get(request)
# wait request finish to go to the next
self.singleFinished = False
def authenticationRequired(self, reply, authenticator ):
if self is None:
return
# check if reached mas retry
gw.instance().authenticationRetryCounter += 1
if (gw.instance().authenticationRetryCounter % gw.instance().maxAuthenticationError) == 0:
gw.instance().authenticationRetryCounter = 0 # reset counter
message = self.tr("Autenticazione fallita piu' di %d volte" % gw.instance().maxAuthenticationError)
self.message.emit(message, QgsMessageLog.CRITICAL)
QMessageBox.critical(self, gw.MESSAGELOG_CLASS, message)
# abort continuing request
reply.abort()
self.done.emit(False)
return
# if not authenticated ask credentials
if not gw.instance().autenthicated:
(ok, gw.instance().user, gw.instance().pwd) = QgsCredentials.instance().get("", gw.instance().user, gw.instance().pwd, self.tr("Inserisci User e PWD della tua utenza Geosisma"))
if not ok: # MEANS PRESED CANCEL
gw.instance().authenticationRetryCounter = 0
reply.abort()
message = self.tr("Mancata autenticazione")
self.message.emit(message, QgsMessageLog.WARNING)
self.done.emit(False)
return
# do authentication
authenticator.setUser(gw.instance().user)
authenticator.setPassword(gw.instance().pwd)
def replyFinished(self, reply):
if self is None:
return
# need auth
if reply.error() == QNetworkReply.AuthenticationRequiredError:
gw.instance().autenthicated = False
# do again until authenticated or reached max retry
self.manager.get(reply.request())
return
# received error
if reply.error():
message = self.tr("Errore nella HTTP Request: %d - %s" % (reply.error(), reply.errorString()) )
self.message.emit(message, QgsMessageLog.WARNING)
self.done.emit(False)
return
# well authenticated :)
gw.instance().autenthicated = True
gw.instance().authenticationRetryCounter = 0
from json import loads
raw = reply.readAll()
try:
self.jsonRequests = loads(raw.data())
except Exception as e:
try:
traceback.print_exc()
except:
pass
self.done.emit(False)
return
#gw.instance().downloadedRequests.append(jsonRequests)
# successfully end
self.singleDone.emit(True)
| faunalia/rt_geosisma_offline | DownloadRequests.py | DownloadRequests.py | py | 7,428 | python | en | code | 0 | github-code | 90 |
32886565235 | #coding=utf-8
from __future__ import print_function
import numpy as np
import numpy.random as nrd
from uniform import UniformPattern
class RWProperyPattern():
    """Overlay read/write flags on an access pattern.

    Wraps a base pattern (anything with ``access_max`` and
    ``generate_base``) and tags every access as WRITE or READ, forcing the
    first access to a never-seen object to be a WRITE so objects are
    created before they are read.
    """
    WRITE = 1
    READ = 0

    def __init__(self, base, write_ratio):
        self.base = base
        if write_ratio < 0:
            print("Warning! write_ratio ge 0. Asssuming write_ratio is 0.")
            write_ratio = 0
        self.write_ratio = write_ratio
        # objects that have already received their creating WRITE
        self.created = []

    def __str__(self):
        # FIX: used to reference self.write_ratio_expect, which is never
        # assigned anywhere, so str()/repr() raised AttributeError.
        return "write ratio: {0}".format(self.write_ratio)

    def __repr__(self):
        return str(self)

    def clean_state(self):
        """Forget which objects were created (next pass re-creates them)."""
        self.created = []

    def generate(self):
        # generate a uniform choose seq.
        return self.generate_property(self.base, with_base=True)

    def generate_property(self, base, with_base=False):
        """Yield rw flags (or (obj, flag) pairs when with_base=True).

        Flags are drawn i.i.d. with P(WRITE)=write_ratio, except that the
        first access to an unseen object is always a WRITE.
        """
        # generate the rw state seq.
        rw = nrd.choice([self.WRITE, self.READ], size=base.access_max,
                        p=[self.write_ratio, 1-self.write_ratio])
        for pair in zip(base.generate_base(fresh=False), rw):
            if pair[0] not in self.created:
                self.created.append(pair[0])
                if with_base:
                    yield (pair[0], self.WRITE)
                else:
                    yield self.WRITE
            else:
                if with_base:
                    yield pair
                else:
                    yield pair[1]
def generate(obj_num, access_max, write_ratio, times=1):
    """Yield (object, rw-flag) pairs from a uniform base pattern.

    A non-negative *times* resets the base state once and replays the
    pattern that many times over the shared state; a negative *times*
    streams forever, resetting the state before every pass.
    """
    base = UniformPattern(obj_num, access_max)
    pattern = RWProperyPattern(base, write_ratio)
    if times < 0:
        # endless mode: fresh state before each pass
        while True:
            base.clean_state()
            yield from pattern.generate()
    else:
        # finite mode: state is cleaned once and shared across passes
        base.clean_state()
        for _ in range(times):
            yield from pattern.generate()
if __name__ == '__main__':
    # Demo: histogram the object ids of one generated trace.
    import matplotlib.pyplot as plt
    # 100 objects, 1000 accesses, 70% write ratio, single pass
    y1 = generate(100, 1000, 0.7)
    # column 0 is the object id, column 1 the rw flag
    ny = np.array(list(y1))
    plt.hist(ny[:,0], bins=10)
    plt.show()
| FacelessManipulator/CachedBench | patterns/rw.py | rw.py | py | 1,634 | python | en | code | 0 | github-code | 90 |
2409077669 | import copy
from ly_kernel.db.BaseModel import *
import pickle
from base64 import b64encode, b64decode
from enums.FlowEnums import FlowOpType,SpecsNodeType
from ly_service.utils import Time
import json
class FlowOp(BaseModel):
    """
    Workflow flow instance (DB table ``wf_flow``).

    ``node_data`` holds two base64-encoded pickles: ``node_ready`` (the
    nodes currently waiting for action) and ``logs`` (the flat/nested audit
    trail of every node that has been processed).
    """
    __tablename__ = 'wf_flow'
    id = db.Column(db.Integer, primary_key=True)
    specs_data = db.Column(db.JSON, nullable=True, comment='流转过程')
    node_data = db.Column(db.JSON, nullable=True, comment='节点数据')
    is_completed = db.Column(db.Integer, default=0, nullable=True, comment='是否结束')
    is_invalid = db.Column(db.Integer, default=0, nullable=True, comment='是否作废')
    params = db.Column(db.String(1024), nullable=False, default='{}', comment='审批过程中传入的参数')
    createTime = db.Column(db.TIMESTAMP, nullable=False, server_default=db.text('CURRENT_TIMESTAMP'), comment='创建时间')
    updateTime = db.Column(db.TIMESTAMP, nullable=False, server_default=db.text('CURRENT_TIMESTAMP'),
                           server_onupdate=db.text('CURRENT_TIMESTAMP'), comment='更新时间')
    def __init__(self):
        self.node_data = {}
    # Invalidate this flow: mark it and clear ready nodes and logs.
    def invalid(self):
        self.is_invalid = 1
        self.node_data = {
            "node_ready": self.encode_pickle([]),
            "logs": self.encode_pickle([]),
        }
    def encode_pickle(self, v):
        # pickle -> base64 -> utf-8 str, so it can be stored in a JSON column
        return str(b64encode(pickle.dumps(v, protocol=pickle.HIGHEST_PROTOCOL)), encoding="utf-8")
    def decode_pickle(self, v):
        # inverse of encode_pickle
        return pickle.loads(b64decode(v))
    # Return the uid/name of the flow creator (first log entry).
    def get_creator(self):
        creator = self.decode_pickle(self.node_data.get("logs", self.encode_pickle([])))[0]
        return {
            "uid": creator.get("uid"),
            "user": creator.get("user"),
        }
    def get_my_leader(self, my_info=None):
        # Look up the leader of my_info (defaults to the creator) via RPC.
        # NOTE(review): LieYingApp is not imported in this module's visible
        # imports — confirm it is provided elsewhere.
        if my_info is None:
            my_info = self.get_creator()
        my_leader = LieYingApp.rpc_client(1, "centerApp:/user/myleader", json_data={"uid": my_info.get("uid")},
                                          method="GET")
        return {
            "my_info": my_info,
            "my_leader": my_leader.get("data")
        }
    def find_node_ready_by_uid(self, user_id, role_id=None):
        """Return the first ready node assigned to this user or roles, else None."""
        # NOTE(review): set(role_id) raises TypeError when role_id stays
        # None and the user check misses — confirm callers always pass it.
        node_ready = self.get_node_ready()
        for _node in node_ready:
            if _node.get("user").get("id") == user_id:
                return _node
            role_ids = [role["id"] for role in _node.get("role_list", [])]
            if set(role_id) & set(role_ids):
                return _node
        return None
    def find_all_node_ready(self):
        # All currently pending nodes.
        node_ready_list = self.get_node_ready()
        return node_ready_list
    def find_node_ready_noed_name(self):
        # Names of all currently pending nodes.
        node_readys = self.get_node_ready()
        return [node_ready["node_name"] for node_ready in node_readys]
    def get_node_ready(self):
        # Decoded pending-node list; empty list when the key is missing.
        node_ready = self.decode_pickle(self.node_data.get("node_ready")) if "node_ready" in self.node_data else []
        return node_ready
    def get_logs(self):
        # Decoded log list; empty list when the key is missing.
        logs = self.decode_pickle(self.node_data.get("logs")) if "logs" in self.node_data else []
        return logs
    # Can the creator still revoke the flow? Only while no node has been
    # agreed/refused/rejected anywhere in the (possibly nested) log tree.
    def is_revoke_by_creator(self):
        def _find_node(_logs):
            # Recursive search for any processed (non-NORMAL) node.
            is_find = False
            for _d_node in _logs:
                if type(_d_node) == list:
                    is_find = _find_node(_d_node)
                else:
                    if _d_node.get("state") == FlowOpType.AGREE.get_name():
                        return True
                    elif _d_node.get("state") == FlowOpType.REFUSE.get_name():
                        return True
                    elif _d_node.get("state") == FlowOpType.REJECT.get_name():
                        return True
                if is_find:
                    break
            return is_find
        logs = self.decode_pickle(self.node_data.get("logs", self.encode_pickle([])))
        return not _find_node(logs)
    def update_node_ready(self, node_ready, reissue_log=None, auto_complete_log=None):
        """Replace the pending-node list and extend the logs accordingly."""
        # deep copy via JSON round-trip so SQLAlchemy sees a new object
        node_data = json.loads(json.dumps(self.node_data))
        if reissue_log is None:
            real_node_ready = []
            # drop message nodes that are already completed
            for node in node_ready:
                if node["node_type"] != SpecsNodeType.Message.get_id():
                    real_node_ready.append(node)
            node_data["node_ready"] = self.encode_pickle(real_node_ready)
        node_data["logs"] = self.encode_pickle(
            self._private_add_logs(node_ready, reissue_log=reissue_log, auto_complete_log=auto_complete_log))
        self.node_data = node_data
    def add_node_ready(self, to_add_node):
        # NOTE(review): list.append returns None, so node_ready is None on
        # the next two lines — this method looks broken; confirm it is
        # actually used anywhere before relying on it.
        node_data = json.loads(json.dumps(self.node_data))
        node_ready = node_data["node_ready"].append(to_add_node)
        node_data["node_ready"] = self.encode_pickle(node_ready)
        node_data["logs"] = self.encode_pickle(self._private_add_logs(node_ready))
        self.node_data = node_data
    def add_logs(self, log):
        """Append one entry to the persisted log list."""
        logs = self.get_logs()
        logs.append(log)
        node_data = json.loads(json.dumps(self.node_data))
        node_data["logs"] = self.encode_pickle(logs)
        self.node_data = node_data
    # def update_logs(self, log_id, state, idea, params=None):
    #     if state.get("id") == FlowOpType.REFUSE.get_id():
    #         self.update_logs_refuse(log_id, state, idea, params)
    #     else:
    #         self.update_logs_other(log_id, state, idea, params)
    def update_logs(self, log_id, state, idea, params=None, approve_user_info=None):
        """Mark the NORMAL log entry with ``log_id`` as processed.

        Writes idea/params/approver and the new state+timestamp onto the
        entry; for a REFUSE a fresh NORMAL copy is appended so the node can
        be re-issued later. Handles both flat entries and nested lists.
        """
        is_update = False
        logs = self.get_logs()
        for _find_id in logs:
            if type(_find_id) == list:
                for _node_s in _find_id:
                    if _node_s.get("logid") == log_id and _node_s.get("state") == FlowOpType.NORMAL.get_name():
                        is_update = True
                        if idea is not None and idea != "":
                            _node_s["idea"] = idea
                        if params is not None:
                            _node_s["params"] = params
                        if approve_user_info is not None:
                            _node_s["uid"] = approve_user_info["user_id"]
                            _node_s["user"] = approve_user_info["user_name"]
                        _node_s["state"] = state.get("name")
                        _node_s["time"] = Time.timeStampToFormatByDatetime(int(Time.currentTime()))
                        if state.get("id") == FlowOpType.REFUSE.get_id():
                            # after a refusal, append a fresh NORMAL copy for re-issue
                            _node_s_copy = copy.deepcopy(_node_s)
                            _node_s_copy["state"] = FlowOpType.NORMAL.get_name()
                            _node_s_copy["idea"] = ""
                            _node_s_copy["params"] = {}
                            _node_s_copy["time"] = Time.timeStampToFormatByDatetime(int(Time.currentTime()))
                            _find_id.append(_node_s_copy)
                        break
                if is_update:
                    break
            else:
                if _find_id.get("logid") == log_id and _find_id.get("state") == FlowOpType.NORMAL.get_name():
                    if params is not None:
                        _find_id["params"] = params
                    if idea is not None and idea != "":
                        _find_id["idea"] = idea
                    if approve_user_info is not None:
                        _find_id["uid"] = approve_user_info["user_id"]
                        _find_id["user"] = approve_user_info["user_name"]
                    _find_id["state"] = state.get("name")
                    _find_id["time"] = Time.timeStampToFormatByDatetime(int(Time.currentTime()))
                    if state.get("id") == FlowOpType.REFUSE.get_id():
                        # after a refusal, append a fresh NORMAL copy for re-issue
                        _find_id_copy = copy.deepcopy(_find_id)
                        _find_id_copy["state"] = FlowOpType.NORMAL.get_name()
                        _find_id_copy["idea"] = ""
                        _find_id_copy["params"] = {}
                        _find_id_copy["time"] = Time.timeStampToFormatByDatetime(int(Time.currentTime()))
                        logs.append(_find_id_copy)
                    break
        node_data = json.loads(json.dumps(self.node_data))
        node_data["logs"] = self.encode_pickle(logs)
        self.node_data = node_data
    def update_logs_refuse(self, log_id, state, idea, params=None):
        """REFUSE-specific variant of update_logs.

        Marks the matching NORMAL entry as refused and rebuilds the list of
        outstanding NORMAL entries (old pending nodes are removed and
        re-appended, plus a fresh NORMAL copy of the refused node).
        """
        is_update_1 = False
        is_update_2 = False
        logs = self.get_logs()
        new_normal_log_list_1 = []
        will_remove_list_1 = []
        for _find_id in logs:
            if type(_find_id) == list:
                new_normal_log_list_2 = []
                will_remove_list_2 = []
                for _node_s in _find_id:
                    if _node_s.get("state") == FlowOpType.NORMAL.get_name():
                        if _node_s.get("logid") == log_id:
                            is_update_2 = True
                            if idea is not None and idea != "":
                                _node_s["idea"] = idea
                            if params is not None:
                                _node_s["params"] = params
                            _node_s["state"] = state.get("name")
                            _node_s["time"] = Time.timeStampToFormatByDatetime(int(Time.currentTime()))
                            # after refusing, append a pending copy for re-issue
                            _node_s_copy = copy.deepcopy(_node_s)
                            _node_s_copy["state"] = FlowOpType.NORMAL.get_name()
                            _node_s_copy["idea"] = ""
                            _node_s_copy["params"] = {}
                            new_normal_log_list_2.append(_node_s_copy)
                        else:
                            will_remove_list_2.append(_node_s)
                            new_normal_log_list_2.append(_node_s)
                if is_update_2:
                    # remove the old pending nodes
                    for node in will_remove_list_2:
                        _find_id.remove(node)
                    # add the new pending nodes
                    _find_id.extend(new_normal_log_list_2)
                    # the refuse operation re-adds the refused node above
                    break
            else:
                if _find_id.get("state") == FlowOpType.NORMAL.get_name():
                    if _find_id.get("logid") == log_id:
                        is_update_1 = True
                        if params is not None:
                            _find_id["params"] = params
                        if idea is not None and idea != "":
                            _find_id["idea"] = idea
                        _find_id["state"] = state.get("name")
                        _find_id["time"] = Time.timeStampToFormatByDatetime(int(Time.currentTime()))
                        _find_id_copy = copy.deepcopy(_find_id)
                        _find_id_copy["state"] = FlowOpType.NORMAL.get_name()
                        _find_id_copy["idea"] = ""
                        _find_id_copy["params"] = {}
                        new_normal_log_list_1.append(_find_id_copy)
                    else:
                        will_remove_list_1.append(_find_id)
                        new_normal_log_list_1.append(_find_id)
        if is_update_1:
            for node in will_remove_list_1:
                logs.remove(node)
            logs.extend(new_normal_log_list_1)
        # On refusal, delete the other pending nodes and re-add them on re-issue
        # if state.get("id") == FlowOpType.REFUSE.get_id():
        #     node_will_remove_list = []
        #     for _find_id in logs:
        #         if type(_find_id) == list:
        #             node_will_remove_list_child = []
        #             for _node_s in _find_id:
        #                 if _node_s.get("state") == FlowOpType.NORMAL.get_name():
        #                     node_will_remove_list_child.append(_node_s)
        #             for node_will_remove in node_will_remove_list_child:
        #                 _find_id.remove(node_will_remove)
        #         else:
        #             if _find_id.get("state") == FlowOpType.NORMAL.get_name():
        #                 node_will_remove_list.append(_find_id)
        #     for node_will_remove in node_will_remove_list:
        #         logs.remove(node_will_remove)
        node_data = json.loads(json.dumps(self.node_data))
        node_data["logs"] = self.encode_pickle(logs)
        self.node_data = node_data
    def _private_add_logs(self, node_ready, reissue_log=None, auto_complete_log=None):
        """Build the new logs list for update_node_ready and return it."""
        logs = self.get_logs()
        # append the re-issue log entry
        if reissue_log:
            logs.append(reissue_log)
        # append logs for auto-completed nodes
        if auto_complete_log:
            logs.extend(auto_complete_log)
        # build log entries for the new ready nodes, grouped by input_node
        _add = dict()
        # count message nodes: if every returned node is a message node,
        # the flow has finished
        msg_node_cnt = 0
        for _node in node_ready:
            input_node = _node.get("input_node")
            if input_node not in _add:
                _add[input_node] = []
            if _node.get("node_type") == SpecsNodeType.Message.get_id():
                state = FlowOpType.AGREE.get_name()
                msg_node_cnt += 1
            else:
                state = FlowOpType.NORMAL.get_name()
            _add[input_node].append({
                "logid": _node.get("node_id"),
                "uid": _node.get("user").get("id"),
                "user": _node.get("user").get("name"),
                "role_list": _node.get("role_list", []),
                "node_type": _node.get("node_type", None),
                "time": Time.timeStampToFormatByDatetime(int(Time.currentTime())),
                "state": state,
                "node_desc": _node.get('node_desc', ''),
                "node_activity_name": _node.get('node_activity_name', ''),
                "node_name": _node.get('node_name', ''),
                "node_tpl": _node.get("node_tpl", {})
            })
        if len(_add) > 0:
            def _find_node(_logs, log_id):
                # recursive duplicate check: is log_id already pending?
                is_find = False
                for _d_node in _logs:
                    if type(_d_node) == list:
                        is_find = _find_node(_d_node, log_id)
                    else:
                        if _d_node.get("logid") == log_id and _d_node.get("state") == FlowOpType.NORMAL.get_name():
                            return True
                    if is_find:
                        break
                return is_find
            for key, _add_node in _add.items():
                is_find = False
                for _log in _add_node:
                    is_find = _find_node(logs, _log.get("logid"))
                    if is_find:
                        break
                if not is_find:
                    # single entry flat, several entries as a nested group
                    if len(_add_node) == 1:
                        logs.append(_add_node[0])
                    else:
                        logs.append(_add_node)
        # all message nodes -> the flow is over
        if len(node_ready) == msg_node_cnt:
            logs.append({
                "time": Time.timeStampToFormatByDatetime(int(Time.currentTime())),
                "state": FlowOpType.OVER.get_name()
            })
        elif reissue_log is None:
            # NOTE(review): this also appends an OVER entry for ordinary
            # (non-reissue) updates whose ready nodes are not all messages —
            # confirm this is intended.
            logs.append({
                "time": Time.timeStampToFormatByDatetime(int(Time.currentTime())),
                "state": FlowOpType.OVER.get_name()
            })
        return logs
    def get_toadd_node_and_skip_node(self, toadd_node=None):
        """Return (countersign nodes to add, their output node ids).

        Nodes with node_type == 101 are collected, skipping toadd_node itself.
        """
        ready_nodes = self.get_node_ready()
        toadd_node_list = []
        skip_node_id_list = []
        for node in ready_nodes:
            if toadd_node and node.get('node_id') == toadd_node.get('node_id'):
                continue
            if node.get('node_type', None) == 101:
                toadd_node_list.append(node)
                skip_node_id_list.append(node.get('output_node'))
        return toadd_node_list, skip_node_id_list
    def have_ever_agree(self, uid):
        """Return True if the user has previously AGREEd in any log entry."""
        logs = self.get_logs()
        for log in logs:
            if type(log) == list:
                for log_child in log:
                    if log_child.get('uid', None) == uid and log_child.get('state',
                                                                           None) == FlowOpType.AGREE.get_name():
                        return True
            else:
                if log.get('uid', None) == uid and log.get('state', None) == FlowOpType.AGREE.get_name():
                    return True
        return False
| ZainLiu/YXtest | workflow/src/models/FlowOp.py | FlowOp.py | py | 16,916 | python | en | code | 0 | github-code | 90 |
12551682248 | #!/usr/bin/python
"""Example with a core infra: network switches and controller
Archi considered here:
C1 - controller - connected to both AP - Access Points - and S - Switches
H1 could be seen as a potentiel server or broker located within the network
AP1 ---- ----AP4
/ /
AP2 -- S1 <- S3 -> S2 -- AP5
/ / /
AP3-- H1 ----- AP6
***Requirements***:
Kernel version: 5.8+ (due to the 802.11p support)
sumo 1.5.0 or higher
sumo-gui"""
import os
from mininet.node import Controller, OVSKernelSwitch, RemoteController
from mn_wifi.node import UserAP
from mininet.log import setLogLevel, info
from mn_wifi.cli import CLI
from mn_wifi.net import Mininet_wifi
from mn_wifi.sumo.runner import sumo
from mn_wifi.link import wmediumd, ITSLink
from mn_wifi.wmediumdConnector import interference
import time
def topology():
    """Build and run the VANET: 20 dual-radio cars, 6 APs behind 3 switches,
    one controller and one host, with SUMO driving car mobility.

    Blocks in the Mininet CLI until the user exits, then tears down.
    """
    net = Mininet_wifi(controller=Controller,accessPoint=UserAP, switch=OVSKernelSwitch, link=wmediumd, wmediumd_mode=interference)
    info("*** Creating nodes - Cars + Controller + Switches + Access Points + Host \n")
    # each car has two radios: wlan0 (wpa2, infrastructure) and wlan1 (802.11p)
    for id in range(0, 20):
        net.addCar('car%s' % (id+1), wlans=2, encrypt=['wpa2', ''])
    c1 = net.addController('c1')
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    h1 = net.addHost('h1')
    # common AP settings; channels 1/6/11 avoid overlap between neighbours
    kwargs = {'ssid': 'vanet-ssid', 'mode': 'g', 'passwd': '123456789a',
              'encrypt': 'wpa2', 'failMode': 'standalone', 'datapath': 'user'}
    ap1 = net.addAccessPoint('e1', mac='00:00:00:11:00:01', channel='1',
                             position='2600,3500,0', **kwargs)
    ap2 = net.addAccessPoint('e2', mac='00:00:00:11:00:02', channel='6',
                             position='2800,3500,0', **kwargs)
    ap3 = net.addAccessPoint('e3', mac='00:00:00:11:00:03', channel='11',
                             position='3000,3500,0', **kwargs)
    ap4 = net.addAccessPoint('e4', mac='00:00:00:11:00:04', channel='1',
                             position='2600,3300,0', **kwargs)
    ap5 = net.addAccessPoint('e5', mac='00:00:00:11:00:05', channel='6',
                             position='2800,3300,0', **kwargs)
    ap6 = net.addAccessPoint('e6', mac='00:00:00:11:00:06', channel='11',
                             position='3000,3300,0', **kwargs)
    info("*** Configuring Propagation Model\n")
    net.setPropagationModel(model="logDistance", exp=2.8)
    info("*** Configuring wifi nodes\n")
    net.configureWifiNodes()
    # wired backbone: S1 and S2 uplink to S3, host hangs off S3
    net.addLink(s1, s3)
    net.addLink(s2, s3)
    net.addLink(s3, h1)
    net.addLink(s1, ap1)
    net.addLink(s1, ap2)
    net.addLink(s2, ap3)
    net.addLink(s2, ap4)
    net.addLink(s2, ap5)
    net.addLink(s2, ap6)
    # second radio of each car joins the 802.11p ITS channel (V2V)
    for car in net.cars:
        net.addLink(car, intf=car.wintfs[1].name,
                    cls=ITSLink, band=20, channel=181)
    # SUMO provides car positions over TraCI on port 8813
    net.useExternalProgram(program=sumo, port=8813,
                           config_file='map.sumocfg',
                           extra_params=["--start"])
    info("*** Starting network\n")
    net.build()
    c1.start()
    for enb in net.aps:
        enb.start([c1])
    # per-car addressing: 192.168.0.x on the AP radio, 192.168.1.x on 802.11p
    for id, car in enumerate(net.cars):
        car.setIP('192.168.0.%s/24' % (id+1), intf='%s-wlan0' % car.name)
        car.setIP('192.168.1.%s/24' % (id+1), intf='%s-wlan1' % car.name)
    CLI(net)
    info("*** Stopping network\n")
    net.stop()
if __name__ == '__main__':
    setLogLevel('info')
    # clean up any leftover Mininet state from a previous run
    os.system('mn -c')
    topology()
| lmendiboure/mn-wifi-experiments | core-architecture/vanet-sumo-core.py | vanet-sumo-core.py | py | 3,534 | python | en | code | 0 | github-code | 90 |
4255361788 | # Imports
import time
from machine import Pin
# Start
print("Starting Blink MicroPython program")
# Set up
led = machine.ADC(0) # an analog pin ADC0
# Infinite loop
while True:
# Read the value, value range 0-65535
value = led.read_u16()
# Print to console
print(value)
# Delay
time.sleep(0.1)
| oscgonfer/sensors_dsp_lectures | 01_introduction/examples/PiPico/MicroPython/02_LightSensor.py | 02_LightSensor.py | py | 321 | python | en | code | 6 | github-code | 90 |
6947642901 | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
    def deleteDuplicates(self, head):
        """Remove duplicates from a sorted singly linked list, in place.

        :type head: ListNode | None
        :rtype: ListNode | None

        Because the list is sorted, equal values are adjacent: walk the
        list once and splice out any node whose value equals its
        predecessor's. O(n) time, O(1) extra space. (The original copied
        all values into a Python list, deduplicated with an O(n^2)
        ``not in`` scan, then rebuilt a brand-new list.)
        """
        node = head
        while node is not None and node.next is not None:
            if node.next.val == node.val:
                # drop the duplicate; stay on `node` to catch runs of equals
                node.next = node.next.next
            else:
                node = node.next
        return head
| bittu876/leetcode_ | 0083-remove-duplicates-from-sorted-list/0083-remove-duplicates-from-sorted-list.py | 0083-remove-duplicates-from-sorted-list.py | py | 1,076 | python | en | code | 0 | github-code | 90 |
14595633086 | #!/usr/bin/env python
# coding: utf-8
import pickle
import numpy as np
from flask import Flask
# Load the DictVectorizer and trained random-forest model that were
# pickled together during training.
with open('model_rf.bin', 'rb') as f_in:
    dv, rf = pickle.load(f_in)

app = Flask('wine')

# Hard-coded sample scored by the /predict endpoint.
wine = {
    'alcohol': 20.5,
    'sulphates': 0.74,
    'citric acid': 0.66,
    'volatile acidity': 0.04
}
@app.route('/predict', methods=['GET'])
def predict():
    """Score the module-level sample wine and return its quality as text."""
    features = dv.transform([wine])
    quality = round(rf.predict(features)[0])
    return "The quality of this sample wine is " + str(quality)
if __name__=="__main__":
app.run(debug=True, host='0.0.0.0', port=9696) | cmh-ds4a/ml_zoomcamp | final_project/predict_app.py | predict_app.py | py | 621 | python | en | code | 0 | github-code | 90 |
34017734368 | import sys
# Prefix labels for diagnostic output.
ex = {"c": "CAUTION:", "x": "EXCEPTION:"}
count_of_words = {}
try:
    # `with` guarantees the handle is closed (the original leaked it and
    # shadowed the builtin name `file`).
    with open("words.txt") as words_file:
        data = words_file.read()
    if len(data) < 1: raise Exception("You have provided empty file.")
    for word in data.split():
        count_of_words[word] = count_of_words.get(word, 0) + 1
except OSError as e:
    print(ex.get("x") + str(e))
    sys.exit(0)
except Exception as e:
    print(ex.get("x") + str(e))
    sys.exit(0)

print("Count of each word:")
# Find the word with the highest count (first one wins on ties).
largest_count = -1
which_word = None
for key, value in count_of_words.items():
    print(key, value)
    if value > largest_count:
        largest_count = value
        which_word = key
print("\n\"%s\" word occured %s time in the file." % (which_word,largest_count)) | pranayb-konverge/python-for-everybody | count-words.py | count-words.py | py | 825 | python | en | code | 0 | github-code | 90 |
73884502057 | #!/usr/bin/env python
'''
create a plot for every systematic uncertainty in a datacard ROOT file with up, down and nominal distribution
usage: python DrawDatacardSysts.py datacard.root output
'''
#from __future__ import division
from ROOT import *
from sys import argv as cl
import re
import os
import sys
gStyle.SetOptStat(0)   # no stats box on the plots
gROOT.SetBatch(kTRUE)  # never open a GUI window
# Accumulators filled while scanning the datacard histograms:
region = []    # distinct variable/region names
process = []   # distinct process names
syst = []      # distinct systematic names (Up/Down suffix stripped)
hlist = {}     # histogram name -> TH1 object
n_plots = 0
n_warnings = 0
printint = False  # whether the integral summary was printed for the current syst
debug = False
above = 0  # systematics with both variations >1% above nominal
below = 0  # systematics with both variations >1% below nominal
# Keep canvases/pads alive (ROOT objects are garbage-collected otherwise).
c = []
p1 = []
p2 = []
# create output folder if not present
if not os.path.isdir(cl[2]):
    os.mkdir(cl[2])
# open ROOT file and check if it exists
ifile = TFile(cl[1],"READ")
if ifile.IsOpen() == False:
    print("failed to open file ... abort")
    exit(1)
print('Creating plots from %s' %cl[1])
# create dictionary with histogram name as key and the histogram itself
for key in ifile.GetListOfKeys():
    if "PDF" in key.GetName():
        key.ReadObj().SetName(key.GetName())
    hlist[key.GetName()] = key.ReadObj()
# for key in ifile.GetListOfKeys():
#     hlist[key.ReadObj().GetName()] = key.ReadObj()
# loop over keys and figure out what regions, processes and systematics are present
# (expected naming scheme: h_<process>_<region>_<syst>[Up|Down])
for full_name in list(hlist):
    if full_name == '':
        continue
    # strip the leading "h" prefix
    n = '_'.join(full_name.split("_")[1:])
    tmp = n.split('_')[0]
    if not tmp in process and 'data_obs' not in tmp:
        process.append(tmp)
    tmp = '_'.join(n.split('_')[1:-1])
    if not tmp in region:
        region.append(tmp)
    tmp = n.split("_")[-1]
    tmp = re.sub('Up$', '', tmp)
    tmp = re.sub('Down$', '', tmp)
    if not tmp in syst and 'bin' not in tmp and 'nominal' not in tmp:
        syst.append(tmp)
for r in region:
for p in process:
for s in syst:
sys.stdout.write('.')
sys.stdout.flush()
# figure out histogram names
k_nom = 'h_' + p + '_' + r + ''
k_up = 'h_' + p + '_' + r + '_' + s + 'Up'
k_down = 'h_' + p + '_' + r + '_' + s + 'Down'
h_name = 'h_' + p + '_' + r + '_' + s
h_title = f'process: {p}, variable: {r}, systematic: {s}'
# get histograms
if k_up in list(hlist):
h_nom = hlist[k_nom]
h_up = hlist[k_up]
h_down = hlist[k_down]
else:
continue
# parameters for inconsistency checks
yield_threshold = 2
balance_threshold = 0.1
reldiff_threshold = 0.5
reldiff_upperlimit = 100
ks_threshold = 1e-05
# checks for yields
printint = False
if h_nom.Integral() > yield_threshold or h_up.Integral() > yield_threshold or h_down.Integral() > yield_threshold:
balance = 1 - (((h_up.Integral() + h_down.Integral()) / 2) / h_nom.Integral())
if abs(balance) > balance_threshold and h_nom.Integral() > yield_threshold*100:
#if not any(excl in s for excl in ('QCDscale','toppt','CMS_eff_')):
print('\n# Check\033[91m %s\033[39m -> unbalanced yield difference: %f' %(cl[2]+'/' + h_name + '.pdf', balance))
printint = True
if h_up.Integral() == h_down.Integral():
if h_up.Integral() != h_nom.Integral():
if 'toppt' not in s:
print('\n# Check\033[91m %s\033[39m -> up and down variations are the same, but different compared to nominal:' %(cl[2]+'/' + h_name + '.pdf'))
printint = True
if h_up.Integral() == h_nom.Integral():
if not any(excl in s for excl in ('QCDscale','toppt','CMS_eff_')):
if not any(excl in p for excl in ('qcd')):
print('\n# Check\033[91m %s\033[39m -> up, down and nominal are the same:' %(cl[2]+'/' + h_name + '.pdf'))
printint = True
if h_up.Integral()/h_nom.Integral()<0.99 and h_down.Integral()/h_nom.Integral()<0.99:
if 'toppt' not in s:
below += 1
print('\n# Check\033[91m %s\033[39m -> up, down are at least 1 percent below nominal' %(cl[2]+'/' + h_name + '.pdf'))
printint = True
if h_up.Integral()/h_nom.Integral()>1.01 and h_down.Integral()/h_nom.Integral()>1.01:
if 'toppt' not in s:
above += 1
print('\n# Check\033[91m %s\033[39m -> up, down are at least 1 percent above nominal' %(cl[2]+'/' + h_name + '.pdf'))
printint = True
if printint:
print('# Integral nominal:\t\t %10.4f' %(h_nom.Integral()))
print('# Integral up variation:\t %10.4f' %(h_up.Integral()))
print('# Integral down variation:\t %10.4f' %(h_down.Integral()))
n_warnings = n_warnings + 1
# checks for distributions
nbinsx = h_nom.GetNbinsX()
found_bad_bins = False
ks_up = h_nom.KolmogorovTest(h_up)
ks_down = h_nom.KolmogorovTest(h_down)
bad_bins = []
# check KS values as a starting point
if ks_up < ks_threshold or ks_down < ks_threshold:
for b in range(nbinsx):
# because ROOT
bin = b + 1
# check if bin is significant
if h_nom.GetBinContent(bin) > yield_threshold or h_up.GetBinContent(bin) > yield_threshold or h_down.GetBinContent(bin) > yield_threshold:
reldiff_up = abs(1-h_up.GetBinContent(bin)/h_nom.GetBinContent(bin))
reldiff_down = abs(1-h_down.GetBinContent(bin)/h_nom.GetBinContent(bin))
# check if bin variation is within pre-defined region
if reldiff_upperlimit > reldiff_up > reldiff_threshold or reldiff_upperlimit > reldiff_down > reldiff_threshold:
diff_up = abs(h_up.GetBinContent(bin)-h_nom.GetBinContent(bin))
diff_down = abs(h_down.GetBinContent(bin)-h_nom.GetBinContent(bin))
# check if covered by template uncertainty
if diff_up > (h_nom.GetBinError(bin)+h_up.GetBinError(bin)) or diff_down > (h_nom.GetBinError(bin)+h_down.GetBinError(bin)):
bad_bins.append(bin)
found_bad_bins = True
bad_bins = [str(a) for a in bad_bins]
if found_bad_bins:
if not printint:
sys.stdout.write('\n')
print('# Check\033[31m %s\033[39m -> bad bin(s):\033[93m %s \033[39m' %(cl[2]+'/' + h_name + '.pdf', ', '.join(bad_bins)))
n_warnings = n_warnings + 1
# normalize to max deviation
maxbin = 0
for h in [h_up,h_down,h_nom]:
if h.GetMaximum() > maxbin:
maxbin = h.GetMaximum()
h_up.SetMaximum( 1.2 * maxbin )
# debug
if debug:
if any(fu in s for fu in ('scale_j','res_j','tH_UnclE', 'FSR_2017')):
print('%s ---> d:%f n:%f u:%f ---> diff_d:%f diff_u:%f' %(h_name, h_down.Integral(), h_nom.Integral(), h_up.Integral(), h_nom.Integral()-h_down.Integral(), h_nom.Integral()-h_up.Integral()))
# canvas & style
c.append(TCanvas(h_name,h_name,800,600))
p1.append(TPad("pad1"+str(h_name)+str(n_plots), "pad1"+str(h_name)+str(n_plots), 0, 0.2, 1, 1.0))
p1[-1].SetBottomMargin(0.05)
p1[-1].SetRightMargin(0.05)
p1[-1].SetLeftMargin(0.1)
p1[-1].Draw()
p1[-1].cd()
h_nom.SetMinimum(0.001)
h_up.SetMinimum(0.001)
h_down.SetMinimum(0.001)
h_nom.SetLineColor(1)
h_up.SetLineColor(632)
h_down.SetLineColor(600)
h_up.SetTitle(h_title)
h_up.GetYaxis().SetTitleSize(20)
h_up.GetYaxis().SetTitleFont(43)
h_up.GetYaxis().SetTitleOffset(1.55)
h_up.GetYaxis().SetTitle("events")
h_up.Draw()
h_down.Draw('same')
h_nom.Draw('same')
# legend
leg = TLegend(.71,.7,.95,.875)
leg.SetBorderSize(0)
leg.SetFillColor(0)
leg.SetFillStyle(0)
leg.SetTextFont(42)
leg.SetTextSize(0.02)
leg.SetTextAlign(12)
int_nom = round(h_nom.Integral(),2)
int_up = round(h_up.Integral(),2)
int_down = round(h_down.Integral(),2)
leg.AddEntry(h_nom,"nominal ("+str(int_nom)+")","L")
leg.AddEntry(h_up,"up ("+str(int_up)+", "+str(round(int_up-int_nom,2))+")","L")
leg.AddEntry(h_down,"down ("+str(int_down)+", "+str(round(int_down-int_nom,2))+")","L")
leg.Draw()
c[-1].cd()
p2.append(TPad("pad2"+str(h_name), "pad2"+str(h_name), 0, 0.0, 1, 0.2))
p2[-1].SetTopMargin(0.05)
p2[-1].SetBottomMargin(0.05)
p2[-1].SetRightMargin(0.05)
p2[-1].SetLeftMargin(0.1)
p2[-1].SetGridy()
p2[-1].Draw()
p2[-1].cd()
h_diff_up = h_up.Clone(h_name+'diffup')
h_diff_down = h_down.Clone(h_name+'diffdown')
h_diff_up.Divide(h_nom)
h_diff_down.Divide(h_nom)
# h_diff_up.Sumw2()
# h_diff_down.Sumw2()
h_diff_up.SetStats(0)
h_diff_down.SetStats(0)
# ratio_max = max([h_diff_up.GetMaximum(5), h_diff_down.GetMaximum(5)])
# print(ratio_max, ratio_min)
ratio_max = 0
for i in range(h_diff_up.GetNbinsX()):
if abs(1 - h_diff_up.GetBinContent(i+1)) > ratio_max:
ratio_max = abs(1 - h_diff_up.GetBinContent(i+1))
if abs(1 - h_diff_down.GetBinContent(i+1)) > ratio_max:
ratio_max = abs(1 - h_diff_down.GetBinContent(i+1))
# ratio_max = abs(1 - h_diff_down.GetMaximum())
h_diff_up.SetMaximum(1 + ratio_max*2)
h_diff_up.SetMinimum(1 - ratio_max*2)
# h_diff_up.SetMaximum(ratio_max)
# h_diff_up.SetMinimum(ratio_min)
h_diff_up.GetYaxis().SetNdivisions(505)
h_diff_up.GetYaxis().SetLabelSize(0.125)
h_diff_up.GetYaxis().SetTitle("ratio")
h_diff_up.GetXaxis().SetLabelSize(0)
h_diff_up.SetTitle('')
h_diff_up.Draw('same')
h_diff_down.Draw('same')
# save and suppress write message
gROOT.ProcessLine("gErrorIgnoreLevel = 2000;")
c[-1].SaveAs(cl[2]+'/'+h_name+'.pdf')
gROOT.ProcessLine("gErrorIgnoreLevel = -1;")
n_plots = n_plots + 1
print()
print('DONE! (%i plots created), warnings: %i' %(n_plots, n_warnings))
print()
print('plots with up and down above nom: %i, below nom: %i' %(above,below))
print()
if below > above:
print('more syst below nom than above! below: {}, above: {}'.format(below,above))
elif below < above:
print('more syst above nom than below! below: {}, above: {}'.format(below,above))
print
| KIT-CMS/Z_early_Run3 | SignalFit/DrawDatacardSysts.py | DrawDatacardSysts.py | py | 11,523 | python | en | code | 0 | github-code | 90 |
9681500817 | #!/usr/bin/env python3
import glob
import os
# Collect every Markdown file under PATH (recursively) and generate a
# docsify "_sidebar.md" containing one link per file.
PATH = "."
files = []
for x in os.walk(PATH):
    for y in glob.glob(os.path.join(x[0], '*.md')):
        files.append(y)
        # os.walk paths look like "./sub/file.md"; dropping the first "."
        # makes them site-absolute ("/sub/file.md").
        files[-1] = files[-1].replace(".", "", 1)
sidebar_file = open('_sidebar.md', 'w')
# The sidebar always starts with a link to the site root.
name = "Home"
file = "/"
sidebar_file.write(f"* [{name}]({file})\n")
try:
    # files.remove("/README.md")
    # The sidebar file itself must not be listed as a page.
    files.remove("/_sidebar.md")
    # put any other files you want to remove here
except:
    pass
files.sort()
for file in files:
    if ".md" in file:
        # Link text: strip the leading "/" and the ".md" suffix, then
        # prettify separators for display.
        name = file[1:-3]
        name = name.replace("/", "'s ")
        name = name.replace("_", " ")
        name = name.replace("-", " ")
        # URL-encode spaces so the link target stays valid.
        file = file.replace(" ", "%20")
        sidebar_file.write(f"* [{name}]({file})\n")
sidebar_file.close()
print("============================================================")
print("Sidebar:")
print("============================================================")
try:
    # Best effort: echo the generated sidebar (relies on a POSIX `cat`).
    os.system("cat _sidebar.md")
except:
    print("Unable to `cat _sidebar.md`")
print("============================================================")
print("============================================================") | miautomation/docsify-example | docs/_other/soneji-sidebar.py | soneji-sidebar.py | py | 1,101 | python | en | code | 0 | github-code | 90 |
12731613731 | from HqYhoo import DateFormat
import json
import hqutil as hqu
import hqpdutil as hqpdu
class HqCollect:
    """Builds a per-ticker summary dict (self.collect) from daily quote
    data loaded through the hqutil/hqpdutil helper modules."""

    def __init__(self, tick):
        # 'day0' and 'lldays' entries are populated by pdCollect().
        self.collect = {
            'tick': tick,
            'defaultLastnDays': 10,
            'lldays': {}
        }
        self.pdCollect(tick)
    def pdCollect(self, tick):
        """Load the ticker's dataframe and fill self.collect with the
        latest-day snapshot ('day0') and "Ll"-day streak stats ('lldays').

        NOTE(review): assumes hqpdu.pdStraightLlDays returns the streak's
        (start, end) row numbers — confirm against hqpdutil.
        """
        df = hqu.pdtick(tick)
        hqu.pdAddCols(df)
        # day0: snapshot of the most recent row in the dataframe.
        hqday0 = df.iloc[df.shape[0] - 1]
        self.collect['day0'] = {
            'date': hqday0.name.strftime(DateFormat),
            'close': hqday0.Close,
            'ccchg': hqday0.CCChg,
            'vvchg': hqday0.VVChg,
            'llchg': hqday0.LLChg,
        }
        # lldays: streak stats within the default look-back window.
        dfLlDays = hqpdu.pdLlDays(df, daysAgo = self.collect['defaultLastnDays'])
        straightStart, straightEnd = hqpdu.pdStraightLlDays(dfLlDays)
        # straightDays = straightStart - straightEnd + 1
        # print(df[(df.No >= straightStart) & (df.No <= straightEnd)])
        self.collect['lldays'] = {
            'straightStart': int(straightStart), # convert from int64 to avoid json serialization error
            'straightEnd': int(straightEnd),
            # Fractional loss across the streak: (prev close at start -
            # close at end) / prev close at start.
            'straightLoss': ((dfLlDays[dfLlDays.No == straightStart].PrvClose[0] -
              dfLlDays[dfLlDays.No == straightEnd].Close[0]) /
              dfLlDays[dfLlDays.No == straightStart].PrvClose[0])
        }
if __name__ == "__main__":
    # Smoke test: build the summary for two sample tickers and round-trip
    # it through JSON to show the dict is serializable.
    # ticks = hqu.hqticks('ticks.hq')
    for tick in ['BNGO', 'AVIR']:
        hqCollect = HqCollect(tick)
        # print(hqCollect.collect)
        c = json.dumps(hqCollect.collect)
        print(json.loads(c))
| jbtwitt/jb-app | py/hqcollect.py | hqcollect.py | py | 1,496 | python | en | code | 0 | github-code | 90 |
36398352091 | """
3. Count Numbers
Read a list of integers in range [0…1000] and print them in ascending order along with their number of
occurrences.
"""
from collections import Counter

# Read space-separated integers, then print each distinct value in
# ascending order together with its number of occurrences.
# (Replaces the original fragile index-based adjacency counting, which
# needed a separate branch for the final element.)
numbers = sorted(list(map(int, input().split(" "))))
counts = Counter(numbers)
for value in sorted(counts):
    print(f"{value} -> {counts[value]}")
print(f"{numbers[i]} -> {count}") | stefanv877/PythonFundamentals_SoftUni | ListExercises/CountNumbers.py | CountNumbers.py | py | 467 | python | en | code | 0 | github-code | 90 |
22487662614 | # !SKA#0001 24/10/2022
import datetime
class log():
    """Appends one timestamped line to "log.txt" as a side effect of
    construction; use as log("message")."""

    def __init__(self,message) -> None:
        # NOTE: the file handle is locally named `log`, shadowing this
        # class name inside the method body.
        with open("log.txt", "a") as log:
            now = datetime.datetime.now()
            # Timestamp format: "YYYY-MM-DD HH:MM:SS".
            now_str: str = now.strftime("%Y-%m-%d %H:%M:%S")
            log.write(f"{now_str} - {message}\n")
| woseek/pax | util/Logging.py | Logging.py | py | 289 | python | en | code | 0 | github-code | 90 |
1120069947 | from Object_Detection import logger
from pathlib import Path
from box import ConfigBox
from ensure import ensure_annotations
from box.exceptions import BoxValueError
import os
import yaml
@ensure_annotations
def read_yaml_file(file_path:Path)->ConfigBox:
    """Read a YAML file and wrap the parsed content in a ConfigBox.

    Args:
        file_path (Path): path to the YAML file.

    Raises:
        ValueError: if the YAML file is empty (the BoxValueError raised
            by ConfigBox for empty content is re-mapped here).
        Exception: any other I/O or parsing error is re-raised unchanged.

    Returns:
        ConfigBox: parsed contents with attribute-style access.
    """
    try:
        with open(file_path,"r") as yaml_file:
            data=yaml.safe_load(yaml_file)
            logger.info(f"yaml_file:{file_path} is loaded successfully")
            return ConfigBox(data)
    except BoxValueError:
        raise ValueError("yaml file is empty")
    except Exception as e:
        raise e
def write_yaml_file(file_path, data):
    """Serialize *data* as YAML to *file_path*, overwriting any existing file.

    Any I/O or serialization error propagates to the caller unchanged
    (the previous `except Exception as e: raise e` handler was a no-op
    that only rewrote the traceback).
    """
    with open(file_path, "w") as file:
        yaml.dump(data, file)
def read_file(file_path):
    """Parse the YAML file at *file_path* and return the resulting object.

    Returns None for an empty file (yaml.safe_load semantics); any I/O or
    parsing error propagates to the caller unchanged (the previous
    `except Exception as e: raise e` handler added nothing).
    """
    with open(file_path, "r") as file:
        return yaml.safe_load(file)
17131644033 | #importing several packages
import requests #https requests
from bs4 import BeautifulSoup #web scrapping
import smtplib
#emailBody
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import datetime #system date and time manipulation
now=datetime.datetime.now()
#email content placeholder
content=" "
#Script to extract the news
def extract_news(url):
    """Scrape top headlines from the given Economic Times URL.

    Returns an HTML snippet: a header line followed by one numbered,
    "<br>"-terminated line per matched <ul> tag.
    """
    print("EXTRACTING Economic Times Top Headlines.....")
    cnt=''
    # NOTE(review): '<br' is missing its closing '>' — likely a typo, but
    # fixing it would change the generated markup.
    cnt +=('<b>ET TOP STORIES:</b>\n'+'<br'+'-'*50+'<br>')
    response=requests.get(url)
    content=response.content
    soup=BeautifulSoup(content,'html.parser')
    # Assumes headline lists are <ul> tags with empty href/valign
    # attributes on this site — verify if scraping starts returning
    # nothing. The "Follow Us On" navigation entry is skipped.
    for i,tag in enumerate(soup.find_all('ul',attrs={'href':'','valign':''})):
        cnt+=((str(i+1)+' :: '+tag.text + "\n" + '<br>') if tag.text!='Follow Us On' else'')
    return cnt
cnt=extract_news('https://economictimes.indiatimes.com/')
content+=cnt
content+=('<br>------<br>')
content+=('<br><br>End of Message')
#updating the email address
SERVER='smtp.gmail.com'
PORT=587
FROM='***************'
TO='*****************' #this can contain multiple gmail id contained in a list
PAS='******' #password of the FROM ID
msg=MIMEMultipart()
msg['Subject']='Top News Stories of Todays in ET [AUTOMATED EMAIL]' + ' ' + str(now.day) + '-' + str(now.month) + '-' + str(now.year)
msg['FROM']=FROM
msg['TO']=TO
msg.attach(MIMEText(content,'html'))
print('Initiating Server')
server=smtplib.SMTP(SERVER, PORT)
server.set_debuglevel(1) #To print any error message if the server fails to connect
server.ehlo()
server.starttls()
server.login(FROM, PAS)
server.sendmail(FROM, TO, msg.as_string())
print('Email has been sent...')
server.quit()
| SushantDEV23/Aut0mation_Project | NewsScrapper.py | NewsScrapper.py | py | 1,680 | python | en | code | 0 | github-code | 90 |
17948860739 | N,M,K = map(int, input().split())
# Try every pair (i, j) with 0 <= i <= N and 0 <= j <= M and report
# whether any of them satisfies i*M + j*N - 2*i*j == K.
found = any(
    i * M + j * N - 2 * i * j == K
    for i in range(N + 1)
    for j in range(M + 1)
)
print("Yes" if found else "No")
34537089979 | from typing import List
class Solution:
    """Solutions for LeetCode 496 "Next Greater Element I".

    For every value of nums1 (a subset of nums2, all values distinct),
    find the first value to its right in nums2 that is strictly greater,
    or -1 if none exists.

    The original file defined three methods under the same name, so only
    the last one was reachable; the earlier variants are kept here under
    distinct names for reference.
    """

    def nextGreaterElementBruteForce(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Brute force: O(m*n) time, O(1) extra space beyond the answer."""
        ans = []
        for value in nums1:
            seen = False  # True once we have passed `value` inside nums2
            for index, candidate in enumerate(nums2):
                if candidate == value:
                    seen = True
                if candidate > value and seen:
                    ans.append(candidate)
                    break
                if index == len(nums2) - 1:
                    ans.append(-1)
        return ans

    def nextGreaterElementScan(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Locate each value with index(), then scan right: O(m*n) worst case.

        Bug fix: the original compared against nums2[i] (i indexes nums1)
        instead of the target value nums1[i].
        """
        m, n = len(nums1), len(nums2)
        res = [0] * m
        for i in range(m):
            target = nums1[i]
            k = nums2.index(target) + 1
            while k < n and nums2[k] < target:
                k += 1
            res[i] = nums2[k] if k < n else -1
        return res

    def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Monotonic stack + hash map: O(m + n) time, O(n) space.

        Bug fix: the original never pushed the current number onto the
        stack, so the stack stayed empty and every answer came out -1.
        """
        next_greater = {}
        stack = []  # values strictly increasing from top to bottom
        for num in reversed(nums2):
            # Pop everything <= num: those values can never be the next
            # greater element for anything to the left of num.
            while stack and num >= stack[-1]:
                stack.pop()
            next_greater[num] = stack[-1] if stack else -1
            stack.append(num)
        return [next_greater[num] for num in nums1]
| zhengyaoyaoyao/leetcodePython | leetcode/easy/496. 下一个更大元素 I.py | 496. 下一个更大元素 I.py | py | 1,771 | python | zh | code | 0 | github-code | 90 |
24116758893 | from urllib import request, parse
import json
def get_json_data(url):
    """GET *url* with nowcoder Android-app headers and return the parsed
    JSON body.

    Returns None implicitly when the response status is not 200.
    """
    req = request.Request(url)
    # Headers mimic the official Android client — presumably required by
    # the API; confirm before changing.
    req.add_header('OS', 'Android')
    req.add_header('VERSION', '82')
    req.add_header('CHANNEL', '360')
    req.add_header('User-Agent', 'nowcoder android 2.21.3.3091')
    with request.urlopen(req) as f:
        if f.status == 200:
            result_json = json.loads(f.read())
            return result_json
data_make_paper = parse.urlencode([
('questionCount', '10'),
('tagIds', '570'),
('t', '02436CC60E649584D5C4BBF57709E5CA'),
('fm', 'android_app_2.21.3.3091'),
('source', '1')
])
def write_text(path, text, mode='a'):
    """Write *text* followed by a literal "<br>" tag to *path* (UTF-8).

    The default mode appends; pass mode='w' to overwrite.
    """
    with open(path, mode, encoding="utf-8") as handle:
        handle.write(text + "<br>")
url_get_questions = "http://m.nowcoder.com/test/get-all-question?t=02436CC60E649584D5C4BBF57709E5CA&fm=android_app_2.21.3.3091&tid=10679830"
all_questions = get_json_data(url_get_questions)['data']['allQuestion']
n = 1
for item_question in all_questions:
question = item_question['question']
print(question['content'])
write_text("C://python_test/a.html", str(n)+". "+question['content'], 'a')
answer = question['answer']
answer_option = ''
index = 0
for item_answer in answer:
answer_content = item_answer['content']
print(answer_content)
answer_index_list = ['A', 'B', 'C', 'D']
write_text("C://python_test/a.html", answer_index_list[index]+". "+answer_content, 'a')
answer_type = item_answer['type']
if answer_type == 1:
answer_option += answer_index_list[index]
index += 1
print(answer_option)
write_text("C://python_test/b.html", '', 'a')
write_text("C://python_test/b.html", str(n)+"."+'答案: ' + answer_option, 'a')
write_text("C://python_test/b.html", '', 'a')
n += 1
| MyCloudream/python_test | reptile/Test02.py | Test02.py | py | 1,855 | python | en | code | 2 | github-code | 90 |
14201090783 | from django import template
from django.conf import settings
from payments.forms import CardTokenForm, ChangePlanForm, SubscribeForm
register = template.Library()
@register.inclusion_tag("payments/_change_plan_form.html", takes_context=True)
def change_plan_form(context):
    """Render the change-plan form, pre-selecting the request user's
    current customer plan."""
    context.update({
        "form": ChangePlanForm(initial={
            "plan": context["request"].user.customer.plan
        })
    })
    return context
@register.inclusion_tag("payments/_change_plan_form.html", takes_context=True)
def change_card_form(context):
    """Render the card-change form (reuses the change-plan template)."""
    context.update({
        "form": CardTokenForm()
    })
    return context
@register.inclusion_tag("payments/_subscribe_form.html", takes_context=True)
def subscribe_form(context):
    """Render the subscription form along with the configured plans."""
    context.update({
        "form": SubscribeForm(),
        "plans": settings.PAYMENTS_PLANS,
    })
    return context
| bluekite2000/dsp | payments/templatetags/payments_tags.py | payments_tags.py | py | 857 | python | en | code | 0 | github-code | 90 |
18422445999 | import sys
readline = sys.stdin.readline
MOD = 10 ** 9 + 7
INF = float('INF')
sys.setrecursionlimit(10 ** 5)
def main():
    """Read five integers; all but one are rounded up to the next
    multiple of 10, and the exempt one is chosen so the total is
    minimal.  Print that minimum total."""
    values = [int(input()) for _ in range(5)]
    rounded_up = [-(-v // 10) * 10 for v in values]
    total = sum(rounded_up)
    print(min(total - rounded_up[i] + values[i] for i in range(5)))


if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03076/s875486504.py | s875486504.py | py | 343 | python | en | code | 0 | github-code | 90 |
5785944885 | import logging
import smtplib
import sys
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from django.conf import settings
from cms.models import Vulnerability
logger = logging.getLogger('log')
def attention(receiver, maintainer, cve_info):
"""send attention message to person responsible"""
smtp_host = settings.SMTP_HOST
smtp_port = settings.SMTP_PORT
smtp_username = settings.SMTP_USERNAME
smtp_password = settings.SMTP_PASSWORD
sender = settings.SMTP_SENDER
if not all([smtp_host, smtp_port, smtp_username, smtp_password, sender]):
logger.error('Lack of SMTP parameters, please CHECK!')
sys.exit(1)
cve_table = ''
table_start_tag = '<table border=1>'
th = """
<tr>
<th>Package</th>
<th>Version</th>
<th>Fixed Version</th>
<th>Project</th>
<th>Branch</th>
<th>CVE</th>
<th>Severity</th>
<th>Source</th>
</tr>
"""
table_end_tag = '</table>'
cve_table += table_start_tag + th
for cve in cve_info:
package = cve.package
version = cve.version
fixed_version = cve.fixed_version
project = cve.project_url
branch = cve.project_branch
number = cve.cve_num
severity = cve.severity
source = cve.source
cve_detail = cve.cve_detail
if not cve_detail:
cve_detail = '/'.join([settings.VUL_DETAIL_PREFIX, number])
a_tag = '<a href="{0}/{1}">{1}</a>'.format(cve_detail, number)
cve_item = """
<tr>
<td>{}</td>
<td>{}</td>
<td>{}</td>
<td>{}</td>
<td>{}</td>
<td>{}</td>
<td>{}</td>
<td>{}</td>
</tr>
""".format(package, version, fixed_version, project, branch, a_tag, severity, source)
cve_table += cve_item
cve_table += table_end_tag
msg = MIMEMultipart()
with open(settings.ATTENTION_EMAIL_TEMPLATE, 'r') as f:
content = f.read()
text_body = content.replace('{{receiver}}', maintainer).replace('{{cve_table}}', cve_table)
text = MIMEText(text_body, 'html', 'utf-8')
msg.attach(text)
msg['From'] = sender
msg['To'] = receiver
msg['Subject'] = settings.ATTENTION_EMAIL_SUBJECT
try:
if int(smtp_port) == 465:
server = smtplib.SMTP_SSL(smtp_host, smtp_port)
server.ehlo()
server.login(smtp_username, smtp_password)
else:
server = smtplib.SMTP(smtp_host, smtp_port)
server.ehlo()
server.starttls()
server.login(smtp_username, smtp_password)
server.sendmail(sender, [receiver], msg.as_string())
logger.info('Send attention to {}, email: {}.'.format(maintainer, receiver))
except smtplib.SMTPException as e:
logger.error(e)
def receivers_statistics():
    """Group open vulnerabilities (status=1) by responsible email and
    send one attention mail per receiver."""
    receivers = list(set(Vulnerability.objects.filter(status=1).values_list('email', flat=True)))
    for receiver in receivers:
        # NOTE(review): maintainer is taken from the first row matching
        # the email regardless of status — confirm this picks the
        # intended owner.
        maintainer = Vulnerability.objects.filter(email=receiver).values()[0].get('maintainer')
        cve_info = list(Vulnerability.objects.filter(email=receiver, status=1))
        attention(receiver, maintainer, cve_info)
| Open-Infra-Ops/icms | cms/utils/send_attention.py | send_attention.py | py | 3,324 | python | en | code | 0 | github-code | 90 |
6890499235 | import os
import sys
import ROOT
import argparse
import copy
import time
from datetime import datetime
sys.path.append('../RDFprocessor/framework')
sys.path.append('../Common/data')
from RDFtree import RDFtree
from samples_2016_ul import samplespreVFP
from genSumWClipped import sumwClippedDict
sys.path.append('python/')
from getLumiWeight import getLumiWeight
from binning import ptBins, etaBins, mTBins, isoBins, chargeBins, metBins
from externals import filePt, fileY, fileSFul
ROOT.gSystem.Load('bin/libAnalysisOnData.so')
ROOT.gROOT.ProcessLine("gErrorIgnoreLevel = 2001;")
def RDFprocess(fvec, outputDir, sample, xsec, systType, sumwClipped, pretendJob):
    """Build the RDFtree analysis graph for one sample.

    Applies the common single-muon event selection, then books the
    (eta, pt, charge, mT, iso) histogram with weights appropriate to the
    sample class:
      systType == 0 -> data (histogram 'data_obs', no weights)
      systType == 1 -> MC without PDF variations (lumi weight from file)
      systType >= 2 -> MC using the clipped gen sum-of-weights
    """
    print("processing ", sample)
    p = RDFtree(outputDir = outputDir, inputFile = fvec, outputFile="{}.root".format(sample), pretend=pretendJob)
    # Common event selection: trigger, candidate type, MET quality,
    # electron veto, and one trigger-matched muon.
    p.EventFilter(nodeToStart='input', nodeToEnd='defs', evfilter="HLT_SingleMu24", filtername="{:20s}".format("Pass HLT"))
    p.EventFilter(nodeToStart='defs', nodeToEnd='defs', evfilter="(Vtype==0 || Vtype==1)", filtername="{:20s}".format("Vtype selection"))
    p.EventFilter(nodeToStart='defs', nodeToEnd='defs', evfilter="MET_filters==1", filtername="{:20s}".format("Pass MET filter"))
    p.EventFilter(nodeToStart='defs', nodeToEnd='defs', evfilter="nVetoElectrons==0", filtername="{:20s}".format("Electron veto"))
    p.EventFilter(nodeToStart='defs', nodeToEnd='defs', evfilter="Idx_mu1>-1", filtername="{:20s}".format("Atleast 1 mu"))
    p.EventFilter(nodeToStart='defs', nodeToEnd='defs', evfilter="Muon_hasTriggerMatch[Idx_mu1]", filtername="{:20s}".format("mu trigger matched"))
    #not for customizeforUL(isMC=true, isWorZ=false)
    if systType == 0: #this is data
        p.branch(nodeToStart='defs', nodeToEnd='defs', modules=[ROOT.customizeforUL(False, False), ROOT.recoDefinitions(False, False)])
        p.Histogram(columns = ["Mu1_eta","Mu1_pt","Mu1_charge","MT","Mu1_relIso"], types = ['float']*5,node='defs',histoname=ROOT.string('data_obs'),bins = [etaBins,ptBins,chargeBins,mTBins,isoBins], variations = [])
        return p
    elif systType < 2: #this is MC with no PDF variations
        #falling back to old lumi weight computation
        p.branch(nodeToStart = 'defs', nodeToEnd = 'defs', modules = [ROOT.customizeforUL(True,False), ROOT.recoDefinitions(True, False), getLumiWeight(xsec=xsec, inputFile = fvec, genEvsbranch = "genEventSumw", targetLumi = 19.3), ROOT.SF_ul(fileSFul)])
        p.Histogram(columns = ["Mu1_eta","Mu1_pt","Mu1_charge","MT","Mu1_relIso", "lumiweight","puWeight", "PrefireWeight","SF"], types = ['float']*9,node='defs',histoname=ROOT.string('ewk'),bins = [etaBins,ptBins,chargeBins,mTBins,isoBins], variations = [])
    else:
        # NOTE(review): both branches below are currently identical; the
        # 'DY' split presumably anticipates Z-kinematics reweighting.
        if 'DY' in sample: #reweight full Z kinematics
            p.branch(nodeToStart = 'defs', nodeToEnd = 'defs', modules = [ROOT.customizeforUL(True, True), ROOT.recoDefinitions(True, False),ROOT.lumiWeight(xsec=xsec, sumwclipped=sumwClipped, targetLumi = 19.3), ROOT.SF_ul(fileSFul)])
            p.Histogram(columns = ["Mu1_eta","Mu1_pt","Mu1_charge","MT","Mu1_relIso", "lumiweight","puWeight", "PrefireWeight","SF"], types = ['float']*9,node='defs',histoname=ROOT.string('ewk'),bins = [etaBins,ptBins,chargeBins,mTBins,isoBins], variations = [])
        else:
            p.branch(nodeToStart = 'defs', nodeToEnd = 'defs', modules = [ROOT.customizeforUL(True, True), ROOT.recoDefinitions(True, False),ROOT.lumiWeight(xsec=xsec, sumwclipped=sumwClipped, targetLumi = 19.3), ROOT.SF_ul(fileSFul)])
            p.Histogram(columns = ["Mu1_eta","Mu1_pt","Mu1_charge","MT","Mu1_relIso", "lumiweight","puWeight", "PrefireWeight","SF"], types = ['float']*9,node='defs',histoname=ROOT.string('ewk'),bins = [etaBins,ptBins,chargeBins,mTBins,isoBins], variations = [])
    return p
def main():
parser = argparse.ArgumentParser("")
parser.add_argument('-p', '--pretend',type=bool, default=False, help="run over a small number of event")
parser.add_argument('-r', '--report',type=bool, default=False, help="Prints the cut flow report for all named filters")
parser.add_argument('-o', '--outputDir',type=str, default='outputUL', help="output dir name")
parser.add_argument('-i', '--inputDir',type=str, default='/scratchnvme/wmass/NanoAOD2016-UL/', help="input dir name")
parser.add_argument('-e', '--era',type=str, default='preVFP', help="either (preVFP|postVFP)")
args = parser.parse_args()
pretendJob = args.pretend
now = datetime.now()
dt_string = now.strftime("_%d_%m_%Y_%H_%M_%S")
outputDir = args.outputDir + dt_string
inDir = args.inputDir
era=args.era
##Add era to input dir
inDir+=era
if pretendJob:
print("Running a test job over a few events")
else:
print("Running on full dataset")
ROOT.ROOT.EnableImplicitMT(128)
RDFtrees = {}
samples = samplespreVFP
for sample in samples:
#print('analysing sample: %s'%sample)
#if 'TTTo' in sample: continue
direc = samples[sample]['dir']
xsec = samples[sample]['xsec']
fvec=ROOT.vector('string')()
for d in direc:
targetDir='{}/{}/merged/'.format(inDir, d)
for f in os.listdir(targetDir):#check the directory
if not f.endswith('.root'): continue
inputFile=targetDir+f
#print(f)
fvec.push_back(inputFile)
if fvec.empty():
print("No files found for directory:", samples[sample], " SKIPPING processing")
continue
print(fvec)
systType = samples[sample]['nsyst']
sumwClipped=1.
if systType == 2:
sumwClipped=sumwClippedDict[sample]
print(sample, sumwClipped)
RDFtrees[sample] = RDFprocess(fvec, outputDir, sample, xsec, systType, sumwClipped, pretendJob)
#sys.exit(0)
#now trigger all the event loops at the same time:
objList = []
cutFlowreportDict = {}
for sample in samples:
RDFtreeDict = RDFtrees[sample].getObjects()
if args.report: cutFlowreportDict[sample] = RDFtrees[sample].getCutFlowReport()
for node in RDFtreeDict:
objList.extend(RDFtreeDict[node])
#magic happens here
start = time.time()
ROOT.RDF.RunGraphs(objList)
#now write the histograms:
for sample in samples:
print(sample)
#RDFtrees[sample].getOutput()
RDFtrees[sample].gethdf5Output()
if args.report: cutFlowreportDict[sample].Print()
print('all samples processed in {} s'.format(time.time()-start))
if __name__ == "__main__":
main()
| emanca/wproperties-analysis | templateMaker/runBkg_ul.py | runBkg_ul.py | py | 6,653 | python | en | code | 0 | github-code | 90 |
5772137266 | class Persona:
def __init__(self, nombre, edad, dni):
self.nombre = nombre
self.edad = edad
self.dni = dni
def constructor(self, nombre, edad, dni):
self.nombre = nombre
self.edad = edad
self.dni = dni
# getters for each attribute - Regla primero los getters siempre
@property # decorator to make the method a property of this class
def nombre(self):
return self._nombre
@property
def edad(self):
return self._edad
@property
def dni(self):
return self._dni
# setters for each attribute
@nombre.setter
def nombre(self, value):
# print('Setting Nombre')
try:
str(value)
self._nombre = value
except ValueError:
raise TypeError('Ingrese un nombre valido')
@edad.setter
def edad(self, value):
# print('Cambiando Edad')
try:
int(value)
self._edad = value
except ValueError:
raise TypeError('Ingresa edad valida')
@dni.setter
def dni(self, value):
# print('Cambiando DNI')
try:
int(value)
self._dni = value
except ValueError:
raise TypeError('Ingresa un DNI valido')
def mostrar(self):
print(f"Nombre: {self._nombre}, Edad: {self._edad}, DNI: {self._dni}")
def es_mayor_de_edad(self):
return self.edad > 18
print("\033[H\033[J") #limpiar consola
alumno = Persona('jorge', 23, 564)
alumno.mostrar()
print('\n')
print('\n')
print(alumno.es_mayor_de_edad())
print('\n')
print('\n')
print(alumno.nombre)
| DanielUTN/django2023_ejercicios_integradores | ejercicio6.py | ejercicio6.py | py | 1,651 | python | es | code | 0 | github-code | 90 |
36839490496 | def main():
fraction = get_input()
percentage = into_percentage(fraction)
print_output(percentage)
def get_input():
    """Prompt until the user enters a valid fraction and return [X, Y].

    A valid fraction is "X/Y" where X and Y are integers (any number of
    digits), 0 <= X <= Y and Y != 0; anything else re-prompts.

    Fixes the original single-character comparison
    (fraction[2] >= fraction[0]) which only worked for one-digit values,
    rejected inputs like "9/10", and accepted "0/0" (leading to a later
    ZeroDivisionError).
    """
    while True:
        try:
            fraction = input("Fraction: ")
            numerator, denominator = (int(part) for part in fraction.split("/"))
        except ValueError:
            # Not exactly one "/" separator, or non-integer parts.
            continue
        if 0 <= numerator <= denominator and denominator != 0:
            return [numerator, denominator]
def into_percentage(fraction):
    """Return the [numerator, denominator] pair expressed as a percentage."""
    return 100 * (fraction[0] / fraction[1])
def print_output(percentage):
    """Print the gauge reading: "E" at or below 1%, "F" at or above 99%,
    otherwise the rounded percentage with a % sign."""
    if percentage <= 1:
        print("E")
        return
    if percentage >= 99:
        print("F")
        return
    print(f"{round(percentage)}%")
main()
| Cozkou/cs50p-exercises | fuel.py | fuel.py | py | 747 | python | en | code | 0 | github-code | 90 |
41227103839 | def curling(red_stones, yellow_stones, r_stone, r_house):
    # Squared distance of every stone from the house centre (0, 0);
    # squared values avoid sqrt while preserving the ordering.
    red_squared_distances = [x**2 + y**2 for x, y in red_stones]
    yellow_squared_distances = [x**2 + y**2 for x, y in yellow_stones]
    # A stone counts when it touches the house (d <= (r_house + r_stone)^2)
    # and lies no farther out than the opponent's closest stone; the
    # opponent check is short-circuited away when they have no stones.
    # Ties count for both sides because the comparison is <=.
    return sum(
        d <= (r_house + r_stone) ** 2
        and ((not yellow_stones or d <= min(yellow_squared_distances)))
        for d in red_squared_distances
    ), sum(
        d <= (r_house + r_stone) ** 2
        and ((not red_stones or d <= min(red_squared_distances)))
        for d in yellow_squared_distances
    )
if __name__ == "__main__":
n_cases = int(input())
for case in range(1, n_cases + 1):
r_stone, r_house = map(int, input().split())
red_stones = [
tuple(map(int, input().split())) for _ in range(int(input()))
]
yellow_stones = [
tuple(map(int, input().split())) for _ in range(int(input()))
]
print(
f"Case #{case}:",
*curling(red_stones, yellow_stones, r_stone, r_house),
)
| alexbouayad/google-kickstart | 2022/round-g/curling/solution.py | solution.py | py | 1,012 | python | en | code | 0 | github-code | 90 |
20615837 | # -*- coding: utf-8 -*-
import discord
import asyncio
import TOKEN
import importer
import datetime
from send import Command
from commands.background import *
from discord.ext import commands
import sys
import os
import time
prefix = TOKEN.prefix
loop = asyncio.get_event_loop()
try:
os.system('cls')
except:
os.system('clear')
print('\n\n봇을 시작합니다.')
class Bot(discord.Client):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.load_command = []
self.get_all_commands()
self.loop = asyncio.get_event_loop()
self.bg_task = self.loop.create_task(change_activity(self))
print(self.load_command , "\n\n")
def get_all_commands(self):
for i in Command.commands:
self.load_command.append(i(self))
async def on_ready(self):
print(self.user.name + "으로 봇이 로그인함.")
print("=======")
print("작동 시작!")
print("\n\n")
async def on_message(self, message):
nowtime1 = datetime.datetime.now()
try:
servername = message.guild.name
serverid = message.guild.id
channelid = message.channel.id
channelname = message.channel.name
except:
channelid = "DM이라 기록되지 않았습니다."
channelname = "DM이라 기록되지 않았습니다."
servername = "DM이라 기록되지 않았습니다."
serverid = "DM이라 기록되지 않았습니다."
if not message.attachments == []: # 보낸 메시지에 파일이 아무것도 없으면,
attachedfile = message.attachments[0] # 첨부파일 정보를 받아온다. ( 정보는 다음과 같이 표시됨. [{"width": 1366, "url": "https://cdn.discordapp.com/attachments/397039361792802816/397726279668989963/unknown.png", "size": 222618, "proxy_url": "https://media.discordapp.net/attachments/397039361792802816/397726279668989963/unknown.png", "id": "397726279668989963", "height": 768, "filename": "unknown.png"}] )
filelink = attachedfile.url # 저 데이터중 파일 url을 분석
attach = filelink
else:
attach = "None"
try:
membersize = 0
for i in message.guild.members:
membersize = membersize + 1
nobotsize = 0
for i in message.guild.members:
if i.bot == False:
nobotsize = nobotsize + 1
except:
membersize = 'DM이라 기록되지 않았습니다.'
nobotsize = 'DM이라 기록되지 않았습니다.'
print("%s + Message Helper\n User: %s [ %s ]\n Server: %s [ %s ]\n ServerMember: %s [ notbot: %s ]\n Channel: %s [ %s ]\n Message: %s\n File: %s\n Embed: %s\n" %(nowtime1,message.author, message.author.id, servername, serverid, membersize, nobotsize, channelname, channelid, message.content,attach,message.embeds))
if message.author.bot:
return
for i in self.load_command:
self.loop.create_task(i._send(message))
client = Bot()
client.run(TOKEN.bot_token)
| DATAKOREA/DATAKOREA | main.py | main.py | py | 3,206 | python | en | code | 0 | github-code | 90 |
36343018867 | import ipaddress
import json
from requests.api import delete
from termcolor import colored, cprint
from cloudflare import createDNSRecord, deleteDNSRecord, getZoneRecords, isValidDNSRecord, getZoneId
from tailscale import getTailscaleDevice, isTailscaleIP
from config import getConfig
import sys
def main():
config = getConfig()
cf_ZoneId = getZoneId(config['cf-key'], config['cf-domain'])
cf_recordes = getZoneRecords(config['cf-key'], config['cf-domain'], zoneId=cf_ZoneId)
ts_records = getTailscaleDevice(config['ts-key'], config['ts-tailnet'])
records_typemap = {
4: 'A',
6: 'AAAA'
}
cprint("Adding new devices:", "blue")
# Check if current hosts already have records:
for ts_rec in ts_records:
#if ts_rec['hostname'] in cf_recordes['name']:
if config.get("cf-sub"):
sub = "." + config.get("cf-sub").lower()
else:
sub = ""
tsfqdn = ts_rec['hostname'].lower()+sub+"."+config['cf-domain']
if any(c['name'] == tsfqdn and c['content'] == ts_rec['address'] for c in cf_recordes):
print("[{state}]: {host} -> {ip}".format(host=tsfqdn, ip=ts_rec['address'], state=colored("FOUND", "green")))
else:
ip = ipaddress.ip_address(ts_rec['address'])
if isValidDNSRecord(ts_rec['hostname']):
print("[{state}]: {host} -> {ip}".format(host=tsfqdn, ip=ts_rec['address'], state=colored("ADDING", "yellow")))
createDNSRecord(config['cf-key'], config['cf-domain'], ts_rec['hostname'], records_typemap[ip.version], ts_rec['address'],subdomain=config["cf-sub"], zoneId=cf_ZoneId)
else:
print("[{state}]: {host}.{tld} -> {ip} -> (Hostname: \"{host}.{tld}\" is not valid)".format(host=ts_rec['hostname'], ip=ts_rec['address'], state=colored("SKIPING", "red"), tld=config['cf-domain']))
cprint("Cleaning up old records:", "blue")
# Check for old records:
cf_recordes = getZoneRecords(config['cf-key'], config['cf-domain'])
# set tailscale hostnames to lower cause dns is
for i in range(len(ts_records)):
ts_records[i]['hostname'] = ts_records[i]['hostname'].lower()
for cf_rec in cf_recordes:
if config.get('cf-sub'):
sub = '.' + config.get('cf-sub').lower()
else: sub = ""
cf_name = cf_rec['name'].rsplit(sub + '.' + config['cf-domain'], 1)[0]
# Ignore any records not matching our prefix/postfix
if not cf_name.startswith(config.get('prefix', '')):
continue
if not cf_name.endswith(config.get('postfix', '')):
continue
# Ignore any records not matching our subdomain
if not cf_rec['name'].endswith(sub.lower() + '.' + config['cf-domain']):
continue
if any(a['hostname'] == cf_name and a['address'] == cf_rec['content'] for a in ts_records):
print("[{state}]: {host} -> {ip}".format(host=cf_rec['name'], ip=cf_rec['content'], state=colored("IN USE", "green")))
else:
if (not isTailscaleIP(cf_rec['content'])):
print("[{state}]: {host} -> {ip} (IP does not belong to a tailscale host. please remove manualy)".format(host=cf_rec['name'], ip=cf_rec['content'], state=colored('SKIP DELETE', "red")))
continue
print("[{state}]: {host} -> {ip}".format(host=cf_rec['name'], ip=cf_rec['content'], state=colored('DELETING', "yellow")))
deleteDNSRecord(config['cf-key'], config['cf-domain'], cf_rec['id'], zoneId=cf_ZoneId)
if __name__ == '__main__':
main() | marc1307/tailscale-cloudflare-dnssync | app/app.py | app.py | py | 3,627 | python | en | code | 113 | github-code | 90 |
34108906464 | import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import utils
from torchvision.utils import save_image
from tqdm import tqdm
import config
from dataset import (CompressedImageDataset, ImageDataset,
cifar10, imagenet_mini)
from model import Classifier
from compress import HyperpriorWrapper
from utils import load_checkpoint, norm, save_checkpoint, unnorm
def train(epoch, model, dataloader, opt, criterion, labels, co=None, **kwargs):
progress = tqdm(
dataloader, leave=True, desc=f"Epoch [{epoch+1}/{config.NUM_EPOCHS}]"
)
log_step = len(dataloader) // 10
losses = []
acc = []
model.train()
for i, (x, y) in enumerate(progress):
x = x.to(config.DEVICE)
y = y.to(config.DEVICE)
# if co:
# x = co.compress(x).detach()
# else:
# x = norm(x)
pred = model(x)
loss = criterion(pred, y)
opt.zero_grad()
loss.backward()
opt.step()
losses.append(loss.item())
acc.append((pred.argmax(dim=-1) == y).cpu().numpy().mean())
progress.set_description(
(
f"train | epoch [{epoch+1}/{config.NUM_EPOCHS}] | "
f"loss = {np.mean(losses):.3f} | "
f"acc = {np.mean(acc):.3f} | "
)
)
if i % log_step == 0:
if co:
x = co.decompress(x).detach()
else:
x = unnorm(x)
x = x[0]
y = pred.argmax(dim=-1)[0]
label = labels[y]
save_image(x, f"results/{epoch}_{i//log_step}_{label}.png")
return losses
def val(epoch, model, dataloader, criterion, labels, co=None, **kwargs):
progress = tqdm(
dataloader, leave=True, desc=f"Epoch [{epoch+1}/{config.NUM_EPOCHS}]"
)
log_step = len(dataloader) // 10
losses = []
acc = []
model.eval()
for i, (x, y) in enumerate(progress):
x = x.to(config.DEVICE)
y = y.to(config.DEVICE)
# if co:
# x = co.compress(x).detach()
# else:
# x = norm(x)
pred = model(x)
loss = criterion(pred, y)
losses.append(loss.item())
acc.append((pred.argmax(dim=-1) == y).cpu().numpy().mean())
progress.set_description(
(
f"val | epoch [{epoch+1}/{config.NUM_EPOCHS}] | "
f"loss = {np.mean(losses):.3f} | "
f"acc = {np.mean(acc):.3f} | "
)
)
if i == 0:
if co:
x = co.decompress(x).detach()
else:
x = unnorm(x)
x = x[0]
y = pred.argmax(dim=-1)[0]
label = labels[y]
save_image(x, f"results/{epoch}_val_{label}.png")
return losses
def main():
print(config.DEVICE)
# compressor = None
compressor = (
HyperpriorWrapper(config.COMPRESS_QUALITY, pretrained=True)
.eval()
.to(config.DEVICE)
)
# CH = 3 if compressor is None else 192
# model = Classifier(in_features=192*16*16, n_classes=10, hidden_layers=0, n_hidden=1024)
model = Classifier(
in_features=192 * 16 * 16, n_classes=10, hidden_layers=1, n_hidden=1024
)
opt = optim.Adam(
list(model.parameters()),
lr=config.LEARNING_RATE,
betas=(0.5, 0.999),
)
criterion = nn.CrossEntropyLoss()
if config.LOAD_MODEL:
load_checkpoint(config.CHECKPOINT_CLASS, model, opt, config.LEARNING_RATE)
dataset_train = CompressedImageDataset(
root=cifar10.train_root,
compressor=compressor,
transform=config.transform_train,
)
dataset_val = CompressedImageDataset(
root=cifar10.val_root,
compressor=compressor,
transform=config.transform_val,
)
dataloader_train = DataLoader(
dataset_train,
batch_size=config.BATCH_SIZE,
shuffle=True,
num_workers=config.NUM_WORKERS,
pin_memory=True,
)
dataloader_val = DataLoader(
dataset_val,
batch_size=config.BATCH_SIZE,
shuffle=True,
num_workers=config.NUM_WORKERS,
pin_memory=True,
)
for epoch in range(config.NUM_EPOCHS):
train(
epoch,
model,
dataloader_train,
opt,
criterion,
dataset_train.labels,
compressor,
)
val(epoch, model, dataloader_val, criterion, dataset_val.labels, compressor)
if config.SAVE_MODEL:
save_checkpoint(model, opt, filename=config.CHECKPOINT_CLASS)
if __name__ == "__main__":
main()
| vvh413/compression | classification/train.py | train.py | py | 4,820 | python | en | code | 0 | github-code | 90 |
3760027871 | from django.dispatch import receiver
from django.db.models.signals import pre_delete, post_save
from bookings.models import Invitation
from .models import Notification, Booking
@receiver(post_save, sender=Invitation)
def send_notification_on_invite_sent(sender, instance, **kwargs):
"""
Sends a user a notification when another user has
sent an invitation.
"""
invitation = instance
if not invitation.is_accepted:
notification_sender = invitation.invite_sender
notification_receiver = invitation.invite_receiver
Notification.objects.create(
notification_sender=notification_sender,
notification_receiver=notification_receiver,
notification_type=1,
related_invitation=invitation
)
@receiver(post_save, sender=Invitation)
def send_notification_on_invite_accepted(sender, instance,
created, **kwargs):
"""
Sends a user a notification once a sent
invitation has been accepted.
"""
if not created:
invitation = instance
notification_sender = invitation.invite_receiver
notification_receiver = invitation.invite_sender
if invitation.is_accepted:
Notification.objects.create(
notification_sender=notification_sender,
notification_receiver=notification_receiver,
notification_type=2,
related_invitation=invitation
)
@receiver(post_save, sender=Booking)
def send_notification_on_booking_details_sent(sender, instance,
created, **kwargs):
"""
Sends a notification once bookings details
for an accepted invitation have been sent.
"""
if not created:
booking = instance
if booking.booking_details_sent:
if booking.related_invitation:
notification_sender = (
booking.related_invitation.invite_sender)
notification_receiver = (
booking.related_invitation.invite_receiver)
else:
notification_sender = (
booking.related_job.job_poster)
notification_receiver = (
booking.related_job.confirmed_member)
Notification.objects.create(
notification_sender=notification_sender,
notification_receiver=notification_receiver,
notification_type=4,
related_booking=booking
)
else:
return
| OliverCadman/dept_ci_ms4 | social/signals.py | signals.py | py | 2,612 | python | en | code | 1 | github-code | 90 |
42196587987 | dic = {
'emp1': {'name': 'Jhon', 'salary': 7500},
'emp2': {'name': 'Emma', 'salary': 8000},
'emp3': {'name': 'Brad', 'salary': 6500}
}
for k in dic:
print(k)
for k2 in dic[k]:
print(k2, ":", dic[k][k2])
dic['emp3']['salary'] = 8500
for k in dic:
print(k)
for k2 in dic[k]:
print(k2, ":", dic[k][k2]) | alexeiakimenko/portfolio | HomeWork/hw 12.02/salary.py | salary.py | py | 343 | python | en | code | 0 | github-code | 90 |
30737620993 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 12:40:23 2019
last update 20 oct 2019
@author: Hektor & Wilton
to change the number of runs go to function UBVRI_tools line 708
To determine reliable errors set bootstrap = True at line 79
the input file must have at least the following columns:
U SU B SB V SV R SR I SI P
(use the file synth-cluster-UBVRI.txt as example)
The parameter of the synthetic file are:
age = 8.3
FeH = 0.2
dist = 2.500
Av = 1.5
bin_frac = 0.5
The code uses the parallaxes of the members as prior to estimate the distance.
To chage the priors go to line 168. Note that very large sigma values imply that prior is flat.
The code print the CMD and color-color plots with memberships in the output directory before making isochrone fits.
magcut = stars with Vmag greater than this value are not used in the fit
probcut = stars with membership probality than this value are not used in the fit
"""
import numpy as np
from matplotlib import pyplot as plt
from UBVRI_tools_V2 import *
import os
import time
import warnings
import glob
import sys
warnings.filterwarnings("ignore")
plt.close('all')
########################################################################################
# directory where the codes are
dir = os.getenv("HOME")+'/OCFit/UBVRI/'
dirout = dir+'Results/'
# create directory for results
try:
os.stat(dirout)
except:
os.mkdir(dirout)
data_dir = dir+'data/UBVRI-MOITINHO2001/teste/'
# get data for clusters to be fit
files = [f for f in glob.glob(data_dir + "*", recursive=True)]
names = [f[len(data_dir):-4] for f in files]
for i,file in enumerate(files):
print('Using file: ',file, ' for cluster: ',names[i])
obs_file = file
#name of the cluster being fit
name = names[i]
# magcut = stars with mags greater than this value will not be used
magcut = 20.
probcut = 0.5
########################################################################################
# create directory for results of the cluster
try:
os.stat(dirout+name)
except:
os.mkdir(dirout+name)
# file to save all output
verbosefile = open(dirout+name+'/'+'verbose-output.dat', 'w')
logfilename = 'results_'+name+'.txt'
logfile = open(dirout+name+'/'+logfilename, "w")
logfile.write(time.strftime("%c")+'\n')
logfile.write(' \n')
#verbosefile.write('Starting isochrone fitting...\n')
guess = False
obs = np.genfromtxt(obs_file,names=True)
##remove nans
#cond1 = np.isfinite(obs['U'])
#cond2 = np.isfinite(obs['B'])
#cond3 = np.isfinite(obs['V'])
#cond4 = np.isfinite(obs['R'])
#cond5 = np.isfinite(obs['I'])
#
#ind = np.where(cond1&cond2&cond3&cond4&cond5)
#
#obs = obs[ind]
ind_m = obs['P'] > probcut
Plx = obs['Plx']
erPlx = obs['e_Plx']
###################################################################
#plot CMD of members
refmag = 'Vmag'
Umag = obs['U']
Bmag = obs['B']
Vmag = obs['V']
Rmag = obs['R']
Imag = obs['I']
sUmag = obs['SU']
sBmag = obs['SB']
sVmag = obs['SV']
sRmag = obs['SR']
sImag = obs['SI']
members = obs['P']
ind_m = members > probcut
###################################################################
#print the nuumber of members
nstars51 = np.where(members > 0.51)
nstars70 = np.where(members > 0.70)
nstars80 = np.where(members > 0.80)
nstars90 = np.where(members > 0.90)
print ('stars with P>51% = ', len(Vmag[nstars51]))
print ('stars with P>70% = ', len(Vmag[nstars70]))
print ('stars with P>80% = ', len(Vmag[nstars80]))
print ('stars with P>90% = ', len(Vmag[nstars90]))
###################################################################
#plots with P>51%
# B-V versus V
fig, ax = plt.subplots()
plt.scatter(Bmag-Vmag,Vmag,s=1,color='gray',alpha=0.4)
plt.scatter(Bmag[ind_m]-Vmag[ind_m],Vmag[ind_m], cmap='jet',s=4.e2*sVmag[ind_m],c=members[ind_m])
plt.ylim(np.nanmax(Vmag)+0.5,np.nanmin(Vmag)-1.)
plt.xlim(np.nanmin(Bmag-Vmag)-0.3,np.nanmax(Bmag-Vmag)+0.3)
plt.xlabel('B-V')
plt.ylabel('V')
# plt.title(name)
plt.savefig(dirout+name+'/'+name+'_membership51-BVxV.png', dpi=300)
# color - color
fig, ax = plt.subplots()
plt.scatter(Bmag-Vmag,Umag-Bmag,s=1,color='gray',alpha=0.4)
plt.scatter(Bmag[ind_m]-Vmag[ind_m],Umag[ind_m]-Bmag[ind_m], cmap='jet',s=4.e2*sVmag[ind_m],c=members[ind_m])
plt.ylim(np.nanmax(Umag-Bmag)-0.5,np.nanmin(Umag-Bmag)-0.5)
plt.xlim(np.nanmin(Bmag-Vmag)-0.3,np.nanmax(Bmag-Vmag)+0.3)
plt.xlabel('B-V')
plt.ylabel('U-B')
plt.savefig(dirout+name+'/'+name+'_membership51-BVxUB.png', dpi=300)
##################################################################################
#plots with P>70%
# B-V versus V
fig, ax = plt.subplots()
plt.scatter(Bmag-Vmag,Vmag,s=1,color='gray',alpha=0.4)
plt.scatter(Bmag[nstars70]-Vmag[nstars70],Vmag[nstars70], cmap='jet',s=4.e2*sVmag[nstars70],c=members[nstars70])
plt.ylim(np.nanmax(Vmag)+0.5,np.nanmin(Vmag)-1.)
plt.xlim(np.nanmin(Bmag-Vmag)-0.3,np.nanmax(Bmag-Vmag)+0.3)
plt.xlabel('B-V')
plt.ylabel('V')
# plt.title(name)
plt.savefig(dirout+name+'/'+name+'_membership70-BVxV.png', dpi=300)
# color - color
fig, ax = plt.subplots()
plt.scatter(Bmag-Vmag,Umag-Bmag,s=1,color='gray',alpha=0.4)
plt.scatter(Bmag[nstars70]-Vmag[nstars70],Umag[nstars70]-Bmag[nstars70], cmap='jet',s=4.e2*sVmag[nstars70],c=members[nstars70])
plt.ylim(np.nanmax(Umag-Bmag)-0.5,np.nanmin(Umag-Bmag)-0.5)
plt.xlim(np.nanmin(Bmag-Vmag)-0.3,np.nanmax(Bmag-Vmag)+0.3)
plt.xlabel('B-V')
plt.ylabel('U-B')
plt.savefig(dirout+name+'/'+name+'_membership70-BVxUB.png', dpi=300)
##################################################################################
#plots with P>80%
# B-V versus V
fig, ax = plt.subplots()
plt.scatter(Bmag-Vmag,Vmag,s=1,color='gray',alpha=0.4)
plt.scatter(Bmag[nstars80]-Vmag[nstars80],Vmag[nstars80], cmap='jet',s=4.e2*sVmag[nstars80],c=members[nstars80])
plt.ylim(np.nanmax(Vmag)+0.5,np.nanmin(Vmag)-1.)
plt.xlim(np.nanmin(Bmag-Vmag)-0.3,np.nanmax(Bmag-Vmag)+0.3)
plt.xlabel('B-V')
plt.ylabel('V')
# plt.title(name)
plt.savefig(dirout+name+'/'+name+'_membership80-BVxV.png', dpi=300)
# color - color
fig, ax = plt.subplots()
plt.scatter(Bmag-Vmag,Umag-Bmag,s=1,color='gray',alpha=0.4)
plt.scatter(Bmag[nstars80]-Vmag[nstars80],Umag[nstars80]-Bmag[nstars80], cmap='jet',s=4.e2*sVmag[nstars80],c=members[nstars80])
plt.ylim(np.nanmax(Umag-Bmag)-0.5,np.nanmin(Umag-Bmag)-0.5)
plt.xlim(np.nanmin(Bmag-Vmag)-0.3,np.nanmax(Bmag-Vmag)+0.3)
plt.xlabel('B-V')
plt.ylabel('U-B')
plt.savefig(dirout+name+'/'+name+'_membership80-BVxUB.png', dpi=300)
##################################################################################
#plots with P>90%
# B-V versus V
fig, ax = plt.subplots()
plt.scatter(Bmag-Vmag,Vmag,s=1,color='gray',alpha=0.4)
plt.scatter(Bmag[nstars90]-Vmag[nstars90],Vmag[nstars90], cmap='jet',s=4.e2*sVmag[nstars90],c=members[nstars90])
plt.ylim(np.nanmax(Vmag)+0.5,np.nanmin(Vmag)-1.)
plt.xlim(np.nanmin(Bmag-Vmag)-0.3,np.nanmax(Bmag-Vmag)+0.3)
plt.xlabel('B-V')
plt.ylabel('V')
# plt.title(name)
plt.savefig(dirout+name+'/'+name+'_membership90-BVxV.png', dpi=300)
# color - color
fig, ax = plt.subplots()
plt.scatter(Bmag-Vmag,Umag-Bmag,s=1,color='gray',alpha=0.4)
plt.scatter(Bmag[nstars90]-Vmag[nstars90],Umag[nstars90]-Bmag[nstars90], cmap='jet',s=4.e2*sVmag[nstars90],c=members[nstars90])
plt.ylim(np.nanmax(Umag-Bmag)-0.5,np.nanmin(Umag-Bmag)-0.5)
plt.xlim(np.nanmin(Bmag-Vmag)-0.3,np.nanmax(Bmag-Vmag)+0.3)
plt.xlabel('B-V')
plt.ylabel('U-B')
plt.savefig(dirout+name+'/'+name+'_membership90-BVxUB.png', dpi=300)
##################################################################################
###################################################################
#distance prior from parallaxe data
####################################################################################
guess_dist = infer_dist(Plx[ind_m]+0.029, erPlx[ind_m],guess=1./Plx[ind_m].mean())
print('Infered distance from parallax: %8.3f \n'%(guess_dist))
# verbosefile.write('Infered distance from parallax: %8.3f \n'%(guess_dist))
dist_posterior_x=[]
dist_posterior_y=[]
for d in np.linspace(0.01,3*guess_dist,1000):
dist_posterior_x.append(d)
dist_posterior_y.append(-likelihood_dist(d,Plx[ind_m]+0.029, erPlx[ind_m]))
dist_posterior_x = np.array(dist_posterior_x)
dist_posterior_y = np.array(dist_posterior_y)
dist_posterior_y[dist_posterior_y<0.]=0
cum = np.cumsum(dist_posterior_y)/np.sum(dist_posterior_y)
# conf_int = np.where((cum > 0.16)&(cum<0.84))[0]
conf_int = np.where((cum > 0.16)&(cum<0.5))[0]
try:
# dist_guess_sig = (dist_posterior_x[conf_int[-1]] - dist_posterior_x[conf_int[0]])/2.
dist_guess_sig = (dist_posterior_x[conf_int[-1]] - dist_posterior_x[conf_int[0]])
except:
print('using rough distance interval estimate...')
if (clusters['Plx'][i] > 1*clusters['e_Plx'][i]):
dist_guess_sig = ( 1./(clusters['Plx'][i]-1*clusters['e_Plx'][i]) -
1./(clusters['Plx'][i]+1*clusters['e_Plx'][i]) )/2.
else:
dist_guess_sig = np.min([0.5*guess_dist,1.])
guessparameters = [8.8,guess_dist,0.0,1.]
guess_sig = np.array([1.e3, dist_guess_sig, 1.e3, 1.e3]) # prior values = [age,dist,Fe/H,Av]
#guess_sig = np.array([1.e-2, dist_guess_sig, 1.e3, 1.e3]) # prior values = [age,dist,Fe/H,Av]
prior = np.stack([guessparameters,guess_sig]) # sigma of the prior values
print ('prior:')
print(guessparameters)
print('Prior sig: ')
print(guess_sig)
verbosefile.write('Guess: \n')
verbosefile.write(str(guess)+'\n')
verbosefile.write('Prior sigma: \n')
verbosefile.write(str(guess_sig)+'\n')
print ('Mag. Cut:')
print(magcut)
verbosefile.write('Mag. Cut: \n')
verbosefile.write(str(magcut)+'\n')
print ('number of member stars:', Vmag[ind_m].size)
verbosefile.write('number of member stars: %i \n'%Vmag[ind_m].size)
#################################################################
res_isoc, res_isoc_er = fit_isochroneUBVRI(obs_file, verbosefile, probcut, guess=False,magcut=20.0, obs_plx=False,
obs_plx_er=0.05,prior=np.array([[1.],[1.e6]]),bootstrap=False)
###############################################################################
filters = ['Umag','Bmag','Vmag','Rmag','Imag']
refmag = 'Vmag'
Umag = obs['U']
Bmag = obs['B']
Vmag = obs['V']
Rmag = obs['R']
Imag = obs['I']
sUmag = obs['SU']
sBmag = obs['SB']
sVmag = obs['SV']
sRmag = obs['SR']
sImag = obs['SI']
members = obs['P']
ind_m = members > probcut
grid_iso = get_iso_from_grid(res_isoc[0],(10.**res_isoc[2])*0.0152,filters,refmag, Abscut=False)
fit_iso = make_obs_iso(filters, grid_iso, res_isoc[1], res_isoc[3])
# B-V versus V
fig, ax = plt.subplots()
plt.scatter(Bmag-Vmag,Vmag,s=1,color='gray',alpha=0.4)
plt.scatter(Bmag[ind_m]-Vmag[ind_m],Vmag[ind_m], cmap='jet',s=4.e2*sVmag[ind_m],c=members[ind_m])
plt.ylim(np.nanmax(Vmag)+0.5,np.nanmin(Vmag)-1.)
plt.xlim(np.nanmin(Bmag-Vmag)-0.3,np.nanmax(Bmag-Vmag)+0.3)
plt.plot(fit_iso['Bmag']-fit_iso['Vmag'],fit_iso['Vmag'],'g', label='best solution',alpha=0.9)
plt.xlabel('B-V')
plt.ylabel('V')
# plt.title(name)
plt.savefig(dirout+name+'/'+name+'_BVxV.png', dpi=300)
# cor - cor
fig, ax = plt.subplots()
plt.scatter(Bmag-Vmag,Umag-Bmag,s=1,color='gray',alpha=0.4)
plt.scatter(Bmag[ind_m]-Vmag[ind_m],Umag[ind_m]-Bmag[ind_m], cmap='jet',s=4.e2*sVmag[ind_m],c=members[ind_m])
plt.ylim(np.nanmax(Umag-Bmag)-0.5,np.nanmin(Umag-Bmag)-0.5)
plt.xlim(np.nanmin(Bmag-Vmag)-0.3,np.nanmax(Bmag-Vmag)+0.3)
plt.plot(fit_iso['Bmag']-fit_iso['Vmag'],fit_iso['Umag']-fit_iso['Bmag'],'g', alpha=0.9)
plt.xlabel('B-V')
plt.ylabel('U-B')
plt.savefig(dirout+name+'/'+name+'_BVxUB.png', dpi=300)
# V-R versus V
fig, ax = plt.subplots()
plt.scatter(Vmag-Rmag,Vmag,s=1,color='gray',alpha=0.4)
plt.scatter(Vmag[ind_m]-Rmag[ind_m],Vmag[ind_m], cmap='jet',s=4.e2*sVmag[ind_m],c=members[ind_m])
plt.ylim(np.nanmax(Vmag)+0.3,np.nanmin(Vmag)-1.)
plt.xlim(np.nanmin(Vmag-Rmag)-0.3,np.nanmax(Vmag-Rmag)+0.3)
plt.plot(fit_iso['Vmag']-fit_iso['Rmag'],fit_iso['Vmag'],'g', alpha=0.9)
plt.xlabel('V-R')
plt.ylabel('V')
plt.savefig(dirout+name+'/'+name+'_VRxV.png', dpi=300)
# V-I versus V
fig, ax = plt.subplots()
plt.scatter(Vmag-Imag,Vmag,s=1,color='gray',alpha=0.4)
plt.scatter(Vmag[ind_m]-Imag[ind_m],Vmag[ind_m], cmap='jet',s=4.e2*sVmag[ind_m],c=members[ind_m])
plt.ylim(np.nanmax(Vmag)+0.3,np.nanmin(Vmag)-1.)
plt.xlim(np.nanmin(Vmag-Imag)-0.3,np.nanmax(Vmag-Imag)+0.3)
plt.plot(fit_iso['Vmag']-fit_iso['Imag'],fit_iso['Vmag'],'g', alpha=0.9)
plt.xlabel('V-I')
plt.ylabel('V')
plt.savefig(dirout+name+'/'+name+'_VIxV.png', dpi=300)
# U-B versus V
fig, ax = plt.subplots()
plt.scatter(Umag-Bmag,Vmag,s=1,color='gray',alpha=0.4)
plt.scatter(Umag[ind_m]-Bmag[ind_m],Vmag[ind_m], cmap='jet',s=4.e2*sVmag[ind_m],c=members[ind_m])
plt.ylim(np.nanmax(Vmag)+0.3,np.nanmin(Vmag)-1.)
plt.xlim(np.nanmin(Umag-Bmag)-0.3,np.nanmax(Umag-Bmag)+0.3)
plt.plot(fit_iso['Umag']-fit_iso['Bmag'],fit_iso['Vmag'],'g', label='best solution',alpha=0.9)
plt.xlabel('U-B')
plt.ylabel('V')
plt.savefig(dirout+name+'/'+name+'_UBxV.png', dpi=300)
verbosefile.close()
logfile.close()
print ('DONE!')
| hektor-monteiro/OCFit | UBVRI/OCFit_UBVRI_V2.py | OCFit_UBVRI_V2.py | py | 14,515 | python | en | code | 1 | github-code | 90 |
44237601196 | from .exceptions import UserNotFound
from .user import User, Role
available_users = [User("Dominik", "Dominik", "Z", 100, Role.USER, )]
def find_user_by_login(login):
lower_case_login = login.lower()
for user in available_users:
if lower_case_login == user.login.lower():
return user
raise UserNotFound()
| DominikZazula1/Pyton-Wtajemniczenie | mod_2/new_movies/users_directory.py | users_directory.py | py | 340 | python | en | code | 0 | github-code | 90 |
31473228477 | #https://wingnim.tistory.com/39
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size=64
learning_rate = 0.1
layers = 100
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
# input dimsnsion을 정하고, output dimension을 정하고(growh_rate임), dropRate를 정함.
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True) # inplace 하면 input으로 들어온 것 자체를 수정하겠다는 뜻. 메모리 usage가 좀 좋아짐. 하지만 input을 없앰.
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
return torch.cat([x, out], 1)
class BottleneckBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
# out_planes => growh_rate를 입력으로 받게 된다.
super(BottleneckBlock, self).__init__()
inter_planes = out_planes * 4 # bottleneck layer의 conv 1x1 filter chennel 수는 4*growh_rate이다.
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(inter_planes)
self.conv2 = nn.Conv2d(inter_planes, out_planes, kernel_size=3, stride=1, padding=0, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
out = self.conv2(self.relu(self.bn2(out)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
return torch.cat([x, out], 1) # 입력으로 받은 x와 새로 만든 output을 합쳐서 내보낸다
class DenseBlock(nn.Module):
def __init__(self,nb_layers,in_planes,growh_rate,block,dropRate=0.0):
super(DenseBlock,self).__init__()
self.layer = self._make_layer(block, in_planes, growh_rate, nb_layers, dropRate)
def _make_layer(self,block,in_planes,growh_rate,nb_layers,dropRate):
layers=[]
for i in range(nb_layers):
layers.append(block(in_planes + i*growh_rate ,growh_rate,dropRate))
return nn.Sequential(*layers)
def forward(self,x):
return self.layer(x)
class TransitionBlock(nn.Module):
def __init__(self,in_planes,out_planes,dropRate=0.0):
super(TransitionBlock,self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes,out_planes,kernel_size=1,stride=1,padding=0,bias=False)
self.droprate = dropRate
def forward(self,x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate>0:
out = F.dropout(out,p=self.droprate,inplace=False,training=self.training)
return F.avg_pool2d(out,2)
class DenseNet(nn.Module):
def __init__(self, depth, num_classes, growh_rate=12, reduction=0.5, bottleneck=True, dropRate=0.0):
super(DenseNet, self).__init__()
num_of_blocks = 3
in_planes = 16 # 2 * growh_rate
n = (
depth - num_of_blocks - 1) / num_of_blocks # 총 depth에서 첫 conv , 2개의 transit , 마지막 linear 빼고 / num_of_blocks
if reduction != 1:
in_planes = 2 * growh_rate
if bottleneck == True:
in_planes = 2 * growh_rate # 논문에서 Bottleneck + Compression 할 경우 first layer은 2*growh_rate라고 했다.
n = n / 2 # conv 1x1 레이어가 추가되니까 !
block = BottleneckBlock
else:
block = BasicBlock
n = int(n) # n = DenseBlock에서 block layer 개수를 의미한다.
self.conv1 = nn.Conv2d(3, in_planes, kernel_size=3, stride=1, padding=1,
bias=False) # input:RGB -> output:growhR*2
# 1st block
# nb_layers,in_planes,growh_rate,block,dropRate
self.block1 = DenseBlock(n, in_planes, growh_rate, block, dropRate)
in_planes = int(in_planes + n * growh_rate) # 입력 + 레이어 만큼의 growh_rate
# in_planes,out_planes,dropRate
self.trans1 = TransitionBlock(in_planes, int(math.floor(in_planes * reduction)), dropRate=dropRate)
in_planes = int(math.floor(in_planes * reduction))
# 2nd block
# nb_layers,in_planes,growh_rate,block,dropRate
self.block2 = DenseBlock(n, in_planes, growh_rate, block, dropRate)
in_planes = int(in_planes + n * growh_rate) # 입력 + 레이어 만큼의 growh_rate
# in_planes,out_planes,dropRate
self.trans2 = TransitionBlock(in_planes, int(math.floor(in_planes * reduction)), dropRate=dropRate)
in_planes = int(math.floor(in_planes * reduction))
# 3rd block
# nb_layers,in_planes,growh_rate,block,dropRate
self.block3 = DenseBlock(n, in_planes, growh_rate, block, dropRate)
in_planes = int(in_planes + n * growh_rate) # 입력 + 레이어 만큼의 growh_rate
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(in_planes, num_classes) # 마지막에 ave_pool 후에 1x1 size의 결과만 남음.
self.in_planes = in_planes
# module 초기화
for m in self.modules():
if isinstance(m, nn.Conv2d):
# Conv layer들은 필터에서 나오는 분산 root(2/n)로 normalize 함
# mean = 0 , 분산 = sqrt(2/n) // 이게 무슨 초기화 방법이었는지 기억이 안난다.
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d): # shifting param이랑 scaling param 초기화(?)
m.weight.data.fill_(1) #
m.bias.data.zero_()
elif isinstance(m, nn.Linear): # linear layer 초기화.
m.bias.data.zero_()
def forward(self, x):
# x : 32*32
out = self.conv1(x) # 32*32
out = self.block1(out) # 32*32
out = self.trans1(out) # 16*16
out = self.block2(out) # 16*16
out = self.trans2(out) # 8*8
out = self.block3(out) # 8*8
out = self.relu(self.bn1(out)) # 8*8
out = F.avg_pool2d(out, 8) # 1*1
out = out.view(-1, self.in_planes) # channel수만 남기 때문에 Linear -> in_planes
return self.fc(out)
| SlowMonk/pytorch_ | classification/models/Densenet.py | Densenet.py | py | 7,150 | python | en | code | 0 | github-code | 90 |
13637229055 | # The challange consisted of checking string format which I've done
# using regex and then doing a simple calculation
def get_check_digit(input):
import re
r = re.compile('\d-\d{2}-\d{6}-x')
if len(input) == 13:
if r.match(input):
stripped = input[:-2].replace("-","")
s = 0
for index, num in enumerate(stripped):
s += int(num) * (index+1)
return(s % 11)
return -1
print(get_check_digit("-19-852663-x")) | Andycko/UniCode-20-21 | 11_TheArchives/solution.py | solution.py | py | 493 | python | en | code | 0 | github-code | 90 |
18091046066 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" inkavail.py
This file is part of InkTools.
InkTools is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
InkTools is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with InkTools. If not, see <http://www.gnu.org/licenses/>."""
import os.path
import sys
from orgmodeparser import *
import re
import datetime
from math import sqrt
from colorsys import rgb_to_hls
from collections import OrderedDict, namedtuple
VERSION = '1.12.6'
TITLE = 'InkTools'
TITLE_VERSION = '%s v%s' % (TITLE, VERSION)
COPYRIGHT = '🄯 2020 - 2022 MC-6312'
URL = 'https://github.com/mc6312/inktools'
MILLILITERS = 1000.0
""" InkTools-специфичные "расширения" формата Emacs OrgMode,
не ломающие совместимость, т.к. для штатного парсера Emacs OrgMode
являются простым текстом.
Ветви дерева, содержащие описания чернил, должны иметь метку "ink".
Статус ветвей:
- не указан: чернила НЕ планируются к покупке (т.к. забракованы
по какой-либо причине, в т.ч. по результатам испытаний)
- TODO: чернила планируются к покупке и/или испытанию
- DONE: чернила были куплены и испытаны
Наличие чернил в коллекции описано отдельно (см. ниже) и статусом
TODO/DONE/... не указывается.
Данные, помещаемые в комментарии.
После символа комментария обязателен пробел, чтобы стандартный парсер
не спотыкался.
@TAGSTAT Общий заголовок:заголовок 1го столбца:метка [... метка]
Шаблон для вывода таблицы статистики, параметры разделяются символом ":".
1й параметр - "общий заголовок" - название таблицы,
2й параметр - заголовок 1го столбца,
3й параметр - метки, разделённые пробелами
Пример: # @TAGSTAT По цветам:Цвет:black blue blue_black gray green
@TAGNAMES метка=название:[...:меткаN=названиеN:]
Соответствие меток Emacs OrgMode (которые не могут содержать пробелов
и некоторых других символов) и человекочитаемых строк, для отображения
в таблицах статистики.
Пример: # @TAGNAMES dark=тёмные:black=чёрные:blue=синие:blue_black=сине-чёрные:
Дополнительно обрабатываются текстовые поля ветвей, имеющих названия:
"параметры" - обрабатываются строки вида:
- "цвет: #RRGGBB"
- "основной цвет: метка";
"наличие" или "в наличии" - в тексте ищутся строки вида:
- "[N ]флакон[ов] NN мл" и/или "картридж[и]";
количество картриджей в текущей версии не учитывается;
"использование" или "заправки" - в тексте ищутся строки вида
"дата[:примечания]".
"""
RX_AVAIL_ML = re.compile('^(\d+)?\s*флакон\w*\s([\d\.]+)\s?(мл|л|ml|l)?\s?.*?$', re.UNICODE|re.IGNORECASE)
# 1 - кол-во флаконов
# 2 - объём флакона
# 3 - единица измерения
RX_AVAIL_CR = re.compile('.*картридж.*', re.UNICODE|re.IGNORECASE)
# количество картриджей пока не учитываем
RX_INK_COLOR = re.compile('^цвет:\s*#([0-9,a-f]{6})$', re.UNICODE|re.IGNORECASE)
RX_INK_MAIN_COLOR = re.compile('^основной\s+цвет:\s*(.*)$', re.UNICODE|re.IGNORECASE)
def days_ago(days):
"""Преобразование целого значения days
в человекочитаемое значение (количество дней)
в виде строки."""
if days == 0:
sda = 'сегодня'
elif days == 1:
sda = 'вчера'
elif days == 2:
sda = 'позавчера'
else:
sda = '%d дн. назад' % days
return sda
__STR_DAYS_AGO_APPROX = (
(1, 'сегодня'),
(2, 'вчера'),
(3, 'позавчера'),
(6, 'несколько дней назад'),
(8, 'неделю назад'),
(30, 'около месяца назад'),
(32, 'месяц назад'),
(182, 'несколько месяцев назад'),
(183, 'полгода назад'),
(365, 'около года назад'),
(366, 'год назад'),
(730, 'около двух лет назад'),
)
__STR_DAYS_AGO_MAX = 'больше двух лет назад'
def days_ago_approx(days):
"""Преобразование количества дней (целого числа days)
в приблизительное человекочитаемое значение в виде строки."""
for edge, rs in __STR_DAYS_AGO_APPROX:
#print('> %3d, %3d, "%s": %s' % (days, edge, rs, days < edge))
if days < edge:
return rs
return __STR_DAYS_AGO_MAX
class ColorValue():
# строим велосипед, т.к. Gdk.RGBA с какого-то чорта уродуется внутри Gtk.ListStore
#__slots__ = 'r', 'g', 'b'
def __init__(self, r, g, b):
self.r = r
self.g = g
self.b = b
# Т.к. colorsys.rgb_to_hls() пытается определить диапазон значений
# (м.б. как 0.0-1.0, так и 0-255) - у этой функции случаются
# ошибки при значениях <=1, а потому принудительно приводим
# входные значения к диапазону 0.0-1.0, а выходные - к
# h: 0-359, l: 0-100, s: 0-100.
self.h, self.l, self.s = rgb_to_hls(self.r / 255, self.g / 255, self.b / 255)
self.h = int(round(self.h * 359))
self.l = int(round(self.l * 100))
self.s = int(round(self.s * 100))
self.hexv = self.get_hex_value(self.r, self.g, self.b)
self.navg = 0
self.ravg = 0.0
self.gavg = 0.0
self.bavg = 0.0
def __eq__(self, other):
return (self.r == other.r) and (self.g == other.g) and (self.b == other.b)
@staticmethod
def get_int_value(r, g, b):
"""Возвращает 32-битное значение вида 0xRRGGBBAA,
которое можно скормить Pixbuf.fill()."""
return (r << 24) | (g << 16) | (b << 8) | 0xff
@staticmethod
def get_rgb32_value(v):
"""Возвращает 32-битное значение вида 0xRRGGBBAA,
которое можно скормить Pixbuf.fill().
Используются 24 бита значения v."""
return ((v & 0xffffff) << 8) | 0xff
@classmethod
def new_from_rgb24(cls, rgb):
"""Создаёт экземпляр ColorValue из целого 0xRRGGBB."""
return cls((rgb >> 16) & 255, (rgb >> 8) & 255, rgb & 255)
def __int__(self):
return self.get_int_value(self.r, self.g, self.b)
def __repr__(self):
return '%s(r=%d, g=%d, b=%d, h=%d, l=%d, s=%d, hex=%s)' % (self.__class__.__name__,
self.r, self.g, self.b, self.h, self.l, self.s, self.hexv)
def get_values(self):
return (self.r, self.g, self.b)
@staticmethod
def get_hex_value(r, g, b):
return '#%.2x%.2x%.2x' % (r, g, b)
HUE_NAMES = (
(12, 'красный'),
(35, 'оранжевый'),
(65, 'жёлтый'),
(85, 'жёлто-зелёный'),
(135, 'зелёный'),
(165, 'бирюзовый'),
(215, 'голубой'),
(240, 'синий'),
(265, 'фиолетово-синий'),
(305, 'фиолетовый'),
(335, 'красно-фиолетовый'),
(360, 'красный'))
LIGHTNESS_NAMES = (
(5, 'близкий к чёрному'),
(12, 'очень тёмный'),
(20, 'тёмный'),
(65, 'светлый'),
(100, 'яркий'))
SATURATION_NAMES = (
(5, 'ненасыщенный'),
(12, 'слабо насыщенный'),
(45, 'средне-насыщенный'),
(100, 'насыщенный'))
def get_description(self):
"""Возвращает текстовое описание цвета (как умеет, хехе).
Соответствие названий цветов и т.п. значениям HLS - чистый
авторский произвол.
Соответствия Pantone/RAL/... на данный момент нет, и, вероятно,
не будет."""
def __getv(fromlst, v):
for vrange, vstr in fromlst:
if v <= vrange:
return vstr
return fromlst[-1][1]
# костыль для тёмных малонасыщенных цветов
if self.s <= 3:
if self.l <= 4:
desc = 'чёрный'
elif self.l >= 90:
desc = 'белый'
else:
desc = '%s серый' % __getv(self.LIGHTNESS_NAMES, self.l)
else:
desc = '%s, %s (%d%%), %s (%d%%)' % (
__getv(self.HUE_NAMES, self.h),
__getv(self.SATURATION_NAMES, self.s), self.s,
__getv(self.LIGHTNESS_NAMES, self.l), self.l)
return '%s; %s' % (self.hexv, desc)
def avg_color_add(self, colorv):
"""Накопление значений для вычисления среднего цвета.
colorv - экземпляр ColorValue."""
self.ravg += colorv.r * colorv.r
self.gavg += colorv.g * colorv.g
self.bavg += colorv.b * colorv.b
self.navg += 1
def avg_color_reset(self):
"""Сброс переменных для вычисления среднего цвета."""
self.navg = 0
self.ravg = 0.0
self.gavg = 0.0
self.bavg = 0.0
def avg_color_get(self):
"""Вычисляет среднее значение цвета,
на основе значений, добавленных avg_color_add().
Если не из чего было считать - возвращает None,
иначе - экземпляр ColorValue."""
if self.navg:
return ColorValue(int(sqrt(self.ravg / self.navg)),
int(sqrt(self.gavg / self.navg)),
int(sqrt(self.bavg / self.navg)))
else:
return
class TagStatInfo():
    """Displayed per-tag statistics table."""

    class StatCounters():
        """Statistics counters for inks selected by some criterion
        (a tag or similar).

        Fields:
            available   - number of ink brands in stock;
            unavailable - number of ink brands not in stock;
            wanted      - number of ink brands planned for purchase;
            unwanted    - number of ink brands nobody wants;
            inks        - list of all OrgHeadlineNode instances that
                          match the criterion;
            sortorder   - optional value; may be None or any comparable
                          value, used to order the rows for display;
                          set when the statistics group needs a
                          specific sort order."""

        __slots__ = 'available', 'unavailable', 'wanted', 'unwanted', 'inks', 'sortorder'

        def __init__(self):
            self.available = 0
            self.unavailable = 0
            self.wanted = 0
            self.unwanted = 0
            self.sortorder = None
            self.inks = []

        def __repr__(self):
            return '%s(available=%d, unavailable=%d, wanted=%d, unwanted=%d, inks=%s)' % (self.__class__.__name__,
                self.available, self.unavailable, self.wanted, self.unwanted, self.inks)

        def counter_strs(self):
            """Return a tuple of strings with the counter values
            for display ('-' instead of zero)."""

            def __to_str(i):
                return '-' if i == 0 else str(i)

            return (__to_str(self.available), __to_str(self.unavailable),
                __to_str(self.wanted), __to_str(self.unwanted))

        def add_ink(self, inknode):
            """Update the counters and append the ink to the list.
            inknode - an OrgHeadlineNode instance."""

            if inknode.avail:
                self.available += 1
            else:
                # inknode.avail == False:
                self.unavailable += 1

            if inknode.done is None:
                self.unwanted += 1
            elif not inknode.done:
                self.wanted += 1

            self.inks.append(inknode)

    def __init__(self, totals, title, col1title, tags):
        """Parameters:
            totals    - the InkNodeStatistics instance that owns this
                        TagStatInfo instance;
            title     - title of the statistics table;
            col1title - title of the first column;
            tags      - list of tags to take into account"""

        self.totalstats = totals
        self.title = title
        self.col1title = col1title
        # key - text for the first column, value - a StatCounters instance
        self.stats = OrderedDict()
        # sorting parameters for the UI:
        # whether to sort at all
        self.issortable = True
        # sort in reverse order
        self.sortreversed = True
        # all the tags we account for
        self.tags = set(tags)

    def get_sortable_value(self, tag, value):
        """Return the value to sort a row by.
        tag   - string: the tag value or the display text;
        value - a StatCounters instance.
        Returns a value of a type suitable for sorted()."""
        return '%5d%s' % (value.available, self.totalstats.get_tag_display_name(tag).lower())

    def __repr__(self):
        return '%s(title="%s", col1title="%s", tags=%s, stats=%s)' % (self.__class__.__name__,
            self.title, self.col1title, self.tags, self.stats)

    def add_special_value(self, name, inks):
        """Create (if needed) a special StatCounters instance stored in
        self.stats and feed it the given inks.
        Parameters:
            name - string, name of the pseudo-tag;
            inks - list of OrgHeadlineNode instances.
        Returns the StatCounters instance."""

        if name not in self.stats:
            sctrs = self.StatCounters()
            self.stats[name] = sctrs
        else:
            sctrs = self.stats[name]

        for ink in inks:
            sctrs.add_ink(ink)

        return sctrs

    def gather_statistics(self, inknode):
        """Account for the ink in this table if it carries tags that
        intersect self.tags.
        inknode - an OrgHeadlineNode instance.
        Returns True when the ink was counted, False otherwise."""

        ntags = set(inknode.tags) & self.tags

        if ntags:
            for tag in ntags:
                if tag in self.stats:
                    nfo = self.stats[tag]
                else:
                    nfo = self.StatCounters()
                    self.stats[tag] = nfo

                nfo.add_ink(inknode)

            return True

        return False
class TagSortableStatInfo(TagStatInfo):
    """TagStatInfo variant whose rows sort alphabetically by the raw key."""

    def get_sortable_value(self, tag, value):
        return tag.lower()
class ByOrderValueSortableTagStatInfo(TagStatInfo):
    """TagStatInfo variant whose rows sort by StatCounters.sortorder."""

    def get_sortable_value(self, tag, value):
        return value.sortorder
class ByDaysTagStatInfo(ByOrderValueSortableTagStatInfo):
    """Statistics grouped by days since the ink was last used."""
    pass
class ByUsageTagStatInfo(ByOrderValueSortableTagStatInfo):
    """Statistics grouped by the number of refills/uses of an ink."""
    pass
class MainColorStatInfo(TagSortableStatInfo):
    """Special statistics table keyed by an ink's main colour name."""

    def gather_statistics(self, inknode):
        """Count inknode under its maincolor key.
        Returns True when the ink was counted, False when it has
        no main colour."""
        color = inknode.maincolor
        if not color:
            return False

        counters = self.stats.get(color)
        if counters is None:
            counters = self.StatCounters()
            self.stats[color] = counters

        counters.add_ink(inknode)
        return True
class InkNodeStatistics():
    """Walks an Org document tree and accumulates ink statistics."""

    def __init__(self, rootnode):
        # total volume of ink in stock, millilitres
        self.availMl = 0.0
        # OrgHeadlineNode instances - inks in stock
        self.availInks = []
        # OrgHeadlineNode instances - inks not in stock
        self.unavailInks = []
        # OrgHeadlineNode instances - inks nobody wants
        # (branches that are neither TODO nor DONE)
        self.unwantedInks = []
        # TagStatInfo instances - per-tag statistics tables
        self.tagStats = []
        # tag name translations: key - tag, value - translated name
        self.tagNames = {}
        # reverse mapping of translated names to tags
        self.namesTags = {}
        # temporary list of OrgHeadlineNode instances with incomplete data
        self.hasMissingData = []
        # temporary list of OrgHeadlineNode instances of inks planned
        # for purchase
        self.plannedForBuy = []
        # temporary list of OrgHeadlineNode instances that did not land
        # in any of the tagStats tables
        self.outOfStatsInks = []

        #
        # ink popularity statistics
        #
        self.nowDate = datetime.datetime.now().date()

        # statistics by the number of days since last use
        #TODO group by day ranges instead of using every distinct "days" value as a key
        self.inksByDaysSLU = ByDaysTagStatInfo(self, 'Последнее использование', 'Дней', [])
        self.inksByDaysSLU.sortreversed = False

        # statistics by the number of "uses" (e.g. refills) of an ink;
        # the values are the sets of the corresponding inks
        #TODO group by ranges instead of using every distinct refill count as a key
        self.inksByUsage = ByUsageTagStatInfo(self, 'Количество заправок', 'Кол-во', [])
        self.inksByUsage.sortreversed = False

        # a very special branch
        maincolorStats = MainColorStatInfo(self, 'По основному цвету', '...', [])
        maincolorStats.sortreversed = False
        ixMainColorStats = len(self.tagStats)  # a bit of a kludge
        self.tagStats.append(maincolorStats)

        #
        # recursively walk the branches and fill the fields above
        #
        self.scan_node(rootnode, 0)

        # ...the kludge continues
        if not self.tagStats[ixMainColorStats].stats:
            # the statistics came out empty - remove the branch from the
            # list so as not to confuse the user
            del self.tagStats[ixMainColorStats]

        #
        # ink popularity statistics
        #
        if self.inksByDaysSLU.stats:
            self.tagStats.append(self.inksByDaysSLU)

        if self.inksByUsage.stats:
            self.tagStats.append(self.inksByUsage)

        # ...and now turn hasMissingData and outOfStatsInks into
        # a special branch of tagStats
        others = TagStatInfo(self, 'Прочее', '...', [])
        others.issortable = False

        if self.plannedForBuy:
            others.add_special_value('запланирована покупка', self.plannedForBuy)

        if self.outOfStatsInks:
            others.add_special_value('прочие метки', self.outOfStatsInks)

        if self.hasMissingData:
            others.add_special_value('с неполными данными', self.hasMissingData)

        if others.stats:
            self.tagStats.append(others)

        # the contents already live in "others" and are not needed
        # as separate attributes any more
        del self.plannedForBuy
        del self.hasMissingData
        del self.outOfStatsInks

        # list of all tags
        self.tags = []

        # look for OrgDirectiveNode branches at the top level only
        for node in rootnode.children:
            if isinstance(node, OrgDirectiveNode) and node.name == 'TAGS':
                self.tags += node.text.split(None)
def get_tag_display_name(self, tag):
return self.tagNames[tag] if tag in self.tagNames else tag
    def get_total_result_table(self):
        """Return a list of rows (lists of strings) with the overall
        statistics values."""

        totalMl = self.availMl
        # MILLILITERS is a module-level constant (presumably 1000.0,
        # i.e. ml per litre) defined elsewhere in the file
        if totalMl < MILLILITERS:
            units = 'мл'
        else:
            totalMl /= MILLILITERS
            units = 'л'

        inksAvail = len(self.availInks)
        inksUnavail = len(self.unavailInks)
        inksUnwanted = len(self.unwantedInks)
        inksTotal = inksAvail + inksUnavail

        def __percent(n):
            # returns (absolute value, percentage of the total) as strings
            pc = '%.1f%%' % (0 if inksTotal == 0 else 100.0 * n / inksTotal)
            return (str(n), pc)

        # four columns: field name, absolute value, percentage of the
        # total, volume in l/ml; the volume is shown only for inks in
        # stock, the other rows get empty strings
        return [
            ['Всего:', str(inksTotal), '', ''],
            ['В наличии:', *__percent(inksAvail), '≈{:.2f} {:s}'.format(totalMl, units)],
            ['Отсутствуют:', *__percent(inksUnavail), ''],
            ['Не нужны:', *__percent(inksUnwanted), ''],
        ]
def __repr__(self):
return '%s(availMl=%.2f, availInks=%s, unavailInks=%s, unwantedInks=%s, hasMissingData=%s, tagStats=%s, outOfStatsInks=%s)' % (
self.__class__.__name__,
self.availMl,
self.availInks,
self.unavailInks,
self.unwantedInks,
self.hasMissingData,
self.tagStats,
self.outOfStatsInks)
# флаги для проверки полноты описания
MISSING_TAGS, MISSING_DESCRIPTION, MISSING_COLOR, MISSING_MAIN_COLOR = range(4)
STR_MISSING = {MISSING_TAGS:'метки',
MISSING_DESCRIPTION:'описание',
MISSING_COLOR:'цвет',
MISSING_MAIN_COLOR:'основной цвет'}
__INK_TAG = 'ink'
usageinfo = namedtuple('usageinfo', 'date comment')
    def get_ink_node_statistics(self, node):
        """Gather statistics for node if it is an OrgHeadlineNode with
        an ink description.
        Returns True when node contained an ink description, False
        otherwise."""

        if not isinstance(node, OrgHeadlineNode):
            return False

        # only branches carrying the "ink" tag are taken into account
        if self.__INK_TAG not in node.tags:
            return False

        #
        # completeness-check field; NOT saved to the file
        #
        node.missing = set()

        if len(node.tags) == 1:
            # i.e. the only tag present is "ink"
            node.missing.add(self.MISSING_TAGS)

        # this is an "ink" tree element - feed its contents to the statistics
        #
        # check that a textual description is present
        #
        ntexts = 0

        for child in node.children:
            # isinstance is not suitable here, a strict type check is
            # needed: a hypothetical OrgTextNode descendant could be
            # something fancy and off-topic
            if type(child) is OrgTextNode:
                ntexts += 1

        if ntexts == 0:
            node.missing.add(self.MISSING_DESCRIPTION)

        #
        # process the special sub-branches
        #
        def __get_special_text_node(*headname):
            """Look for OrgHeadlineNode branches whose heading text
            matches one of the names listed in headname.
            Return a list of the OrgHeadlineNode instances found, or an
            empty list when nothing matches."""
            retl = []

            # note: every listed variant of the name is looked up
            for hn in headname:
                hlnode = node.find_child_by_text(hn, OrgHeadlineNode)
                if hlnode:
                    retl.append(hlnode)

            return retl

        def __get_node_text_children(hlnodes):
            """Iterate over the OrgHeadlineNode instances in hlnodes,
            non-recursively collect their direct children of type
            OrgTextNode and return them as a list.
            The returned list may be empty."""
            retl = []

            for hlnode in hlnodes:
                for child in hlnode.children:
                    # isinstance is not suitable here (strict type check)
                    if type(child) is OrgTextNode:
                        retl.append(child)

            return retl
#
# параметры
#
# эти значения в документе (БД) не хранятся,
# используются только статистикой
# образец цвета (RGB)
node.color = None
# название основного цвета (см. ниже)
# используется для группировки статистики, если значение указано,
# иначе - используется одна из меток (как в предыдущих версиях)
node.maincolor = None
params = __get_node_text_children(__get_special_text_node('параметры'))
for paramnode in params:
# цвет чернил #RRGGBB
rm = RX_INK_COLOR.match(paramnode.text)
if rm:
node.color = int(rm.group(1), 16)
# название основного цвета
# должно быть одним из значений цветовых тэгов (из директивы +TAGS),
# или одним из человекочитаемых значений (из директивы @TAGNAMES)
# прочие значения игнорируются
rm = RX_INK_MAIN_COLOR.match(paramnode.text)
if rm:
#TODO возможно, придётся _везде_ приводить тэги к нижнему регистру
cv = rm.group(1).lower()
# проверяем сначала "человекочитаемое" название тэга
tn = self.namesTags.get(cv)
if tn is None:
# тогда проверяем обычный тэг
if cv in self.tagNames:
tn = cv
node.maincolor = tn
if node.color is None:
node.missing.add(self.MISSING_COLOR)
if node.maincolor is None:
node.missing.add(self.MISSING_MAIN_COLOR)
#
# статистика использования
#
def __parse_date(ds):
"""Разбирает строку ds, содержащую дату, возвращает
экземпляр datetime.date.
Если строка не соответствует допустимым вариантам формата
или содержит значения вне допустимого диапазона, возвращает
None.)"""
da = ds.split('.', 2)
if len(da) != 3:
da = da.split('-', 2)
if len(da) != 3:
return
lda = tuple(map(len, da))
if lda != (4,2,2) and lda != (2,2,4):
return
try:
da = tuple(map(int, da))
if lda[0] == 4:
return datetime.date(da[0], da[1], da[2])
else:
return datetime.date(da[2], da[1], da[0])
except ValueError:
return
# список node.usage в документе не хранится, используется только статистикой
# список содержит экземпляры usageinfo
node.usage = []
# кол-во дней с последнего использования чернил, если в файле есть соотв. данные
node.daysSLU = None
'''TODO в будущем м.б. следует различать "использование" (подразумевая перья-макалки)
и "заправки" (подразумевая авторучки)'''
usage = __get_node_text_children(__get_special_text_node('использование', 'заправки'))
for ustr in usage:
udate, _, ucmt = ustr.text.partition(':')
udate = __parse_date(udate)
if not udate:
continue
dslu = (self.nowDate - udate).days
if node.daysSLU is None or node.daysSLU > dslu:
node.daysSLU = dslu
node.usage.append(self.usageinfo(udate, ucmt.strip()))
#
# наличие
#
# поля avail/availMl/availCartridges в документе не хранятся
# - используется только статистикой
# для загрузки их значений производится разбор текста соотв. ветви файла
node.avail = False
node.availMl = 0.0
node.availCartridges = False
avails = __get_node_text_children(__get_special_text_node('в наличии', 'наличие'))
for availnode in avails:
if availnode.text.lower() == 'нет':
continue
rm = RX_AVAIL_ML.match(availnode.text)
if rm:
try:
# 1 - кол-во флаконов
# 2 - объём флакона
# 3 - единица измерения
squantity = rm.group(1)
quantity = int(squantity) if squantity else 1
avail = float(rm.group(2))
_units = rm.group(3).lower()
# допустимые единицы измерения - литры и миллилитры
# если единицы не указаны, или НЕ литры - всегда считаем
# миллилитрами
if _units in ('л', 'l'):
avail *= 1000.0
avail *= quantity # кол-во банок пока учитываем только так
node.avail = True
node.availMl += avail
self.availMl += avail
except ValueError:
pass
else:
rm = RX_AVAIL_CR.match(availnode.text)
# количество картриджей пока не учитываем
if rm:
node.avail = True
node.availCartridges = True
# Внимание:
# node.avail НЕ зависит от node.done
if node.avail:
self.availInks.append(node)
elif node.avail == False:
self.unavailInks.append(node)
# т.е. "нежелательные" могут одновременно быть в списках
# avail/unavail!
if node.done == None:
self.unwantedInks.append(node)
#
# запланированные к покупке
#
if node.done == False: # and node.priority
self.plannedForBuy.append(node)
#
# записи с неполными данными
#
if node.missing:
self.hasMissingData.append(node)
#
# пихаем чернила в общую статистику по датам
#
if node.daysSLU is None:
# sort order - пока вот так, костылём
so = 999999
ns = 'никогда'
else:
ns = days_ago_approx(node.daysSLU)
so = node.daysSLU
self.inksByDaysSLU.add_special_value(ns, [node]).sortorder = so
#
# пихаем чернила в общую статистику по используемости
#
#TODO: возможно, добавить диапазоны больше 10 заправок
nusage = len(node.usage)
if nusage > 0:
if nusage == 1:
ns = '1 раз'
# sort order - пока вот так, костылём
so = 0
elif nusage < 6:
ns = '2-5 раз'
so = 1
elif nusage < 11:
ns = '6-10 раз'
so = 2
else:
ns = 'больше 10 раз'
so = 3
self.inksByUsage.add_special_value(ns, [node]).sortorder = so
#
# скармливаем всё, что следует, статистике "по тэгам"
#
ninstats = 0
for tagstat in self.tagStats:
if tagstat.gather_statistics(node):
ninstats += 1
if ninstats == 0:
self.outOfStatsInks.append(node)
return True
    def scan_node(self, node, level):
        """Recursively walk the tree of OrgNode instances and gather the
        ink availability statistics.
        node  - an Org*Node instance;
        level - nesting level."""

        for child in node.children:
            if isinstance(child, OrgCommentNode):
                # comments get special treatment:
                # at level 0 look for "home-grown" directives of the form
                # "@directive parameter [parameter]"
                # (see the process_directive method);
                # deeper levels are ignored - there are no ink
                # descriptions there anyway
                if level == 0:
                    dargs = list(map(lambda s: s.strip(), child.text.split(None, 1)))
                    if not dargs:
                        continue

                    dname = dargs[0]
                    if not dname.startswith('@'):
                        # a plain comment or somebody else's directive
                        continue

                    dname = dname[1:]
                    if not dname:
                        # a bare "@" does not count as a directive
                        continue

                    dargs = dargs[1:]  # may be an empty list!
                    self.process_directive(dname, dargs[0] if dargs else '')
            elif not self.get_ink_node_statistics(child):
                self.scan_node(child, level + 1)
def __process_tagstat_directive(self, dvalue):
# статистика по тэгам
# формат заголовка -
# "название таблицы:название 1го столбца:метка1 [... [меткаN]]"
tsargs = list(map(lambda s: s.strip(), dvalue.split(':', 2)))
#TODO присобачить проверку ошибок синтаксиса
if len(tsargs) != 3:
return
tstitle = tsargs[0]
if not tstitle:
return
tsc1title = tsargs[1]
if not tsc1title:
return
tstags = set(filter(None, map(lambda s: s.strip(), tsargs[2].split(None))))
if not tstags:
return
self.tagStats.append(TagStatInfo(self, tstitle, tsc1title, tstags))
    def __process_tagnames_directive(self, dvalue):
        """Handle the @TAGNAMES directive: tag name translations in the
        form tagname=translation[:tagname1=translation1[...]].
        Also rebuilds the reverse (lowercased translation -> tag)
        mapping self.namesTags."""

        for rawtrans in dvalue.split(':'):
            tagname, sep, tagtrans = map(lambda s: s.strip(), rawtrans.partition('='))
            #TODO add proper syntax error reporting
            if sep != '=' or not tagname or not tagtrans:
                continue

            self.tagNames[tagname] = tagtrans

        self.namesTags = OrderedDict(map(lambda r: (r[1].lower(), r[0]), self.tagNames.items()))
def process_directive(self, dname, dvalue):
"""Обработка "самопальной" (не стандарта OrgMode) директивы вида
'@ИМЯ значение'.
dname - имя директивы (без символа @),
dvalue - значение директивы.
Имена директив регистро-зависимы.
В случае ошибок генерируются исключения."""
if dname == 'TAGSTAT':
self.__process_tagstat_directive(dvalue)
elif dname == 'TAGNAMES':
self.__process_tagnames_directive(dvalue)
    def get_ink_description(self, ink):
        """Build a textual description of an ink.
        Parameters:
            ink - an OrgHeadlineNode instance.
        Returns a tuple of four strings:
        ('name', 'sorted list of human-readable tags',
         'description', 'availability').
        Raises TypeError/ValueError for non-ink arguments."""

        if not isinstance(ink, OrgHeadlineNode):
            raise TypeError('get_ink_description(ink): "ink" must be OrgHeadlineNode')

        if self.__INK_TAG not in ink.tags:
            raise ValueError('get_ink_description(ink): "ink" must contain ink description')

        desc = []

        for chld in ink.children:
            if isinstance(chld, OrgTextNode):
                desc.append(chld.text)

        avails = []

        if ink.availMl > 0.0:
            # below half a litre show millilitres, otherwise litres
            if ink.availMl < 500.0:
                avs = '%.f мл' % ink.availMl
            else:
                avs = '%.2f л' % (ink.availMl / 1000.0)

            avails.append(avs)

        if ink.availCartridges:
            avails.append('картриджи')

        # a bit of a kludge
        disptags = ink.tags.copy()
        # drop the service tag - it is needed when loading the DB,
        # not for display
        disptags.remove(self.__INK_TAG)

        return (ink.text,
            ', '.join(sorted(map(lambda tag: self.tagNames[tag] if tag in self.tagNames else tag, disptags))),
            '\n'.join(desc),
            ' и '.join(avails))
def get_ink_missing_data_str(self, ink):
"""Возвращает строку, в которой перечислены недостающие данные
для ink (экземпляра OrgHeadlineNode), или пустую строку
(когда всё данные есть)."""
return ', '.join(map(lambda k: self.STR_MISSING[k], ink.missing))
def load_ink_db(fname):
    """Load an Org document from the file fname.
    Returns a MinimalOrgParser instance, or None (after printing a
    message to stderr) when fname is empty or does not exist."""

    if not fname:
        print('Файл не указан', file=sys.stderr)
        return None

    if not os.path.exists(fname):
        print('Файл "%s" не найден' % fname, file=sys.stderr)
        return None

    #print(f'Загружаю {fname}')
    return MinimalOrgParser(fname)
def get_ink_stats(db):
    """Return an InkNodeStatistics built over db, or None when db is None."""
    if db is None:
        return None
    return InkNodeStatistics(db)
def __test_stats():
    """Debug helper: load the configured database and dump the
    collected statistics to stdout."""
    print('%s\n' % TITLE_VERSION)

    from inktoolscfg import Config

    cfg = Config()
    cfg.load()

    stats = get_ink_stats(load_ink_db(cfg.databaseFileName))
    if stats:
        print(stats.get_total_result_table())

        for tagstat in stats.tagStats:
            print('\n%s' % tagstat.title)
            print(tagstat.stats)

    return 0
def __test_misc1():
    """Debug helper: load the database (description dump is disabled)."""
    print('%s\n' % TITLE_VERSION)

    from inktoolscfg import Config

    cfg = Config()
    cfg.load()

    stats = get_ink_stats(load_ink_db(cfg.databaseFileName))
    #for node in stats.availInks:
    #    print(stats.get_ink_description(node))
def __test_colordesc():
    """Debug helper: print hex value and description for sample colours."""
    colors = ((0, 0, 0),
        (255, 255, 255),
        (255, 0, 0),
        (255, 192, 0),
        (0, 0, 255),
        (0, 192, 255),
        (255, 0, 255),
        (20, 20, 20),
        (20, 20, 50),
        (96, 96, 255),
        (192, 192, 255),
        (96, 220, 255),
        (240, 240, 255))

    for r, g, b in colors:
        colorv = ColorValue(r, g, b)
        print(colorv.hexv, colorv.get_description())
def __test_days_ago():
    """Debug helper: print exact and approximate "days ago" strings."""
    for d in (0, 1, 2, 3, 5, 7, 15, 30, 31, 40, 120, 182, 223, 360, 365, 450):
        print('%3d %20s %25s' % (d,
            days_ago(d),
            days_ago_approx(d)))
if __name__ == '__main__':
    # ad hoc debugging entry point; uncomment the check to run
    print('[debugging %s]' % __file__)

    __test_stats()
    #__test_colordesc()
    #__test_misc1()
    #__test_days_ago()
| mc6312/inktools | inkavail.py | inkavail.py | py | 46,003 | python | ru | code | 0 | github-code | 90 |
11032606963 | import base64
import json
import os
import uuid
from collections import namedtuple
from contextlib import contextmanager
import pytest
from httmock import HTTMock, urlmatch
from util.config.provider import KubernetesConfigProvider
def normalize_path(path):
    """Flatten a file path into a Kubernetes secret key by replacing
    every '/' with '_'."""
    return "_".join(path.split("/"))
@contextmanager
def fake_kubernetes_api(tmpdir_factory, files=None):
    """Context manager that fakes the Kubernetes secrets API with HTTMock
    and yields a KubernetesConfigProvider wired against it.

    files maps secret file paths to their string contents; the fake keeps
    the secret payload in the module-level `secret` dict and mirrors it
    into a temporary config directory.
    """
    hostname = "kubapi"

    service_account_token_path = str(tmpdir_factory.mktemp("k8s").join("serviceaccount"))
    auth_header = str(uuid.uuid4())
    with open(service_account_token_path, "w") as f:
        f.write(auth_header)

    # NOTE(review): module-level mutable state shared by the URL handlers;
    # it keeps the fake secret payload between the GET and PUT calls
    global secret
    secret = {"data": {}}

    def write_file(config_dir, filepath, value):
        # mirror one secret entry into the on-disk config directory
        normalized_path = normalize_path(filepath)
        absolute_path = str(config_dir.join(normalized_path))
        try:
            os.makedirs(os.path.dirname(absolute_path))
        except OSError:
            # directory already exists
            pass

        with open(absolute_path, "w") as f:
            f.write(value)

    config_dir = tmpdir_factory.mktemp("config")
    if files:
        for filepath, value in files.items():
            normalized_path = normalize_path(filepath)
            write_file(config_dir, filepath, value)
            secret["data"][normalized_path] = base64.b64encode(value.encode("utf-8")).decode(
                "ascii"
            )

    @urlmatch(
        netloc=hostname,
        path="/api/v1/namespaces/quay-enterprise/secrets/quay-enterprise-config-secret$",
        method="get",
    )
    def get_secret(_, __):
        # serve the current fake secret
        return {"status_code": 200, "content": json.dumps(secret)}

    @urlmatch(
        netloc=hostname,
        path="/api/v1/namespaces/quay-enterprise/secrets/quay-enterprise-config-secret$",
        method="put",
    )
    def put_secret(_, request):
        # apply added/removed entries to the on-disk mirror, then store
        # the updated payload
        # NOTE(review): entries whose value *changed* (present in both
        # payloads) are not re-written to disk - confirm intended
        updated_secret = json.loads(request.body)
        for filepath, value in updated_secret["data"].items():
            if filepath not in secret["data"]:
                # Add
                write_file(
                    config_dir, filepath, base64.b64decode(value.encode("utf-8")).decode("ascii")
                )

        for filepath in secret["data"]:
            if filepath not in updated_secret["data"]:
                # Remove.
                normalized_path = normalize_path(filepath)
                os.remove(str(config_dir.join(normalized_path)))

        secret["data"] = updated_secret["data"]
        return {"status_code": 200, "content": json.dumps(secret)}

    @urlmatch(netloc=hostname, path="/api/v1/namespaces/quay-enterprise$")
    def get_namespace(_, __):
        return {"status_code": 200, "content": json.dumps({})}

    @urlmatch(netloc=hostname)
    def catch_all(url, _):
        # any unexpected API call: log it and 404
        print(url)
        return {"status_code": 404, "content": "{}"}

    with HTTMock(get_secret, put_secret, get_namespace, catch_all):
        provider = KubernetesConfigProvider(
            str(config_dir),
            "config.yaml",
            "config.py",
            api_host=hostname,
            service_account_token_path=service_account_token_path,
        )

        # Validate all the files.
        # BUG FIX: `files` defaults to None and the rest of this function
        # guards with `if files:`, but this loop iterated `files.items()`
        # unconditionally and crashed for files=None
        for filepath, value in (files or {}).items():
            normalized_path = normalize_path(filepath)
            assert provider.volume_file_exists(normalized_path)
            with provider.get_volume_file(normalized_path) as f:
                assert f.read() == value

        yield provider
def test_basic_config(tmpdir_factory):
    """The provider reads a config.yaml stored in the fake secret."""
    basic_files = {
        "config.yaml": "FOO: bar",
    }

    with fake_kubernetes_api(tmpdir_factory, files=basic_files) as provider:
        assert provider.config_exists()
        assert provider.get_config() is not None
        assert provider.get_config()["FOO"] == "bar"
@pytest.mark.parametrize(
    "filepath",
    [
        "foo",
        "foo/meh",
        "foo/bar/baz",
    ],
)
def test_remove_file(filepath, tmpdir_factory):
    """Removing a volume file (flat or nested path) deletes it from the
    secret and the on-disk mirror."""
    basic_files = {
        filepath: "foo",
    }

    with fake_kubernetes_api(tmpdir_factory, files=basic_files) as provider:
        normalized_path = normalize_path(filepath)
        assert provider.volume_file_exists(normalized_path)

        provider.remove_volume_file(normalized_path)
        assert not provider.volume_file_exists(normalized_path)
class TestFlaskFile(object):
    """Minimal stand-in for a Flask/werkzeug upload object: save() just
    writes fixed content into the given buffer.

    NOTE(review): the Test* prefix makes pytest try to collect this
    class; it has no test methods so nothing runs, but renaming it
    (together with its use below) would avoid the collection attempt.
    """

    def save(self, buf):
        buf.write("hello world!")
def test_save_file(tmpdir_factory):
    """save_volume_file stores an uploaded file into the secret."""
    basic_files = {}

    with fake_kubernetes_api(tmpdir_factory, files=basic_files) as provider:
        assert not provider.volume_file_exists("testfile")

        flask_file = TestFlaskFile()
        provider.save_volume_file(flask_file, "testfile")
        assert provider.volume_file_exists("testfile")
| quay/quay | util/config/provider/test/test_k8sprovider.py | test_k8sprovider.py | py | 4,682 | python | en | code | 2,281 | github-code | 90 |
23005331751 | import re
import os
import sqlalchemy
from . import utils
# Automapped SQLAlchemy model classes; bound in WebViewApi.__init__
# once the models module has reflected the database schema.
ChannelMetadata = None
ContentNode = None
File = None
class WebViewApi(object):
    """RPC-style bridge between the embedding web view and the Kolibri
    content database (SQLAlchemy automapped models)."""

    def __init__(self, main_window):
        self.__main_window = main_window

        # bind the automapped model classes at construction time: the
        # models module reflects the sqlite schema on import
        global ChannelMetadata, ContentNode, File
        from .models import session, Base
        self.session = session
        self.models = Base.classes
        ChannelMetadata = self.models.content_channelmetadata
        ContentNode = self.models.content_contentnode
        File = self.models.content_file

    def dispatch(self, payload):
        """Execute the method named in payload['func'] with
        payload['args'] and wrap the outcome.

        Returns {'callId', 'result'} on success, or
        {'callId', 'error': {'message', 'type'}} when the call raises.
        """
        try:
            func = payload['func']
            kwargs = payload.get('args', {})
            return {
                'callId': payload['callId'],
                'result': getattr(self, func)(**kwargs),
            }
        except Exception as e:
            # deliberately broad: any failure is serialized back to the
            # web view instead of crashing the bridge
            return {
                'callId': payload['callId'],
                'error': {
                    'message': str(e),
                    'type': e.__class__.__name__,
                }
            }

    def get_metadata(self):
        """Return id/name/description of the (single) channel in the DB."""
        channel_metadata = self.session.query(ChannelMetadata).one()
        return {
            'id': channel_metadata.id,
            'name': channel_metadata.name,
            'description': channel_metadata.description,
        }

    def set_header_title(self, **kwargs):
        """Forward a header-title change to the main window."""
        self.__main_window.set_header_title(**kwargs)

    def _content_node_to_json(self, content_node):
        """Serialize a ContentNode row to the JSON shape the web view
        expects; the file fields are filled in by get_content_node()."""
        return {
            'id': content_node.id,
            'title': content_node.title,
            'description': content_node.description,
            'kind': content_node.kind,
            'thumbnail_file': None,
            'main_file': None,
            'supplementary_files': [],
            'other_files': [],
        }

    def _file_to_json(self, file):
        """Serialize a File row, resolving its on-disk location."""
        filename = utils.get_kolibri_storage_file_path(
            '{id}.{extension}'.format(
                id=file.local_file_id, extension=file.extension
            )
        )
        return {
            'id': file.id,
            # BUG FIX: the template contained no '{filename}' replacement
            # field (the keyword argument was silently ignored), so every
            # file got the same literal URI
            'file_uri': 'file://{filename}'.format(filename=filename),
            'ekn_uri': 'ekn:///kolibri/storage/{id}.{extension}'.format(
                id=file.local_file_id, extension=file.extension
            ),
            'preset': file.preset,
            'lang': file.lang_id,
            'file_size': file.file_size,
            'available': os.path.isfile(filename),
        }

    def get_content_node(self, content_node_id):
        """Return the node, its files (classified into thumbnail / main /
        supplementary / other) and its direct children."""
        content_node = self.session.query(ContentNode) \
            .filter(ContentNode.id == content_node_id) \
            .one()
        result = self._content_node_to_json(content_node)

        files = self.session.query(File) \
            .filter(File.contentnode_id == content_node_id) \
            .order_by(File.priority) \
            .all()
        for file in files:
            if file.thumbnail == 1:
                result['thumbnail_file'] = self._file_to_json(file)
            elif file.supplementary == 1:
                result['supplementary_files'].append(self._file_to_json(file))
            else:
                # the first non-thumbnail, non-supplementary file (lowest
                # priority value) is the main renderable file
                if result['main_file'] is None:
                    result['main_file'] = self._file_to_json(file)
                else:
                    result['other_files'].append(self._file_to_json(file))

        children = self.session.query(ContentNode) \
            .filter(ContentNode.parent_id == content_node_id) \
            .order_by(ContentNode.sort_order).all()
        result['children'] = list(map(self._content_node_to_json, children))

        return result

    def search(self, query, **kwargs):
        """
        Inspired by the Kolibri search API (`ContentNodeSearchViewset`):
        https://github.com/learningequality/kolibri/blob/develop/kolibri/core/content/api.py
        """
        MAX_RESULTS = 30

        # all words with punctuation removed
        all_words = [w for w in re.split('[?.,!";: ]', query) if w]
        # TODO
        # words in all_words that are not stopwords
        # critical_words = [w for w in all_words if w not in stopwords_set]

        # queries ordered by relevance priority
        all_queries = [
            sqlalchemy.and_(*[ContentNode.title.ilike('%{w}%'.format(w=w)) for w in all_words]),
            # sqlalchemy.and_(*[ContentNode.title.ilike('%{w}%'.format(w=w)) for w in critical_words]),
            sqlalchemy.and_(*[ContentNode.description.ilike('%{w}%'.format(w=w)) for w in all_words]),
            # sqlalchemy.and_(*[ContentNode.description.ilike('%{w}%'.format(w=w)) for w in critical_words]),
        ]
        # # any critical word in title, reverse-sorted by word length
        # for w in sorted(critical_words, key=len, reverse=True):
        #     all_queries.append(Q(title__icontains=w))
        # # any critical word in description, reverse-sorted by word length
        # for w in sorted(critical_words, key=len, reverse=True):
        #     all_queries.append(Q(description__icontains=w))

        # run the queries in priority order, de-duplicating by content_id,
        # until MAX_RESULTS distinct nodes have been collected
        # (loop variable renamed: it previously shadowed the `query` parameter)
        content_node_ids = []
        content_ids = set()
        for subquery in all_queries:
            content_nodes = self.session.query(ContentNode.id, ContentNode.content_id) \
                .filter(~ContentNode.content_id.in_(list(content_ids))) \
                .filter(subquery) \
                .limit(MAX_RESULTS * 2) \
                .all()
            for content_node in content_nodes:
                if content_node.content_id not in content_ids:
                    content_ids.add(content_node.content_id)
                    content_node_ids.append(content_node.id)
                if len(content_node_ids) >= MAX_RESULTS:
                    break
            if len(content_node_ids) >= MAX_RESULTS:
                break

        nodes = self.session.query(ContentNode) \
            .filter(ContentNode.id.in_(content_node_ids)) \
            .all()
        return {
            'results': list(map(self._content_node_to_json, nodes)),
        }
| endlessm/kolibri-webview-demo | kolibri_webview_demo/web_view_api.py | web_view_api.py | py | 5,998 | python | en | code | 0 | github-code | 90 |
def is_group_word(word):
    """Return True if `word` is a "group word": no letter reappears
    after a different letter has been seen, i.e. every run of equal
    letters is contiguous (BOJ problem 1316).

    Improvements over the original script: O(1) set membership instead
    of a linear list scan, and the logic is testable in isolation."""
    seen = set()
    previous = None
    for letter in word:
        if letter != previous:
            if letter in seen:
                return False
            seen.add(letter)
            previous = letter
    return True


def main():
    """Read the word count and the words from stdin; print how many of
    them are group words."""
    count = int(input())
    total = sum(is_group_word(input()) for _ in range(count))
    print(total)


if __name__ == '__main__':
    main()
| Hong-kee/Algorithm-Study | hyuns/String/1316_그룹단어체커.py | 1316_그룹단어체커.py | py | 362 | python | en | code | 2 | github-code | 90 |
17549010987 | import tensorflow as tf
from .psd import calculate_psd
import tensorflow_probability as tfp
import tensorflow.signal as tfs
@tf.function
def planck(N: int, nleft: int, nright: int) -> tf.Tensor:
    """
    Create a Planck-taper window.

    Parameters
    ----------
    N : int
        The total number of samples in the window.
    nleft : int
        The number of samples in the left taper segment.
    nright : int
        The number of samples in the right taper segment.

    Returns
    -------
    window : tf.Tensor
        A window of length `N` with a Planck taper applied.
    """
    # Creating left and right ranges
    left = tf.range(nleft, dtype=tf.float32)
    right = tf.range(nright, dtype=tf.float32) - nright + 1

    # Apply the Planck-taper function to left and right ranges
    # NOTE(review): divides by (nleft-1) / (nright-1), so nleft == 1 or
    # nright == 1 would divide by zero - confirm callers always pass >= 2
    taper_left = 1 / (tf.exp(-left/(nleft-1)) + 1)
    taper_right = 1 / (tf.exp(-right/(nright-1)) + 1)

    # Combine the left taper, a flat middle segment, and the right taper
    window = tf.concat([
        taper_left,
        tf.ones(N-nleft-nright),
        tf.reverse(taper_right, axis=[0])
    ], axis=0)

    return window
@tf.function
def truncate_transfer(
    transfer: tf.Tensor,
    ncorner: int = None
) -> tf.Tensor:
    """
    Smoothly zero the edges of a frequency domain transfer function.

    Parameters
    ----------
    transfer : tf.Tensor
        The transfer function to truncate (batched: shape [..., nsamp]).
    ncorner : int, optional
        The number of extra samples to zero off at low frequency.

    Returns
    -------
    transfer : tf.Tensor
        The truncated transfer function.
    """
    nsamp = transfer.shape[-1]
    ncorner = ncorner if ncorner else 0

    # Validate that ncorner is within the range of the array size
    if ncorner >= nsamp:
        raise ValueError(
            "ncorner must be less than the size of the transfer array"
        )

    # taper the first/last 5 kept samples with a Planck window
    # NOTE(review): assumes nsamp - ncorner is large enough to fit the
    # two 5-sample tapers - confirm against expected transfer lengths
    plank = planck(nsamp-ncorner, nleft=5, nright=5)
    transfer_zeros = tf.zeros_like(transfer[:,:ncorner])
    transfer_mod = tf.multiply(transfer[:,ncorner:nsamp], plank)
    new_transfer = tf.concat([transfer_zeros, transfer_mod], axis=-1)
    return new_transfer
@tf.function
def truncate_impulse(
    impulse: tf.Tensor,
    ntaps: int,
    window: str = 'hann'
) -> tf.Tensor:
    """
    Smoothly truncate a time domain impulse response.

    Parameters
    ----------
    impulse : tf.Tensor
        The impulse response to truncate (batched: shape [..., n]).
    ntaps : int
        Number of taps in the final filter, must be an even number.
    window : str, optional
        Window function to truncate with, default is 'hann'.

    Returns
    -------
    impulse: tf.Tensor
        The truncated impulse response.
    """
    # ntaps must be even so the filter splits into two equal halves
    if ntaps % 2 != 0:
        raise ValueError("ntaps must be an even number")

    trunc_start = int(ntaps / 2)
    trunc_stop = impulse.shape[-1] - trunc_start

    if window == 'hann':
        window = tfs.hann_window(ntaps)
    # Extend this section with more cases if more window functions are required.
    else:
        raise ValueError(f"Window function {window} not supported")

    # keep the first/last trunc_start samples, tapered by the two window
    # halves; zero everything in between
    impulse_start = impulse[:,:trunc_start] * window[trunc_start:ntaps]
    impulse_stop = impulse[:,trunc_stop:] * window[:trunc_start]
    impulse_middle = tf.zeros_like(impulse[:,trunc_start:trunc_stop])
    new_impulse = tf.concat([impulse_start, impulse_middle, impulse_stop], axis=-1)
    return new_impulse
@tf.function
def fir_from_transfer(
    transfer: tf.Tensor,
    ntaps: int,
    window: str = 'hann',
    ncorner: int = 0
) -> tf.Tensor:
    """
    Design a Type II FIR filter given an arbitrary transfer function

    Parameters
    ----------
    transfer : tf.Tensor
        transfer function to start from, must have at least ten samples
    ntaps : int
        number of taps in the final filter, must be an even number
    window : str, tf.Tensor, optional
        window function to truncate with, default: 'hann'
    ncorner : int, optional
        number of extra samples to zero off at low frequency, default: 0

    Returns
    -------
    impulse : tf.Tensor
        A time domain FIR filter of length ntaps
    """
    if ntaps % 2 != 0:
        raise ValueError("ntaps must be an even number")

    # taper/zero the low-frequency corner, go back to the time domain,
    # window down to ntaps samples and centre the response
    transfer = truncate_transfer(transfer, ncorner=ncorner)
    impulse = tf.signal.irfft(tf.cast(transfer, dtype=tf.complex64))  # inverse real FFT to the time domain
    impulse = truncate_impulse(impulse, ntaps=ntaps, window=window)
    impulse = tf.roll(impulse, shift=int(ntaps/2 - 1), axis=-1)[:,: ntaps]
    return impulse
@tf.function
def fftconvolve_(in1, in2, mode="full"):
    """Convolve two N-dimensional arrays using FFT.

    This function works similarly to scipy.signal.fftconvolve but uses
    TensorFlow's signal processing API.

    NOTE(review): the trailing underscore and the padded ``fftconvolve``
    defined below suggest this is a superseded draft — see the inline
    review notes before relying on it.
    """
    in1 = tf.constant(in1)
    in2 = tf.constant(in2)
    if in1.shape.ndims != in2.shape.ndims:
        raise ValueError("in1 and in2 should have the same dimensionality")
    elif tf.size(in1) == 0 or tf.size(in2) == 0:  # empty arrays
        return tf.constant([])
    # s1/s2 are shape *vectors* (one entry per dimension), not scalars.
    s1 = tf.shape(in1)
    s2 = tf.shape(in2)
    complex_result = (tf.dtypes.as_dtype(in1.dtype).is_complex or
                      tf.dtypes.as_dtype(in2.dtype).is_complex)
    # NOTE(review): both assignments below are dead — `shape` is never read
    # afterwards because the FFTs are taken without an fft_length.
    shape = tf.maximum(s1, s2)
    shape = s1 + s2 - 1
    # Check that input sizes are compatible with 'valid' mode
    if mode == 'valid' and tf.reduce_any(s1 < s2):
        # Convolution is commutative; order doesn't have any effect on output
        in1, s1, in2, s2 = in2, s2, in1, s1
    # NOTE(review): no zero-padding to s1+s2-1 is applied, so the pointwise
    # spectral product below yields a *circular* convolution, unlike the
    # padded `fftconvolve` defined after this function — confirm intent.
    if not complex_result:
        sp1 = tf.signal.rfft(in1)
        sp2 = tf.signal.rfft(in2)
        ret = tf.signal.irfft(sp1 * sp2)
    else:
        sp1 = tf.signal.fft(in1)
        sp2 = tf.signal.fft(in2)
        ret = tf.signal.ifft(sp1 * sp2)
    # NOTE(review): wrapping the shape vectors as [start] / [s1] makes the
    # tf.slice begin/size arguments rank-2 for inputs of rank > 1 — the
    # cropping below looks correct only for 1-D inputs; verify before use.
    if mode == "full":
        return ret
    elif mode == "same":
        start = s1 // 2
        return tf.slice(ret, [start], [s1])
    elif mode == "valid":
        start = s2 - 1
        return tf.slice(ret, [start], [s1 - s2 + 1])
    else:
        raise ValueError(
            "acceptable mode flags are 'valid', 'same', or 'full'"
        )
@tf.function
def _centered(arr, newsize):
    """Return the centred, length-``newsize`` slice along the last axis.

    A 1-D input is first promoted to 2-D (a batch of one) so the result
    matches the batched layout produced by `fftconvolve`.
    """
    if len(arr.shape) == 1:
        arr = tf.expand_dims(arr, 0)
    # Symmetric crop: drop (len - newsize) // 2 samples from the front.
    offset = (arr.shape[-1] - newsize) // 2
    return arr[..., offset:offset + newsize]
@tf.function
def fftconvolve(in1, in2, mode="full"):
    """Linear convolution along the last axis via zero-padded real FFTs.

    ``mode`` follows scipy.signal.fftconvolve: 'full' keeps all n1 + n2 - 1
    samples, 'same' crops to the length of ``in1``, and 'valid' keeps only
    the fully-overlapping part.
    """
    n1 = tf.shape(in1)[-1]
    n2 = tf.shape(in2)[-1]
    full_len = n1 + n2 - 1
    # Pointwise product in the frequency domain == convolution in time;
    # padding both transforms to full_len makes the convolution linear.
    spectrum = tf.signal.rfft(in1, [full_len]) * tf.signal.rfft(in2, [full_len])
    full = tf.signal.irfft(spectrum, [full_len])
    if mode == "full":
        return full
    if mode == "same":
        return _centered(full, n1)
    if mode == "valid":
        return _centered(full, n1 - n2 + 1)
    raise ValueError("Acceptable mode flags are 'valid',"
                     " 'same', or 'full'.")
@tf.function
def convolve(
    timeseries: tf.Tensor,
    fir: tf.Tensor,
    window: str = 'hann'
) -> tf.Tensor:
    """
    Perform convolution between the timeseries and the finite impulse response
    filter.

    The first/last ``ntaps/2`` samples are tapered with the corresponding
    halves of the window before convolving, to suppress filter wrap-around
    artefacts at the edges.

    Parameters
    ----------
    timeseries : tf.Tensor
        The time series data to convolve (batched: shape (b, n)).
    fir : tf.Tensor
        The finite impulse response filter.
    window : str, optional
        Window function to use, default is 'hann'.
    Returns
    -------
    conv : tf.Tensor
        The convolved time series.
    """
    pad = int(tf.math.ceil(fir.shape[-1]/2))
    # NOTE(review): despite the comment below, min(8*ntaps, n) is not
    # rounded to a power of two — confirm whether that was intended.
    nfft = min(8*fir.shape[-1], timeseries.shape[-1])
    if window == 'hann':
        window = tf.signal.hann_window(fir.shape[-1])
    # Extend this section with more cases if more window functions are required.
    else:
        raise ValueError(f"Window function {window} not supported")
    # Taper the leading/trailing `pad` samples with the window's two halves.
    timeseries_new_front = timeseries[:, :pad] * window[:pad]
    timeseries_new_back = timeseries[:, -pad:] * window[-pad:]
    timeseries_new_middle = timeseries[:, pad:-pad]
    timeseries_new = tf.concat([
        timeseries_new_front,
        timeseries_new_middle,
        timeseries_new_back
    ], axis=1)
    conv = tf.zeros_like(timeseries_new)
    if nfft >= timeseries_new.shape[-1]/2:
        # Single-shot convolution of the whole (tapered) series.
        conv = fftconvolve(timeseries_new, fir, mode='same')
    else:
        # Chunked overlap-save style convolution for long inputs.
        # NOTE(review): TensorFlow tensors do not support in-place slice
        # assignment, so the `conv[...] = ...` statements below would raise
        # at runtime; in practice nfft >= n/2 appears to hold and only the
        # branch above is exercised — TODO confirm / rewrite with scatter.
        nstep = nfft - 2*pad
        conv[:, :nfft-pad] = fftconvolve(
            timeseries_new[:, :nfft],
            fir,
            mode='same'
        )[:, :nfft-pad]
        k = nfft - pad
        while k < timeseries_new.shape[-1] - nfft + pad:
            yk = fftconvolve(
                timeseries_new[:, k-pad:k+nstep+pad],
                fir,
                mode='same'
            )
            conv[:, k:k+yk.shape[-1]-2*pad] = yk[:, pad:-pad]
            k += nstep
        conv[:, -nfft+pad:] = fftconvolve(
            timeseries_new[:, -nfft:], fir, mode='same'
        )[:, -nfft+pad:]
    return conv
@tf.function
def whiten(
    timeseries: tf.Tensor,
    background: tf.Tensor,
    sample_rate_hertz: float,
    fftlength: int = 4,
    overlap: int = 2,
    highpass: float = None,
    detrend: str ='constant',
    fduration: int = 2.0,
    window: str = "hann"
) -> tf.Tensor:
    """
    Whiten a timeseries using the given parameters.

    The amplitude spectral density (ASD) of ``background`` is estimated,
    interpolated onto the FFT bins of ``timeseries``, inverted to build an
    FIR whitening filter of duration ``fduration``, and convolved with the
    input.

    Parameters
    ----------
    timeseries : tf.Tensor
        The time series data to whiten (1-D, or batched 2-D).
    background : tf.Tensor
        The time series to use to calculate the asd.
    sample_rate_hertz : float
        The sample rate of the time series data.
    fftlength : int, optional
        Length of the FFT window in seconds, default is 4.
    overlap : int, optional
        Overlap of the FFT windows in seconds, default is 2.
    highpass : float, optional
        Highpass frequency, default is None.
    detrend : str, optional
        NOTE(review): accepted for API compatibility but never used in the
        body below — confirm whether detrending was meant to be applied.
    fduration : int, optional
        Duration of the filter in seconds, default is 2.
    window : str, optional
        Window function to use, default is 'hann'.
    Returns
    -------
    out : tf.Tensor
        The whitened time series (same rank as the input).
    """
    # Check if input is 1D or 2D
    is_1d = len(timeseries.shape) == 1
    if is_1d:
        # If 1D, add an extra dimension
        timeseries = tf.expand_dims(timeseries, axis=0)
        background = tf.expand_dims(background, axis=0)
    dt = 1 / sample_rate_hertz
    # Welch-style PSD of the background (calculate_psd is a helper defined
    # elsewhere in this module — presumably returning (freqs, psd); confirm).
    freqs, psd = calculate_psd(
        background,
        nperseg=int(sample_rate_hertz*fftlength),
        noverlap=int(sample_rate_hertz*overlap),
        sample_rate_hertz=sample_rate_hertz
    )
    asd = tf.sqrt(psd)
    # Frequency resolution of the full-length timeseries and its FFT bins.
    df = 1.0 / (timeseries.shape[-1] / sample_rate_hertz)
    fsamples = tf.range(0, timeseries.shape[-1]//2+1, dtype=tf.float32) * df
    freqs = tf.cast(freqs, tf.float32)
    # Interpolate the ASD from the Welch grid onto those FFT bins
    # (tfp = tensorflow_probability, imported earlier in this module).
    asd = \
        tfp.math.interp_regular_1d_grid(
            fsamples,
            freqs[0],
            freqs[-1],
            asd,
            axis=-1
        )
    # Number of low-frequency bins to zero for the optional highpass.
    ncorner = int(highpass / df) if highpass else 0
    ntaps = int(fduration * sample_rate_hertz)
    # Whitening filter = inverse of the amplitude spectral density.
    transfer = 1.0 / asd
    tdw = fir_from_transfer(transfer, ntaps, window=window, ncorner=ncorner)
    out = convolve(timeseries, tdw)
    # If input was 1D, return 1D
    if is_1d:
        out = out[0]
    # sqrt(2*dt) normalisation — matches gwpy's whiten scaling (confirm).
    return out * tf.sqrt(2.0 * dt) | mrknorman/py_ml_tools | whiten.py | whiten.py | py | 11,656 | python | en | code | 0 | github-code | 90 |
8541715538 | def solve_part_one(puzzle_input):
sorted_bag = sorted(puzzle_input)
sorted_bag.append(sorted_bag[-1] + 3)
current_joltage = 0
one_jolt_differences = 0
three_jolt_differences = 0
for adapter in sorted_bag:
current_difference = adapter - current_joltage
if current_difference == 1:
one_jolt_differences += 1
elif current_difference == 3:
three_jolt_differences += 1
current_joltage = adapter
return one_jolt_differences * three_jolt_differences
def prepare_bag(bag):
    """Return the adapter chain in descending order, bracketed by the
    built-in device joltage (max + 3) at the front and the outlet (0) at
    the end."""
    descending = sorted(bag, reverse=True)
    device_joltage = descending[0] + 3
    return [device_joltage, *descending, 0]
def solve_part_two(puzzle_input):
    """Count the distinct valid adapter arrangements (AoC 2020 day 10,
    part 2) by dynamic programming from the device joltage down to the
    wall outlet (joltage 0)."""
    chain = sorted(puzzle_input, reverse=True)
    chain = [chain[0] + 3, *chain, 0]
    ways = {}
    for joltage in chain:
        if not ways:
            ways[joltage] = 1  # the device itself: exactly one arrangement
        else:
            # An adapter can feed anything 1-3 jolts above it.
            ways[joltage] = sum(ways.get(joltage + step, 0) for step in (1, 2, 3))
    return ways[0]
# Load the real puzzle input.
# NOTE(review): puzzle_input is read but never solved or printed below —
# only the worked examples are checked.
with open("../day10.txt") as f:
    puzzle_input = [int(line) for line in f.read().split("\n") if line]
# Worked examples from the Advent of Code 2020 day 10 statement.
example_input = [16, 10, 15, 5, 1, 11, 7, 19, 6, 12, 4]
example_input_2 = [28, 33, 18, 42, 31, 14, 46, 20, 48, 47, 24, 23, 49, 45, 19, 38, 39, 11, 1, 32, 25, 35, 8, 17, 7, 9, 4, 2, 34, 10, 3]
assert solve_part_one(example_input) == 7 * 5
assert solve_part_one(example_input_2) == 22 * 10
assert solve_part_two(example_input) == 8
assert solve_part_two(example_input_2) == 19208
| AlessandroW/AdventOfCode-2020 | Python/day10.py | day10.py | py | 1,563 | python | en | code | 0 | github-code | 90 |
18108132949 | def insertionSort(A, n, g, cnt):
for i in range(g, n):
v = A[i]
j = i - g
while j >= 0 and A[j] > v:
A[j+g] = A[j]
j = j - g
cnt += 1
A[j+g] = v
return [cnt, A]
def shellSort(A, n):
    """Shell sort the n-element list A using Knuth's gap sequence
    1, 4, 13, 40, ... (largest gap first).

    Returns [number of gaps, gaps in descending order, total shift count,
    sorted list] — the output format expected by the Aizu judge.
    """
    gaps = []
    gap = 1
    while gap <= n:
        gaps.append(gap)
        gap = 3 * gap + 1
    gaps.reverse()
    cnt = 0
    for g in gaps:
        cnt, A = insertionSort(A, n, g, cnt)
    return [len(gaps), gaps, cnt, A]
if __name__ == "__main__":
n = int(input())
A = [int(input()) for _ in range(n)]
m, G, cnt, A = shellSort(A, n)
print(m)
print(*G)
print(cnt)
for i in A:
print(i)
| Aasthaengg/IBMdataset | Python_codes/p02262/s410673889.py | s410673889.py | py | 689 | python | en | code | 0 | github-code | 90 |
34060384396 | print('''#######################
##Grupo da Maioridade##
#######################''')
print('→←'*30)
from datetime import date
branco = '\033[m'
azul = '\033[1;36m'
vermelho = '\033[1;31m'
idade = list()
cont = cont1 = 0
for p in range(1, 8):
nascimento = int(input(f'Digite o ANO DE NASCIMENTO da {p}º pessoa → '))
idade.append(date.today().year - nascimento)
print(idade)
for i in idade:
if i < 18:
cont += 1
else:
cont1 += 1
print(f'{azul}Existem {cont1} pessoas com maior idade.{branco}')
print(f'{vermelho}Existem {cont} pessoas com menor idade.{branco}')
print('→←'*30)
| dougfunny1983/Hello_Word_Python3 | ex054.py | ex054.py | py | 618 | python | pt | code | 0 | github-code | 90 |
1789312425 | # https://leetcode.com/problems/subsets-ii/
# In the array A at every step we have two choices for each element either we can
# ignore the element or we can include the element in our subset
class Solution:
    """LeetCode 90 (Subsets II): return every unique subset of ``nums``,
    which may contain duplicates (see the problem link above)."""
    def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
        """Entry point: sort, then backtrack over include/exclude choices."""
        # sort the array -> needed to skip the repeated elements easily
        nums.sort()
        # resultant list
        subsets=[]
        # call the recursive function
        self.generateSubsets(0,nums,[],subsets)
        return subsets
    def generateSubsets(self,ind,nums,curr,subsets):
        """Backtracking helper: extend `curr` with elements from index `ind`
        onward, recording every partial subset along the way."""
        # append the current-sub-array (a copy) to the resultant array
        subsets.append(curr[:])
        # make the next iterations -> with the curr-ele & without the curr-ele added in the current-sub-array
        for i in range(ind,len(nums)):
            # to skip the repeated elements (only the first duplicate at
            # this depth may start a new branch — input is sorted)
            if ind!=i and nums[i]==nums[i-1]:
                continue
            # include the nums[i] in subset.
            curr.append(nums[i])
            # move onto the next element
            self.generateSubsets(i+1,nums,curr,subsets)
            # exclude nums[i] from the subset (backtrack)
            curr.pop() | danish-faisal/Striver-s-SDE-Sheet | Recursion - Day 9/subsets-2.py | subsets-2.py | py | 1,188 | python | en | code | 0 | github-code | 90 |
18111350189 | li1 = []
li2 = []
for i, s in enumerate(input()):
if s == "\\":
li1.append(i)
elif s == "/" and li1:
j = li1.pop()
c = i - j
while li2 and li2[-1][0] > j:
c += li2[-1][1]
li2.pop()
li2.append((j, c))
if li2:
li3 = list(zip(*li2))[1]
print(sum(li3))
print(len(li3), *li3)
else:
print(0, 0, sep="\n")
| Aasthaengg/IBMdataset | Python_codes/p02266/s368824415.py | s368824415.py | py | 388 | python | en | code | 0 | github-code | 90 |
73133244138 | import requests
from bs4 import BeautifulSoup
from csv import DictWriter, DictReader
from random import choice
response = requests.get('https://quotes.toscrape.com/')
soup = BeautifulSoup(response.text, "html.parser")
excavated_html = soup.find_all( class_ ="quote" )
# capture author, author quote , href of author store in list
captured = [[x.find(class_ = 'text').get_text(), x.find(class_ = 'author').get_text(), x.find('a')['href']] for x in excavated_html ]
print(captured)
#capture each author data and shit
'''
author_response = requests.get('https://quotes.toscrape.com' + chosen_one[2])
soupers = BeautifulSoup(author_response.text, 'html.parser')
author_hints = [f"Author's name starts with {chosen_one[1][0]} and ends with {chosen_one[1][-1::]}", soupers.find( class_ = "author-born-date").get_text(), soupers.find( class_ = "author-born-location").get_text() ]
'''
def _fetch_author_hints(entry):
    """Scrape the author's bio page and build the three hints, in the order
    they are revealed to the player.

    ``entry`` is one [quote_text, author_name, bio_href] triple from the
    module-level ``captured`` list.
    """
    author_response = requests.get('https://quotes.toscrape.com' + entry[2])
    soupers = BeautifulSoup(author_response.text, 'html.parser')
    return [
        f"Author's name starts with {entry[1][0]} and ends with {entry[1][-1::]}",
        f"Author is born on {soupers.find( class_ = 'author-born-date').get_text()}",
        f"Author is born in {soupers.find( class_ = 'author-born-location').get_text()}",
    ]

def guess_the_auth():
    """Quote-guessing game: show a random scraped quote and give the player
    four guesses at the author, revealing one hint per wrong guess.

    Loops until the player declines to continue; a fresh quote (and fresh
    hints) are chosen after each round.
    """
    number_of_guesses = 4
    chosen_one = choice(captured)
    author_hints = _fetch_author_hints(chosen_one)
    while number_of_guesses != 0:
        print(chosen_one[0])
        player_input = input("Guess the author: ")
        if chosen_one[1] != player_input:
            number_of_guesses -= 1
            if number_of_guesses > 0:
                print(f"You still have {number_of_guesses} guesses to go")
                # Bug fix: the original indexed author_hints[number_of_guesses],
                # which hits index 3 on the first wrong guess (IndexError on a
                # 3-element list). Reveal hints in list order instead.
                print(f"Hint: {author_hints[3 - number_of_guesses]}")
            else:
                print("No guesses for u, Game Over")
                cont = input("Do you wanna keep playing? y/n ")
                if cont == "y":
                    number_of_guesses = 4
                    chosen_one = choice(captured)
                    # Bug fix: refresh the hints for the newly chosen quote
                    # (they previously went stale on replay).
                    author_hints = _fetch_author_hints(chosen_one)
                else:
                    print("Thanks for playing")
                    break
        else:
            print('You got it right')
            cont = input("Do you wanna keep playing? y/n ")
            if cont == "y":
                number_of_guesses = 4
                chosen_one = choice(captured)
                author_hints = _fetch_author_hints(chosen_one)
            else:
                print("Thanks for playing")
                break
guess_the_auth()
#Create a file called `scraping_project.py` which, when run, grabs data on every quote from the website http://quotes.toscrape.com
#You can use `bs4` and `requests` to get the data. For each quote you should grab the text of the quote,
#the name of the person who said the quote, and the href of the link to the person's bio. Store all of this information in a list.
#Next, display the quote to the user and ask who said it. The player will have four guesses remaining.
#After each incorrect guess, the number of guesses remaining will decrement.
# If the player gets to zero guesses without identifying the author, the player loses and the game ends. If the player correctly identifies the author, the player wins!
#After every incorrect guess, the player receives a hint about the author.
#For the first hint, make another request to the author's bio page (this is why we originally scrape this data), and tell the player the author's birth date and location.
#The next two hints are up to you!
# Some ideas: the first letter of the author's first name, the first letter of the author's last name, the number of letters in one of the names, etc.
#When the game is over, ask the player if they want to play again. If yes, restart the game with a new quote. If no, the program is complete. | aynfrancesco06/python_scraping_mini_game | webscrape.py | webscrape.py | py | 3,804 | python | en | code | 0 | github-code | 90 |
35989235365 | import sys
sys.path.append('./model/RAFT/core')
import yaml
import random
import torch
import torchmetrics
import lpips
import time
import cv2
import os.path as osp
import numpy as np
import torch.distributed as dist
from argparse import ArgumentParser
from torch.utils.data import DataLoader
from model.MBD import MBD
from model.utils import AverageMeter
from os.path import join
from logger import Logger
from tqdm import tqdm
from raft import RAFT
from utils.utils import InputPadder
loss_fn_alex = lpips.LPIPS(net='alex').to('cuda:0')
def init_seeds(seed=0):
    """Seed Python's, NumPy's and all of torch's RNGs for reproducibility."""
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def validation(local_rank, configs):
    """Build the MBD model and the validation dataloader, then run evaluate().

    Parameters
    ----------
    local_rank : int
        CUDA device index for this (distributed) process.
    configs : dict
        Parsed YAML config; must provide 'dataset_args' and 'num_workers'.
    """
    # Preparation
    torch.backends.cudnn.benchmark = True
    # model init
    model = MBD(local_rank=local_rank, configs=configs)
    # dataset init (BDDataset is bound at import time in __main__ below,
    # to either GenBlur or BAistPP depending on the data directory)
    dataset_args = configs['dataset_args']
    valid_dataset = BDDataset(set_type='valid', **dataset_args)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=1,
                              num_workers=configs['num_workers'],
                              pin_memory=True)
    evaluate(model, valid_loader, local_rank)
@torch.no_grad()
def _gen_flow(img0, img1):
    """Run the global RAFT model on one image pair and return dense flow.

    The pair is padded to RAFT's required size and the estimate is unpadded
    again; the first batch item is returned as an (h, w, 2) numpy array.
    (Flow direction follows RAFT's img0 -> img1 convention — confirm.)
    """
    padder = InputPadder(img0.shape)
    img0, img1 = padder.pad(img0, img1)
    flow_low, flow_up = raft(img0, img1, iters=30, test_mode=True)
    flow_up = padder.unpad(flow_up)
    return flow_up[0].permute(1, 2, 0).cpu().numpy()
def gen_flow(img_ref, img_tgt):
    """Quantise the motion between two frames into a per-pixel trend map.

    Estimates the backward flow (tgt -> ref) with RAFT, negates it, rescales
    by the module-level ``flow_ratio``, and reduces each pixel's motion to a
    coarse direction in {-1, 0, +1} per axis (thresholded by the module-level
    ``threshold``).

    Returns
    -------
    torch.Tensor of shape (1, 2, h, w), float values in {-1, 0, 1}.
    """
    flow = _gen_flow(img_tgt, img_ref) # backward flow
    flow = flow * (-1.)
    size = (int(flow_ratio * flow.shape[1]), int(flow_ratio * flow.shape[0]))
    # ! resizing flow requires scaling the vectors by the same ratio
    flow = flow_ratio * cv2.resize(flow, size, interpolation=cv2.INTER_AREA)
    # Split x / y components (even / odd channels) and average each group.
    trend_x = flow[:, :, 0::2]
    trend_y = flow[:, :, 1::2]
    trend_x = np.mean(trend_x, axis=-1, keepdims=True)
    trend_y = np.mean(trend_y, axis=-1, keepdims=True)
    # Zero out pixels whose flow magnitude is below the global threshold.
    trend_x_temp = trend_x.copy()
    trend_y_temp = trend_y.copy()
    trend_x[np.sqrt((trend_x_temp ** 2) + (trend_y_temp ** 2)) < threshold] = 0
    trend_y[np.sqrt((trend_x_temp ** 2) + (trend_y_temp ** 2)) < threshold] = 0
    # Quantise the surviving components to their signs.
    trend_x[trend_x > 0] = 1
    trend_x[trend_x < 0] = -1
    trend_y[trend_y > 0] = 1
    trend_y[trend_y < 0] = -1
    # Push pixels with one zero axis onto diagonals (note: the trend_y rules
    # deliberately see the already-updated trend_x).
    trend_x[(trend_x == 0) & (trend_y == 1)] = 1
    trend_x[(trend_x == 0) & (trend_y == -1)] = -1
    trend_y[(trend_y == 0) & (trend_x == 1)] = -1
    trend_y[(trend_y == 0) & (trend_x == -1)] = 1
    trend = np.concatenate([trend_x, trend_y], axis=-1)
    trend = trend.astype(np.int8)
    # (h, w, 2) int8 -> (1, 2, h, w) float tensor.
    return torch.from_numpy(trend).permute(2, 0, 1)[None].float()
@torch.no_grad()
def evaluate(model, valid_loader, local_rank):
    """Run one validation pass and log averaged loss / PSNR / SSIM / LPIPS."""
    # Preparation
    torch.cuda.empty_cache()
    device = torch.device("cuda", local_rank)
    loss_meter = AverageMeter()
    psnr_meter = AverageMeter()
    ssim_meter = AverageMeter()
    lpips_meter = AverageMeter()
    time_stamp = time.time()
    # One epoch validation
    for i, tensor in enumerate(tqdm(valid_loader, total=len(valid_loader))):
        # NOTE(review): two frames are indexed below, so 'inp' appears to be
        # (b, 2, 3, h, w) here, not (b, 1, 3, h, w) as originally commented.
        tensor['inp'] = tensor['inp'].to(device)
        img_ref = tensor['inp'][:, 0]
        img_tgt = tensor['inp'][:, 1]
        trend = gen_flow(img_ref, img_tgt)
        tensor['trend'] = trend.unsqueeze(dim=1).to(device) # (b, 1, 2, h, w)
        tensor['inp'] = img_tgt.unsqueeze(dim=1).to(device) # (b, 1, 3, h, w)
        tensor['gt'] = tensor['gt'][:, 7:].to(device) # (b, num_gts, 3, h, w)
        out_tensor = model.update(inp_tensor=tensor, training=False)
        pred_imgs = out_tensor['pred_imgs'] # pred_imgs shape (b, num_gts, 3, h, w)
        gt_imgs = out_tensor['gt_imgs'] # gt_imgs shape (b, num_gts, 3, h, w)
        loss = out_tensor['loss']
        # Record loss and metrics (LPIPS net lives on cuda:0 — see module top)
        pred_imgs = pred_imgs.to('cuda:0')
        gt_imgs = gt_imgs.to('cuda:0')
        # Score only frames 0, 3, 6 of the predicted sequence (both ends and
        # the midpoint — assumes 7 predicted frames; confirm).
        pred_imgs = pred_imgs[:, [0, 3, 6]]
        gt_imgs = gt_imgs[:, [0, 3, 6]]
        b, num_gts, c, h, w = pred_imgs.shape
        pred_imgs = pred_imgs.reshape(num_gts * b, c, h, w)
        gt_imgs = gt_imgs.reshape(num_gts * b, c, h, w)
        psnr_val = torchmetrics.functional.psnr(pred_imgs, gt_imgs, data_range=255)
        ssim_val = torchmetrics.functional.ssim(pred_imgs, gt_imgs, data_range=255)
        # Map [0, 255] to [-1, 1], the input range LPIPS expects.
        pred_imgs = (pred_imgs - (255. / 2)) / (255. / 2)
        gt_imgs = (gt_imgs - (255. / 2)) / (255. / 2)
        lpips_val = loss_fn_alex(pred_imgs, gt_imgs)
        psnr_meter.update(psnr_val, num_gts * b)
        ssim_meter.update(ssim_val, num_gts * b)
        lpips_meter.update(lpips_val.mean().detach(), num_gts * b)
        loss_meter.update(loss.item(), b)
        # print('{}/{}'.format(i + 1, len(valid_loader)), psnr_meter.avg, ssim_meter.avg, lpips_meter.avg)
    # Ending of validation
    eval_time_interval = time.time() - time_stamp
    msg = 'eval time: {:.4f} sec, loss: {:.4f}, psnr: {:.4f}, ssim: {:.4f}, lpips: {:.4f}'.format(
        eval_time_interval, loss_meter.avg, psnr_meter.avg, ssim_meter.avg, lpips_meter.avg
    )
    logger(msg, prefix='[valid]')
    logger.close()
if __name__ == '__main__':
    # load args & configs
    parser = ArgumentParser(description='Blur Decomposition')
    parser.add_argument('--local_rank', default=0, type=int, help='local rank')
    parser.add_argument('--log_dir', default='logs', help='path of log')
    parser.add_argument('--log_name', default='valid', help='log name')
    parser.add_argument('--resume_dir', help='path of checkpoint dir', required=True)
    parser.add_argument('--data_dir', nargs='+', required=True)
    parser.add_argument('--num_iters', type=int, default=1, help='number of iters')
    parser.add_argument('--verbose', action='store_true', help='whether to print out logs')
    # arguments for RAFT
    parser.add_argument('--model_path', default='./checkpoints/raft-sintel.pth', help="restore checkpoint")
    parser.add_argument('--small', action='store_true', help='use small model')
    parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
    parser.add_argument('--alternate_corr', action='store_true', help='use efficent correlation implementation')
    args = parser.parse_args()
    # The run config is read from the checkpoint directory being resumed.
    args.config = osp.join(args.resume_dir, 'cfg.yaml')
    with open(args.config) as f:
        configs = yaml.full_load(f)
    configs['resume_dir'] = args.resume_dir
    configs['num_iterations'] = args.num_iters
    device = torch.device("cuda", args.local_rank)
    # Module-level globals consumed by gen_flow() above.
    flow_ratio = 1
    threshold = 0.5 * flow_ratio
    # Import the blur decomposition dataset matching the data directory.
    is_gen_blur = True
    for root_dir in configs['dataset_args']['root_dir']:
        if 'b-aist++' in root_dir:
            is_gen_blur = False
    if is_gen_blur:
        from data.dataset import GenBlur as BDDataset
        configs['dataset_args']['aug_args']['valid']['image'] = {}
    else:
        from data.dataset import BAistPP as BDDataset
        configs['dataset_args']['aug_args']['valid']['image']['NearBBoxResizedSafeCrop']['max_ratio'] = 0
    configs['dataset_args']['root_dir'] = args.data_dir
    configs['dataset_args']['num_past'] = 1
    configs['dataset_args']['num_fut'] = 0
    configs['dataset_args']['use_trend'] = False
    # DDP init
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(args.local_rank)
    rank = dist.get_rank()
    init_seeds(seed=rank)
    # Logger init
    logger = Logger(file_path=join(args.log_dir, '{}.txt'.format(args.log_name)),
                    verbose=args.verbose)
    # Frozen RAFT flow model used by _gen_flow() above.
    raft = torch.nn.DataParallel(RAFT(args))
    raft.load_state_dict(torch.load(args.model_path))
    raft = raft.to(device)
    raft.eval()
    # Run validation (despite this script's name, no training happens here).
    validation(local_rank=args.local_rank, configs=configs)
    # Tear down the process group
    dist.destroy_process_group()
| zzh-tech/Animation-from-Blur | valid_video.py | valid_video.py | py | 7,890 | python | en | code | 57 | github-code | 90 |
5977225561 | import pygame.font # pygame.font可将文本渲染到屏幕
class Button:
    """A fixed-size rectangular button with a centred text label, rendered
    with pygame.

    ``ai_settings`` is accepted for interface compatibility with the other
    game objects but is not used here.
    """

    def __init__(self, ai_settings, screen, msg):
        """Initialise the button's attributes and pre-render its label."""
        self.screen = screen
        self.screen_rect = screen.get_rect()

        # Fixed dimensions and colour scheme.
        self.width, self.height = 200, 50
        self.button_color = (0, 128, 255)
        self.text_color = (255, 255, 255)
        self.font = pygame.font.SysFont(None, 48)

        # Build the button's rect and centre it on the screen.
        self.rect = pygame.Rect(0, 0, self.width, self.height)
        self.rect.center = self.screen_rect.center

        self.prep_msg(msg)

    def prep_msg(self, msg):
        """Render ``msg`` into an image and centre that image on the button."""
        self.msg_image = self.font.render(msg, True, self.text_color, self.button_color)
        self.msg_image_rect = self.msg_image.get_rect()
        self.msg_image_rect.center = self.rect.center

    def draw_button(self):
        """Draw the colour-filled button, then blit its label on top."""
        self.screen.fill(self.button_color, self.rect)
        self.screen.blit(self.msg_image, self.msg_image_rect)
| xiaocong-Fu/alien_WarGame | pyfile/pyGame/button.py | button.py | py | 1,654 | python | zh | code | 0 | github-code | 90 |
23045587443 | """
step09.py: t-SNE with R2-score data
"""
import argparse
import matplotlib
import matplotlib.pyplot
import pandas
import seaborn
import sklearn.manifold
import step00
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("input", type=str, help="Input TAR.gz file")
    parser.add_argument("output", type=str, help="Output PNG file")
    parser.add_argument("--cpus", type=int, default=1, help="CPUS to use")
    args = parser.parse_args()
    if not args.output.endswith(".png"):
        raise ValueError("Output must end with .PNG!!")
    elif args.cpus < 1:
        raise ValueError("CPUS must be greater than zero!!")
    data = step00.read_pickle(args.input)
    # Use the first "Clinical_*" column as the hue/size feature and drop it
    # from the matrix that actually gets embedded.
    feature = list(filter(lambda x: x.startswith("Clinical_"), list(data.columns)))[0]
    feature_data = list(data[feature])
    del data[feature]
    print(data)
    tsne_data = pandas.DataFrame(sklearn.manifold.TSNE(n_components=2, init="pca", random_state=0, method="exact", n_jobs=args.cpus).fit_transform(data), columns=["TSNE1", "TSNE2"])
    # Standardise both embedding axes to zero mean / unit variance.
    # NOTE(review): only sklearn.manifold is imported at the top, so
    # sklearn.preprocessing here relies on a transitive import — confirm.
    for column in tsne_data.columns:
        tsne_data[column] = sklearn.preprocessing.scale(tsne_data[column])
    # Re-attach the original sample index and the hue feature for plotting.
    tsne_data["index"] = list(data.index)
    tsne_data.set_index("index", inplace=True)
    tsne_data[feature] = feature_data
    print(tsne_data)
    seaborn.set(context="poster", style="whitegrid")
    fig, ax = matplotlib.pyplot.subplots(figsize=(24, 24))
    seaborn.scatterplot(data=tsne_data, x="TSNE1", y="TSNE2", hue=feature, size=feature, ax=ax, legend="brief")
    fig.savefig(args.output)
    matplotlib.pyplot.close(fig)
| CompbioLabUnist/dream_challenge-anti-pd1_response | jwlee230/Program/Python/step09.py | step09.py | py | 1,604 | python | en | code | 0 | github-code | 90 |
18212566819 | import sys, math, itertools, collections, bisect
input = lambda: sys.stdin.buffer.readline().rstrip().decode('utf-8')
inf = float('inf') ;mod = 10**9+7
mans = inf ;ans = 1 ;count = 0 ;pro = 1
def gcd(a, b):
    """Greatest common divisor of a and b via iterative Euclid.

    Mirrors the classic remainder loop; note that unlike math.gcd the
    result carries the sign of the last non-zero remainder chain.
    """
    while b:
        remainder = a % b
        a = b
        b = remainder
    return a
def lcm(m, n):
    """Least common multiple of m and n, computed via their gcd."""
    divisor = gcd(m, n)
    return m * n // divisor
n = int(input())
def make_key0(a,b):
    """Reduce the direction vector (a, b) to a canonical hashable key.

    Axis-aligned vectors map to (0, 1) / (1, 0).  Otherwise the vector is
    divided by gcd(|a|, |b|) and tagged with a sign flag (``hugou`` is
    Japanese for "sign"): +1 when a and b have opposite signs, -1 when they
    share a sign — so all positive scalings of a vector get the same key.

    NOTE(review): (0, 0) would divide by zero here; callers are expected to
    filter it out beforehand (as the counting loop below does).
    """
    if a == 0 and b != 0:
        return(0,1)
    elif a != 0 and b == 0:
        return(1,0)
    if (a < 0 and b > 0) or (a > 0 and b < 0):hugou = 1
    else: hugou = -1
    a = abs(a)
    b = abs(b)
    G = gcd(a,b)
    # a and b are non-negative now; the original sign lives in hugou.
    return (hugou*(abs(a)//G),abs(b)//G)
C0 = collections.Counter()
for i in range(n):
a,b = map(int,input().split())
if (a,b) == (0,0) :count += 1
else:
a0,b0 = make_key0(a,b)
C0[(a0,b0)] += 1
S = set()
for key,val in C0.items():
a0,b0 = key
S.add((a0,b0))
a1,b1 = make_key0(b0,a0)
if (a1,b1) in S:continue
ans *= pow(2,val,mod) + pow(2,C0[(a1,b1)],mod) -1
ans %= mod
ans += count
print((ans-1)%mod) | Aasthaengg/IBMdataset | Python_codes/p02679/s800664858.py | s800664858.py | py | 971 | python | en | code | 0 | github-code | 90 |
35599879467 | # This file is part of Slice2Print.
#
# Slice2Print is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Slice2Print is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Slice2Print. If not, see <http://www.gnu.org/licenses/>.
import wx
import slicer
class SlicerDialog(wx.Dialog):
    """Modal progress dialog that slices ``model`` as soon as it is shown.

    Ends its modal loop with wx.ID_OK on success (result retrievable via
    get_sliced_model()) or wx.ID_CANCEL when slicing failed or the user
    cancelled.  The gauge range is 120: the slicer drives 0-~100 via the
    update() callback, and the two post-processing phases report 110/120.
    """
    def __init__(self, parent, model, slicer_config):
        wx.Dialog.__init__(self, parent, -1, "Slicing...", style=wx.CAPTION)
        # Set by on_cancel(); polled by the slicer through update().
        self.cancel = False
        self.sliced_model = None
        self.model = model
        self.slicer_config = slicer_config
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.staticText = wx.StaticText(self, -1, "")
        sizer.Add(self.staticText, 0, wx.EXPAND | wx.ALL, 7)
        self.gauge = wx.Gauge(self, -1, 120)
        self.gauge.SetValue(0)
        sizer.Add(self.gauge, 0, wx.EXPAND | wx.LEFT | wx.RIGHT, 7)
        btn_sizer = wx.BoxSizer(wx.VERTICAL)
        btn_cancel = wx.Button(self, 1, "Cancel")
        btn_sizer.Add(btn_cancel, 0, wx.ALIGN_RIGHT)
        sizer.Add(btn_sizer, 0, wx.EXPAND | wx.ALL, 7)
        self.Bind(wx.EVT_BUTTON, self.on_cancel, id=btn_cancel.GetId())
        self.Bind(wx.EVT_SHOW, self.on_show)
        self.SetSizer(sizer)
        self.Layout()
        self.Fit()
        self.SetSize((400, -1))
        self.CenterOnParent(wx.BOTH)
    def on_cancel(self, event):
        """Record the cancel request; the slicer sees it on its next update()."""
        self.cancel = True
    def on_show(self, event):
        """Kick off slicing once the dialog becomes visible.

        wx.CallAfter defers slice() until after the show event completes, so
        the dialog is painted before the (long-running) work starts.
        """
        if event.IsShown():
            wx.CallAfter(self.slice)
    def slice(self):
        """Run the slicer, post-process the result, and end the modal loop."""
        s = slicer.Slicer(self.slicer_config, self.model, self.update)
        self.sliced_model = s.slice()
        if self.sliced_model:
            self.update(110, "Creating perimeters")
            self.sliced_model.create_perimeters()
            self.update(120, "Creating top and bottom infill")
            self.sliced_model.create_infill()
            self.EndModal(wx.ID_OK)
        else:
            self.EndModal(wx.ID_CANCEL)
    def update(self, progress, msg):
        """Progress callback for the slicer.

        Updates the gauge/label, pumps pending UI events (wx.Yield) so the
        Cancel button stays responsive, and returns True when cancellation
        has been requested.
        """
        self.gauge.SetValue(progress)
        self.staticText.SetLabel(msg)
        wx.Yield()
        return self.cancel
    def get_sliced_model(self):
        """Return the sliced model (None if slicing failed or was cancelled)."""
        return self.sliced_model
| mprochnow/Slice2Print | slice2print/ui/dialog.py | dialog.py | py | 2,606 | python | en | code | 2 | github-code | 90 |
18354200699 | import sys
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)
INF = 1 << 60
MOD = 1000000007
def main():
    """Print the smallest i such that T is a subsequence of the first i
    characters of S repeated infinitely (1-based), or -1 if some letter of
    T never occurs in S.
    """
    S = readline().strip()
    T = readline().strip()
    N = len(S)
    M = len(T)  # NOTE(review): M is never used below.
    # Doubling S lets "next occurrence" searches wrap into the next copy.
    SS = S + S
    # A[c] == True iff letter c occurs in S; reject T otherwise.
    A = [False] * 26
    for c in S:
        A[ord(c) - 97] = True
    for c in T:
        if not A[ord(c) - 97]:
            print(-1)
            return
    # B[c][p] = index in SS of the first occurrence of chr(c + 97) strictly
    # after position p; a value >= N means it wraps into the next copy of S.
    B = [[-1] * N for _ in range(26)]
    for c in set(T):
        code = ord(c) - 97
        row = B[code]
        left = right = 0
        while True:
            right = SS.find(c, left + 1)
            if right < N:
                row[left:right] = [right] * (right - left)
                left = right
            else:
                row[left:N] = [right] * (N - left)
                break
    # Walk T through the infinite repetition, accumulating the 0-based
    # index `ans`; `i` tracks the position modulo N.
    ans = i = S.find(T[0])
    for c in T[1:]:
        j = B[ord(c) - 97][i]
        ans += j - i
        if j < N:
            i = j
        else:
            i = j - N
    # Convert the final 0-based index to a length.
    print(ans + 1)
    return
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p02937/s324758280.py | s324758280.py | py | 1,072 | python | en | code | 0 | github-code | 90 |
40374763886 | from pymongo import MongoClient
client = MongoClient("localhost", 27017)
db = client.WDMOV
ewi_building = [4.373502, 51.998847]
runtimes = []
for i in range(0, 200):
runtime = db.stops.find({
"loc": {
"$near": {
"$geometry": {
"type": "Point",
"coordinates": ewi_building
}
}
}
}).limit(20).explain()["executionStats"]["executionTimeMillis"]
runtimes.append(runtime)
# On average around 1.5ms
print("On average the query takes: %f" % (sum(runtimes)/len(runtimes)))
| 8uurg/WDMOV | mongo/static/closest-stop.py | closest-stop.py | py | 598 | python | en | code | 0 | github-code | 90 |
17825798535 | import web3
from .tokens import eth, dai
w3 = web3.Web3(web3.Web3.HTTPProvider(f"https://mainnet.infura.io/v3/{os.environ['INFURA_KEY']}"))
with open('offchain/uniswap-v3/quoter.abi', 'r') as f:
quoter_abi = f.read()
uniswap_v3_quoter = w3.eth.contract(address="0xb27308f9F90D607463bb33eA1BeBb41C27CE5AB6", abi=quoter_abi)
print(uniswap_v3_quoter.functions.quoteExactInputSingle(dai, eth, 10**6, 10**18, 0).call())
| carterjfulcher/flashloan-arbitrage | offchain/main.py | main.py | py | 421 | python | en | code | 0 | github-code | 90 |
18303096079 | import sys
N = int(input())
# 5^26 > 10^18
if N % 2 == 1:
print(0)
sys.exit()
ans = 0
mul = 1
for i in range(1, 27):
mul *= 5
add = N // (2 * mul)
if add == 0: break
ans += add
print(ans) | Aasthaengg/IBMdataset | Python_codes/p02833/s988045750.py | s988045750.py | py | 214 | python | en | code | 0 | github-code | 90 |
18004212869 |
n = int(input())
a = list(map(int, input().split(" ")))
# Case 1: force the prefix sums to alternate positive, negative, ...
# (positive at even indices, negative at odd; "sei" = positive,
# "fu" = negative, "guu" = even, "ki" = odd in the original notes).
res1 = 0
sum = 0  # NOTE(review): shadows the builtin sum()
for i in range(n):
    sum += a[i]
    if sum <= 0 and i%2 == 0:
        # Prefix sum must be positive here: pay |sum| + 1 to push it to +1.
        res1 += abs(sum) + 1
        sum = 1
    elif sum >= 0 and i%2 == 1:
        # Prefix sum must be negative here: pay |sum| + 1 to push it to -1.
        res1 += abs(sum) + 1
        sum = -1
# ("hutuunitoku" — original author's note, presumably "solve it normally")
# Case 2: the opposite parity — negative, positive, negative, ...
res2 = 0
sum = 0
for i in range(n):
    sum += a[i]
    if sum <= 0 and i%2 == 1:
        res2 += abs(sum) + 1
        sum = 1
    elif sum >= 0 and i%2 == 0:
        res2 += abs(sum) + 1
        sum = -1
# The answer is the cheaper of the two alternation patterns.
print(min(res1, res2))
| Aasthaengg/IBMdataset | Python_codes/p03739/s746234668.py | s746234668.py | py | 616 | python | en | code | 0 | github-code | 90 |
29442068573 | def main():
n = int(input())
for _ in range(n):
t = int(input())
arr = list(map(int, input().split()))
index = arr.index(min(arr))
arr[index] += 1
multi = 1
for i in arr:
multi *= i
print(multi)
if __name__ == "__main__":
main() | Alexey-Home/Codeforses | 800/1873B.py | 1873B.py | py | 309 | python | en | code | 0 | github-code | 90 |
18381407359 | import sys
def input(): return sys.stdin.readline().rstrip()
def main():
    """Print a connected graph on n vertices with (apparently) exactly k
    unordered vertex pairs left non-adjacent, or -1 if impossible: a star
    leaves (n-1)(n-2)/2 such pairs, and each extra edge removes one —
    confirm against the original problem statement.
    """
    n, k = map(int, input().split())
    # Maximum number of non-adjacent pairs any connected graph can have.
    full = (n-1)*(n-2)//2
    if k > full:
        print(-1)
    else:
        # Number of extra edges needed beyond the star.
        num = full-k
        print(num+n-1)
        # Star from vertex 1 guarantees connectivity (n-1 edges).
        for i in range(2, n+1):
            print(1, i)
        # Add `num` further edges among vertices 2..n.
        for i in range(2, n):
            for j in range(i+1, n+1):
                if num == 0:
                    # NOTE(review): exit() terminates the whole script here —
                    # fine for a contest script, surprising in a library.
                    exit()
                print(i, j)
                num -= 1
num -= 1
if __name__ == '__main__':
main() | Aasthaengg/IBMdataset | Python_codes/p02997/s013134525.py | s013134525.py | py | 499 | python | en | code | 0 | github-code | 90 |
22048681513 | ################################################################################
# Run 'sh init_sner' in the terminal before running this script #
################################################################################
import os
import PyPDF2
import textract
from os import walk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.tag import StanfordNERTagger
################################ PDF To Text ###################################
dir_name = 'Financial Reports/29BNP PARIBAS'
dir_path = os.path.join(os.path.abspath(os.pardir), dir_name)
pdf_files = []
for (dirpath, dirnames, filenames) in walk(dir_path):
pdf_files.extend(filenames)
break
# change here to decide which file to loop
filename = os.path.join(dir_path, pdf_files[0])
pdfFileObj = open(filename,'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
num_pages = pdfReader.numPages
count = 0
text = ""
# To convert simple, text-based PDF files into text readable by Python
while count < num_pages:
pageObj = pdfReader.getPage(count)
count +=1
text += pageObj.extractText()
if text != "":
text = text
else:
# To convert non-trivial, scanned PDF files into text readable by Python
text = textract.process(filename, method='tesseract', language='eng')
tokens = word_tokenize(text)
punctuations = ['(',')',';',':','[',']',',']
stop_words = stopwords.words('english')
keywords = [word for word in tokens if not word in stop_words and not word in punctuations]
################################################################################
############################## Get All Names ###################################
model_name = 'stanford-ner/english.all.3class.distsim.crf.ser.gz'
jar_name = 'stanford-ner/stanford-ner.jar'
model_path = os.path.join(os.path.abspath(os.curdir), model_name)
jar_path = os.path.join(os.path.abspath(os.curdir), jar_name)
st = StanfordNERTagger(model_path, jar_path, encoding='utf8')
tags = st.tag(keywords)
people = [tag for tag in tags if tag[1]=='PERSON']
################################################################################
| polly63/NLP_Sentiment_Analysis | keyword_extraction.py | keyword_extraction.py | py | 2,143 | python | en | code | 9 | github-code | 90 |
15992778740 | import os
import torch
import os.path as osp
import torch.nn as nn
import torch.nn.functional as F
from lib.core.config import BASE_DATA_DIR
from lib.models.spin import Regressor
from torch.autograd import Variable ##
class TemporalAttention(nn.Module):
    """Soft attention over a short frame sequence.

    Projects each frame's feature vector down to 256 dims, flattens the
    sequence, and produces one normalized weight per frame (softmax over
    the sequence axis).
    """

    def __init__(self, attention_size, seq_len, non_linearity='tanh'):
        super(TemporalAttention, self).__init__()
        # ReLU only when explicitly requested; Tanh is the default.
        activation = nn.ReLU() if non_linearity == "relu" else nn.Tanh()
        self.fc = nn.Linear(attention_size, 256)
        self.relu = nn.ReLU()
        self.attention = nn.Sequential(
            nn.Linear(256 * seq_len, 256),
            activation,
            nn.Linear(256, 256),
            activation,
            nn.Linear(256, seq_len),
            activation,
        )
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """x: (batch, seq_len, attention_size) -> (batch, seq_len) weights summing to 1."""
        num_samples = x.shape[0]
        flattened = self.fc(x).view(num_samples, -1)
        return self.softmax(self.attention(flattened))
class TemporalEncoder(nn.Module):
    """Non-local (self-attention) temporal block plus hierarchical attentive
    feature integration over three temporal scales.

    NOTE(review): the hard-coded frame indices in forward() (4..12, centre 8)
    assume a 16-frame input window, and the inner TemporalAttention assumes
    2048-dim features -- confirm seqlen/channel at the call site.
    """

    def __init__(self, channel):
        super(TemporalEncoder, self).__init__()
        self.inter_channel = channel // 2
        # 1x1 convs producing the phi/theta/g embeddings of the non-local block.
        self.conv_phi = nn.Conv1d(in_channels=channel, out_channels=self.inter_channel, kernel_size=1, stride=1,padding=0, bias=False)
        self.conv_theta = nn.Conv1d(in_channels=channel, out_channels=self.inter_channel, kernel_size=1, stride=1, padding=0, bias=False)
        self.conv_g = nn.Conv1d(in_channels=channel, out_channels=self.inter_channel, kernel_size=1, stride=1, padding=0, bias=False)
        self.softmax = nn.Softmax(dim=-1)
        self.conv_mask = nn.Conv1d(in_channels=self.inter_channel, out_channels=channel, kernel_size=1, stride=1, padding=0, bias=False)
        # Fuses the raw-feature affinity map with the embedded affinity map (2 channels -> 1).
        self.conv_mask_forR = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=1, stride=1, padding=0, bias=False)
        self.attention = TemporalAttention(attention_size=2048, seq_len=3, non_linearity='tanh')

    def forward(self, x, is_train=False):
        """x: (N, T, C) frame features.

        Returns (out, scores, out_): the fused current-frame feature (N, C),
        the final 3-way attention scores (N, 3), and the per-frame refined
        features (N, C, T). At train time `out` is repeated 3x along dim 1.
        """
        # NTF -> NFT
        x = x.permute(0,2,1)
        b, c, thw = x.size()  # N x 2048 x 16
        # Affinity of the raw features with themselves.
        x_ = x.permute(0, 2, 1)
        xx = torch.matmul(x_, x)
        xx = self.softmax(xx)  # N x 16 x 16
        x_phi = self.conv_phi(x).view(b, self.inter_channel, -1)  # N x 2048/2 x 16
        x_theta = self.conv_theta(x).view(b, self.inter_channel, -1).permute(0, 2, 1).contiguous()  # N x 16 x 2048/2
        x_g = self.conv_g(x).view(b, self.inter_channel, -1).permute(0, 2, 1).contiguous()  # N x 16 x 2048/2
        mul_theta_phi = torch.matmul(x_theta, x_phi)
        mul_theta_phi = self.softmax(mul_theta_phi)  # N x 16 x 16
        # Bug fix: the original cat(dim=0).view(b, 2, T, T) interleaved affinity
        # maps from *different* batch items whenever b > 1; stacking on a new
        # channel dim pairs each item's two maps correctly.
        R = torch.stack((xx, mul_theta_phi), dim=1)  # N x 2 x 16 x 16
        Y = self.conv_mask_forR(R).reshape(b, thw, thw)
        Y = self.softmax(Y)  # N x 16 x 16
        mul_theta_phi_g = torch.matmul(Y, x_g)  # N x 16 x 2048/2
        mul_theta_phi_g = mul_theta_phi_g.permute(0,2,1).contiguous().view(b,self.inter_channel, thw)  # N x 2048/2 x 16
        mask = self.conv_mask(mul_theta_phi_g)  # N x 2048 x 16
        out_ = mask + x  # residual connection
        # HAFI: three-frame neighbourhoods around the "before" (4-6),
        # "current" (7-9) and "after" (10-12) positions of the 16-frame window.
        y_cur_2 = out_[:,:,8]
        y_cur_1 = out_[:,:,7]
        y_cur_3 = out_[:,:,9]
        y_bef_2 = out_[:,:,5]
        y_bef_1 = out_[:,:,4]
        y_bef_3 = out_[:,:,6]
        y_aft_2 = out_[:,:,11]
        y_aft_1 = out_[:,:,10]
        y_aft_3 = out_[:,:,12]
        y_cur_ = torch.cat((y_cur_1[:, None, :], y_cur_2[:, None, :], y_cur_3[:, None, :]), dim=1)
        y_bef_ = torch.cat((y_bef_1[:, None, :], y_bef_2[:, None, :], y_bef_3[:, None, :]), dim=1)
        y_aft_ = torch.cat((y_aft_1[:, None, :], y_aft_2[:, None, :], y_aft_3[:, None, :]), dim=1)
        # First level: attention-weighted fusion inside each 3-frame group.
        scores = self.attention(y_cur_)
        y_cur = torch.mul(y_cur_, scores[:, :, None])
        y_cur = torch.sum(y_cur, dim=1)
        scores = self.attention(y_bef_)
        y_bef = torch.mul(y_bef_, scores[:, :, None])
        y_bef = torch.sum(y_bef, dim=1)
        scores = self.attention(y_aft_)
        y_aft = torch.mul(y_aft_, scores[:, :, None])
        y_aft = torch.sum(y_aft, dim=1)
        # Second level: fuse the three group representatives.
        y = torch.cat((y_bef[:, None, :], y_cur[:, None, :], y_aft[:, None, :]), dim=1)
        scores = self.attention(y)
        out = torch.mul(y, scores[:, :, None])
        out = torch.sum(out, dim=1)  # N x 2048
        if not is_train:
            return out, scores, out_
        else:
            # Training path expects the fused feature repeated for 3 frames.
            y = torch.cat((out[:, None, :], out[:, None, :], out[:, None, :]), dim=1)
            return y, scores, out_
class MPSnet(nn.Module):
    """MPS-Net: temporal encoder (MoCA + HAFI) followed by a SPIN regressor
    that predicts SMPL camera, pose and shape parameters.

    n_layers / hidden_size / add_linear / bidirectional / use_residual are
    kept for signature compatibility but are unused here.
    """

    def __init__(
        self,
        seqlen,
        batch_size=64,
        n_layers=1,
        hidden_size=2048,
        add_linear=False,
        bidirectional=False,
        use_residual=True,
        pretrained=osp.join(BASE_DATA_DIR, 'spin_model_checkpoint.pth.tar'),
    ):
        super(MPSnet, self).__init__()
        self.seqlen = seqlen
        self.batch_size = batch_size
        self.nonlocalblock = TemporalEncoder(channel=2048)  # nonlocalblock --> real name: MoCA+HAFI
        # regressor can predict cam, pose and shape params in an iterative way
        self.regressor = Regressor()

        # Warm-start the regressor from a pretrained SPIN checkpoint when available.
        if pretrained and os.path.isfile(pretrained):
            pretrained_dict = torch.load(pretrained)['model']
            self.regressor.load_state_dict(pretrained_dict, strict=False)
            print(f'=> loaded pretrained model from \'{pretrained}\'')

    def forward(self, input, is_train=False, J_regressor=None):
        """input: (N, T, F) per-frame features. Returns SMPL outputs for the
        fused mid-sequence feature plus (at train time) per-frame outputs
        used by the motion-discrepancy loss."""
        # input size NTF
        batch_size, seqlen = input.shape[:2]
        feature, scores, feature_seqlen = self.nonlocalblock(input, is_train=is_train)
        # Idiom fix: torch.autograd.Variable is a deprecated no-op since
        # torch 0.4; plain tensors carry autograd state themselves.
        feature = feature.reshape(-1, feature.size(-1))
        # NOTE(review): feature_seqlen is (N, C, T); reshape(-1, C) flattens in
        # (C, T) memory order rather than per-frame -- a permute(0, 2, 1) before
        # the reshape may have been intended. Kept as-is to match the trained
        # checkpoints; confirm against the training code.
        feature_seqlen = feature_seqlen.reshape(-1, feature_seqlen.size(1))

        smpl_output = self.regressor(feature, is_train=is_train, J_regressor=J_regressor)
        smpl_output_Dm = self.regressor(feature_seqlen, is_train=is_train, J_regressor=J_regressor)

        if not is_train:
            for s in smpl_output:
                s['theta'] = s['theta'].reshape(batch_size, -1)
                s['verts'] = s['verts'].reshape(batch_size, -1, 3)
                s['kp_2d'] = s['kp_2d'].reshape(batch_size, -1, 2)
                s['kp_3d'] = s['kp_3d'].reshape(batch_size, -1, 3)
                s['rotmat'] = s['rotmat'].reshape(batch_size, -1, 3, 3)
                s['scores'] = scores
        else:
            # The encoder repeats the fused feature 3x at train time.
            repeat_num = 3
            for s in smpl_output:
                s['theta'] = s['theta'].reshape(batch_size, repeat_num, -1)
                s['verts'] = s['verts'].reshape(batch_size, repeat_num, -1, 3)
                s['kp_2d'] = s['kp_2d'].reshape(batch_size, repeat_num, -1, 2)
                s['kp_3d'] = s['kp_3d'].reshape(batch_size, repeat_num, -1, 3)
                s['rotmat'] = s['rotmat'].reshape(batch_size, repeat_num, -1, 3, 3)
                s['scores'] = scores
        for s_Dm in smpl_output_Dm:
            s_Dm['theta_forDM'] = s_Dm['theta'].reshape(batch_size, seqlen, -1)

        return smpl_output, scores, smpl_output_Dm
| MPS-Net/MPS-Net_release | lib/models/mpsnet.py | mpsnet.py | py | 8,322 | python | en | code | 81 | github-code | 90 |
# Sum every integer in [1, n] whose decimal digit sum lies in [a, b].
n, a, b = map(int, input().split())
total = 0
for value in range(1, n + 1):
    digit_sum = sum(int(ch) for ch in str(value))
    if a <= digit_sum <= b:
        total += value
print(total)
24225375866 | import flask
from flask import Flask
from flask import jsonify
from flask import request
from PIL import Image
import io
import os
from io import BytesIO
import flask
import json
from flask_cors import CORS
import requests
import torch
from fastai import *
from fastai.vision import load_learner
from fastai.vision import open_image
# Flask application setup; CORS is enabled so a browser front end on another
# origin can call the prediction endpoints.
app = Flask(__name__)
CORS(app)
PROJECT_HOME = os.path.dirname(os.path.realpath(__file__))  # directory containing this script
# fastai learner holding the trained fabric-defect classifier.
# NOTE(review): the model directory is an empty string -- presumably the .pkl
# is expected in the current working directory; confirm before deploying.
learn = load_learner("",'5D_SIN_Resnet50_SGD.pkl')
@app.route("/predictnewimage", methods=["POST"])
def predictnewimage():
    """Classify an uploaded fabric image (multipart form field "pic").

    Returns json.dumps(str(resp)) where resp maps each defect class to its
    probability in percent -- kept in this double-encoded form for
    compatibility with the existing front end.
    """
    if flask.request.method == "POST":
        img_file = flask.request.files.get("pic")
        if not img_file:
            # Bug fix: a POST without a "pic" field previously fell through and
            # returned None (a 500 error); the "Where is the image?" reply was
            # only reachable for non-POST requests, which the route forbids.
            return "Where is the image?"
        # Persist the upload to disk because fastai's open_image expects a path.
        img = img_file.read()
        img_open = Image.open(io.BytesIO(img))
        img_open.save("./raw_image.jpg")
        np_image = open_image("./raw_image.jpg")
        prediction = learn.predict(np_image)
        probability = prediction[2].numpy()  # class-probability tensor -> ndarray
        defect_name = ['Broken End','Broken Pick','Missing Pick','No Defect','Rub Mark','Starting Mark']
        resp = dict(zip(defect_name, probability))
        # Convert each probability to a percentage with two decimals.
        for key in resp:
            resp[key] = round(resp[key]*100 ,2)
        return json.dumps(str(resp))
    return "Where is the image?"
@app.route("/predict", methods=["POST"])
def predict():
    """Classify a fabric image fetched from a URL.

    The raw request body is treated as the image URL. Returns
    json.dumps(str(resp)) with per-class probabilities in percent -- kept in
    this double-encoded form for compatibility with the existing front end.
    """
    if flask.request.method == "POST":
        url = request.data  # raw body is expected to hold the image URL
        # NOTE(review): the HTTP status of this fetch is never checked; a 404
        # body would be passed to PIL and raise there instead.
        response = requests.get(url)
        # Persist the download to disk because fastai's open_image expects a path.
        img_open = Image.open(BytesIO(response.content))
        img_open.save("./raw_image.jpg")
        np_image = open_image("./raw_image.jpg")
        prediction = learn.predict(np_image)
        probability = prediction[2].numpy()  # class-probability tensor -> ndarray
        defect_name = ['Broken End','Broken Pick','Missing Pick','No Defect','Rub Mark','Starting Mark']
        resp = dict(zip(defect_name, probability))
        # Convert each probability to a percentage with two decimals.
        for key in resp:
            resp[key] = round(resp[key]*100 ,2)
        return json.dumps(str(resp))
    else:
        # Unreachable in practice: the route only accepts POST.
        return "Where is the image?"
# Run the Flask development server on all interfaces, port 5010, debug off.
if __name__ == "__main__":
    app.run(host='0.0.0.0',port=5010, debug=False)
| akkiaffine/Raymonds_Fabric | fabrication.py | fabrication.py | py | 3,001 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.