seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
26694182467 | from collections import deque
# Challenge 1
def balanced_symbols(input_string: str = "") -> bool:
    """
    Check whether ``input_string`` is properly balanced with respect to the
    pairs '(' ')', '[' ']' and '{' '}'.

    Symbols inside comments are ignored; a comment starts with '/*' and ends
    with '*/'.  A '*/' appearing outside of any comment, or a comment that is
    never closed, makes the string unbalanced.

    Parameters
    ----------
    input_string: str
        Input string to evaluate.

    Returns
    -------
    bool
        True if the 'input_string' is balanced, False otherwise.

    Raises
    ------
    TypeError
        If 'input_string' is not a string.

    Examples
    --------
    >>> balanced_symbols("({[()[]]})")
    True
    >>> balanced_symbols("/* abcd /* efgh */ ijkl */")
    False
    >>> balanced_symbols("/*")
    False
    """
    if not isinstance(input_string, str):
        raise TypeError("The parameter 'input_string' should be a string.")
    opening = set('([{')
    closing = set(')]}')
    matches = {('(', ')'), ('[', ']'), ('{', '}')}
    stack = []
    i = 0
    inside_comment = False
    while i < len(input_string):
        two_chars = input_string[i:i + 2]
        if two_chars == "/*" and not inside_comment:
            # Comment opener: enter comment mode and skip both characters.
            inside_comment = True
            i += 1
        elif two_chars == "*/" and inside_comment:
            # Comment closer: leave comment mode and skip both characters.
            inside_comment = False
            i += 1
        elif two_chars == "*/" and not inside_comment:
            # BUG FIX: only a real "*/" without a matching opener is invalid.
            # The original rejected ANY '*' followed by any character outside
            # a comment (e.g. "2*3" was reported as unbalanced).
            return False
        elif input_string[i] in opening and not inside_comment:
            stack.append(input_string[i])
        elif input_string[i] in closing and not inside_comment:
            if not stack:
                return False
            if (stack[-1], input_string[i]) in matches:
                stack.pop()
            else:
                return False
        i += 1
    # Balanced only if every bracket was matched and every comment was closed.
    return not stack and not inside_comment
# Challenge 2
def cost_prorating(cost: int, weights: list) -> list:
    """
    Distribute ``cost`` proportionally to ``weights``, preserving the order
    of the input weights.  The distributed integers always sum to ``cost``.

    Parameters
    ----------
    cost: int
        Cost to distribute (>= 0).
    weights: list
        Non-negative integer weights; at least one must be non-zero.

    Returns
    -------
    list
        Cost distribution as a list of integers.

    Examples
    --------
    >>> cost_prorating(10, [2, 5])
    [3, 7]
    >>> cost_prorating(10, [1, 0])
    [10, 0]
    >>> cost_prorating(123, [1, 2, 3, 4, 5, 6])
    [6, 12, 18, 23, 29, 35]
    """
    # --- input validation -------------------------------------------------
    if type(cost) is not int:
        raise TypeError("The parameter 'cost' should be an int.")
    if cost < 0:
        raise ValueError("The cost should be greater or equal to zero.")
    if type(weights) is not list:
        raise TypeError("The parameter 'weights' should be a list of integers.")
    if not weights:
        raise ValueError("The 'weights' list should not be empty.")
    for weight in weights:
        if type(weight) is not int:
            raise TypeError("Each of the weights should be integers.")
        if weight < 0:
            raise ValueError("Each of the weights should be greater or equal to 0.")
    if not any(weights):
        raise ValueError("There should be at least one non-zero weight.")

    # --- initial rounded split --------------------------------------------
    total_weight = sum(weights)
    shares = [round(cost / total_weight * w) for w in weights]
    # Per-weight relative rounding error; zero weights contribute no error.
    errors = [
        0 if w == 0
        else (cost / total_weight * w - round(cost / total_weight * w)) / w
        for w in weights
    ]

    # --- correction loop: nudge shares until they sum exactly to cost -----
    surplus = sum(shares) - cost
    while surplus != 0:
        if surplus > 0:
            # We gave out too much: take 1 from the most over-rounded share
            # (smallest relative error, first occurrence on ties).
            idx = min(range(len(errors)), key=errors.__getitem__)
            shares[idx] -= 1
        else:
            # We gave out too little: add 1 to the most under-rounded share
            # (largest relative error, first occurrence on ties).
            idx = max(range(len(errors)), key=errors.__getitem__)
            shares[idx] += 1
        errors[idx] = 0  # never adjust the same entry twice
        surplus = sum(shares) - cost
    return shares
# Challenge 3
def water_jugs(target: float, capacities: list) -> str:
    """
    Given a set of jugs' capacities and a target volume, return a minimal
    sequence of pouring operations (found with BFS) that reaches a state in
    which at least one jug holds exactly the target volume of water.
    If no such sequence exists, return None.

    Parameters
    ----------
    target: float
        Target volume of water (> 0).
    capacities: list
        Set of jugs' capacities (ints or floats, all > 0).

    Returns
    -------
    str or None
        The minimum steps in format "source -> destination : states", where:
            source: the jug to pour from, or -1 to fill from the faucet
            destination: the jug to pour into, or -1 to empty into the sink
            states: the volume of water in each jug after the operation
        None when the target cannot be reached.

    Examples
    --------
    >>> water_jugs(target=4, capacities=[3, 5])
    -1 -> 1 : (0, 5)
    1 -> 0 : (3, 2)
    0 -> -1 : (0, 2)
    1 -> 0 : (2, 0)
    -1 -> 1 : (2, 5)
    1 -> 0 : (3, 4)
    >>> water_jugs(target=15, capacities=[1, 2, 10, 20])
    -1 -> 3 : (0, 0, 0, 20)
    3 -> 0 : (1, 0, 0, 19)
    3 -> 1 : (1, 2, 0, 17)
    1 -> -1 : (1, 0, 0, 17)
    3 -> 1 : (1, 2, 0, 15)
    """
    # --- input validation -------------------------------------------------
    if type(target) != int and type(target) != float:
        raise TypeError("The parameter 'target' should be a float.")
    if target <= 0:
        raise ValueError("The 'target' should be greater than zero.")
    if type(capacities) != list:
        raise TypeError("The parameter 'capacities' should be a list of integers.")
    if len(capacities) == 0:
        raise ValueError("The 'capacities' list should not be empty.")
    for capacity in capacities:
        if type(capacity) != int and type(capacity) != float:
            raise TypeError("Each of the jugs' capacities should be float or int.")
        if capacity <= 0:
            # Message fixed: this check validates capacities, not weights.
            raise ValueError("Each of the capacities should be greater than 0.")

    # --- successor generators ---------------------------------------------
    def fill_a_jug(state, capacities):
        """States reachable by filling one jug to capacity from the faucet."""
        possible_states, prev_states, pouring_ops = [], [], []
        for i in range(len(capacities)):
            temp = list(state)
            temp[i] = capacities[i]
            possible_states.append(temp)
            prev_states.append(state)
            pouring_ops.append([-1, i])
        return possible_states, prev_states, pouring_ops

    def empty_a_jug(state):
        """States reachable by emptying one jug into the sink."""
        possible_states, prev_states, pouring_ops = [], [], []
        for i in range(len(state)):
            temp = list(state)
            temp[i] = 0
            possible_states.append(temp)
            prev_states.append(state)
            pouring_ops.append([i, -1])
        return possible_states, prev_states, pouring_ops

    def pour_a_jug_into_another(state, capacities):
        """States reachable by pouring jug i into jug e until full or empty."""
        possible_states, prev_states, pouring_ops = [], [], []
        for i in range(len(state)):
            for e in range(len(state)):
                if e == i:
                    continue
                temp = list(state)
                # Amount poured is limited by the source volume and the
                # remaining room in the destination jug.
                diff = min(state[i], capacities[e] - temp[e])
                temp[i] = temp[i] - diff
                temp[e] = temp[e] + diff
                possible_states.append(temp)
                prev_states.append(state)
                pouring_ops.append([i, e])
        return possible_states, prev_states, pouring_ops

    # --- BFS from the all-empty state --------------------------------------
    states_queue = deque()
    ops_queue = deque()
    prevs_queue = deque()
    path = []      # visited states, in visit order
    ops = []       # operation that produced path[i]
    prevs = []     # predecessor state of path[i]
    searched = set()  # tuples of visited states; O(1) lookup (was an O(n) list)
    solution = ""
    states_queue.append([0 for i in range(len(capacities))])
    ops_queue.append([0 for i in range(len(capacities))])  # dummy op for the root
    prevs_queue.append([0, 0])
    while len(states_queue) > 0:
        state = states_queue.popleft()
        op = ops_queue.popleft()
        prev = prevs_queue.popleft()
        if tuple(state) in searched:
            continue
        path.append(state)
        ops.append(op)
        prevs.append(prev)
        searched.add(tuple(state))
        if target in state:
            # Walk predecessors back to the root, prepending each step.
            i = len(path) - 1
            while i != 0:
                state_tuple = tuple(path[i])
                solution = "{} -> {} : {}\n".format(ops[i][0], ops[i][1], state_tuple) + solution
                i = path.index(prevs[i])
            return solution
        # Expand in the original order: fill, empty, pour (keeps the BFS
        # exploration order -- and hence the reported path -- unchanged).
        for expansion in (fill_a_jug(state, capacities),
                          empty_a_jug(state),
                          pour_a_jug_into_another(state, capacities)):
            possible_states, prev_states, pouring_ops = expansion
            for possible_state, prev_state, pouring_op in zip(possible_states, prev_states, pouring_ops):
                states_queue.append(possible_state)
                prevs_queue.append(prev_state)
                ops_queue.append(pouring_op)
    # All reachable states explored without hitting the target.
    return None
| alvarodiez20/challenge_adiez | cats_and_cheese.py | cats_and_cheese.py | py | 10,495 | python | en | code | 0 | github-code | 90 |
import pygame
import random

pygame.init()
clock = pygame.time.Clock()

# Window geometry and gameplay tuning constants.
WIN_WIDTH = 280
WIN_HEIGHT = 500
FPS = 1  # NOTE(review): 1 frame per second looks unintentionally slow -- confirm
BLACK = (0,0,0)
GROUND_HEIGHT = 400  # y coordinate where the scrolling ground strip is drawn
GROUND_SPEED = 2     # pixels the ground/pipes scroll left each frame
SWING = 20           # frames between bird wing-flap animation frames

# Sprite assets, loaded from the current working directory.
background_image = pygame.image.load("background.png")
ground_image = pygame.image.load("ground.png")
pipe_image = pygame.image.load("pipe.png")
bird1_image = pygame.image.load("bird1.png")
bird2_image = pygame.image.load("bird2.png")
bird3_image = pygame.image.load("bird3.png")
# Top pipes reuse the bottom-pipe sprite rotated upside-down.
rotaded_pipe = pygame.transform.rotate(pipe_image, 180)

window_size = (WIN_WIDTH, WIN_HEIGHT)
screen = pygame.display.set_mode(window_size)
class FlappyBird:
    """Flappy-Bird style game: a flapping bird, scrolling ground and pipes.

    All drawing goes to the module-level ``screen`` surface using the
    module-level sprite images and constants.
    """

    def __init__(self):
        self.x = WIN_WIDTH / 3   # fixed horizontal bird position
        self.drawBackground()
        self.choose_bird = 0     # index into the flap-animation frame cycle
        self.move_ground = 0     # current ground scroll offset
        self.swing = 0           # frames since the animation last advanced
        self.pipe_x = 400        # x where new pipes spawn (off-screen right)
        self.pipe = []           # bottom pipes as (x, y) tuples
        self.pipe_up = []        # top (rotated) pipes as (x, y) tuples
        self.next_pipe = 100     # frames between pipe spawns

    def reset(self):
        """Reset per-round state; must run before the loop (it sets self.y)."""
        self.y = WIN_HEIGHT / 2
        self.score = 0
        self.new_pipe = 0

    def drawBird(self):
        """Draw the bird, advancing a 4-frame flap cycle every SWING frames."""
        if pygame.display.get_surface() is None:
            # Display already torn down (e.g. after quit) -- nothing to draw on.
            return
        frames = (bird1_image, bird2_image, bird3_image, bird2_image)
        screen.blit(frames[self.choose_bird], (self.x, self.y))
        self.swing += 1
        if self.swing == SWING:
            self.choose_bird = (self.choose_bird + 1) % len(frames)
            self.swing = 0

    def drawPipe(self):
        """Spawn a new pipe pair at the right edge and draw every pipe."""
        self.pipe_y = random.randint(100, 380)
        self.pipe.append((self.pipe_x, self.pipe_y))
        # The matching top pipe is offset upwards so a gap remains.
        self.pipe_up.append((self.pipe_x, self.pipe_y - 430))
        for x, y in self.pipe:
            screen.blit(pipe_image, (x, y))
        for x, y in self.pipe_up:
            screen.blit(rotaded_pipe, (x, y))

    def movePipe(self):
        """Scroll bottom pipes left, draw them, and drop off-screen ones.

        BUG FIX: the original deleted entries from ``self.pipe`` while
        iterating over it (skipping elements); we now rebuild the list.
        """
        remaining = []
        for x, y in self.pipe:
            x -= int(GROUND_SPEED)
            screen.blit(pipe_image, (x, y))
            if x >= -50:  # keep until fully off the left edge
                remaining.append((x, y))
        self.pipe = remaining

    def movePipeUp(self):
        """Scroll top pipes left, draw them, and drop off-screen ones."""
        remaining = []
        for x, y in self.pipe_up:
            x -= int(GROUND_SPEED)
            screen.blit(rotaded_pipe, (x, y))
            if x >= -50:
                remaining.append((x, y))
        self.pipe_up = remaining

    def drawBackground(self):
        """Blit the static background."""
        screen.blit(background_image, (0, 0))

    def drawGround(self):
        """Draw two ground tiles and scroll them left, wrapping around."""
        screen.blit(ground_image, (0 - self.move_ground, GROUND_HEIGHT))
        screen.blit(ground_image, (WIN_WIDTH - 15 - self.move_ground, GROUND_HEIGHT))
        self.move_ground += GROUND_SPEED
        if self.move_ground > WIN_WIDTH:
            self.move_ground = 0

    def drawScore(self):
        """Render the current score in the top-left corner."""
        self.font = pygame.font.Font(None, 25)
        self.score_text = self.font.render(f"Score: {self.score}", True, BLACK)
        screen.blit(self.score_text, (10, 10))

    def game_loop(self):
        """Run the main loop: handle events, advance and draw everything.

        NOTE(review): the bird never moves (no gravity/flap input) and the
        score is never incremented in the visible code -- presumably still
        work in progress.
        """
        clock.tick(FPS)
        self.reset()
        self.running = True
        while self.running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    # BUG FIX: return immediately after pygame.quit(); the
                    # original kept drawing on the closed display and crashed.
                    self.running = False
                    pygame.quit()
                    return
            self.drawBackground()
            self.drawBird()
            self.movePipe()
            self.movePipeUp()
            if self.new_pipe == self.next_pipe:
                self.drawPipe()
                self.new_pipe = 0
            self.new_pipe += 1
            self.drawGround()
            self.drawScore()
            pygame.display.update()
def main():
    """Create the game object and enter its main loop."""
    game = FlappyBird()
    game.game_loop()


if __name__ == "__main__":
    # Guard so importing this module does not immediately start the game.
    main()
| Poppynator/Flappy-Bird | game.py | game.py | py | 4,435 | python | en | code | 0 | github-code | 90 |
39603083509 | # Input: deadends = ["0201","0101","0102","1212","2002"], target = "0202"
# Output: 6
# Explanation:
# A sequence of valid moves would be "0000" -> "1000" -> "1100" -> "1200" -> "1201" -> "1202" -> "0202".
# Note that a sequence like "0000" -> "0001" -> "0002" -> "0102" -> "0202" would be invalid,
# because the wheels of the lock become stuck after the display becomes the dead end "0102".
from collections import deque
from typing import List
import copy
class Solution:
    """BFS solver for the 'Open the Lock' puzzle (4 wheels, digits 0-9)."""

    # Wheel combination we want to reach (the display "0000").
    REAL_TARGET = [0, 0, 0, 0]

    def openLock(self, deadends: List[str], target: str) -> int:
        """
        Return the minimum number of single-wheel turns needed to go between
        ``target`` and "0000" without ever displaying a deadend combination,
        or -1 if it cannot be done.

        The search runs from ``target`` towards "0000"; the distance is the
        same in both directions because every turn is reversible.

        Fixes over the original:
        * deadend test used ``combo != real_deadends`` (comparing one combo
          to the whole list) instead of membership;
        * no visited set, so the search never terminated;
        * the step counter was incremented per dequeued node instead of per
          BFS level, giving a wrong distance;
        * debug prints removed.
        """
        dead = {tuple(int(d) for d in s) for s in deadends}
        start = tuple(int(d) for d in target)
        goal = tuple(self.REAL_TARGET)
        if start in dead or goal in dead:
            return -1
        if start == goal:
            return 0
        queue = deque([(start, 0)])
        seen = {start}
        while queue:
            combo, steps = queue.popleft()
            # Each wheel can be turned one notch up or one notch down.
            for neighbor in self.inc_by_one(list(combo)) + self.dec_by_one(list(combo)):
                state = tuple(neighbor)
                if state == goal:
                    return steps + 1
                if state not in dead and state not in seen:
                    seen.add(state)
                    queue.append((state, steps + 1))
        return -1

    def inc_by_one(self, current_combo):
        """Return the 4 combos reachable by turning one wheel up (9 wraps to 0)."""
        neighbors = []
        for i in range(4):
            combo = list(current_combo)
            combo[i] = (combo[i] + 1) % 10
            neighbors.append(combo)
        return neighbors

    def dec_by_one(self, current_combo):
        """Return the 4 combos reachable by turning one wheel down (0 wraps to 9)."""
        neighbors = []
        for i in range(4):
            combo = list(current_combo)
            combo[i] = (combo[i] - 1) % 10
            neighbors.append(combo)
        return neighbors
# Ad-hoc smoke test: the LeetCode example -- expected output is 6.
solution = Solution()
print(solution.openLock(["0201", "0101", "0102", "1212", "2002"], "0202"))
# print(solution.inc_by_one([1, 2, 3, 4]))
# print(solution.dec_by_one([1, 2, 3, 4]))
| konstantinosBlatsoukasRepo/leet-code-problems | bfs/open_the_lock.py | open_the_lock.py | py | 2,271 | python | en | code | 0 | github-code | 90 |
13334870434 | from rest_framework.serializers import\
ModelSerializer, SerializerMethodField
from recommendations.models import *
class RCSerializer(ModelSerializer):
    """Read serializer exposing a RecommendationCategory's display fields."""

    class Meta:
        # Plain model fields, no computed values.
        fields = (
            'id',
            'title',
            'description',
            'image',
        )
        model = RecommendationCategory
class SimpleRecommendationSerializer(ModelSerializer):
    """Compact Recommendation serializer: id, category and the category image."""

    image = SerializerMethodField()

    def get_image(self, obj):
        # The image belongs to the related category, not the recommendation.
        # NOTE(review): this returns the ImageField itself, while PRSerializer
        # below returns ``.image.url`` -- presumably this should too; confirm
        # with the API consumers before changing.
        return obj.category.image

    class Meta:
        fields = (
            'id',
            'image',
            'category',
        )
        model = Recommendation
class RecommendationSerializer(ModelSerializer):
    """Full read serializer for a Recommendation, including its HTML body."""

    class Meta:
        fields = (
            'id',
            'title',
            'category',
            'brief',
            'html_code',
        )
        model = Recommendation
class PRSerializer(ModelSerializer):
    """Serializer for a user's PersonalRecommendation.

    Flattens fields of the related Recommendation (title, brief, category
    image URL, html_code) onto the top level next to the nested
    ``recommendation`` object.
    """

    recommendation = SimpleRecommendationSerializer()
    title = SerializerMethodField()
    description = SerializerMethodField()
    image = SerializerMethodField()
    html_code = SerializerMethodField()

    def get_title(self, obj):
        # Title is taken from the linked recommendation.
        return obj.recommendation.title

    def get_description(self, obj):
        # 'description' exposes the recommendation's short 'brief' text.
        return obj.recommendation.brief

    def get_image(self, obj):
        # URL of the image attached to the recommendation's category.
        return obj.recommendation.category.image.url

    def get_html_code(self, obj):
        return obj.recommendation.html_code

    class Meta:
        fields = (
            'id',
            'recommendation',
            'favorites_flag',
            'title',
            'description',
            'image',
            'user',
            'reason',
            'html_code',
        )
model = PersonalRecommendation | madjar-code/Career-Routes | backend/apps/recommendations/api/serializers.py | serializers.py | py | 1,724 | python | en | code | 0 | github-code | 90 |
20532253195 | # Devin Hurley
#
import math, pylab, random
def fnc(x, mu):
    """One logistic-map step: f(x) = mu * x * (1 - x)."""
    return mu * x * (1 - x)
random.seed()
N = 500.0  # NOTE(review): unused below -- presumably the intended sample count
# x-axis (growth parameter) and y-axis (settled iterate), 20 samples per mu.
mu = [0.0]*500*20
ex = [0.0]*500*20
delMu = 0.01 ## move very slowly
# For each mu in [0, 5) take 20 random starting points and iterate the
# logistic map 300 times so transients die out; the surviving x values trace
# the bifurcation diagram.
for i in range(500):
    m = i*delMu
    for j in range(20):
        x = random.random()
        for k in range(300):
            x = fnc(x,m)
        mu[20*i + j] = m
        ex[20*i + j] = x

# Disabled earlier experiment: plotting the second iterate f(f(x)) at mu = 4.
"""
mu = 4.0
N = 100.0
delX = 1/N
x = [0.0]*100
y = [0.0]*100

# compute the function on these points
for i in range(100):
    x[i] = i*delX
    y[i] = mu*(mu*x[i]*(1.0-x[i]))*(1.0-(mu*x[i]*(1.0-x[i])))
"""

pylab.plot(mu,ex,'.')
pylab.show()
| dhurley14/CSIS310 | logMap2.py | logMap2.py | py | 625 | python | en | code | 0 | github-code | 90 |
24657271425 | ''''Given an integer array nums, rotate the array to the right by k steps, where k is non-negative.
Example 1:
Input: nums = [1,2,3,4,5,6,7], k = 3
Output: [5,6,7,1,2,3,4]
Explanation:
rotate 1 steps to the right: [7,1,2,3,4,5,6]
rotate 2 steps to the right: [6,7,1,2,3,4,5]
rotate 3 steps to the right: [5,6,7,1,2,3,4]
Example 2:
Input: nums = [-1,-100,3,99], k = 2
Output: [3,99,-1,-100]
Explanation:
rotate 1 steps to the right: [99,-1,-100,3]
rotate 2 steps to the right: [3,99,-1,-100]'''
class Solution:
    """In-place right rotation of a list (LeetCode 189, 'Rotate Array')."""

    def rotate(self, nums, k: int) -> None:
        """
        Rotate ``nums`` to the right by ``k`` steps, in place.

        Only ``k % len(nums)`` matters, so arbitrarily large ``k`` is fine.
        Cleanup over the original: debug ``print`` calls, the commented-out
        O(n*k) variant and a stray no-op ``nums[:k]`` expression removed.

        Do not return anything, modify nums in-place instead.
        """
        # Rotating by a multiple of the length is a no-op.
        k = k % len(nums)
        if k:
            # The last k elements move to the front; the rest shift right.
            # Slice-assign so the caller's list object is mutated in place.
            nums[:] = nums[-k:] + nums[:-k]
sol = Solution()
sol.rotate([1,2,3,4,5,6,7], 30) | abz1997/algorithms | rotate_array.py | rotate_array.py | py | 1,384 | python | en | code | 0 | github-code | 90 |
12874205246 | # -*- coding: utf-8 -*-
""" Group filters """
from django import template
from django.core.urlresolvers import reverse
from django.core.context_processors import request
from django.db.models import Count
from gorod.models import ArticleRubric
register = template.Library()
@register.inclusion_tag('gorod/templatetags/group_filters.html', takes_context=True)
def group_filters(context, rubric):
    """
    Build the context for the group-filters menu of the given rubric.

    Each entry carries the filter's title, url name, resolved link for the
    current city/rubric, and whether it matches the filter in the current URL.
    """
    city = context['city']
    # Filter name taken from the resolved URL, if present.
    active_name = context['request'].resolver_match.kwargs.get('filter_name')

    filters = [
        {
            'title': filter_title,
            'url_name': filter_name,
            'link': reverse('gorod:feed-rubric-filter', kwargs={
                'city_name': city.name,
                'rubric_name': rubric.name,
                'filter_name': filter_name,
            }),
            'is_active': filter_name == active_name,
        }
        for filter_name, filter_title in ArticleRubric.FILTERS
    ]

    return {'filters': filters}
| karbachinsky/gorod_io | gorod/templatetags/group_filters.py | group_filters.py | py | 1,094 | python | en | code | 1 | github-code | 90 |
30593423473 | #!/usr/bin/env python
from __future__ import unicode_literals
import codecs
import numpy as np
def pad(sequences, pad_token='<pad>', pad_left=False):
"""
input sequences is a list of text sequence [[str]]
pad each text sequence to the length of the longest
:param sequences:
:param pad_token:
:param pad_left:
:return:
"""
# max_len = max(5,max(len(seq) for seq in sequences))
max_len = max(len(seq) for seq in sequences)
if pad_left:
return [[pad_token]*(max_len-len(seq)) + seq for seq in sequences]
return [seq + [pad_token]*(max_len-len(seq)) for seq in sequences]
def load_embedding_npz(path):
data = np.load(path)
return [str(w) for w in data['words']], data['vals']
def load_embedding_txt(path):
words = []
vals = []
with codecs.open(path, 'r', encoding='utf-8') as fin:
fin.readline()
for line in fin:
line = line.strip()
if line:
parts = line.split()
words.append(parts[0])
vals += [float(x) for x in parts[1:]] # equal to append
return words, np.asarray(vals).reshape(len(words), -1) # reshape
def load_embedding(path):
if path.endswith(".npz"):
return load_embedding_npz(path)
else:
return load_embedding_txt(path)
| xalanq/chinese-sentiment-classification | elmoformanylangs/dataloader.py | dataloader.py | py | 1,233 | python | en | code | 131 | github-code | 90 |
import sys
import io

sys.setrecursionlimit(10**8)

# Sample input baked in so the script runs outside the judge; restore the
# real stdin before submitting.
_INPUT = """\
200000 314 318
"""
sys.stdin = io.StringIO(_INPUT)
readline = sys.stdin.readline

# Trees stand at positions M, M+P, M+2P, ... ; count those not exceeding N:
# floor((N - M) / P) + 1.  Integer // avoids float rounding errors that the
# original int((N-M)/P + 1) could hit for very large N.
N, M, P = map(int, input().split())
print((N - M) // P + 1)
| Amano-take/Atcoder | 300/10/318/A.py | A.py | py | 207 | python | en | code | 0 | github-code | 90 |
import random as rnd

# Card definitions: suits, ranks and blackjack point values.  An ace ('A')
# counts as 11 until Hand.adjust_for_ace downgrades it to 1.
suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')
numbers = ('2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A')
values = {'2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, '10':10, 'J':10,
'Q':10, 'K':10, 'A':11}

playing = True  # global flag: True while the player is still taking cards
# Creating Cards #
class Card:
    """A single playing card identified by its suit and rank."""

    def __init__(self, suit, number):
        self.suit = suit      # e.g. 'Hearts'
        self.number = number  # rank string, e.g. 'A' or '10'

    def __str__(self):
        # Human-readable form, e.g. "A of Hearts".
        return f"{self.number} of {self.suit}"
# Creating the Deck that shuffles automatically and does single dealing #
class Deck:
    """A 52-card deck built from the module-level ``suits`` and ``numbers``."""

    def __init__(self):
        # One Card per (suit, rank) combination, in suit-major order.
        self.deck = [Card(suit, number) for suit in suits for number in numbers]

    def __str__(self):
        listing = ''.join('\n' + str(card) for card in self.deck)
        return 'The deck has' + listing

    def shuffle(self):
        """Shuffle the deck in place."""
        rnd.shuffle(self.deck)

    def deal(self):
        """Remove and return the top (last) card of the deck."""
        return self.deck.pop()
# Creating a hand #
class Hand:
    """A blackjack hand: cards held, running point value and soft-ace count."""

    def __init__(self):
        self.cards = []  # Card objects currently in the hand
        self.value = 0   # current point total
        self.aces = 0    # aces still counted as 11

    def add_card(self, card):
        """Add ``card`` to the hand and update its value and ace count."""
        self.cards.append(card)
        self.value += values[card.number]
        # BUG FIX: ranks are stored as 'A' (see the module-level `numbers`
        # tuple), never 'Ace', so the original comparison could not match --
        # aces were never tracked and soft hands busted incorrectly.
        if card.number == 'A':
            self.aces += 1

    def adjust_for_ace(self):
        """Downgrade aces from 11 to 1 while the hand would otherwise bust."""
        while self.value > 21 and self.aces:
            self.value -= 10
            self.aces -= 1
# Creating a betting system as well as money balance system for the player #
class Money:
    """Tracks the player's bankroll and the current wager."""

    def __init__(self):
        # Every player starts with a $1000 bankroll and no bet placed.
        self.total = 1000
        self.bet = 0

    def win_bet(self):
        """Credit the current bet to the bankroll."""
        self.total = self.total + self.bet

    def lose_bet(self):
        """Debit the current bet from the bankroll."""
        self.total = self.total - self.bet
# Defining the bet taking aspect of the game #
def recieve_bet(Money):
    """
    Prompt until the player enters a valid whole-dollar bet and store it on
    ``Money.bet``.

    The bet must be a positive integer no larger than the bankroll.  (The
    'recieve' typo and the parameter shadowing the Money class are kept, as
    renaming either would break the caller below.)
    """
    while True:
        try:
            Money.bet = int(input('How much money would you like to bet? $ '))
        except ValueError:
            print("Sorry, a bet must be an integer!")
            continue
        if Money.bet <= 0:
            # BUG FIX: the original accepted zero/negative bets -- losing a
            # negative bet would have ADDED money to the bankroll.
            print('Sorry, your bet must be a positive amount.')
        elif Money.bet > Money.total:
            print('Sorry, your bet cannot exceed {} '.format(Money.total))
        else:
            break
# Defining a hit #
def hit(deck, hand):
    """Deal one card from ``deck`` into ``hand`` and re-value any aces."""
    dealt = deck.deal()
    hand.add_card(dealt)
    hand.adjust_for_ace()
# Defining decision to hit or stand #
def hit_or_stand(deck, hand):
    """
    Ask the player to hit or stand.  On a hit, deal one card into ``hand``;
    on a stand, end the player's turn by clearing the module-level
    ``playing`` flag.  Anything else (including empty input) re-prompts.
    """
    global playing

    while True:
        x = input("Would you like to Hit or Stand? Enter 'h' or 's'")
        choice = x.strip().lower()
        if choice.startswith('h'):
            hit(deck, hand)
        elif choice.startswith('s'):
            print("Dealer is playing.")
            playing = False
        else:
            # BUG FIX: the original indexed x[0] and crashed with IndexError
            # when the player just pressed Enter.
            print("Sorry, please choose again.")
            continue
        break
# Defining the functions that will display the cards #
def show_some(player, dealer):
    """Print the player's full hand but only the dealer's second card."""
    print("\nDealer's Hand")
    print("")
    print(f"  {dealer.cards[1]}")
    print("\n".join(["\nPlayer's Hand: "] + [str(card) for card in player.cards]))
def show_all(player, dealer):
    """Print both hands in full, together with their point totals."""
    print("\n".join(["\nDealer's Hand:"] + [str(card) for card in dealer.cards]))
    print("Dealer's Hand =", dealer.value)
    print("\n".join(["\nPlayer's Hand: "] + [str(card) for card in player.cards]))
    print("Player's Hand = ", player.value)
# Defining the functions that react to game scenarios #
# Round-outcome handlers: announce the result and settle the bet.
def player_busts(player,dealer,money):
    # Player went over 21: the bet is lost.
    print("It's a bust!")
    money.lose_bet()

def player_wins(player,dealer,money):
    print("You win!")
    money.win_bet()

def dealer_busts(player,dealer,money):
    # Dealer went over 21: the player collects.
    print("Dealer busts!")
    money.win_bet()

def dealer_wins(player,dealer,money):
    print("Dealer wins!")
    money.lose_bet()

def push(player,dealer):
    # Tie: no money changes hands.
    print("You and the dealer have tied! It's a push.")
# Creating the game #
# --- Main game loop: one iteration per round ---
while True:
    # Opening statement
    print("Welcome to the Blackjack game at the Foundations Casino!")
    # Creating & shuffling the deck as well as dealing two cards to each player
    deck = Deck()
    deck.shuffle()

    player_hand = Hand()
    player_hand.add_card(deck.deal())
    player_hand.add_card(deck.deal())

    dealer_hand = Hand()
    dealer_hand.add_card(deck.deal())
    dealer_hand.add_card(deck.deal())

    # Setting up the player's money
    # NOTE(review): a fresh Money() is created every round, so winnings and
    # losses never carry over between rounds -- the balance restarts at 1000.
    # Presumably it should be created once, before this loop.
    player_money = Money()

    # Prompting the Player for their bet
    recieve_bet(player_money)

    # Shows player the cards but keeps one dealer card hidden
    show_some(player_hand, dealer_hand)

    # `playing` is cleared by hit_or_stand when the player stands; a bust
    # breaks out with it still True, which is fine for the next round.
    while playing:
        # Prompting player to hit or stand
        hit_or_stand(deck, player_hand)
        show_some(player_hand,dealer_hand)
        # If the player's hand exceeds 21, the player has lost and the loop breaks
        if player_hand.value >21:
            player_busts(player_hand, dealer_hand, player_money)
            break

    # If Player hasn't lost, play Dealer's hand until Dealer reaches 17
    if player_hand.value <= 21:
        while dealer_hand.value <17:
            hit(deck, dealer_hand)

        # Shows all the cards
        show_all(player_hand,dealer_hand)

        if dealer_hand.value > 21:
            dealer_busts(player_hand,dealer_hand,player_money)
        elif dealer_hand.value > player_hand.value:
            dealer_wins(player_hand,dealer_hand,player_money)
        elif dealer_hand.value < player_hand.value:
            player_wins(player_hand,dealer_hand,player_money)
        else:
            push(player_hand,dealer_hand)

    # Inform Player of their money balance
    print("\nMoney Balance:", player_money.total)

    # Ask to play again
    # NOTE(review): empty input raises IndexError on new_game[0] -- confirm
    # intended handling.
    new_game = input("Would you like to continue playing? Enter 'y' or 'n'")
    if new_game[0].lower() == 'y':
        playing = True
        continue
    else:
        print('Thank you for playing at the Foundations Casino! ')
break | ba1019/resume-projects | Blackjack.py | Blackjack.py | py | 6,011 | python | en | code | 0 | github-code | 90 |
41154446861 | from rest_framework import serializers
from faces.models import Event, EventNotification
class EventSerializer(serializers.ModelSerializer):
    """Event serializer to read.

    Read-only representation of a face-detection Event, used below as the
    nested payload of an EventNotification.
    """

    class Meta:
        model = Event
        fields = (
            'id',
            'img',
            'confidence',
            'meta',
            'camera',
            'detector',
            'datetime',
            'face_link',
        )
class EventNotificationSerializer(serializers.ModelSerializer):
    """Event Notification serializer to read.

    Embeds the full event payload (nested, read-only) together with the
    notification's read flag.
    """

    event = EventSerializer()

    class Meta:
        model = EventNotification
        fields = (
            'id',
            'event',
            'is_read',
        )
| vitasoftua/findface | src/faces/ws_serializers.py | ws_serializers.py | py | 714 | python | en | code | 0 | github-code | 90 |
# Task 1. Given an integer N (> 0), compute the value of the expression
#   1.1 - 1.2 + 1.3 - ...   (N terms, alternating signs)
# without using a conditional operator.
sign = -1   # flips to +1 on the first iteration, so the series starts with +1.1
term = 1    # becomes 1.1, 1.2, 1.3, ... as the loop advances
value = 0
try:
    N_num = int(input("Введите число больше 0: "))
    for _ in range(N_num):
        sign = -sign
        term += 0.1
        value += term * sign
    print("Значение выражения: ", value)
except ValueError:
    # Only a non-numeric entry is expected to fail here; the original bare
    # `except:` also swallowed KeyboardInterrupt and genuine bugs.
    print("Введите целое число!")
import sys, json, os
from threading import Thread
from threading import Semaphore
import queue

# Semaphore serialising stdout writes from the worker threads.
writeLock = Semaphore(value=1)
# Work queue of BLAST tabular result file paths for the workers to parse.
in_queue = queue.Queue()

# argv[1]: JSON file describing OTUs and their member sequences.
tree = json.loads(open(sys.argv[1]).read())

# Map each member's short name (first whitespace-separated token of its
# description) to the OTU it belongs to.
mapping = {}
for otu in tree:
    for member in tree[otu]["member"]:
        shortname = tree[otu]["member"][member].split(" ")[0]
        mapping[shortname] = otu
#print mapping
cutoff = 99  # NOTE(review): unused in this file -- confirm before removing
def work():
    """
    Worker loop: take a BLAST tabular file path from ``in_queue``, collect
    for each query the OTU(s) of its best-identity hit(s), and print the
    result as one JSON object per file.

    NOTE(review): ties are only collected against the FIRST hit seen for a
    query, which assumes the BLAST output lists the best hit first for each
    query -- confirm the upstream sort order.
    """
    while True:
        packet = in_queue.get()
        try:
            # query name -> (best identity, [OTUs at that identity])
            query_id_otu = {}
            # `with` closes the file even on parse errors (original leaked it).
            with open(packet, 'r') as blast_file:
                for line in blast_file:
                    fields = line.split("\t")
                    query = fields[0]
                    subject = fields[1]
                    identity = float(fields[2])
                    if query not in query_id_otu:
                        # First hit of this query defines its best identity.
                        query_id_otu[query] = (identity, [])
                    if identity == query_id_otu[query][0]:
                        otu = mapping[subject]
                        if otu not in query_id_otu[query][1]:
                            query_id_otu[query][1].append(otu)
            # BUG FIX: acquire/release without try/finally could leave the
            # semaphore held forever if print raised, deadlocking all other
            # workers; the context manager guarantees release.
            with writeLock:
                print(json.dumps(query_id_otu))
        except Exception:
            # Best effort, as in the original: a malformed or missing file is
            # simply skipped (but SystemExit etc. are no longer swallowed).
            pass
        finally:
            in_queue.task_done()
# Start 70 worker threads; daemon=True lets the process exit once the
# queue join below completes.
for i in range(70):
    t = Thread(target=work)
    t.daemon = True
    t.start()
# argv[2]: top-level directory; enqueue every *.txt file found recursively.
top_folder = sys.argv[2]
for (head, dirs, files) in os.walk(top_folder):
    for file in files:
        if file.endswith(".txt"):
            current_file_path = os.path.abspath(os.path.dirname(os.path.join(head, file)))
            with_name = current_file_path + "/"+ file
            in_queue.put(with_name)
in_queue.join() | dgg32/acido_tree | hpc_map_to_backbone.py | hpc_map_to_backbone.py | py | 2,114 | python | en | code | 0 | github-code | 90 |
43508713497 | import mmcv
import numpy as np
from os import path as osp
import cv2
from mmdet.core.visualization import imshow_gt_det_bboxes
def show_result_mtv2d(data_root,
                      out_dir,
                      result,
                      eval_thresh,
                      show=True,
                      show_gt=True,
                      show_pred=True,
                      draw_inst_by_inst=True,
                      tail='',
                      wait_time=0,
                      visible_thresh=.5,
                      show_query=False):
    """Render GT and predicted boxes for one multi-view scene and save a JPEG.

    ``result`` unpacks as (scene_id, gt_bboxes, gt_is_valids, gt_cls,
    det_bboxes, det_is_valids, det_cls, det_score, ...); its LAST element is
    used as per-camera query points when ``show_query`` is set.  Boxes are
    per-camera in (cx, cy, w, h) form.  Camera images are read from
    ``<data_root>/<scene_id>-XX.jpg`` (1-indexed), drawn on, tiled (3x3 grid
    when there are exactly 9 views, vertical stack otherwise) and written to
    ``<out_dir>/<scene_id><tail>.jpg``.  With ``draw_inst_by_inst`` an extra
    image is written per instance.  Nothing is written when no detection
    score exceeds ``eval_thresh``.

    NOTE(review): ``show`` and ``wait_time`` are accepted but never used in
    this function body -- confirm whether interactive display was intended.
    """
    scene_id, gt_bboxes, gt_is_valids, gt_cls, det_bboxes, det_is_valids, det_cls, det_score = result[:8]
    query = result[-1]
    # Skip scenes with no confident detection at all.
    if (det_score > eval_thresh).sum() == 0:
        return
    img_list = []
    for i, (cam_gt_bboxes, cam_gt_is_valids, cam_det_bboxes, cam_det_is_valids) in enumerate(zip(gt_bboxes, gt_is_valids, det_bboxes, det_is_valids)) :
        img_path = osp.join(data_root, '%s-%02d.jpg'%(scene_id, i+1))
        img = mmcv.imread(img_path)

        # Keep only the GT boxes visible in this camera.
        cam_gt_is_valids = cam_gt_is_valids > 0 #(num_gt, )
        cam_gt_bboxes = cam_gt_bboxes[cam_gt_is_valids] #(num_gt, 4) #(cx cy w h)
        cam_gt_cls = gt_cls[cam_gt_is_valids] #(num_gt, ) #str

        # A detection is drawn only if visible enough in this view AND scored
        # above the global threshold.
        cam_det_is_valids = (cam_det_is_valids>visible_thresh) & (det_score > eval_thresh) #(num_pred, )
        cam_det_bboxes = cam_det_bboxes[cam_det_is_valids] #(num_pred, 4) #(cx cy w h)
        cam_det_cls = det_cls[cam_det_is_valids] #(num_pred,) #str
        cam_det_score = det_score[cam_det_is_valids] #(num_pred,) #float
        cam_query = None if not show_query else query[i][cam_det_is_valids]

        img = imshow_gt_det_bboxes(img, cam_gt_bboxes, cam_gt_cls, cam_det_bboxes, cam_det_cls, cam_det_score, show_gt, show_pred, show_query, cam_query)
        img_list.append(img)

    # Tile the annotated camera views into one canvas.
    if len(img_list) == 9 :
        height, width, channels = img.shape
        combined_image = np.zeros((3 * height, 3 * width, channels), dtype=np.uint8)
        for i in range(3):
            for j in range(3):
                img_index = i * 3 + j
                combined_image[i * height:(i + 1) * height, j * width:(j + 1) * width, :] = img_list[img_index]
        img = combined_image
    else :
        img = np.concatenate(img_list, axis=0)

    result_path = osp.join(out_dir, '%s%s.jpg'%(scene_id, tail))
    mmcv.imwrite(img, result_path)

    # Optionally recurse once per instance to emit per-instance images.
    # NOTE(review): the show_pred branch slices the GT arrays with the
    # detection index i, which assumes detections are index-aligned with
    # ground truths (e.g. after matching) -- confirm; otherwise this can
    # raise IndexError when num_pred != num_gt.
    if draw_inst_by_inst :
        if show_pred :
            for i in range(len(det_cls)) :
                tail = '_%02d'%(i)
                #show_gt=False
                #result = scene_id, gt_bboxes, gt_is_valids, gt_cls, det_bboxes[:, i:i+1], det_is_valids[:, i:i+1], det_cls[i:i+1], det_score[i:i+1]
                show_gt=True
                result = [scene_id, gt_bboxes[:, i:i+1], gt_is_valids[:, i:i+1], gt_cls[i:i+1], det_bboxes[:, i:i+1], det_is_valids[:, i:i+1], det_cls[i:i+1], det_score[i:i+1]]
                if show_query :
                    result.append(query[:, i:i+1])
                show_result_mtv2d(data_root, out_dir, result, eval_thresh, show=show, show_gt=show_gt, show_pred=show_pred, draw_inst_by_inst=False, tail=tail, wait_time=wait_time, visible_thresh=visible_thresh, show_query=show_query)
        else :
            for i in range(len(gt_cls)) :
                show_gt=True
                tail = '_%02d'%(i)
                result = [scene_id, gt_bboxes[:, i:i+1], gt_is_valids[:, i:i+1], gt_cls[i:i+1], det_bboxes, det_is_valids, det_cls, det_score]
                show_result_mtv2d(data_root, out_dir, result, eval_thresh, show=show, show_gt=show_gt, show_pred=show_pred, draw_inst_by_inst=False, tail=tail, wait_time=wait_time, visible_thresh=visible_thresh)
def imshow_gt_det_bboxes(img, cam_gt_bboxes, cam_gt_cls, cam_det_bboxes, cam_det_cls, cam_det_score, show_gt=True, show_pred=True, show_query=False, cam_query=None):
    """Render ground-truth boxes (green), detections (red) and optional query
    points onto a copy of `img` and return it.

    Boxes are given in center form (cx, cy, w, h); the input image itself is
    left untouched.
    """
    canvas = img.copy()
    if show_gt:
        # Ground truth: green, thick outline, class label just above the box.
        for (cx, cy, w, h), label in zip(cam_gt_bboxes, cam_gt_cls):
            green = (0, 255, 0)
            top_left = (int(cx - w / 2), int(cy - h / 2))
            bottom_right = (int(cx + w / 2), int(cy + h / 2))
            cv2.rectangle(canvas, top_left, bottom_right, green, 3)
            cv2.putText(canvas, label, (top_left[0], top_left[1] - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, green, 3)
    if show_pred:
        # Detections: red, thinner outline, "class: score" label above the box.
        for (cx, cy, w, h), label, score in zip(cam_det_bboxes, cam_det_cls, cam_det_score):
            red = (0, 0, 255)
            top_left = (int(cx - w / 2), int(cy - h / 2))
            bottom_right = (int(cx + w / 2), int(cy + h / 2))
            cv2.rectangle(canvas, top_left, bottom_right, red, 2)
            cv2.putText(canvas, f'{label}: {score:.2f}', (top_left[0], top_left[1] - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, red, 2)
    if show_query:
        # Query points: filled red dots.
        for point in cam_query:
            cv2.circle(canvas, point.astype('int'), 10, (0, 0, 255), -1)
    return canvas
| solapark/ddet | projects/mmdet3d_plugin/core/visualization/show_result_mtv2d.py | show_result_mtv2d.py | py | 5,670 | python | en | code | 0 | github-code | 90 |
70040773738 | import uuid
import datetime
from flask import json, jsonify, request
from app.main import db
from app.main.model import SmeUser
def save_new_sme_user(data):
    """Register a new SME user from the request payload `data`.

    If the e-mail is already registered, return a 409 failure payload;
    otherwise persist the user and return the token payload from
    generate_token (payload, 201).
    """
    existing = SmeUser.query.filter_by(email=data['email']).first()
    if existing:
        # Guard clause: duplicate registration attempt.
        return {
            'status': 'fail',
            'message': 'Sme User already exists. Please Log in.'
        }, 409
    new_sme_user = SmeUser(
        role_id=2,
        public_id=str(uuid.uuid4()),
        registered_on=datetime.datetime.utcnow(),
        email=data['email'],
        password=data['password'],
        first_name=data['first_name'],
        last_name=data['last_name'],
        phone=data['phone'],
        date_of_birth=data['date_of_birth'],
        address=data['address'],
        city=data['city'],
        country=data['country'],
        zip_code=data['zip_code'],
        status=data['status'],
        gender=data['gender'],
        national_id=data['national_id'],
        user_role='admin',
    )
    db.session.add(new_sme_user)
    db.session.commit()
    return generate_token(new_sme_user)
def get_all_sme_users():
    """Return every SmeUser row in the database."""
    return SmeUser.query.all()
def get_a_sme_user(id):
    """Return the SmeUser with primary key `id`; aborts with HTTP 404 if absent."""
    return SmeUser.query.get_or_404(id)
def generate_token(sme_user):
    """Encode an auth token for `sme_user` and build the registration response.

    Returns (payload, 201) on success; any exception while encoding or
    building the payload yields a generic failure payload with 401.
    """
    try:
        # generate the auth token
        auth_token = SmeUser.encode_auth_token(sme_user.id)
        response_object = {
            'status': 'success',
            'message': 'Successfully registered.',
            # NOTE(review): .decode() assumes encode_auth_token returns bytes;
            # a str token would raise here and fall into the 401 branch — confirm.
            'Authorization': auth_token.decode(),
            'data': {
                'user_id': sme_user.id,
                'status': sme_user.status,
                'first_name': sme_user.first_name,
                'last_name': sme_user.last_name,
                'public_id': sme_user.public_id,
                'gender': sme_user.gender,
                'country': sme_user.country,
                'national_id': sme_user.national_id,
                'phone': sme_user.phone,
                'date_of_birth': sme_user.date_of_birth,
                'address': sme_user.address,
                # NOTE(review): this returns the stored password hash to the
                # client — verify that exposing it is intended.
                'password': sme_user.password_hash,
                'email': sme_user.email,
                'zip_code': sme_user.zip_code,
                'role': sme_user.role_id,
                'user_role': sme_user.user_role
            }
        }
        return response_object, 201
    except Exception as e:
        response_object = {
            'status': 'fail',
            'message': 'Some error occurred. Please try again.'
        }
        return response_object, 401
def delete_a_sme_user(id):
    """Delete the SmeUser with primary key `id`.

    Returns (payload, 204) on success or a generic failure payload with 401
    if the database operation fails. A missing user still aborts with 404
    via get_a_sme_user, as before.
    """
    # Lookup stays outside the try so a missing id keeps aborting with 404.
    sme_user = get_a_sme_user(id)
    try:
        # Bug fix: the original ran delete/commit OUTSIDE the try block, so
        # the except branch could never fire for the operations that can fail.
        db.session.delete(sme_user)
        db.session.commit()
        response_object = {
            'status': 'success',
            'message': 'Sme User Successfully deleted.'
        }
        return response_object, 204
    except Exception:
        response_object = {
            'status': 'fail',
            'message': 'Some error occurred. Please try again.'
        }
        return response_object, 401
def save_changes(data):
    """Add the model instance `data` to the session and commit it."""
    db.session.add(data)
    db.session.commit()
| bmaritim/float-transfer-python | app/main/service/sme_user_service.py | sme_user_service.py | py | 3,231 | python | en | code | 0 | github-code | 90 |
21793053582 | import copy
# f = open('./input.txt')
# arr = [[int(x) for x in row.split()] for row in f.readlines()]
arr = []
# Read a whitespace-separated integer matrix from stdin until EOF.
try:
    while True:
        e = [int(x) for x in input().split()]
        arr.append(e)
except EOFError:
    pass
# NOTE(review): w and h are never used afterwards.
w = 0
h = 0
# index[i][j] = length of the run of consecutive non-zero cells ending at
# (i, j) in row i (0 where the cell itself is 0).
index = copy.deepcopy(arr)
for i in range(len(arr)):
    for j in range(len(arr[0])):
        if j - 1 >= 0 and index[i][j] != 0:
            index[i][j] = index[i][j-1] + 1
# print(index)
# For every cell, extend its run-length upward and downward as far as every
# row keeps at least the same run-length, and take the best rectangle area.
res = 0
for i in range(len(arr)):
    for j in range(len(arr[0])):
        if index[i][j] == 0:
            continue
        up = i
        down = i
        while up >=0 and index[i][j] <= index[up][j]:
            up -= 1
        while down < len(arr) and index[i][j] <= index[down][j]:
            down += 1
        res = max(index[i][j]*(down-up-1), res)
print(res)
| kaiwk/playground | online_judge/nju/week-1/2_max_child_matrix.py | 2_max_child_matrix.py | py | 807 | python | en | code | 3 | github-code | 90 |
70803535338 | import warnings
from unicodedata import category
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
from time import sleep
import datetime
import pandas as pd
import numpy as np
import requests
import axios
import json
# Ignore warning messages from third-party libraries.
warnings.filterwarnings("ignore")
# Initialize the crawler: a headless Chrome instance.
options = webdriver.ChromeOptions()
options.add_experimental_option("excludeSwitches", ["enable-logging"])
options.add_argument('--headless')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
# Bug fix: the configured ChromeOptions were never passed to the driver, so
# the headless/no-sandbox flags had no effect.
driver = webdriver.Chrome("/usr/local/bin/chromedriver", options=options)
driver.implicitly_wait(3)
# crawling target(1): the wanted.co.kr job list page, sorted by response rate.
url = "https://www.wanted.co.kr/wdlist?country=kr&job_sort=company.response_rate_order&years=-1&locations=all"
driver.get(url)
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
# Each selected element is one job card in the list.
items = soup.select("#__next > div.JobList_cn__t_THp > div > div > div.List_List_container__JnQMS > ul > li > div")
# datas(1): collect the absolute detail-page URL of every job card.
urls = []
for item in items:
    url = item.select_one("a")["href"]
    urls.append("https://wanted.co.kr" + url)
print("Urls : ")
print(urls)
print()
# Visit every job detail page and scrape one row of fields per posting.
datas = []
for url in urls:
    # crawling target(2): a single job detail page.
    driver.get(url)
    #driver.implicitly_wait(30)
    sleep(1)
    # for dynamic contents, scroll the page 0 to 5000
    driver.execute_script('window.scrollTo(0, 500);');sleep(0.3);driver.execute_script('window.scrollTo(500, 1000);');sleep(0.3)
    driver.execute_script('window.scrollTo(1000, 1500);');sleep(0.3);driver.execute_script('window.scrollTo(1500, 2000);');sleep(0.3)
    driver.execute_script('window.scrollTo(2000, 2500);');sleep(0.3);driver.execute_script('window.scrollTo(2500, 3000);');sleep(0.3)
    driver.execute_script('window.scrollTo(3000, 3500);');sleep(0.3);driver.execute_script('window.scrollTo(3500, 4000);');sleep(1)
    # scroll to the div that location and endDate data exist
    try:
        element = driver.find_element(By.CSS_SELECTOR, "#__next > div.JobDetail_cn__WezJh > div.JobDetail_contentWrapper__DQDB6 > div.JobDetail_relativeWrapper__F9DT5 > div > div.JobContent_descriptionWrapper__SM4UD > section.JobWorkPlace_className__ra6rp")
        location = element.location_once_scrolled_into_view
        driver.execute_script('arguments[0].scrollIntoView(true);', element)
        sleep(2)
    except:
        # Best effort: missing section just means those fields stay empty.
        pass
    html = driver.page_source
    soup = BeautifulSoup(html, 'html.parser')
    items = soup.select("#__next > div.JobDetail_cn__WezJh > div.JobDetail_contentWrapper__DQDB6 > div.JobDetail_relativeWrapper__F9DT5 > div.JobContent_className___ca57")
    for item in items:
        temp = []
        # for missing value: pre-initialize every field to the empty string.
        name, position, description, task, qualifications, prefer, welfare, stack, endDate, location = "","","","","","","","","",""
        try:
            # get data from the posting's fixed CSS structure.
            name = item.select_one("section.JobHeader_className__HttDA > div:nth-child(2) > h6 > a").get_text()
            description = item.select_one("div.JobContent_descriptionWrapper__SM4UD > section.JobDescription_JobDescription__VWfcb > p:nth-child(1) > span").get_text()
            position = soup.select_one("section.JobHeader_className__HttDA > h2").get_text()
            task = soup.select_one("div.JobContent_descriptionWrapper__SM4UD > section > p:nth-child(3)").get_text()
            qualifications = soup.select_one("div.JobContent_descriptionWrapper__SM4UD > section > p:nth-child(5)").get_text()
            prefer = soup.select_one("div.JobContent_descriptionWrapper__SM4UD > section.JobDescription_JobDescription__VWfcb > p:nth-child(7)").get_text()
            welfare = soup.select_one("div.JobContent_descriptionWrapper__SM4UD > section.JobDescription_JobDescription__VWfcb > p:nth-child(9)").get_text()
            endDate = soup.select_one("div.JobContent_descriptionWrapper__SM4UD > section.JobWorkPlace_className__ra6rp > div:nth-child(1) > span.body").get_text()
            location = soup.select_one("div.JobContent_descriptionWrapper__SM4UD > section.JobWorkPlace_className__ra6rp > div:nth-child(2) > span.body").get_text()
            try:
                # The tech-stack paragraph is optional on some postings.
                stack = soup.select_one("div.JobContent_descriptionWrapper__SM4UD > section.JobDescription_JobDescription__VWfcb > p:nth-child(11)").get_text()
            except:
                pass
            # Disabled slack-notification code kept for reference (string literal).
            """
            try:
                date_object = datetime.datetime.strptime(endDate, "%Y.%m.%d")
                edate = date_object.strftime("%y%m%d") + "T000000"
                now = datetime.datetime.now()
                sdate = now.strftime("%Y%m%d") + "T000000"
                jsondata = {
                    "title" : name,
                    "location" : location,
                    "description" : description,
                    "position" : position,
                    "startDate" : sdate,
                    "endDate" : edate,
                    "filename" : "filename11237678945"
                }
                headers = {"Content-Type" : "application/json; charset=utf-8"}
                slackAppUrl = "http://server.arc1el.kr:2222/slack/sendMessage"
                response = axios.post(slackAppUrl, data=json.dumps(jsondata), headers=headers)
                print(response)
            except:
                print("can't send message. check the sdate, edate")
            """
        except:
            pass
        finally:
            # append to datas: the row is appended even if scraping partly failed,
            # so every posting contributes a (possibly empty-fielded) record.
            temp.append(name)
            temp.append(location)
            temp.append(position)
            temp.append(description)
            temp.append(task)
            temp.append(qualifications)
            temp.append(prefer)
            temp.append(welfare)
            temp.append(endDate)
            temp.append(stack)
            datas.append(temp)
print("Data Length : " + str(len(datas)))
# convert python array -> numpy array -> pandas dataframe
datas_narray = np.array(datas)
dataframe = pd.DataFrame(datas_narray)
# rename columns (Korean headers: company, location, position, intro, tasks,
# qualifications, preferences, benefits, deadline, tech stack)
dataframe.columns = ["회사명", "위치", "포지션", "회사소개", "주요업무", "자격요견", "우대사항", "혜택 및 복지", "마감일", "기술스택"]
# save dataframe to csv using utf-8 encording (BOM variant for Excel)
dataframe.to_csv("data.csv", mode='w', encoding="utf-8-sig")
"""
# send request to slack app-server
# example data
datas = [
{
"title" : "스윗코리아",
"location" : "서울시 강남구 삼성로 570",
"description" : "[스윗소개]스윗테크놀로지스는 미국 실리콘밸리에 본사를 둔 글로벌 테크 스타트업입니다. 채팅과 업무관리를 결합한 Swit은 'Google Workspace'나 'MS Office365' 'Zoom'등의 업무 필수 앱들과 완벽히 연동되고 전자결재, 프로젝트 관리등의 협업 기능등을 한곳에 모은 플랫폼으로, 단일 기능을 제공하던 기존 협업툴과는 다른 협업 운영체제 입니다. 스윗은 이런 차별화를 바탕으로 2019년 정식 버전 출시이래 글로벌 협업툴로 빠르게 성장하고 있습니다.또한 시리즈A 투자유치를 성공적으로 진행하며, 우수 인재분들을 채용 중에 있습니다. 글로벌 서비스를 함께 만들어갈 실력있는 예비 Switter분들의 지원을 기다립니다[합류 여정]• 1차 서류제출 → 코딩TEST → 2차 직무인터뷰(Online) → 3차 컬쳐핏인터뷰(Offline) → 처우협의/최종합격 → 입사- 해당공고는 수시채용으로, 우수인재 채용 완료시 조기에 마감될수 있습니다 - 각 전형 결과 안내는 모든 지원자분들에게 개별적으로 메일을 통해 안내해드리고 있습니다.- 지원자분의 이력 및 경력 사항에 따라 일부 전형 과정이 생략되거나 추가될 수 있습니다.- 수습기간 3개월[제출서류]• 경력 중심의 이력서 (필수) ※PDF파일 • 포트폴리오 (링크, 파일첨부 모두 가능)",
"position" : "백엔드 개발자 Backend Developer (3년이상)",
"startDate" : "20230122T130000",
"endDate" : "20230122T170000",
"filename" : "filename112345"
}
]
slackAppUrl = "http://server.arc1el.kr:2222/slack/sendMessage"
headers = {"Content-Type" : "application/json; charset=utf-8"}
response = requests.post(slackAppUrl, json=datas, headers=headers)
print(response)
*/
""" | Arc1el/2023AWSCloudBootcamp | Crawler/crawler.py | crawler.py | py | 8,473 | python | en | code | 0 | github-code | 90 |
20260528381 | # Good vs Evil
# https://www.codewars.com/kata/52761ee4cffbc69732000738
from unittest import TestCase
# Point values per soldier type, index-aligned with the space-separated
# count strings passed to goodVsEvil.
good_values = [1, 2, 3, 3, 4, 10]
evil_values = [1, 2, 2, 2, 3, 5, 10]
def evaluating(counts, target_values):
    """Return the total worth of an army given its per-type counts string."""
    total = 0
    for idx, count in enumerate(counts.split(" ")):
        total += target_values[idx] * int(count)
    return total
def goodVsEvil(good, evil):
    """Compare both armies' worth and report the battle outcome string."""
    good_total = evaluating(good, good_values)
    evil_total = evaluating(evil, evil_values)
    if good_total > evil_total:
        outcome = "Good triumphs over Evil"
    elif evil_total > good_total:
        outcome = "Evil eradicates all trace of Good"
    else:
        outcome = "No victor on this battle field"
    return f"Battle Result: {outcome}"
# Inline sanity checks using the kata's examples; assertEqual raises on mismatch.
TestCase().assertEqual(goodVsEvil('1 1 1 1 1 1', '1 1 1 1 1 1 1'),
                       'Battle Result: Evil eradicates all trace of Good', 'Evil should win')
TestCase().assertEqual(goodVsEvil('0 0 0 0 0 10', '0 1 1 1 1 0 0'), 'Battle Result: Good triumphs over Evil',
                       'Good should win')
TestCase().assertEqual(goodVsEvil('1 0 0 0 0 0', '1 0 0 0 0 0 0'), 'Battle Result: No victor on this battle field',
                       'Should be a tie')
| polyglotm/coding-dojo | coding-challange/codewars/6kyu/2020-04-15~2020-06-01/good-vs-evil/good-vs-evil.py | good-vs-evil.py | py | 1,137 | python | en | code | 2 | github-code | 90 |
41330652624 | import logging
import sys
from logging.handlers import TimedRotatingFileHandler
import os
FORMATTER = logging.Formatter("%(asctime)s - %(name)-20s - %(lineno)d - %(levelname)-8s - %(message)s")
def get_logger(logger_name, log_file_path):
    """Build a DEBUG-level logger writing to stdout and a midnight-rotating file.

    Any directory component of `log_file_path` is created if missing, and
    handlers left over from a previous call with the same name are discarded,
    so repeated calls are idempotent.
    """
    logger = logging.getLogger(logger_name)
    logger.propagate = False      # keep records out of the root logger
    logger.handlers.clear()       # drop handlers from earlier calls
    logger.setLevel(logging.DEBUG)
    # Ensure the log directory exists before the file handler opens the file.
    log_dir = os.path.dirname(log_file_path)
    if len(log_dir) != 0:
        os.makedirs(log_dir, exist_ok=True)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(FORMATTER)
    logger.addHandler(stream_handler)
    rotating_handler = TimedRotatingFileHandler(log_file_path, when='midnight')
    rotating_handler.setFormatter(FORMATTER)
    logger.addHandler(rotating_handler)
    return logger
| VLU19/sync-folders | logger.py | logger.py | py | 947 | python | en | code | 0 | github-code | 90 |
22126261186 | # You want to create secret messages which can be deciphered by the Decipher this! kata. Here are the conditions:
# Your message is a string containing space separated words.
# You need to encrypt each word in the message using the following rules:
# The first letter needs to be converted to its ASCII code.
# The second letter needs to be switched with the last letter
# Keepin' it simple: There are no special characters in input.
def encrypt_this(text):
    """Encrypt a message word by word.

    For each space-separated word:
      * the first letter is replaced by its ASCII code,
      * the second letter is swapped with the last letter.

    Returns '' for an empty message.

    >>> encrypt_this("Hello")
    '72olle'
    """
    def _encrypt_word(word):
        # Bug fix: empty tokens (e.g. produced by double spaces) now pass
        # through unchanged; the original crashed on them via ord('').
        if not word:
            return word
        if len(word) <= 2:
            # One- or two-letter word: only the ASCII substitution applies
            # (with two letters the second IS the last, so the swap is a no-op).
            return str(ord(word[0])) + word[1:]
        return str(ord(word[0])) + word[-1] + word[2:-1] + word[1]
    return " ".join(_encrypt_word(word) for word in text.split(" "))
17569479276 | #!/usr/bin/python3
"""Script that takes our Github credentials"""
import requests
from sys import argv
from requests.auth import HTTPBasicAuth
if __name__ == '__main__':
    # argv[1] = GitHub username, argv[2] = password / personal access token.
    response = requests.get('https://api.github.com/user',
                            auth=HTTPBasicAuth(argv[1], argv[2]))
    if response.status_code == 200:
        dictionary = response.json()
        # Print the authenticated user's numeric GitHub id.
        print(dictionary['id'])
    else:
        # Authentication failed (or any non-200): print None.
        print(None)
| Miguel22247/holbertonschool-higher_level_programming | 0x11-python-network_1/10-my_github.py | 10-my_github.py | py | 433 | python | en | code | 2 | github-code | 90 |
12132504922 | from Cart import Cart
from ProductInventoryImpl import ProductInventoryImpl
from Product import Product
class CartImpl(Cart):
    """In-memory Cart implementation keyed by user id.

    NOTE(review): _userCart is a class-level attribute, so cart state is
    shared across every CartImpl instance (an effective singleton store) —
    confirm this is intended.
    """

    _userCart = {}

    def getAllCartItems(self, userId):
        """Return the list of products in the user's cart (raises if unknown user)."""
        if userId in self._userCart:
            return self._userCart[userId]
        raise Exception("User Not Found")

    def getCartTotal(self, userId):
        """Return the summed price of every product in the user's cart."""
        if userId not in self._userCart:
            raise Exception("User Not Found")
        total = 0.0
        for product in self._userCart[userId]:
            total += product.getPrice()
        return total

    def addCartItem(self, userId, productId):
        """Append the inventory product `productId` to the user's cart, creating it if needed."""
        productInventory = ProductInventoryImpl()
        product = productInventory.getProduct(productId)
        if userId in self._userCart:
            self._userCart[userId].append(product)
        else:
            self._userCart[userId] = [product]

    def removeCartItem(self, userId, productId):
        """Remove every product whose id matches `productId` from the user's cart.

        Bug fix: the original removed elements from the list WHILE iterating
        it, which skips the element following each removal; iterating over a
        snapshot copy removes all matches reliably.
        """
        if userId not in self._userCart:
            raise Exception("User Not Found")
        for product in list(self._userCart[userId]):
            if product.getId() == productId:
                self._userCart[userId].remove(product)
43718328981 | # dict = {'Emp': {'GK': {'ID': '001', 'Salary': '2000', 'Designation': 'ASE'},
# 'KK': {'ID': '002', 'Salary': '2500', 'Designation': 'Tech-Lead'},
# 'AK': {'ID': '003', 'Salary': '3000', 'Designation': 'Senior Tech-Lead'},
# 'OK': {'ID': '004', 'Salary': '4000', 'Designation': 'Manager'}}}
# type(dict)
# print(dict)
# type(dict)
#
# import pandas
# asDF=pandas.DataFrame(dict['Emp'])
# print(asDF)
# def func(x):
# return (lambda y: x + y)
# a = func(2)
# b = func(3)
# print(a(10))
# print(b(-1,9))
# my_list=[4,5,6,7,8,9]
# new_list=list(filter(lambda a:(a%2==0),my_list))
# print(new_list)
# my_list=[4,5,6,7,8,9]
# new_list=list(map(lambda a:(a%2==0),my_list))
# print(new_list)
# from functools import reduce
# my_list=[1,2,3,4,5]
# p=reduce(lambda a,b:a/b==0,my_list)
# print(p)
#-----------------------------------------------------------------------------------------------------------------------
def userinput1():
    """Prompt the user for n and return it as an int."""
    return int(input("Enter value for n:"))
def userinput2():
    """Prompt the user for k and return it as an int."""
    return int(input("Enter value for k:"))
def test(n, k):
    """Report whether k is prime, then print the sum of 1..n where every
    multiple of k is replaced by its value with all factors of k divided out.
    """
    # Primality report for k (printed, not returned).
    if k > 2:
        if any(k % divisor == 0 for divisor in range(2, k)):
            print("Entered invalid 'k' value")
        else:
            print("'k' is prime")
    elif k == 2:
        print("'k' is prime")
    else:
        print("Invalid 'k'")
    # Build the reduced sequence: strip every factor of k from multiples of k.
    reduced = []
    for value in range(1, n + 1):
        if value % k == 0:
            value //= k
            while value % k == 0:
                value //= k
        reduced.append(value)
    print(sum(reduced))
def result():
    """Read n and k from the user and run the check."""
    n = userinput1()
    k = userinput2()
    test(n, k)


result()
| KrishnakanthSrikanth/Python_Simple_Projects | Test.py | Test.py | py | 1,990 | python | en | code | 0 | github-code | 90 |
34675700376 | import pandas as pd
import numpy as np
from tqdm import tqdm
import xmltodict, json
from itertools import product
from collections import defaultdict
from utils.utils import timer
import pickle
import pandas as pd
from deuces.evaluator import *
from deuces.deck import *
from collections import defaultdict, OrderedDict, Counter
from itertools import product
from copy import copy
from tqdm import tqdm
import random
from utils.utils import _enc, _dec, multiproc
import time
random.seed(int(time.time()))
# Hero hand-category labels, weakest to strongest (pair_1/pair_2 distinguish
# first vs second pair; see _pair_rank).
all_hero_combs = [
    'high_card',
    'pair_2',
    'pair_1',
    'two_pairs',
    'trips',
    'straight',
    'flush',
    'nuts',
]
# *** nash-params ***
allin_as_custom = False
# *** nash-params ***
# k_bank_actions = [0.3, 0.5, 0.75, 1]
# Bet sizes expressed as fractions of the bank.
k_bank_actions = [0.3, 0.5, 0.8]
all_hero_board_actions = ['fold', 'check', 'call'] \
                         + ['bet-' + (str(v) if v != 1 else 'bank') for v in k_bank_actions] + ['allin'] + ['none']
all_ranks = ['2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', 'A']
all_suits = ['s', 'd', 'h', 'c']
# Full 52-card deck as 2-char strings, e.g. 'As', 'Td'.
all_deck = [r + s for r, s in list(product(all_ranks, all_suits))]
rank2idx = {card: idx for idx, card in enumerate(all_ranks)}
idx2rank = {idx: card for idx, card in enumerate(all_ranks)}
# Map deuces evaluator class names onto this project's coarser categories.
deuces2comb = {
    'Straight Flush': 'nuts',
    'Four of a Kind': 'nuts',
    'Full House': 'nuts',
    'Flush': 'flush',
    'Straight': 'straight',
    'Three of a Kind': 'trips',
    'Two Pair': 'two_pairs',
    'Pair': 'pair',
    'High Card': 'high_card'
}
hero_comb_to_idx = {c: i for i, c in enumerate(all_hero_combs)}
idx_to_hero_comb = {i: c for i, c in enumerate(all_hero_combs)}
hero_board_act_to_idx = {a: i for i, a in enumerate(all_hero_board_actions)}
idx_to_hero_board_act = {i: a for i, a in enumerate(all_hero_board_actions)}
kbet_to_string = {v: 'bet-' + (str(v) if v != 1 else 'bank') for v in k_bank_actions}
street_order = ('preflop', 'flop', 'turn', 'river')
end_action_types = {'fold', 'allin'}
comb2deuces = {v: k for k, v in deuces2comb.items()}
evaluator = Evaluator()
# spectr_bins = 10
# Inclusive deuces-rank ranges per category (lower rank = stronger hand).
spectr_edges = {'nuts': [0, 322], 'flush': [323, 1599], 'straight': [1600, 1609], 'trips': [1610, 2467],
                'two_pairs': [2468, 3325], 'pair': [3326, 6185], 'high_card': [6186, 7462]}
# *** nash-params ***
# Histogram bin counts per category for the rank spectra.
spectr_bins = {'nuts': 10, 'flush': 25, 'straight': 10, 'trips': 13,
               'two_pairs': 20, 'pair': 13, 'high_card': 13}
boardcombs_to_combs = \
    {'board_pair': 'pair_1',
     'board_two_pairs': 'two_pairs',
     'board_trips': 'trips',
     'board_straight': 'straight',
     'board_flash': 'flush',
     'board_nuts': 'nuts',
     }
def _board_spectr(board):
    """Return (spectr, comb_hist) for `board` over all possible pocket hands.

    Thin dispatcher: currently always uses the single-process implementation;
    _board_spectr_multi is the multiprocessing variant.
    """
    return _board_spectr_single(board)
def _f_spectr(mem, arr):
    """Multiprocessing worker for _board_spectr_multi.

    Decodes the batch of pocket hands in `arr`, ranks each against
    mem['board'], and accumulates per-category counts and raw ranks into
    mem['comb_hist'] / mem['spectr'].
    """
    arr = _dec(arr)
    board = [Card.new(c) for c in mem['board']]
    for a in arr:
        # `a` is a 4-char string: two concatenated 2-char cards.
        player_hand = [Card.new(a[:2]), Card.new(a[2:])]
        rank = evaluator.evaluate(player_hand, board)
        comb = evaluator.class_to_string(evaluator.get_rank_class(rank))
        comb = deuces2comb[comb]
        mem['comb_hist'][comb] += 1
        mem['spectr'][comb].append(rank)
def _board_spectr_multi(board):
    """Multiprocessing version of _board_spectr_single.

    Enumerates every unordered pocket pair not on the board, fans the work
    out through multiproc/_f_spectr, then histograms the collected ranks per
    category. Returns (spectr, comb_hist).
    """
    def _player_hands():
        # All unordered two-card combinations that avoid board cards.
        visited = set(board)
        out = []
        for player_i in range(len(all_deck)):
            if all_deck[player_i] in visited:
                continue
            for player_j in range(player_i + 1, len(all_deck)):
                if all_deck[player_j] in visited:
                    continue
                out.append(all_deck[player_i] + all_deck[player_j])
        return out
    params = dict()
    params['board'] = board
    params['comb_hist'] = {c: 0 for c in set(deuces2comb.values())}
    params['spectr'] = {c: [] for c in spectr_edges.keys()}
    arr = _player_hands()
    multiproc(_f_spectr, arr, params)
    spectr, comb_hist = params['spectr'], params['comb_hist']
    # Convert each category's raw rank list into a fixed-bin histogram.
    for comb_name in spectr:
        spectr[comb_name] = np.histogram(spectr[comb_name], range=spectr_edges[comb_name], bins=spectr_bins[comb_name])[0]
    return spectr, comb_hist
def _board_spectr_single(board):
    """Rank every possible opposing pocket pair against `board`.

    Returns (spectr, comb_hist) where comb_hist counts how many pocket pairs
    land in each hand category, and spectr holds a per-category histogram of
    the raw deuces ranks.
    """
    visited = set(board)
    board = [Card.new(c) for c in board]
    comb_hist = {c: 0 for c in set(deuces2comb.values())}
    spectr = {c: [] for c in spectr_edges.keys()}
    # Enumerate unordered pocket pairs that avoid the board cards.
    for player_i in range(len(all_deck)):
        if all_deck[player_i] in visited:
            continue
        for player_j in range(player_i + 1, len(all_deck)):
            if all_deck[player_j] in visited:
                continue
            player_hand = [Card.new(all_deck[player_i]), Card.new(all_deck[player_j])]
            rank = evaluator.evaluate(player_hand, board)
            comb = evaluator.class_to_string(evaluator.get_rank_class(rank))
            comb = deuces2comb[comb]
            comb_hist[comb] += 1
            spectr[comb].append(rank)
    # Convert each category's raw rank list into a fixed-bin histogram.
    for comb_name in spectr:
        spectr[comb_name] = np.histogram(spectr[comb_name], range=spectr_edges[comb_name], bins=spectr_bins[comb_name])[0]
    return spectr, comb_hist
def evaluate(hero_hand, board):
    """Return the deuces rank (lower = stronger) of `hero_hand` on `board`.

    Both arguments are sequences of 2-char card strings; the board list is
    copied before conversion so the caller's list is not modified.
    """
    hero_hand = [Card.new(hero_hand[0]), Card.new(hero_hand[1])]
    board = [Card.new(c) for c in copy(board)]
    return evaluator.evaluate(hero_hand, board)
def deuces_comb(pocket, board, add_subrank=False):
    """Classify pocket+board into one of this project's hand categories.

    With add_subrank=True also returns a dict with:
      raw_rank - index of the histogram bin the rank falls into,
      irr_rank - rank position normalized to [0, 1] within the category range.
    """
    rank = evaluate(pocket, board)
    comb = evaluator.class_to_string(evaluator.get_rank_class(rank))
    comb = deuces2comb[comb]
    if add_subrank == False:
        return comb
    else:
        # One-hot histogram: exactly one bin is 1, its index is the bin number.
        spectr = list(np.histogram([rank], range=spectr_edges[comb], bins=spectr_bins[comb])[0])
        edges = spectr_edges[comb]
        subrank = {'raw_rank': spectr.index(1),
                   'irr_rank': (rank - edges[0]) / (edges[1] - edges[0])}
        return comb, subrank
def _hero_comb(pocket, board, add_draws=True):
    """Classify the hero's hand; optionally also detect draws.

    Returns (comb, subrank) when add_draws is False, otherwise
    (comb, subrank, draws) where draws may contain 'flash_draw' and one of
    'straight_draw' / 'gutshot'. A bare 'pair' is refined into
    'pair_1' / 'pair_2' via _pair_rank.
    """
    comb, subrank = deuces_comb(pocket, board, add_subrank=True)
    draws = []
    # 'pair-1-2'
    if comb == 'pair':
        comb = f'pair_{_pair_rank(pocket, board)}'
    if not add_draws:
        return comb, subrank
    # Draws are irrelevant once the made hand is already that strong.
    if comb not in {'flush', 'nuts'} and _flash_draw(pocket, board):
        draws.append('flash_draw')
    if comb not in {'straight', 'flush', 'nuts'}:
        draw_or_gut = _straight_draw_or_gutshot(pocket, board)
        if draw_or_gut:
            draws.append(draw_or_gut)
    return comb, subrank, draws
def _straight_draw_or_gutshot(pocket, board):
    """Detect a straight draw for pocket+board before the river.

    Counts how many distinct RANKS of the remaining deck would complete a
    straight (a completion that also makes a flush/nuts is ignored). Two
    completing ranks -> 'straight_draw', one -> 'gutshot', otherwise ''.
    NOTE(review): three or more completing ranks also return '' via the
    final else — confirm that is intended.
    """
    # Complete 7-card hand: nothing left to draw.
    if len(pocket + board) == 7:
        return ''
    cards = pocket + board
    out = []
    for r in all_ranks:
        for s in all_suits:
            if r + s not in cards:
                c = r + s
                # Swap the candidate card in alongside the existing cards.
                cur_comb = deuces_comb([c, cards[0]], cards[1:])
                if cur_comb in {'flush', 'nuts'}:
                    continue
                if cur_comb == 'straight':
                    out.append(r)
                # Only the first available suit per rank is probed.
                break
    if len(out) == 2:
        return 'straight_draw'
    elif len(out) == 1:
        return 'gutshot'
    else:
        return ''
def _flash_draw(pocket, board):
if len(pocket + board) == 7:
return False
cnt_suits = Counter([s for _, s in board + pocket])
return max(cnt_suits.values()) == 4
def _pair_rank(pocket, board):
    """Return 1 for a "first pair" holding, 2 for a "second pair" holding.

    NOTE(review): the max() below is keyed on rank (x[0]), so board_max_r is
    the HIGHEST board rank with its count — confirm that is intended rather
    than the most frequent rank.
    """
    cnt_board = Counter([rank2idx[r] for r, _ in board]).items()
    board_max_r, board_max_cnt = max(cnt_board, key=lambda x: x[0])
    # Board itself pairs its top rank -> first pair.
    if board_max_cnt == 2:
        return 1
    cnt_all = Counter([rank2idx[r] for r, _ in board + pocket])
    # Hero pairs the board's top rank -> first pair.
    if cnt_all[board_max_r] == 2:
        return 1
    # Unpaired pocket that pairs some lower board card -> second pair.
    if pocket[0][0] != pocket[1][0]:
        return 2
    pocket_r = rank2idx[pocket[0][0]]
    # Overpairs count as first pair; any pocket pair below the board's top
    # card counts as second pair.
    if board_max_r > pocket_r:
        return 2
    else:
        return 1
def _board_onehots(board, comb_hist):
    """Return the set of texture flags describing the board itself.

    `comb_hist` is the combination histogram from _board_spectr and is only
    consulted for the 'board_3straight' flag.
    """
    # [+] 'high_card'
    # [+] '23gutshot', min_delta_rank <= 4
    # [+]'2straight (3-4) min_delta_rank <= 3 and != 0
    # [+] '2flash' (3-4) cnt_suit=2
    # [+] '3straght'(3-5) 13*13 if a straight shows up at least once, flag it
    # [+] '3flash' (3-5) [3 of one suit]
    # [+] '4gutshot' (4-5)
    # [+] '4straight' (4-5)
    # [+] '4flash' (4-5) [4 of one suit]
    # [+] 'pair' (3-5)
    # [+] 'two_pairs (4-5)
    # [+] 'trips' (3-5)
    # [+] 'straight', (5) [1]
    # [+] 'flash',(5) [1]
    # [+] 'nuts', (5) [1]
    # multiple flags may be set at once
    out = set()
    if len(board) == 5:
        # A full board can itself be a made hand.
        dc = deuces_comb(board[:2], board[2:])
        if dc != 'high_card':
            out.add('board_' + deuces_comb(board[:2], board[2:]))
    # r_cnt: histogram of rank multiplicities; s_max: largest suit count.
    r_cnt = Counter(Counter([r for r, _ in board]).values())
    s_max = max(Counter([s for _, s in board]).values())
    if 'board_nuts' not in out:
        if r_cnt[3] == 1:
            out.add('board_trips')
        elif r_cnt[2] == 2:
            out.add('board_two_pairs')
        elif r_cnt[2] == 1:
            out.add('board_pair')
    if s_max == 4:
        out.add('board_4flash')
    if all(not label.endswith('straight') for label in out) and len(board) >= 4:
        sg = _straight_draw_or_gutshot(board[:2], board[2:])
        if sg:
            out.add('board_4straight' if sg == 'straight_draw' else 'board_4gutshot')
    if s_max == 3:
        out.add('board_3flash')
        if all(not label.endswith('straight') for label in out) and comb_hist['straight']:
            out.add('board_3straight')
    if len(board) != 5 and s_max == 2:
        out.add('board_2flash')
    if len(board) != 5 and all(not label.endswith('straight') for label in out):
        # Minimal gap between distinct board ranks decides 2straight/23gutshot.
        board_ranks = np.array(sorted(list(set([rank2idx[r] for r, _ in board]))))
        if len(board_ranks) > 1:
            r_diff = np.diff(board_ranks).min()
            if r_diff <= 3:
                out.add('board_2straight')
            elif r_diff == 4:
                out.add('board_23gutshot')
    if not out:
        out.add('high_card')
    return out
def _custom_board_action(bank, s0, bet, to_call):
    """Snap a raw bet size to the nearest discrete bet action label.

    The bet (net of the call amount) is compared, as a fraction of `bank`,
    against each k in kbet_to_string; with allin_as_custom enabled, an
    all-in option sized s0/bank also competes. Returns the winning label.
    """
    global allin_as_custom
    best_act, min_diff = 10000, 10000
    action_types = copy(kbet_to_string)
    if allin_as_custom:
        action_types[s0 / bank] = 'allin'
    # Compare only the raise amount on top of the call.
    bet -= to_call
    for k, v in action_types.items():
        if min_diff > abs(bet / bank - k):
            min_diff = abs(bet / bank - k)
            best_act = k
    return action_types[best_act]
def prefix_dict(dct, prefix):
    """Return a copy of `dct` with every key prepended by '<prefix>_'."""
    out = {}
    for key, value in dct.items():
        out[prefix + '_' + key] = value
    return out
def split_df_moves(df):
    """Flatten the dict-valued 'moves' columns of `df` into scalar columns.

    For each row and each column whose name contains 'moves', the nested
    dict is expanded into '<col>_c#_act_*' / '<col>_c#_comb_*' columns; the
    original moves columns are dropped from the returned frame.
    """
    df_added = []
    moves_cols = [c for c in df.columns if 'moves' in c]
    for i in range(len(df)):
        df_row = df.iloc[i]
        row = dict()
        for c in moves_cols:
            # Non-dict cell (e.g. NaN) ends this row's expansion.
            if type(df_row[c]) != dict:
                break
            if 'regular' in df_row[c].keys():
                moves = df_row[c]['regular']
                # NOTE(review): the same 'regular' dict is written under both
                # the _act and _comb prefixes — confirm this duplication is
                # intended.
                row.update(prefix_dict(moves, c + '_' + 'c#_act'))
                row.update(prefix_dict(moves, c + '_' + 'c#_comb'))
            else:
                moves_act = df_row[c]['c#_NO_hero']
                row.update(prefix_dict(moves_act, c + '_' + 'c#_act'))
                moves_comb = df_row[c]['c#_WITH_hero']
                row.update(prefix_dict(moves_comb, c + '_' + 'c#_comb'))
        df_added.append(row)
    df_added = pd.DataFrame(df_added)
    out = pd.concat([df, df_added], axis=1)
    return out.drop(moves_cols, axis=1)
if __name__ == '__main__':
    # Ad-hoc smoke-test scaffold kept for reference; the module currently
    # does nothing when executed directly.
    # # # ThTc 5h8s8c 7c 9c
    # # # 7h3h 6h4d5h 8d Kd
    # pocket, board = ['4d', '2d'], ['Ks', '4s', 'Ts', 'Jd', 'Qs']
    #
    # # board_3straight
    #
    # spectr, comb_hist = _board_spectr(board)
    #
    # board_onehots = _board_onehots(board, comb_hist)
    #
    # hero_comb, subrank, draws = _hero_comb(pocket, board)
    #
    # exit(1)
    #
    # deck = copy(all_deck)
    #
    # tmp = set()
    #
    # for _ in range(1000):
    #     random.shuffle(deck)
    #     sz_board = random.randint(3, 5)
    #     pocket, board = deck[:2], deck[2: 2 + sz_board]
    #     print(pocket, board)
    #
    #     spectr, comb_hist = _board_spectr(board)
    #     board_onehots = _board_onehots(board, comb_hist)
    #     hero_comb, subrank, draws = _hero_comb(pocket, board)
    #
    #     tmp |= set(draws)
    #
    #     print(spectr, comb_hist)
    #     print(board_onehots)
    #     print(hero_comb, subrank, draws)
    #
    #     print('\n\n\n')
    #
    #
    #     print(tmp)
    pass
| snowii/nash_srv | ml/a2_board/b0_common.py | b0_common.py | py | 12,479 | python | en | code | 0 | github-code | 90 |
18386542569 | from collections import Counter
# Read n points; find the minimum number of "pick up" operations, which is
# n minus the count of the most common pairwise displacement vector.
n=int(input())
if n==1:
    # A single point always needs exactly one operation.
    print(1)
    exit()
xy=[tuple(map(int,input().split())) for _ in range(n)]
# print(xy)
xy.sort()
# print(xy)
# Collect the displacement vector of every ordered (sorted) point pair.
l=[]
for i in range(n-1):
    for j in range(i+1,n):
        l.append((xy[j][0]-xy[i][0],xy[j][1]-xy[i][1]))
# most_common(): returns (element, count) tuples sorted by count, descending
print(n-Counter(l).most_common()[0][1])
| Aasthaengg/IBMdataset | Python_codes/p03006/s396296767.py | s396296767.py | py | 423 | python | en | code | 0 | github-code | 90 |
21746855518 | import requests
import json
from tqdm import tqdm
# SECURITY NOTE(review): the API key is hard-coded; consider loading it from
# an environment variable instead of committing it to the repository.
TMDB_API_KEY = '9186d5ace54e142f44d4f7e7a96d0043'
# Load existing fixtures: actors, directors, known movie ids and details.
with open("../api/fixtures/actor.json", "r", encoding="UTF-8") as f:
    actors = json.load(f)
with open("../api/fixtures/director.json", "r", encoding="UTF-8") as f:
    directors = json.load(f)
with open("../api/fixtures/movie_ids.json", "r", encoding="UTF-8") as f:
    movie_ids = json.load(f)
movie_set = set(movie_ids['movie_ids'])
with open("../api/fixtures/movie_detail.json", "r", encoding="UTF-8") as f:
    movie_details = json.load(f)
# Gather every movie id referenced by actors/directors but not yet fetched.
act_direct_movie_ids = []
for actor in actors:
    act_direct_movie_ids += actor['fields']['movies']
for director in directors:
    act_direct_movie_ids += director['fields']['movies']
need_movie_ids = list(set(act_direct_movie_ids) - movie_set)
# Fetch details for each missing movie and append a Django-fixture record.
for movie_id in tqdm(need_movie_ids):
    MOVIE_DETAIL_API_URL = f'https://api.themoviedb.org/3/movie/{movie_id}?api_key={TMDB_API_KEY}&language=ko-KR'
    movie_detail = requests.get(MOVIE_DETAIL_API_URL)
    movie_detail_json = json.loads(movie_detail.text)
    movie_detail_dict = {"model": "api.movie"}
    genre_lst = []
    try:
        for mv_genre in movie_detail_json['genres']:
            genre_lst.append(mv_genre['id'])
        country = movie_detail_json['production_countries'][0]['name'] if movie_detail_json['production_countries'] else None
        movie_detail_dict['fields'] = {
            "adult": False,
            "belongs_to_collection": movie_detail_json['belongs_to_collection']['id'] if movie_detail_json['belongs_to_collection'] else None,
            "id": movie_detail_json['id'],
            "original_language": movie_detail_json['original_language'],
            "overview": movie_detail_json['overview'],
            "popularity": movie_detail_json["popularity"],
            "poster_path": movie_detail_json["poster_path"],
            "release_date": movie_detail_json["release_date"],
            "runtime": movie_detail_json[ "runtime"],
            "title": movie_detail_json["title"],
            "vote_average": movie_detail_json["vote_average"],
            "vote_count": movie_detail_json["vote_count"],
            "genres": genre_lst,
            "country": country
        }
        movie_details.append(movie_detail_dict)
    except:
        # Best effort: skip movies whose payload lacks expected fields.
        continue
# Persist the updated id list and detail list to new fixture files.
movie_ids['movie_ids'] += need_movie_ids
with open("../api/fixtures/movie_ids_new.json", "w", encoding="UTF-8") as f:
    json.dump(movie_ids, f, indent=4, ensure_ascii=False)
json.dump(movie_details, f, indent=4, ensure_ascii=False) | vinitus/WhatToWatch | final-pjt-back/request_data/1.py | 1.py | py | 2,638 | python | en | code | 0 | github-code | 90 |
25892960589 | # lista de restaurantes
def busca_restaurantes(lista, categoria, valor):
    """Filter restaurant names by a search criterion.

    Each entry of `lista` is [name, cuisine, ambience, average_price].
    `categoria` selects the field to test:
      'culinaria' - cuisine equals valor,
      'ambiente'  - ambience equals valor,
      'preco'     - average price <= valor.
    Returns the matching names in input order ([] for no matches / empty list).
    """
    resultado = []
    # Idiom fix: iterate the entries directly instead of a manual while/index.
    for restaurante in lista:
        nome = restaurante[0]
        culinaria = restaurante[1]
        ambiente = restaurante[2]
        preco = restaurante[3]
        if categoria == 'culinaria' and culinaria == valor:
            resultado.append(nome)
        elif categoria == 'ambiente' and ambiente == valor:
            resultado.append(nome)
        elif categoria == 'preco' and preco <= valor:
            resultado.append(nome)
    return resultado
# Sample data: [name, cuisine, ambience, average price] per restaurant.
lista = [["Ristorante Italiano", "Italiano", "Elegante", 80],["Cantina Mexicana", "Mexicano", "Descontraído", 50],["Sushi Bar Japonês", "Japonês", "Sofisticado", 120],["Comida Vegana", "Vegano", "Alternativo", 60],["Lanchonete do Zé", "Fast-food", "Popular", 20]]
categoria = 'ambiente'
valor = 'Elegante'
# Expected output: ['Ristorante Italiano']
print(busca_restaurantes(lista, categoria, valor))
| kikepuppi/2023.1-Dessoft | Aula 6/listarestaurantes.py | listarestaurantes.py | py | 921 | python | pt | code | 0 | github-code | 90 |
11196858365 | import typer
import os
import time
from githubclass import Github
from typing import Optional
def main(name: str, gith: Optional[str] ) -> str:
    """Scaffold a new project directory, git-init it and optionally push to GitHub.

    Args:
        name: project (and repository) name; a directory with this name is
            created under the current working directory.
        gith: pass the literal string 'github' to also create and push a
            GitHub repository; any other value skips the remote setup.

    Returns:
        A short status message describing the outcome (implicitly ``None``
        when the GitHub login fails without raising).
    """
    typer.secho('Creating Your Project! 📦', fg=typer.colors.BRIGHT_MAGENTA, bold=True)
    cuf = os.getcwd()
    os.mkdir(name)
    pro_file = cuf + f'/{name}'
    time.sleep(1)
    os.chdir(pro_file)
    time.sleep(1)
    os.system('touch Readme.md')
    time.sleep(3)
    os.system('git init')
    os.system('git add .')
    os.system('git commit -m "Initial Commit"')
    try:
        if gith == 'github':
            usern = typer.prompt('Username')
            passwd = typer.prompt('Password', hide_input=True)
            choice = typer.prompt('Private/Public')
            github = Github(usern, passwd, choice, name)
            # BUGFIX: the original called github.login() twice (once with the
            # result discarded), performing two remote login attempts.
            if github.login():
                os.system(f"git remote add origin git@github.com:{usern}/{name}.git")
                time.sleep(1)
                os.system("git branch -M main")
                time.sleep(1)
                os.system("git push -u origin main")
                typer.secho(f'Projecet Created at {pro_file}', fg=typer.colors.BRIGHT_GREEN)
                typer.secho(f'Github Repo: https://github.com/{usern}/{name}', fg=typer.colors.BRIGHT_GREEN)
                return "Logged In"
            else:
                typer.secho('Unable To Log In')
        else:
            typer.secho(f'Project Created at {pro_file}', fg=typer.colors.BRIGHT_GREEN)
            return "Created Project Without Github Repo!"
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still propagate.
        # NOTE(review): the cwd is inside the new directory at this point, so
        # os.rmdir(name) resolves relative to it -- confirm the intended
        # cleanup path (and that the directory is empty enough to remove).
        os.rmdir(f'{name}')
        typer.secho('Unable To Create Project!', fg=typer.colors.RED)
        return "Invalid Credentials!"
if __name__=="__main__":
typer.run(main) | pratushrai0309/cpro | cpro/main.py | main.py | py | 1,571 | python | en | code | 1 | github-code | 90 |
73052606376 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 28 15:11:37 2020
@author: jain
"""
import numpy as np
import matplotlib.pyplot as plt
import time
## Import extra files
import Cal_moments_samples as MOM
import transform_domain as td
import Initial_lam as Initlam
import GLeg_pts as GL
import pdf_MaxEnt as pdf
# import pdf_MaxEnt_extd as pdfext
import MaxEntPdf as MEP
import plot_prop_param as plotprop
import MonteCarlo_propagation as MCp
import moment_propagation as Mp
# import StateCorrelation_MC_PME as plotMCpmeCorrelation
# import StateCorrelation_MC as plotMCCorrelation
import moment_transform as mT
import convert2indexNotation as C2IN
#############################################################################################################################################################################
######## Example Dubins problem
import Parameters_Airplane_gust_2Dsystem as Param
Parameters, InitialConditions, ControlParam, xdim, udim, Distribution = Param.parameters()
orderx, orderu, dt, tf, MC_n = Parameters
ax_0, bx_0, au_0, bu_0 = InitialConditions
au, bu = ControlParam
#############################################################################################################################
########### Monte Carlo Simulation
MC_propagation, MC_Propagation = MCp.MonteCarlo_propagation(dt, tf, MC_n, ax_0, bx_0, au, bu, xdim, udim)
########### Expectation value using Index Notation
First_moment, First_moment_array, Second_moment_raw, Second_moment_central, Sigma3_array, Third_moment_raw, Third_moment_central, Fourth_moment_raw, Fourth_moment_central = Mp.moment_propagation(tf, dt, InitialConditions, ControlParam, Distribution, 'CUT', MC_n, xdim, udim, orderx , orderu)
###########################################################################################################################################################################
#%%%
# Time = [0.5]
# Time = [ 0, 1, 1.5, 2, 2.5, 3 ,3.5, 4.5, 5, 5.5, 6,6.5, 7, 7.5,8,8.5,9,10] ## 0.5, 4, 9.5 are calculated using minimize function (BFGS)
Time = [ 0, 0.5,1, 1.5, 2, 2.5, 3 ,3.5, 4, 4.5, 5, 5.5, 6,6.5, 7, 7.5,8,8.5,9,9.5, 10]
# FIRSTmom = []
# FIRSTmomarr = np.copy(0*First_moment_array)
# SECONDmom = []
# SECONDmomraw = []
# THIRDmom = []
# THIRDmomraw = []
# FOURTHmomraw = []
# SECONDmomrawCUT =[]
# THIRDmomrawCUT =[]
# FOURTHmomrawCUT=[]
for ct in range(len(Time)):
print('.')
print('TimeStep = ',Time[ct])
timestep=int(Time[ct]/dt)
X = MC_propagation[MC_n*timestep:MC_n*(timestep+1), 1:]
ns = np.size(X, 1)
Nsamp = len(X)
w=1/MC_n
s=1
bL = np.amin(X, axis=0)
bU = np.amax(X, axis=0)
y1, mu1 = MOM.Cal_moments_samples(X, w, 1,'central')
y2, m2r = MOM.Cal_moments_samples(X, w, 2, 'raw')
y2, m2 = MOM.Cal_moments_samples(X, w, 2, 'central')
y3, m3 = MOM.Cal_moments_samples(X, w, 3, 'central')
y3, m3r = MOM.Cal_moments_samples(X, w, 3, 'raw')
y4, m4r = MOM.Cal_moments_samples(X, w, 4, 'raw')
y4, m4 = MOM.Cal_moments_samples(X, w, 4, 'central')
# FIRSTmomarr[ct,0] = timestep
# FIRSTmomarr[ct,1:] = mu1
# FIRSTmom.append(mu1)
# SECONDmomraw.append(m2r.T)
# SECONDmom.append(m2.T)
# THIRDmomraw.append(m3r.T)
# THIRDmom.append(m3)
# FOURTHmomraw.append(m4r.T)
Xt, dtt = td.transform_domain(X, bL, bU, -s*np.ones([1,ns]), s*np.ones([1,ns]))
# Xtmc, dtt = td.transform_domain(Xt, -s*np.ones([1,ns]), s*np.ones([1,ns]), bL, bU) ## getting X back
[_, m1t]=MOM.Cal_moments_samples(Xt,w,1,'raw')
[_, m2t]=MOM.Cal_moments_samples(Xt,w,2,'raw')
[_, m3t]=MOM.Cal_moments_samples(Xt,w,3,'raw')
[_, m4t]=MOM.Cal_moments_samples(Xt,w,4,'raw')
E10 = First_moment[timestep]
E20 = Second_moment_raw[timestep]
E30 = Third_moment_raw[timestep]
E40 = Fourth_moment_raw[timestep]
M10, M20, M30, M40 = C2IN.convert2indexNotation(E10, E20, E30, E40)
E11, E22, E33, E44 = mT.moment_transform(bU, bL, E10, E20, E30, E40 )
## Converting it back in "index notation" format
M1, M2, M3, M4 = C2IN.convert2indexNotation(E11, E22, E33, E44)
# SECONDmomrawCUT.append(M20)
# THIRDmomrawCUT.append(M30)
# FOURTHmomrawCUT.append(M40)
# print(mu1); print(M10)
# print(m2r.T); print(M20)
# print(m3r.T); print(M30)
# print(m4r.T); print(M40)
MOMmethod = 'CUT'
MOMmethod = 'MC'
### lam0 calculation
if MOMmethod == 'CUT':
M = np.append( np.append(1, M1), M2)
if MOMmethod == 'MC':
M = np.append( np.append(1, m1t), m2t)
M= np.squeeze(M)
lam00 = np.squeeze(Initlam.Initial_lam(y1, y2, M))
xl = -s*np.ones([1,ns])
xu = s*np.ones([1,ns])
methodd = 'GL'
########################## 2nd MOMS constraint ####################################################################################
y = np.vstack( (np.vstack((np.zeros([1,ns]), y1 )) , y2) )
tic = time.time()
Y2, lam22 = MEP.MaxEntPdf(y, M, xl, xu, lam00, methodd)
lam2 =lam22.x
toc = time.time()
print( toc-tic, 'sec Elapsed for order-2')
print('Moments computed by',MOMmethod ,'method')
print('function value for 2nd order= ', lam22.fun)
##%%
############################ 3rd MOMS constraint ####################################################################################
y = np.vstack( (y , y3) )
if MOMmethod == 'CUT':
M = np.append(M, M3)
if MOMmethod == 'MC':
M = np.append(M, m3t)
lam0 = np.append(lam00, np.zeros([ len(y) - len(lam2) ]))
lam0 = np.append(lam2, np.zeros([ len(y) - len(lam2) ]))
tic = time.time()
Y3, lam33 = MEP.MaxEntPdf(y, M, xl, xu, lam0, methodd)
lam3=lam33.x
toc = time.time()
print( toc-tic, 'sec Elapsed for order-3')
print('function value for 3rd order= ',lam33.fun)
##%%
############################# 4th MOMS constraint ####################################################################################
y = np.vstack( (y , y4) )
if MOMmethod == 'CUT':
M = np.append(M, M4)
if MOMmethod == 'MC':
M = np.append(M, m4t)
# lam0 = np.append(lam00, np.zeros([ len(y) - len(lam2) ]))
lam0 = np.append(lam3, np.zeros([ len(y) - len(lam3) ]))
tic = time.time()
Y4, lam44 = MEP.MaxEntPdf(y, M, xl, xu, lam0, methodd)
lam4=lam44.x
toc = time.time()
print( toc-tic, 'sec Elapsed for order-4')
print('function value for 4th order= ',lam44.fun)
############################# Plotting figures ####################################################################################
Xtmc = Xt
s=1
[xx,zz] = np.meshgrid(np.linspace(-1*s,1*s,100),np.linspace(-1*s,1*s,100));
[xx_real, zz_real] = np.meshgrid(np.linspace(bL[0], bU[0], 100),np.linspace(bL[1], bU[1], 100))
[XXX,WW] = GL.GLeg_pts( np.array([29, 29, 29]), -1*s , 1*s )
XX = np.zeros(np.shape(XXX))
W = np.zeros(np.shape(WW))
for i in range( len(W)):
XX[i, 0] = XXX[len(XXX)-i-1, 0]
W[i] = WW[len(WW)-i-1]
pent2=np.zeros(np.shape(xx));
pent3=np.zeros(np.shape(xx));
pent4=np.zeros(np.shape(xx));
# pent5=np.zeros(np.shape(xx));
# pent6=np.zeros(np.shape(xx));
for i in range( len(xx)):
for j in range(len(xx)):
for k in range(len(W)):
x = np.array([xx[i,j],zz[i,j]])
x= x.reshape([len(x) , 1])
pent2[i,j]= W[k]*pdf.pdf_MaxEnt( x, lam2.T, Y2 )
pent3[i,j]= W[k]*pdf.pdf_MaxEnt( x, lam3.T, Y3 )
pent4[i,j]= W[k]*pdf.pdf_MaxEnt( x, lam4.T, Y4 )
# pent2[i,j]=pent2[i,j] + W[k]*pdfext.pdf_MaxEnt_extd( x, lam2.T, Y2, xl, xu, np.eye(len(lam2)))
# pent3[i,j]=pent3[i,j] + W[k]*pdfext.pdf_MaxEnt_extd( x, lam3.T, Y3, xl, xu, np.eye(len(lam3)))
# pent4[i,j]=pent4[i,j] + W[k]*pdfext.pdf_MaxEnt_extd( x, lam4.T, Y4, xl, xu, np.eye(len(lam4)))
if MOMmethod == 'CUT':
fig = plt.figure(figsize=(20, 25))
# plt.scatter(Xtmc[:,0],Xtmc[:,1], 20, color='cyan')
plt.scatter(X[:,0],X[:,1], 20, color='cyan')
vec= np.linspace(0, np.max(pent2), 50 )
vec1 = np.linspace(vec[0], vec[1], 4)
vec2 = np.hstack( ( vec1,vec[2:]) )
S1 = plt.contour(xx_real, zz_real, pent2, vec2, linewidths=5)
plt = plotprop.plot_prop_param('X','Y', plt)
plt.show()
plt.pause(1)
plt.savefig('RESULTSCUT/PME_CUT_XY_'+str( Time[ct]) +'sec_order2.png' , bbox_inches='tight' )
# plt.clabel(S1,inline=1)
# plt.savefig('RESULTS/RootResultsSqTerms/PME_XY_inline_'+str( Time[ct]) +'sec_order2.png' , bbox_inches='tight' )
plt.close()
plt.figure(figsize=(20, 25))
# plt.scatter(Xtmc[:,0],Xtmc[:,1], 20, color='cyan')
plt.scatter(X[:,0],X[:,1], 20, color='cyan')
vec= np.linspace(0, np.max(pent3), 50 )
vec1 = np.linspace(vec[0], vec[1], 4)
vec2 = np.hstack( ( vec1,vec[2:]) )
S1 = plt.contour(xx_real, zz_real, pent3, vec2, linewidths=5)
plt = plotprop.plot_prop_param('X','Y', plt)
plt.show()
plt.pause(1)
plt.savefig('RESULTSCUT/PME_CUT_XY_'+str( Time[ct]) +'sec_order3.png', bbox_inches='tight' )
# plt.clabel(S1,inline=1)
# plt.savefig('RESULTS/RootResultsSqTerms/PME_XY_inline_'+str( Time[ct]) +'sec_order3.png', bbox_inches='tight' )
plt.close()
plt.figure(figsize=(20, 25))
plt.scatter(X[:,0],X[:,1], 20, color='cyan')
vec= np.linspace(0, np.max(pent4), 50 )
vec1 = np.linspace(vec[0], vec[1], 4)
vec2 = np.hstack( ( vec1,vec[2:]) )
S1 = plt.contour(xx_real, zz_real, pent4, vec2 , linewidths=5)
plt = plotprop.plot_prop_param('X','Y', plt)
plt.show()
plt.pause(1)
plt.savefig('RESULTSCUT/PME_CUT_XY_'+str( Time[ct]) +'sec_order4.png', bbox_inches='tight' )
# plt.clabel(S1,inline=1)
# plt.savefig('RESULTS/RootResultsSqTerms/PME_XY_inline_'+str( Time[ct]) +'sec_order4.png', bbox_inches='tight' )
plt.close()
if MOMmethod == 'MC':
fig = plt.figure(figsize=(20, 25))
# plt.scatter(Xtmc[:,0],Xtmc[:,1], 20, color='cyan')
plt.scatter(X[:,0],X[:,1], 20, color='cyan')
vec= np.linspace(0, np.max(pent2), 50 )
vec1 = np.linspace(vec[0], vec[1], 4)
vec2 = np.hstack( ( vec1,vec[2:]) )
S1 = plt.contour(xx_real, zz_real, pent2, vec2, linewidths=5)
plt = plotprop.plot_prop_param('X','Y', plt)
plt.show()
plt.pause(1)
plt.savefig('RESULTSMC/PME_MC_XY_'+str( Time[ct]) +'sec_order2.png' , bbox_inches='tight' )
# plt.clabel(S1,inline=1)
# plt.savefig('RESULTS/RootResultsSqTerms/PME_XY_inline_'+str( Time[ct]) +'sec_order2.png' , bbox_inches='tight' )
plt.close()
plt.figure(figsize=(20, 25))
# plt.scatter(Xtmc[:,0],Xtmc[:,1], 20, color='cyan')
plt.scatter(X[:,0],X[:,1], 20, color='cyan')
vec= np.linspace(0, np.max(pent3), 50 )
vec1 = np.linspace(vec[0], vec[1], 4)
vec2 = np.hstack( ( vec1,vec[2:]) )
S1 = plt.contour(xx_real, zz_real, pent3, vec2, linewidths=5)
plt = plotprop.plot_prop_param('X','Y', plt)
plt.show()
plt.pause(1)
plt.savefig('RESULTSMC/PME_MC_XY_'+str( Time[ct]) +'sec_order3.png', bbox_inches='tight' )
# plt.clabel(S1,inline=1)
# plt.savefig('RESULTS/RootResultsSqTerms/PME_XY_inline_'+str( Time[ct]) +'sec_order3.png', bbox_inches='tight' )
plt.close()
plt.figure(figsize=(20, 25))
plt.scatter(X[:,0],X[:,1], 20, color='cyan')
vec= np.linspace(0, np.max(pent4), 50 )
vec1 = np.linspace(vec[0], vec[1], 4)
vec2 = np.hstack( ( vec1,vec[2:]) )
S1 = plt.contour(xx_real, zz_real, pent4, vec2 , linewidths=5)
plt = plotprop.plot_prop_param('X','Y', plt)
plt.show()
plt.pause(1)
plt.savefig('RESULTSMC/PME_MC_XY_'+str( Time[ct]) +'sec_order4.png', bbox_inches='tight' )
# plt.clabel(S1,inline=1)
# plt.savefig('RESULTS/RootResultsSqTerms/PME_XY_inline_'+str( Time[ct]) +'sec_order4.png', bbox_inches='tight' )
plt.close()
| axj307/Moment-Calculation | Airplane2D/Airplane2D.py | Airplane2D.py | py | 12,540 | python | en | code | 0 | github-code | 90 |
72810199977 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask, request, Response
import json
import traceback
from toy.module_a.logic import cut
from toy.utils.exception import *
from toy.utils.logger import LoggerFactory
from toy.utils.timer import Timer
app = Flask(__name__) # create a Flask instance
@app.route('/')
def index():
    """Root endpoint; returns a static greeting."""
    return "Hello, World!"
@app.route('/cut/para/<string:content>') # syntax warning: must not have space beside ":"
def paraCut(content):
    """GET endpoint: segment ``content`` (taken from the URL path) and return a plain-text summary."""
    word_str, word_num = cut(content)
    return "words: {}; number of words: {}".format(word_str, word_num)
@app.route('/cut/json', methods=['POST'])
def jsonCut():
    """POST /cut/json: segment the 'content' field of a JSON payload.

    Expects ``{"docid": ..., "content": ..., "extra": ...}`` and always
    returns a JSON response ``{"message": ..., "data": ...}`` whose HTTP
    status carries the success or mapped error code.
    """
    log = LoggerFactory.getDebugLogger()
    status = None
    message = None
    output_data = {}
    docid = None
    try:
        # get the JSON body of the request
        json_dict = request.get_json()
        log.info('Processing starts, input: {}'.format(json_dict))
        # validate the input payload
        docid = json_dict['docid']
        content = json_dict['content']
        extra = json_dict.get('extra', None)
        if content == '':
            raise EmptyContentException('Content is empty')
        if extra is None:
            log.warning('docid:{}, API Internal Warn: '.format(docid) + 'Missing attribute "extra"')
        # run business logic, timing the cut() call
        with Timer() as t:
            word_str, word_num = cut(content, docid)
        log.info("docid:{}, Processing time for cut is: {}".format(docid, t.elapse), extra={'elapse':t.elapse}) # @fields.elapse in kibana
        # prepare Response info
        output_data = {'word_str': word_str, 'word_num': word_num}
        status = ErrorCode.SUCCESS.value
        message = 'OK'
    except Exception as e:
        # TRACKED_EXCEPTIONS carry their own status code and log level;
        # anything else is reported as a generic internal error.
        if type(e) not in TRACKED_EXCEPTIONS:
            status = ErrorCode.ERROR.value
            log_level = 'error'
        else:
            status = e.code.value
            log_level = e.log_level
        message = '{}: {}'.format(type(e).__name__, str(e))
        if log_level == 'error':
            log.error('docid:{}, API Internal Error:\n'.format(docid) + str(traceback.format_exc()))
        elif log_level == 'warning':
            log.warning('docid:{}, API Internal Warn: '.format(docid) + str(message))
    finally:
        # NOTE(review): returning from `finally` swallows any in-flight
        # exception not handled above -- confirm this is intended.
        output = {'message': message, 'data': output_data}
        js = json.dumps(output)
        response = Response(js, status=status, mimetype='application/json')
        log.info("docid:{}, Processing finished, result is: {}".format(docid, output))
        return response
if __name__ == '__main__':
app.run(debug=True, host="0.0.0.0")
| simoncos/practical-python | rest-api-deploy/toy_project_v2/toy/toy/__init__.py | __init__.py | py | 2,673 | python | en | code | 7 | github-code | 90 |
42227143700 | from edge import DummyEdgeEnd
from simulation_event import AbstractSimulationEvent
from stats import TripStats
class AbstractAntMove(AbstractSimulationEvent):
    """Simulation event for one ant travelling along one edge of the graph."""
    def __init__(self, ant, origin, destination, end_time, pheromone_to_drop, trip_stats):
        """Store the move; origin/destination must be ends of the same edge (asserted below)."""
        self.ant = ant
        self.origin = origin
        self.destination = destination
        if self.origin is not None and self.destination is not None:
            if self.origin.edge is not None and self.destination.edge is not None:
                #print 'origin', self.origin
                #print 'destination', self.destination
                assert self.origin.edge == self.destination.edge
        self.end_time = end_time
        self.pheromone_to_drop = pheromone_to_drop
        self.trip_stats = trip_stats
    def process_start(self):
        """Drop pheromone at the origin end; return the world items that changed."""
        self.origin.drop_pheromone(self.pheromone_to_drop)
        return frozenset((self.origin.edge, self.origin.point))
    def process_end(self, reality, stats):
        """Finish the move at the destination and produce the ant's next event.

        Returns a ``(next_event, changed_items)`` pair.  At a food point with
        empty jaws the ant picks up one unit and keeps walking; at the anthill
        with food it deposits the food and a fresh ant is respawned; in every
        other case the ant ticks to choose its next edge.
        """
        changed = [self.destination.edge]
        self.trip_stats.edge_visited(self.destination.edge)
        self.destination.drop_pheromone(self.pheromone_to_drop)
        if not self.destination.point.is_anthill() and self.destination.point.food > 0 and not self.ant.food: # ant has found the food
            changed.append(self.destination.point)
            self.trip_stats.food_found()
            self.destination.point.food -= 1
            self.ant.food += 1
            stats.food_found(self.trip_stats)
            stats.present()
        elif self.destination.point.is_anthill(): # ant has returned to the anthill
            if self.ant.food: # with food
                changed.append(self.destination.point)
                self.destination.point.food += self.ant.food
                self.trip_stats.back_home()
                # A brand-new ant of the same class restarts from the anthill.
                new_ant = self.ant.__class__(self.ant.world_parameters)
                return AntRestartMove(new_ant, anthill=DummyEdgeEnd(self.destination.point), end_time=reality.world.elapsed_time), frozenset(changed)
            else: # with no food
                self.trip_stats.reset_route()
        # Normal step: the ant chooses the next edge and a pheromone amount.
        new_destination_edge, pheromone_to_drop = self.ant.tick(self.destination.point)
        assert new_destination_edge in (end.edge for end in self.destination.point.edge_ends), 'Illegal ant move'
        assert reality.environment_parameters.min_pheromone_dropped_by_ant <= pheromone_to_drop <= reality.environment_parameters.max_pheromone_dropped_by_ant, 'Illegal ant pheromone drop: %s' % (repr(pheromone_to_drop),)
        self.trip_stats.normal_move(new_destination_edge.cost)
        new_destination = new_destination_edge.get_other_end_by_point(self.destination.point)
        origin = new_destination_edge.get_other_end(new_destination)
        end_time = reality.world.elapsed_time + new_destination_edge.cost
        return AntMove(
            ant=self.ant,
            origin=origin,
            destination=new_destination,
            end_time=end_time,
            pheromone_to_drop=pheromone_to_drop,
            trip_stats=self.trip_stats,
        ), frozenset(changed)
    def __repr__(self):
        return '%s@%s' % (self.__class__.__name__, self.end_time,)
class AntRestartMove(AbstractAntMove):
    """A (re)spawned ant appearing at the anthill: no origin, no pheromone, fresh trip stats."""
    def __init__(self, ant, anthill, end_time):
        super(AntRestartMove, self).__init__(ant, None, anthill, end_time=end_time, pheromone_to_drop=0, trip_stats=TripStats())
    def process_start(self):
        """No origin end exists, so starting this event changes nothing."""
        return frozenset()
class AntStartMove(AntRestartMove):
    """Initial spawn of an ant at simulation time zero."""
    def __init__(self, ant, anthill):
        super(AntStartMove, self).__init__(ant, anthill, end_time=0)
class AntMove(AbstractAntMove):
    """Ordinary edge traversal; all behaviour lives in AbstractAntMove."""
    pass
| ppolewicz/ant-colony | antcolony/ant_move.py | ant_move.py | py | 3,648 | python | en | code | 0 | github-code | 90 |
35176446416 | from letters import Letter
from letter_box import LetterBox
from postoffice import PostOffice
class Person:
    """Someone who can carry a letter and exchange it with letter boxes or the post office."""

    def __init__(self, addressee=None, letter=None):
        """Remember who the carried letter is for, and the letter itself."""
        self._addressee = addressee
        self._letter = letter

    def deliver_letter(self, location=False, post_office=None, letter_box=None):
        """Drop the carried letter off.

        With ``location`` truthy the letter goes to ``post_office``;
        otherwise it is placed in ``letter_box`` (raising its flag) when
        the box's address matches this person's addressee.
        """
        if location:
            post_office._letter = self._letter
            return
        if self._addressee == letter_box._address:
            letter_box.put_flag_up(self._letter)

    def receive_letter(self, letter_box=None, post_office=None, is_charlie=False):
        """Pick a letter up: from the post office when ``is_charlie``, else from a letter box."""
        if is_charlie:
            self._letter = post_office._letter
        else:
            self._letter = letter_box._letter
            letter_box.put_flag_down()
| SamW2121/C-App-Prog-Python-AT2 | C-App-Prog-Python-AT2/person.py | person.py | py | 1,064 | python | en | code | 0 | github-code | 90 |
18028016679 | import sys
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)
INF = 1 << 60
MOD = 1000000007
def main():
N = int(readline())
S = readline().strip()
x = ans = 0
for s in S:
if s == 'I':
x += 1
if x > ans:
ans = x
else:
x -= 1
print(ans)
return
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p03827/s259561193.py | s259561193.py | py | 444 | python | en | code | 0 | github-code | 90 |
28857865135 | # The mono-alphabetic substitution cipher (to encrypt and decrpt text)
# Predefined alphabets: ciphertext letter PDC[i] corresponds to plaintext PDP[i].
PDC = "XEUADNBKVMROCQFSYHWGLZIJPT"
PDP = "abcdefghijklmnopqrstuvwxyz"

def Decryption(a):
    """Print (without newline) the plaintext letter for ciphertext letter ``a[0]``.

    Prints nothing when the character is not in the cipher alphabet.
    """
    position = PDC.find(a[0])
    if position != -1:
        print(PDP[position], end="")

def Encription(a):
    """Print (without newline) the ciphertext letter for plaintext letter ``a[0]``.

    Prints nothing when the character is not in the plain alphabet.
    """
    position = PDP.find(a[0])
    if position != -1:
        print(PDC[position], end="")
# Ciphertext to be decrypted (the loop below prints the plaintext, one letter at a time).
Text = "GDOOKVCXEFLGCD"
for i in range(len(Text)):
    Decryption(Text[i])
| vinodkumavat/Cryptography | Mono-alphabetic-substitution-cipher/Main.py | Main.py | py | 516 | python | en | code | 0 | github-code | 90 |
35281076807 | import numpy as np
import torch
class Normalize(object):
    """Standardise the first three MFCC channels of a sample in place.

    Each of ``mfcc[0..2]`` is shifted to zero mean and scaled to unit
    standard deviation; channels whose standard deviation is zero are left
    untouched to avoid division by zero.
    """

    def __call__(self, sample):
        label, wavname, mfcc = sample['label'], sample['wavname'], sample['mfcc']
        # The original code repeated this stanza verbatim for channels 0-2;
        # a loop removes the duplication without changing the result.
        for channel in range(3):
            mean = np.mean(mfcc[channel])
            std = np.std(mfcc[channel])
            if std > 0:
                mfcc[channel] = (mfcc[channel] - mean) / std
        return {'label': label, 'mfcc': mfcc, 'wavname': wavname}
class ToTensor(object):
    """Convert a sample's 'ecg' array and 'label' into torch tensors."""

    def __call__(self, sample):
        return {
            'label': torch.from_numpy(np.asarray(sample['label'])),
            'ecg': torch.from_numpy(sample['ecg']),
        }
class ImageNormalize(object):
def __call__(self, sample):
label, wavname, mfcc = sample['label'], sample['wavname'], sample['mfcc']
mfcc = 0+(255-0)/(np.max(mfcc)-np.min(mfcc)) * (mfcc-np.min(mfcc))
mfcc = np.repeat(mfcc[:, :, np.newaxis], 3, axis=2)
transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
mfcc = transform(mfcc)
sample = {'label':label, 'mfcc':mfcc, 'wavname':wavname}
return sample | zili98/ELEC576-Deep-Learning-Final-Project | src/utils/transforms.py | transforms.py | py | 1,377 | python | en | code | 0 | github-code | 90 |
18275268029 | from sys import stdin
from functools import lru_cache
N = int(stdin.readline().rstrip())
K = int(stdin.readline().rstrip())
@lru_cache(None)
def f(N, K):
    """Count integers x with 0 <= x <= N that have exactly K non-zero digits.

    Digit DP over the decimal representation, memoised with lru_cache,
    recursing from the least significant digit upward.
    """
    if K < 0:
        return 0
    if N < 10:
        # Single-digit range [0, N]: only 0 has zero non-zero digits,
        # and 1..N are the N values with exactly one non-zero digit.
        return 1 if K == 0 else (N if K == 1 else 0)
    higher, last = divmod(N, 10)
    # Last digit zero: all K non-zero digits come from the prefix in [0, higher].
    total = f(higher, K)
    if K >= 1:
        # Non-zero last digit d with d <= last: prefix may be anything in [0, higher].
        total += last * f(higher, K - 1)
        # Non-zero last digit d with d > last: prefix must stay strictly below `higher`.
        total += (9 - last) * f(higher - 1, K - 1)
    return total
print(f(N, K)) | Aasthaengg/IBMdataset | Python_codes/p02781/s515810955.py | s515810955.py | py | 703 | python | ja | code | 0 | github-code | 90 |
# Pursuit on a number line: the chaser starts at position a with speed v,
# the target at position b with speed w; t is the time limit.
a, v = [int(i) for i in input().split()]
b, w = [int(i) for i in input().split()]
t = int(input())
# A chaser that is not strictly faster can never close the gap.
if w >= v:
    print("NO")
    exit()
#print((b-a) / (v-w))
# The target runs away from the chaser, so both move in the direction from
# a to b; the catch succeeds iff the initial gap is covered in time,
# i.e. |b - a| <= (v - w) * t, written out per direction below.
if b > a:
    if a + v*t >= b + w*t:
        print("YES")
    else:
        print("NO")
else :
    if a - v*t <= b - w*t:
        print("YES")
    else:
        print("NO")
| Aasthaengg/IBMdataset | Python_codes/p02646/s549465814.py | s549465814.py | py | 308 | python | en | code | 0 | github-code | 90 |
14412535470 | from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar, Union
import attr
from ..types import UNSET, Unset
if TYPE_CHECKING:
from ..models.code_name_pair import CodeNamePair
from ..models.post_order_line_free_values import PostOrderLineFreeValues
from ..models.post_order_line_prices import PostOrderLinePrices
T = TypeVar("T", bound="PostOrderLine")
@attr.s(auto_attribs=True)
class PostOrderLine:
"""
Attributes:
row_number (str):
article_number (str):
number_of_items (Union[Unset, float]):
comment (Union[Unset, None, str]):
should_be_picked (Union[Unset, None, bool]):
serial_number (Union[Unset, None, str]):
line_total_customs_value (Union[Unset, None, float]):
batch_number (Union[Unset, None, str]):
line_type (Union[Unset, None, CodeNamePair]):
prices (Union[Unset, None, PostOrderLinePrices]):
customer_article_number (Union[Unset, None, str]):
warehouse_instruction (Union[Unset, None, str]):
external_id (Union[Unset, None, str]):
article_item_status (Union[Unset, None, CodeNamePair]):
line_free_values (Union[Unset, None, PostOrderLineFreeValues]):
"""
row_number: str
article_number: str
number_of_items: Union[Unset, float] = UNSET
comment: Union[Unset, None, str] = UNSET
should_be_picked: Union[Unset, None, bool] = UNSET
serial_number: Union[Unset, None, str] = UNSET
line_total_customs_value: Union[Unset, None, float] = UNSET
batch_number: Union[Unset, None, str] = UNSET
line_type: Union[Unset, None, "CodeNamePair"] = UNSET
prices: Union[Unset, None, "PostOrderLinePrices"] = UNSET
customer_article_number: Union[Unset, None, str] = UNSET
warehouse_instruction: Union[Unset, None, str] = UNSET
external_id: Union[Unset, None, str] = UNSET
article_item_status: Union[Unset, None, "CodeNamePair"] = UNSET
line_free_values: Union[Unset, None, "PostOrderLineFreeValues"] = UNSET
def to_dict(self) -> Dict[str, Any]:
row_number = self.row_number
article_number = self.article_number
number_of_items = self.number_of_items
comment = self.comment
should_be_picked = self.should_be_picked
serial_number = self.serial_number
line_total_customs_value = self.line_total_customs_value
batch_number = self.batch_number
line_type: Union[Unset, None, Dict[str, Any]] = UNSET
if not isinstance(self.line_type, Unset):
line_type = self.line_type.to_dict() if self.line_type else None
prices: Union[Unset, None, Dict[str, Any]] = UNSET
if not isinstance(self.prices, Unset):
prices = self.prices.to_dict() if self.prices else None
customer_article_number = self.customer_article_number
warehouse_instruction = self.warehouse_instruction
external_id = self.external_id
article_item_status: Union[Unset, None, Dict[str, Any]] = UNSET
if not isinstance(self.article_item_status, Unset):
article_item_status = self.article_item_status.to_dict() if self.article_item_status else None
line_free_values: Union[Unset, None, Dict[str, Any]] = UNSET
if not isinstance(self.line_free_values, Unset):
line_free_values = self.line_free_values.to_dict() if self.line_free_values else None
field_dict: Dict[str, Any] = {}
field_dict.update(
{
"rowNumber": row_number,
"articleNumber": article_number,
}
)
if number_of_items is not UNSET:
field_dict["numberOfItems"] = number_of_items
if comment is not UNSET:
field_dict["comment"] = comment
if should_be_picked is not UNSET:
field_dict["shouldBePicked"] = should_be_picked
if serial_number is not UNSET:
field_dict["serialNumber"] = serial_number
if line_total_customs_value is not UNSET:
field_dict["lineTotalCustomsValue"] = line_total_customs_value
if batch_number is not UNSET:
field_dict["batchNumber"] = batch_number
if line_type is not UNSET:
field_dict["lineType"] = line_type
if prices is not UNSET:
field_dict["prices"] = prices
if customer_article_number is not UNSET:
field_dict["customerArticleNumber"] = customer_article_number
if warehouse_instruction is not UNSET:
field_dict["warehouseInstruction"] = warehouse_instruction
if external_id is not UNSET:
field_dict["externalId"] = external_id
if article_item_status is not UNSET:
field_dict["articleItemStatus"] = article_item_status
if line_free_values is not UNSET:
field_dict["lineFreeValues"] = line_free_values
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
from ..models.code_name_pair import CodeNamePair
from ..models.post_order_line_free_values import PostOrderLineFreeValues
from ..models.post_order_line_prices import PostOrderLinePrices
d = src_dict.copy()
row_number = d.pop("rowNumber")
article_number = d.pop("articleNumber")
number_of_items = d.pop("numberOfItems", UNSET)
comment = d.pop("comment", UNSET)
should_be_picked = d.pop("shouldBePicked", UNSET)
serial_number = d.pop("serialNumber", UNSET)
line_total_customs_value = d.pop("lineTotalCustomsValue", UNSET)
batch_number = d.pop("batchNumber", UNSET)
_line_type = d.pop("lineType", UNSET)
line_type: Union[Unset, None, CodeNamePair]
if _line_type is None:
line_type = None
elif isinstance(_line_type, Unset):
line_type = UNSET
else:
line_type = CodeNamePair.from_dict(_line_type)
_prices = d.pop("prices", UNSET)
prices: Union[Unset, None, PostOrderLinePrices]
if _prices is None:
prices = None
elif isinstance(_prices, Unset):
prices = UNSET
else:
prices = PostOrderLinePrices.from_dict(_prices)
customer_article_number = d.pop("customerArticleNumber", UNSET)
warehouse_instruction = d.pop("warehouseInstruction", UNSET)
external_id = d.pop("externalId", UNSET)
_article_item_status = d.pop("articleItemStatus", UNSET)
article_item_status: Union[Unset, None, CodeNamePair]
if _article_item_status is None:
article_item_status = None
elif isinstance(_article_item_status, Unset):
article_item_status = UNSET
else:
article_item_status = CodeNamePair.from_dict(_article_item_status)
_line_free_values = d.pop("lineFreeValues", UNSET)
line_free_values: Union[Unset, None, PostOrderLineFreeValues]
if _line_free_values is None:
line_free_values = None
elif isinstance(_line_free_values, Unset):
line_free_values = UNSET
else:
line_free_values = PostOrderLineFreeValues.from_dict(_line_free_values)
post_order_line = cls(
row_number=row_number,
article_number=article_number,
number_of_items=number_of_items,
comment=comment,
should_be_picked=should_be_picked,
serial_number=serial_number,
line_total_customs_value=line_total_customs_value,
batch_number=batch_number,
line_type=line_type,
prices=prices,
customer_article_number=customer_article_number,
warehouse_instruction=warehouse_instruction,
external_id=external_id,
article_item_status=article_item_status,
line_free_values=line_free_values,
)
return post_order_line
| Undefined-Stories-AB/ongoing_wms_rest_api_client | ongoing_wms_rest_api_client/models/post_order_line.py | post_order_line.py | py | 7,972 | python | en | code | 1 | github-code | 90 |
73879761898 | import csv
import random
CostMed = []
DistMed = []
RateMed = []
RecMed = []
def fitness(cost, distance, rating):
    """Weighted hospital ranking score: 0.2*cost + 0.4*distance + 0.4*rating.

    ``cost`` and ``rating`` are coerced with ``float()`` (they may arrive as
    strings read from the CSV); ``distance`` is used as-is.
    """
    weighted_cost = 0.2 * float(cost)
    weighted_distance = 0.4 * distance
    weighted_rating = 0.4 * float(rating)
    return weighted_cost + weighted_distance + weighted_rating
with open(r"assets/Hospital_Data.csv","r") as file:
readdata=csv.reader(file)
data = list(readdata)
data.pop(0)
costdata = sorted(data,key=lambda x: x[1])
distdata = sorted(data,key=lambda x: random.randint(1,101))
ratedata = sorted(data,key=lambda x: x[4])
recdata = sorted(data,key=lambda x: fitness(x[1],random.randint(1,101),x[4]))
for i in costdata[1:]:
CostMed.append(f"""Hospital(
hospName: '{i[0]}',
hospPrice: '{i[1]}-{i[3]}',
stars: {i[4]},
image: 'images/hosp.jpg',
),\n""")
for i in distdata[1:]:
DistMed.append(f"""Hospital(
hospName: '{i[0]}',
hospPrice: '{i[1]}-{i[3]}',
stars: {i[4]},
image: 'images/hosp.jpg',
),\n""")
for i in ratedata[1:]:
RateMed.append(f"""Hospital(
hospName: '{i[0]}',
hospPrice: '{i[1]}-{i[3]}',
stars: {i[4]},
image: 'images/hosp.jpg',
),\n""")
for i in recdata[1:]:
RecMed.append(f"""Hospital(
hospName: '{i[0]}',
hospPrice: '{i[1]}-{i[3]}',
stars: {i[4]},
image: 'images/hosp.jpg',
),\n""")
with open(r"lib/data.dart",'a') as file:
file.write("List<Hospital> costhosp = [\n")
for i in CostMed:
file.write(i)
file.write("];\n")
file.write("List<Hospital> disthosp = [\n")
for i in DistMed:
file.write(i)
file.write("];\n")
file.write("List<Hospital> ratehosp = [\n")
for i in RateMed:
file.write(i)
file.write("];\n")
file.write("List<Hospital> rechosp = [\n")
for i in RecMed:
file.write(i)
file.write("];\n") | AdvaySanketi/MedIQal | assets/datatime.py | datatime.py | py | 1,685 | python | en | code | 0 | github-code | 90 |
5865595937 | import dataclasses
import uuid
import typing
import io
import struct
import cbor2
import json
import base64
import binascii
import hashlib
import certvalidator
import datetime
import cryptography.hazmat.primitives.asymmetric.rsa
import cryptography.hazmat.primitives.asymmetric.padding
import cryptography.hazmat.primitives.asymmetric.ec
import cryptography.hazmat.primitives.serialization
import cryptography.x509
import cryptography.exceptions
from .errors import WebAuthnError
from . import utils, types, data, metadata
@dataclasses.dataclass
class AttestedCredentialData:
    """Credential data attested by an authenticator, parsed by AuthenticatorData.from_bytes."""
    # AAGUID identifying the authenticator model.
    aaguid: uuid.UUID
    # Opaque credential identifier chosen by the authenticator.
    credential_id: bytes
    # COSE algorithm identifier for the credential public key.
    public_key_alg: int
    # The decoded credential public key (elliptic-curve or RSA).
    public_key: typing.Union[
        cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePublicKey,
        cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey,
    ]
@dataclasses.dataclass
class AuthenticatorData:
    """Parsed WebAuthn 'authenticator data' blob."""
    # 32-byte hash of the relying party ID.
    rp_hash: bytes
    user_presence: bool
    user_verification: bool
    sign_count: int
    # Only present when the AT flag is set in the flags byte.
    attested_data: typing.Optional[AttestedCredentialData]
    # Only present when the ED flag is set in the flags byte.
    extensions: typing.Optional[dict]
    # Raw input bytes, kept because attestation verification signs over them.
    data_bytes: bytes
    @classmethod
    def from_bytes(cls, data_bytes: bytes):
        """Parse ``data_bytes`` into an AuthenticatorData; raise WebAuthnError on malformed input."""
        data = io.BytesIO(data_bytes)
        # Fixed header: 32-byte RP-ID hash, one flags byte, 4-byte big-endian sign count.
        try:
            rp_hash, flags, sign_count = struct.unpack("!32sBI", data.read(37))
        except struct.error:
            raise WebAuthnError("Invalid data")
        up = bool(flags & 0b00000001)  # UP: user presence
        uv = bool(flags & 0b00000100)  # UV: user verification
        at = bool(flags & 0b01000000)  # AT: attested credential data follows
        ed = bool(flags & 0b10000000)  # ED: extension data follows
        if at:
            # Attested credential data: 16-byte AAGUID, then a length-prefixed credential id.
            try:
                aaguid, cred_id_len = struct.unpack("!16sH", data.read(18))
            except struct.error:
                raise WebAuthnError("Invalid data")
            aaguid = uuid.UUID(bytes=aaguid)
            cred_id = data.read(cred_id_len)
            # The credential public key is a CBOR-encoded COSE key.
            try:
                pubkey = cbor2.CBORDecoder(data).decode()
            except (IndexError, cbor2.CBORDecodeError):
                raise WebAuthnError("Invalid data")
            pubkey, alg = utils.load_cose_key(pubkey)
            attested_data = AttestedCredentialData(
                aaguid=aaguid,
                credential_id=cred_id,
                public_key_alg=alg,
                public_key=pubkey
            )
        else:
            attested_data = None
        if ed:
            # Trailing CBOR-encoded extension data.
            try:
                extensions = cbor2.CBORDecoder(data).decode()
            except (IndexError, cbor2.CBORDecodeError):
                raise WebAuthnError("Invalid data")
        else:
            extensions = None
        return cls(
            rp_hash=rp_hash,
            user_presence=up,
            user_verification=uv,
            sign_count=sign_count,
            attested_data=attested_data,
            extensions=extensions,
            data_bytes=data_bytes
        )
def verify_attestation(
        attestation_statement: dict, authenticator_data: AuthenticatorData, client_data_hash: bytes,
        fido_metadata: metadata.FIDOMetadata
) -> types.AttestationResult:
    """Verify a WebAuthn attestation object against the authenticator data.

    Supports the "packed", "android-safetynet", "fido-u2f", "apple" and
    "none" attestation formats.  Returns a types.AttestationResult on
    success and raises WebAuthnError on any malformed input or failed
    verification step.
    """
    fmt = attestation_statement.get("fmt", None)
    if not fmt:
        raise WebAuthnError("Invalid data")
    attestation_statement = attestation_statement.get("attStmt", None)
    if attestation_statement is None:
        raise WebAuthnError("Invalid data")
    if not authenticator_data.attested_data:
        raise WebAuthnError("Invalid data")
    # All formats sign over authenticatorData || clientDataHash.
    signed_data = authenticator_data.data_bytes + client_data_hash
    # ---- "packed" attestation ------------------------------------------
    if fmt == types.AttestationMode.Packed.value:
        alg = attestation_statement.get("alg", None)
        if alg not in data.SUPPORTED_ALGORITHMS:
            raise WebAuthnError("Invalid data")
        sig = attestation_statement.get("sig", None)
        if not sig:
            raise WebAuthnError("Invalid data")
        x5c = attestation_statement.get("x5c", None)
        if x5c:
            # Full/basic attestation: verify with the attestation certificate.
            if len(x5c) < 1:
                raise WebAuthnError("Invalid data")
            try:
                certs = [cryptography.x509.load_der_x509_certificate(c) for c in x5c]
            except ValueError:
                raise WebAuthnError("Invalid data")
            except cryptography.exceptions.UnsupportedAlgorithm:
                raise WebAuthnError("Unsupported authenticator")
            # Keep an unmodified copy; certs.pop(0) below mutates `certs`.
            out_certs = list(certs)
            attestation_cert: cryptography.x509.Certificate = certs.pop(0)
            try:
                utils.verify_signature(attestation_cert.public_key(), signed_data, sig, alg)
            except cryptography.exceptions.InvalidSignature:
                raise WebAuthnError("Verification failed")
            try:
                if attestation_cert.version != cryptography.x509.Version.v3:
                    raise WebAuthnError("Invalid data")
            except cryptography.x509.InvalidVersion:
                raise WebAuthnError("Invalid data")
            # Packed attestation certificate subject requirements:
            # 2-letter country, an organization, OU "Authenticator
            # Attestation", and a common name.
            subject: cryptography.x509.Name = attestation_cert.subject
            subject_cc = subject.get_attributes_for_oid(cryptography.x509.NameOID.COUNTRY_NAME)
            subject_o = subject.get_attributes_for_oid(cryptography.x509.NameOID.ORGANIZATION_NAME)
            subject_ou = subject.get_attributes_for_oid(cryptography.x509.NameOID.ORGANIZATIONAL_UNIT_NAME)
            subject_cn = subject.get_attributes_for_oid(cryptography.x509.NameOID.COMMON_NAME)
            if len(subject_cc) != 1 or len(subject_cc[0].value) != 2:
                raise WebAuthnError("Verification failed")
            if len(subject_o) != 1:
                raise WebAuthnError("Verification failed")
            if len(subject_ou) != 1 or subject_ou[0].value != "Authenticator Attestation":
                raise WebAuthnError("Verification failed")
            if len(subject_cn) != 1:
                raise WebAuthnError("Verification failed")
            try:
                extensions = attestation_cert.extensions
            except (cryptography.x509.DuplicateExtension, cryptography.x509.UnsupportedGeneralNameType):
                raise WebAuthnError("Invalid data")
            # The attestation certificate must not be a CA certificate.
            try:
                basic_constraints = extensions.get_extension_for_oid(
                    cryptography.x509.ExtensionOID.BASIC_CONSTRAINTS
                )
                if basic_constraints.value.ca:
                    raise WebAuthnError("Verification failed")
            except cryptography.x509.ExtensionNotFound:
                raise WebAuthnError("Verification failed")
            # Optional id-fido-gen-ce-aaguid extension: when present it must
            # be non-critical and match the AAGUID in the authenticator data.
            try:
                aaguid_ext = extensions.get_extension_for_oid(
                    cryptography.x509.ObjectIdentifier("1.3.6.1.4.1.45724.1.1.4")
                )
                if aaguid_ext.critical:
                    raise WebAuthnError("Verification failed")
                aaguid = aaguid_ext.value.value
                # Expect a DER OCTET STRING header (0x04, length 0x10).
                if not aaguid.startswith(b"\x04\x10"):
                    raise WebAuthnError("Invalid data")
                aaguid = aaguid[2:]
                if len(aaguid) != 16:
                    raise WebAuthnError("Invalid data")
                aaguid = uuid.UUID(bytes=aaguid)
                if aaguid != authenticator_data.attested_data.aaguid:
                    raise WebAuthnError("Verification failed")
            except cryptography.x509.ExtensionNotFound:
                pass
            # When FIDO MDS metadata is known for this AAGUID, validate the
            # certificate chain against the metadata's trust roots.
            metadata_index = fido_metadata.aaguid_map.get(authenticator_data.attested_data.aaguid)
            if metadata_index is not None:
                # NOTE(review): this local `metadata` shadows the `metadata`
                # module imported at the top of the file.
                metadata = fido_metadata.entries[metadata_index]
                if metadata.protocol != "fido2":
                    raise WebAuthnError("Verification failed")
                if metadata.is_revoked:
                    raise WebAuthnError("Verification failed")
                validator_context = certvalidator.ValidationContext(
                    trust_roots=[c.public_bytes(
                        cryptography.hazmat.primitives.serialization.Encoding.DER
                    ) for c in metadata.root_cas], allow_fetching=True
                )
                cert_validator = certvalidator.CertificateValidator(
                    end_entity_cert=x5c[0],
                    intermediate_certs=x5c[1:] if len(x5c) > 1 else None,
                    validation_context=validator_context
                )
                try:
                    path = cert_validator.validate_usage(key_usage=set())
                except certvalidator.errors.ValidationError as e:
                    raise WebAuthnError("Verification failed")
                # Re-load the validated path as cryptography certificates.
                path = [cryptography.x509.load_der_x509_certificate(c.dump()) for c in list(path)]
                return types.AttestationResult(
                    type=types.AttestationType.AttestationCA,
                    mode=types.AttestationMode.Packed,
                    root_ca=path[0],
                    cert_chain=path[1:],
                    safety_net_cts=None,
                    fido_metadata=metadata,
                )
            else:
                # No metadata: accept as basic attestation without a trust root.
                return types.AttestationResult(
                    type=types.AttestationType.Basic,
                    mode=types.AttestationMode.Packed,
                    root_ca=None,
                    cert_chain=out_certs,
                    safety_net_cts=None,
                    fido_metadata=None,
                )
        else:
            # Self attestation: signed with the credential's own key.
            if alg != authenticator_data.attested_data.public_key_alg:
                raise WebAuthnError("Verification failed")
            try:
                utils.verify_signature(authenticator_data.attested_data.public_key, signed_data, sig, alg)
            except cryptography.exceptions.InvalidSignature:
                raise WebAuthnError("Verification failed")
            return types.AttestationResult(
                type=types.AttestationType.Self,
                mode=types.AttestationMode.Packed,
                root_ca=None,
                cert_chain=[],
                safety_net_cts=None,
                fido_metadata=None,
            )
    # ---- "android-safetynet" attestation (a JWS from Google) ------------
    elif fmt == types.AttestationMode.AndroidSafetynet.value:
        # NOTE(review): .get() on a dict cannot raise UnicodeDecodeError;
        # the b"\xFF" default simply fails the dot-count check below.
        try:
            response: bytes = attestation_statement.get("response", b"\xFF")
        except UnicodeDecodeError:
            raise WebAuthnError("Invalid data")
        # A compact JWS has exactly three dot-separated segments.
        if response.count(b".") != 2:
            raise WebAuthnError("Invalid data")
        jwt_sig_data, signature_str = response.rsplit(b".", 1)
        header_str, body_str = jwt_sig_data.split(b".", 1)
        try:
            # b"==" padding makes urlsafe_b64decode tolerate stripped padding.
            header = json.loads(base64.urlsafe_b64decode(header_str + b"==").decode())
            body = json.loads(base64.urlsafe_b64decode(body_str + b"==").decode())
            signature = base64.urlsafe_b64decode(signature_str + b"==")
            certificates_bytes = [base64.b64decode(c) for c in header.get("x5c", [])]
            certificates = [cryptography.x509.load_der_x509_certificate(c) for c in certificates_bytes]
        except (ValueError, binascii.Error, json.JSONDecodeError):
            raise WebAuthnError("Invalid data")
        except cryptography.exceptions.UnsupportedAlgorithm:
            raise WebAuthnError("Unsupported authenticator")
        if len(certificates) < 1:
            raise WebAuthnError("Invalid data")
        # The leaf certificate must chain up for hostname attest.android.com.
        cert_validator = certvalidator.CertificateValidator(
            end_entity_cert=certificates_bytes[0],
            intermediate_certs=certificates_bytes[1:] if len(certificates_bytes) > 1 else None
        )
        try:
            path = list(cert_validator.validate_tls("attest.android.com"))
        except certvalidator.errors.ValidationError:
            raise WebAuthnError("Verification failed")
        root_ca = cryptography.x509.load_der_x509_certificate(path.pop(0).dump())
        cert_chain = [cryptography.x509.load_der_x509_certificate(c.dump()) for c in path]
        alg_id = utils.jwt_alg_id_to_cose(header.get("alg"))
        if alg_id is None:
            raise WebAuthnError("Unsupported authenticator")
        # Verify the JWS signature over header.payload with the leaf key.
        try:
            utils.verify_signature(certificates[0].public_key(), jwt_sig_data, signature, alg_id)
        except cryptography.exceptions.InvalidSignature:
            raise WebAuthnError("Verification failed")
        # The SafetyNet nonce must be base64(sha256(signed_data)).
        signed_data_b64 = base64.b64encode(hashlib.sha256(signed_data).digest()).decode()
        if body.get("nonce") != signed_data_b64:
            raise WebAuthnError("Verification failed")
        # NOTE(review): utcnow() is naive UTC but fromtimestamp() converts to
        # *local* time, so this freshness window is skewed by the host's UTC
        # offset on non-UTC machines -- confirm and use timezone-aware
        # datetimes throughout.
        now = datetime.datetime.utcnow()
        timestamp = datetime.datetime.fromtimestamp(body.get("timestampMs", 0) / 1000.0)
        if now - timestamp > datetime.timedelta(minutes=5):
            raise WebAuthnError("Verification failed")
        if not body.get("basicIntegrity", False):
            raise WebAuthnError("Unsupported authenticator")
        return types.AttestationResult(
            type=types.AttestationType.Basic,
            mode=types.AttestationMode.AndroidSafetynet,
            root_ca=root_ca,
            cert_chain=cert_chain,
            safety_net_cts=body.get("ctsProfileMatch", False),
            fido_metadata=None
        )
    # ---- "fido-u2f" attestation -----------------------------------------
    elif fmt == types.AttestationMode.FIDOU2F.value:
        x5c = attestation_statement.get("x5c", [])
        if len(x5c) != 1:
            raise WebAuthnError("Verification failed")
        sig = attestation_statement.get("sig", None)
        if not sig:
            raise WebAuthnError("Verification failed")
        try:
            cert = cryptography.x509.load_der_x509_certificate(x5c[0])
        except ValueError:
            raise WebAuthnError("Invalid data")
        except cryptography.exceptions.UnsupportedAlgorithm:
            raise WebAuthnError("Unsupported authenticator")
        # U2F mandates an EC P-256 attestation key.
        public_key = cert.public_key()
        if not isinstance(public_key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePublicKey):
            raise WebAuthnError("Verification failed")
        if public_key.curve.name != "secp256r1":
            raise WebAuthnError("Verification failed")
        cred_key_bytes = authenticator_data.attested_data.public_key.public_bytes(
            cryptography.hazmat.primitives.serialization.Encoding.X962,
            cryptography.hazmat.primitives.serialization.PublicFormat.UncompressedPoint
        )
        # U2F registration signature base:
        # 0x00 || rpIdHash || clientDataHash || credentialId || publicKey(X9.62).
        verification_data = bytearray([0x00])
        verification_data.extend(authenticator_data.rp_hash)
        verification_data.extend(client_data_hash)
        verification_data.extend(authenticator_data.attested_data.credential_id)
        verification_data.extend(cred_key_bytes)
        # NOTE(review): `...primitives.hashes` is never imported directly in
        # this module; it resolves only because importing the `ec` submodule
        # happens to import it transitively -- confirm and import explicitly.
        try:
            public_key.verify(sig, verification_data, cryptography.hazmat.primitives.asymmetric.ec.ECDSA(
                cryptography.hazmat.primitives.hashes.SHA256()
            ))
        except cryptography.exceptions.InvalidSignature:
            raise WebAuthnError("Verification failed")
        # Look up FIDO metadata by the certificate-key-identifier fingerprint.
        fingerprint = binascii.hexlify(
            cryptography.x509.SubjectKeyIdentifier.from_public_key(cert.public_key()).digest
        ).decode()
        metadata_index = fido_metadata.cki_map.get(fingerprint)
        if metadata_index is not None:
            # NOTE(review): shadows the `metadata` module import, as above.
            metadata = fido_metadata.entries[metadata_index]
            if metadata.protocol != "u2f":
                raise WebAuthnError("Verification failed")
            if metadata.is_revoked:
                raise WebAuthnError("Verification failed")
            validator_context = certvalidator.ValidationContext(
                trust_roots=[c.public_bytes(
                    cryptography.hazmat.primitives.serialization.Encoding.DER
                ) for c in metadata.root_cas], allow_fetching=True
            )
            cert_validator = certvalidator.CertificateValidator(
                end_entity_cert=x5c[0],
                validation_context=validator_context,
            )
            try:
                path = cert_validator.validate_usage(key_usage=set())
            except certvalidator.errors.ValidationError:
                raise WebAuthnError("Verification failed")
            return types.AttestationResult(
                type=types.AttestationType.AttestationCA,
                mode=types.AttestationMode.FIDOU2F,
                root_ca=cryptography.x509.load_der_x509_certificate(path[0].dump()),
                cert_chain=[cert],
                safety_net_cts=None,
                fido_metadata=metadata,
            )
        # No metadata entry: accept as basic attestation.
        return types.AttestationResult(
            type=types.AttestationType.Basic,
            mode=types.AttestationMode.FIDOU2F,
            root_ca=None,
            cert_chain=[cert],
            safety_net_cts=None,
            fido_metadata=None
        )
    # ---- "apple" anonymous attestation ----------------------------------
    elif fmt == types.AttestationMode.Apple.value:
        x5c = attestation_statement.get("x5c", [])
        if len(x5c) < 1:
            raise WebAuthnError("Invalid data")
        # Chain must anchor at Apple's WebAuthn root CA.
        validator_context = certvalidator.ValidationContext(
            trust_roots=[data.APPLE_WEBAUTHN_ROOT]
        )
        cert_validator = certvalidator.CertificateValidator(
            end_entity_cert=x5c[0],
            intermediate_certs=x5c[1:] if len(x5c) > 1 else None,
            validation_context=validator_context
        )
        try:
            certs = [cryptography.x509.load_der_x509_certificate(c) for c in x5c]
        except ValueError:
            raise WebAuthnError("Invalid data")
        except cryptography.exceptions.UnsupportedAlgorithm:
            raise WebAuthnError("Unsupported authenticator")
        attestation_cert: cryptography.x509.Certificate = certs.pop(0)
        # The leaf certificate embeds sha256(signed_data) as a nonce.
        nonce = hashlib.sha256(signed_data).digest()
        try:
            if attestation_cert.version != cryptography.x509.Version.v3:
                raise WebAuthnError("Invalid data")
        except cryptography.x509.InvalidVersion:
            raise WebAuthnError("Invalid data")
        try:
            extensions = attestation_cert.extensions
        except (cryptography.x509.DuplicateExtension, cryptography.x509.UnsupportedGeneralNameType):
            raise WebAuthnError("Invalid data")
        # Apple's nonce lives in extension 1.2.840.113635.100.8.2.
        try:
            nonce_ext = extensions.get_extension_for_oid(
                cryptography.x509.ObjectIdentifier("1.2.840.113635.100.8.2")
            )
        except cryptography.x509.ExtensionNotFound:
            raise WebAuthnError("Invalid data")
        cert_nonce = nonce_ext.value.value
        # DER wrapper around the 32-byte nonce octet string.
        asn1_prefix = b"\x30\x24\xa1\x22\x04\x20"
        if not cert_nonce.startswith(asn1_prefix):
            raise WebAuthnError("Invalid data")
        if cert_nonce[len(asn1_prefix):] != nonce:
            raise WebAuthnError("Verification failed")
        # The credential key must equal the leaf certificate's key.
        if not utils.key_equal(authenticator_data.attested_data.public_key, attestation_cert.public_key()):
            raise WebAuthnError("Verification failed")
        try:
            path = cert_validator.validate_usage(
                key_usage={"digital_signature"}
            )
        except certvalidator.errors.ValidationError:
            raise WebAuthnError("Verification failed")
        path = [cryptography.x509.load_der_x509_certificate(c.dump()) for c in list(path)]
        return types.AttestationResult(
            type=types.AttestationType.AnonymizationCA,
            mode=types.AttestationMode.Apple,
            root_ca=path[0],
            cert_chain=path[1:],
            safety_net_cts=None,
            fido_metadata=None
        )
    # ---- "none" attestation ---------------------------------------------
    elif fmt == types.AttestationMode.NoneAttestation.value:
        return types.AttestationResult(
            type=types.AttestationType.NoneAttestation,
            mode=types.AttestationMode.NoneAttestation,
            root_ca=None,
            cert_chain=[],
            safety_net_cts=None,
            fido_metadata=None
        )
    else:
        raise WebAuthnError("Unsupported authenticator")
| AS207960/python-webauthn | src/webauthn/attestation.py | attestation.py | py | 19,606 | python | en | code | 4 | github-code | 90 |
3492060911 | import pandas as pd
import numpy as np
# Merge the three scraped RV-dealer lists and de-duplicate by address.
df1 = pd.read_csv("rvtoolcom.csv")
df2 = pd.read_csv("limpa_usa.csv")
df3 = pd.read_csv("limpa_canada.csv")

# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported way to stack frames.
result = pd.concat([df1, df2, df3])

# Keep only the first occurrence of each address.
result = result.drop_duplicates(subset='address', keep="first")
result.to_csv('rv dealers.csv', index=False)
| polltter/RV | Proxy+Tool/junta.py | junta.py | py | 362 | python | en | code | 0 | github-code | 90 |
19514626195 | from psycopg2 import sql
import bcrypt
def query_select_fields_from_table(table: str, columns: list = None) -> sql.Composed:
    """Build an executable SQL SELECT statement.

    Selects the given column(s) from *table*; selects every column when
    *columns* is omitted.

    Parameters:
        table (str): Table name
        columns (list): Column name(s); a single string or any iterable
            of names is accepted

    Returns:
        Composed SQL object
    """
    if not columns:
        # No explicit column list -> select everything.
        return sql.SQL('select * from {table} ').format(table=sql.Identifier(table))
    if isinstance(columns, str):
        # Allow a bare column name instead of a one-element list.
        columns = [columns]
    # map() accepts any iterable, so no list() conversion is needed.
    return sql.SQL('select {columns} from {table} ').format(
        table=sql.Identifier(table),
        columns=sql.SQL(', ').join(map(sql.Identifier, columns)))
def add_order_by_to_query(columns: list, reverse: bool = True) -> sql.Composable:
    """Build a composable SQL ORDER BY clause for the given column(s).

    Parameters:
        columns (list): Column name(s); a single string is accepted
        reverse (bool): True for descending (default), False for ascending

    Returns:
        Composable SQL object
    """
    if isinstance(columns, str):
        columns = [columns]
    # The direction keyword is a fixed literal chosen here, never user
    # input, so inlining it into the SQL template is safe.
    direction = "desc" if reverse else "asc"
    return sql.SQL("order by {columns} " + direction + " ").format(
        columns=sql.SQL(', ').join(map(sql.Identifier, columns)))
def add_order_by_smt_desc_or_args(arguments=None):
    """Build an ORDER BY clause from request arguments.

    *arguments* should look like
    {'sort_by': '<column_name>', 'order': '<asc/desc>'};
    when it is falsy the query is ordered by submission_time descending.
    """
    if not arguments:
        return add_order_by_to_query('submission_time', reverse=True)
    # Default to descending when 'order' is missing (the previous version
    # crashed with AttributeError on None.lower()).
    order = (arguments.get('order') or 'desc').lower()
    return add_order_by_to_query(arguments.get('sort_by'), order == 'desc')
def add_limit_to_query(limit: int) -> sql.Composable:
    """Build a composable SQL LIMIT clause for the given row count.

    Parameters:
        limit (int): maximum number of rows to return

    Returns:
        Composable SQL object
    """
    clause = sql.SQL("limit {limit} ")
    return clause.format(limit=sql.Literal(limit))
def add_where_to_query(identifier, operator, value):
    """Build a composable "where <identifier> <operator> <value>" clause."""
    template = sql.SQL("where {first_operand} {operator} {second_operand} ")
    return template.format(first_operand=sql.Identifier(identifier),
                           operator=sql.SQL(operator),
                           second_operand=sql.Literal(value))
def add_and_to_query(identifier, operator, value):
    """Build a composable "and <identifier> <operator> <value>" clause."""
    condition = sql.SQL("and {first_operand} {operator} {second_operand} ")
    return condition.format(second_operand=sql.Literal(value),
                            operator=sql.SQL(operator),
                            first_operand=sql.Identifier(identifier))
def query_delete_from_table_by_identifier(table, value, identifier, operator='='):
    """Build a DELETE statement filtered by a single column condition."""
    delete_part = sql.SQL("delete from {table} ").format(table=sql.Identifier(table))
    where_part = add_where_to_query(identifier, operator, value)
    return delete_part + where_part
def add_inner_join_to_query(table, first_identifier, second_identifier):
    """Build an "inner JOIN <table> ON <first> = <second>" clause."""
    template = sql.SQL("inner JOIN {table} ON {first_identifier} = {second_identifier} ")
    return template.format(
        table=sql.Identifier(table),
        first_identifier=sql.Identifier(first_identifier),
        second_identifier=sql.Identifier(second_identifier))
def query_insert(table, columns, values):
    """Build an INSERT statement for the given columns and values.

    A single column name (str) paired with its single value is accepted,
    as are parallel iterables of columns and values.
    """
    if isinstance(columns, str):
        # Single column/value pair -> wrap both for the joins below.
        columns = [columns]
        values = [values]
    # sql.SQL.join accepts any iterable, so no list() conversion is needed.
    return sql.SQL("insert into {table}({columns}) values({values}) ").format(
        table=sql.Identifier(table),
        columns=sql.SQL(', ').join(map(sql.Identifier, columns)),
        values=sql.SQL(', ').join(map(sql.Literal, values))
    )
def query_update(table, key_value_dict):
    """Build an UPDATE statement setting every key of *key_value_dict*.

    The 'id' key is skipped so the row identifier is never rewritten.
    """
    query = sql.SQL('update {table} set ').format(table=sql.Identifier(table))
    # Comprehension replaces the manual loop-and-append.
    assignments = [
        sql.SQL('{column} = {value} ').format(column=sql.Identifier(key),
                                              value=sql.Literal(key_value_dict[key]))
        for key in key_value_dict
        if key != 'id'
    ]
    return query + sql.SQL(',').join(assignments)
def get_records_by_search(word, sort_by=None, order=None):
    """Build a question/answer full-text search query.

    The search *word* is bound as a SQL literal; the previous version
    spliced it into the statement as raw SQL (``sql.SQL(word)``), which
    allowed SQL injection from the search box.
    """
    query = """
    select q.id,a.id as a_id,title,
    q.message ,a.message as a_message,
    q.view_number,
    q.vote_number,a.vote_number as a_vote_number,
    q.submission_time,a.submission_time as a_submission_time
    from question as q
    left join
    (select id,question_id,message,vote_number,submission_time from answer
    where message ilike {pattern}) as a on q.id=a.question_id
    where title ilike {pattern}
    or q.message ilike {pattern}
    or a.message ilike {pattern}
    """
    if sort_by:
        order = 'asc' if order.lower() == 'asc' else 'desc'
        null_handler = "nulls first" if order == "asc" else "nulls last"
        # NOTE(review): sort_by is still interpolated as raw SQL text;
        # callers must pass only trusted column names (a whitelist check
        # would be safer, but would change behavior for unknown columns).
        query += """ order by {sort_by} {order} {null_handler}""".format(sort_by=sort_by,
                                                                         order=order,
                                                                         null_handler=null_handler)
    # Bind the ILIKE pattern as a literal so the word cannot inject SQL.
    return sql.SQL(query).format(pattern=sql.Literal('%' + word + '%'))
# hashing
def hash_password(plain_text_password):
    """Hash a plaintext password with bcrypt and return a UTF-8 string."""
    encoded = plain_text_password.encode('utf-8')
    return bcrypt.hashpw(encoded, bcrypt.gensalt()).decode('utf-8')
def verify_password(plain_text_password, hashed_password):
    """Check a plaintext password against a stored bcrypt hash."""
    candidate = plain_text_password.encode('utf-8')
    return bcrypt.checkpw(candidate, hashed_password.encode('utf-8'))
def get_tag_page_data():
    """Build a query counting how many questions are attached to each tag."""
    tag_counts = sql.SQL("""select name, count(tag_id) from question_tag
    right join tag t on question_tag.tag_id = t.id
    group by name""")
    return tag_counts
def modify_reputation(value, id):
    """Build an UPDATE adding *value* to the reputation of user *id*."""
    template = sql.SQL("""UPDATE users
    SET reputation_level = reputation_level + {value}
    WHERE user_id = {id}""")
    return template.format(value=sql.Literal(value), id=sql.Literal(id))
def accept_answer(status, answer_id):
    """Build an UPDATE marking an answer's accepted flag."""
    template = sql.SQL('''
    UPDATE answer
    SET accepted = {status}
    WHERE id = {answer_id}''')
    return template.format(status=sql.Literal(status),
                           answer_id=sql.Literal(answer_id))
def gain_view_number(question_id):
    """Build an UPDATE incrementing a question's view counter by one."""
    template = sql.SQL('''
    UPDATE question
    SET view_number = view_number + 1
    WHERE id = {question_id}''')
    return template.format(question_id=sql.Literal(question_id))
| CodecoolGlobal/ask-mate-3-python-mllorand | util.py | util.py | py | 7,013 | python | en | code | 0 | github-code | 90 |
32619910768 | # Eliza Knapp, Rachel Xiao, Thomas Yu
# SoftDev
# K05 -- Print A SoftDev Student's Name (Amalgamated)
# 2021-09-27
'''
Summary:
- How to approach the list of names
- Read in names from a text file instead of having list created with names
- Allows the lists to be changed easily
- One text file per period
- This had now been changed to precoded lists stored in a dictionary
- Any exceptions to watch out for
- The file not existing in the directory
- The file being empty
- Solve these issues with try and except
- Would have to print outputs separately (to check for each list individually)
- Exceptions no longer are an issue (aren't reading lines from a text file)
- How to choose the name
- Shuffle the list or randint to generate random index within bounds
Discoveries:
- Try catch blocks in python are try: except:
- without line.strip() it will keep the \n with the name, creating a new line in the output
- Python dictionaries and how to access the keys and values within the dictionary
Questions:
- How would we make it work for an infinite number of periods?
Comments:
- Could still possibly use shuffle as each key in the dictionary is still a list.
- However randint would be more effective as the list grows in size.
'''
import random
def generate_name():
    """Print one randomly chosen student name from each period's roster."""
    # Period label -> roster of student names.
    NAMES = {
        "pd1": ["Reng geng Zheng", "Edwin Zheng", "Angela Zhang", "Owen Yaggy",
                "Oscar Wang", "Lucas Tom wong", "Tami Takada", "Rayat Roy",
                "Tina Nguyen", "Julia Nelson", "Naomi Naranjo", "Justin Morrill",
                "Iwan Mijackia", "Gavin Mcginley", "Ishraq Mahid", "Deven Maheshwari",
                "Michelle Lo", "Christopher Liu", "Zhao yu Lin", "Lucas Lee", "Ivan Lam",
                "Ella Krechmer", "aryaman Goenka", "Sean Ging", "Haotian Gan", "Theodore Fahey",
                "Sadid Ethun", "Aaron Contreras", "Shyne Choi", "William Chen", "Emma Buller",
                "Shriya Anand", "Alejanro Alonso", "Tomas Acuna"],
        # A missing comma previously fused "Hebe Huang" and "Eric Guo" into
        # one entry via implicit string concatenation ("Hebe HuangEric Guo").
        "pd2": ["Justin Zou", "Mark Zhu", "Han Zhang", "Annabel Zhang", "Thomas Yu",
                "Raymond Yeung", "Jessie Xie", "Rachel Xiao", "Yuqing Wu",
                "Jonathan Wu", "Liesel Wong", "Ryan Wang", "Daniel Sooknanan",
                "Roshani Shrestha", "Shadman Rakib", "Austin Ngan", "Cameron Nelson",
                "Sophie Liu", "Qina Liu", "Andy Lin", "Yaying Liang Li",
                "Josephine Lee", "Joshua Kloepfer", "Andrew Juang", "Hebe Huang",
                "Eric Guo", "Patrick Ging", "Wen hao Dong", "David Chong", "Yoonah Chang",
                "Kevin Cao", "Michael Borczuk", "Noakai Aronesty", "Alif Abdullah"]
    }

    # random.choice replaces the manual randint index bookkeeping.
    print("pd1: " + random.choice(NAMES["pd1"]))
    print("pd2: " + random.choice(NAMES["pd2"]))


generate_name()
| thomasyu21/Workshop | 05_py/printNameAmalgamate.py | printNameAmalgamate.py | py | 2,940 | python | en | code | 0 | github-code | 90 |
72273825896 | import json
from channels.generic.websocket import AsyncWebsocketConsumer
from asgiref.sync import sync_to_async
from django.contrib.auth.models import Permission
from chatapp.models import Room,Message,User
from ventes.models import Comment, Vente
class ChatConsumer(AsyncWebsocketConsumer):
    """Websocket consumer that relays chat messages within a room group."""

    async def connect(self):
        """Join the channel-layer group for the requested room and accept."""
        self.room_name = self.scope['url_route']['kwargs']['room_slug']
        # snake_case (was roomGroupName) for consistency with CommentConsumer.
        self.room_group_name = 'chat_%s' % self.room_name
        await self.channel_layer.group_add(
            self.room_group_name,
            self.channel_name
        )
        await self.accept()

    async def disconnect(self, close_code):
        """Leave the room group when the socket closes."""
        await self.channel_layer.group_discard(
            self.room_group_name,
            self.channel_name
        )

    async def receive(self, text_data):
        """Persist an incoming message, then broadcast it to the room group."""
        payload = json.loads(text_data)
        message = payload["message"]
        username = payload["username"]
        room_name = payload["room_name"]
        await self.save_message(message, username, room_name)
        await self.channel_layer.group_send(
            self.room_group_name, {
                "type": "sendMessage",
                "message": message,
                "username": username,
                "room_name": room_name,
            }
        )

    async def sendMessage(self, event):
        """Group handler ("sendMessage" type): forward one message to this socket."""
        message = event["message"]
        username = event["username"]
        await self.send(text_data=json.dumps({"message": message, "username": username}))

    @sync_to_async
    def save_message(self, message, username, room_name):
        """Store the message in the database (runs in a worker thread).

        The leftover debug print of the arguments was removed.
        """
        user = User.objects.get(username=username)
        room = Room.objects.get(name=room_name)
        Message.objects.create(user=user, room=room, content=message)
class CommentConsumer(AsyncWebsocketConsumer):
    """Websocket consumer broadcasting live comments for a single sale (vente)."""

    async def connect(self):
        """Join the per-sale comment group and accept the connection."""
        self.vente_id = self.scope['url_route']['kwargs']['vente_id']
        self.room_group_name = f'comments_{self.vente_id}'
        await self.channel_layer.group_add(
            self.room_group_name,
            self.channel_name
        )
        await self.accept()

    async def disconnect(self, close_code):
        """Leave the comment group when the socket closes."""
        await self.channel_layer.group_discard(
            self.room_group_name,
            self.channel_name
        )

    async def receive(self, text_data):
        """Persist an incoming comment and broadcast it to the group."""
        data = json.loads(text_data)
        comment = data['comment']
        user_id = self.scope['user'].id
        await self.save_comment(comment, user_id)
        await self.channel_layer.group_send(
            self.room_group_name,
            {
                'type': 'send_comment',
                'comment': comment,
                'user_id': user_id
            }
        )

    async def send_comment(self, event):
        """Group handler ("send_comment" type): forward one comment to this socket."""
        comment = event['comment']
        user_id = event['user_id']
        await self.send(text_data=json.dumps({
            'comment': comment,
            'user_id': user_id
        }))

    @sync_to_async
    def save_comment(self, comment, user_id):
        """Store the comment if the user is the seller or has change permission.

        Unauthorized comments are silently dropped (preserved behavior).
        """
        vente = Vente.objects.get(id=self.vente_id)
        user = User.objects.get(id=user_id)
        # has_perm() expects an "<app_label>.<codename>" string; the previous
        # code passed a Permission *object*, which never matches, so the
        # permission branch always failed.  Assumes the Comment model lives
        # in the 'ventes' app (it is imported from ventes.models).
        if vente.vendeur == user or user.has_perm('ventes.change_comment'):
            Comment.objects.create(
                vente_id=self.vente_id,
                user_id=user_id,
                content=comment
            )
| vanelleNgadjui/CaPotage | core/consumers.py | consumers.py | py | 3,511 | python | en | code | 0 | github-code | 90 |
23557134053 | # 练习代码
# coding=utf-8
import requests
from bs4 import BeautifulSoup
import pickle
#
# url = 'http://www.mzitu.com/26685'
# header = {
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
# '(KHTML, like Gecko) Chrome/50.0.2661.102 UBrowser/6.1.2107.204 Safari/537.36',
# 'Referer':'https://www.mzitu.com/'
# }
# # 反爬机制1:Refer
# # 图片网站有反爬机制,是识别爬虫的"Refer”字段,这个字段是用来判断请求的来源
# # 也就是通过headr来判断的,所以我们要模拟正常从主页进入,要在hearder中加入"Referer"键,值为主页网址
# #
#
#
# html = requests.get(url, headers=header)
# soup = BeautifulSoup(html.text, 'html.parser')
# # print(html.text)
# #
# # # # 最大页数在span标签中的第10个
# pic_max = soup.find_all('span')[9].text
# # print(pic_max)
# title = soup.find('h2',class_='main-title').text
# #
# # # # 输出每个图片页面的地址
# for i in range(1, int(pic_max) + 1):
# href = url + '/' + str(i)
# html = requests.get(href, headers=header)
# mess = BeautifulSoup(html.text, "html.parser")
# #虽然html.text也可以显示源码,但是BeautifulSoup(html.text,"html.parser")更快,文档容错能力强
#
# pic_url = mess.find('img',alt = title) #获取img标签,其中包含图片地址
#
# html = requests.get(pic_url['src'],headers = header)
# file_name = pic_url['src'].split(r'/')[-1]
# print(file_name)
# # print(html.content)
# f = open(f"E:\plus_len\练习\爬虫\{file_name}", 'wb')
# f.write(html.content)
# f.close()
# 完整代码
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import os
import time
all_url = 'https://www.mzitu.com'

# The site identifies crawlers via the Referer header, so every request
# pretends to come from the site's own front page.
Hostreferer = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Referer': 'https://www.mzitu.com/'
}
# Headers used when downloading the image files themselves (the image
# host also rejects hot-linked requests without a matching Referer).
Picreferer = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Referer': 'https://www.mzitu.com/'
}

# Fetch the front page so the pagination can be parsed.
start_html = requests.get(all_url, headers=Hostreferer)

# Windows download root; one folder per gallery is created under it.
path = r'E:\plus_len\练习\爬虫\资源放置\\'

# The last real page number is the second-to-last pagination link
# (the very last link is the "next page" arrow).
soup = BeautifulSoup(start_html.text, "html.parser")
page = soup.find_all('a', class_='page-numbers')
max_page = page[-2].text

# Per-category listing URL; the page number is appended each iteration.
same_url = 'https://www.mzitu.com/mm/page/'
for n in range(1, int(max_page) + 1):
    # Fetch the n-th listing page of the category.
    ul = same_url + str(n)
    start_html = requests.get(ul, headers=Hostreferer)

    # Every gallery link on the listing page opens in a new tab.
    soup = BeautifulSoup(start_html.text, "html.parser")
    all_a = soup.find('div', class_='postlist').find_all('a', target='_blank')

    for a in all_a:
        # The gallery title doubles as the folder name.
        title = a.get_text()
        if title != '':
            print("准备扒取:" + title)

            # Windows cannot create directories containing '?', so strip it
            # consistently.  The original sanitized the name only in the
            # "already downloaded" check below, so makedirs/listdir pointed
            # at *different* directories for titles containing '?'.
            dir_name = path + title.strip().replace('?', '')
            if os.path.exists(dir_name):
                print('目录已存在')
                flag = 1
            else:
                os.makedirs(dir_name)
                flag = 0
            # Downloads below use relative file names, so work inside the
            # gallery folder.
            os.chdir(dir_name)

            # Open the gallery's first page to read how many photos it has
            # (the count sits in the 10th <span> of the page).
            href = a['href']
            html = requests.get(href, headers=Hostreferer)
            mess = BeautifulSoup(html.text, "html.parser")
            pic_max = mess.find_all('span')[9].text

            # Skip galleries that are already fully downloaded.
            if flag == 1 and len(os.listdir(dir_name)) >= int(pic_max):
                print('已经保存完毕,跳过')
                continue

            for num in range(1, int(pic_max) + 1):
                # Each photo lives on its own sub-page: <gallery>/<num>.
                pic = href + '/' + str(num)
                html = requests.get(pic, headers=Hostreferer)
                mess = BeautifulSoup(html.text, "html.parser")
                # The photo <img> tag carries the gallery title as alt text.
                pic_url = mess.find('img', alt=title)
                print(pic_url['src'])
                html = requests.get(pic_url['src'], headers=Picreferer)

                # Use the last path segment of the image URL as the name;
                # `with` closes the file even if the write fails (the
                # original used open/close without error handling).
                file_name = pic_url['src'].split(r'/')[-1]
                with open(file_name, 'wb') as f:
                    f.write(html.content)
            print('完成')
    print('第', n, '页完成')
| programday/crawler | 练习小例子/爬取妹子网.py | 爬取妹子网.py | py | 6,800 | python | zh | code | 6 | github-code | 90 |
30790482268 | # -*- coding: utf-8 -*-
# 因为是学习 python, 所以用了很多 print() 来观察
import os, shutil, glob
source_dir = "images/"

# Filesystem stats for "/" (POSIX only; os.statvfs does not exist on Windows).
disk = os.statvfs("/")

# Space actually available to this process.  The original multiplied
# f_blocks (TOTAL filesystem blocks), which made the "enough free space"
# check pass even on a full disk; f_bavail is the available-block count.
freespace = disk.f_bsize * disk.f_bavail

# Collect the image files to copy.
pngfiles = glob.glob(source_dir + "*.png")
jpgfiles = glob.glob(source_dir + "*.jpg")
giffiles = glob.glob(source_dir + "*.gif")
allfiles = pngfiles + jpgfiles + giffiles

# Total size of everything to copy (generator avoids the manual loop).
allfilesize = sum(os.path.getsize(f) for f in allfiles)

# Abort when the copies would not fit on the target filesystem.
if allfilesize > freespace:
    print("磁盘空间不足")
    raise SystemExit(1)

# Output folder defaults to <source>/output; refuse to overwrite.
target_dir = source_dir + "output"
if os.path.exists(target_dir):
    print("资料夹已存在")
    raise SystemExit(1)
os.mkdir(target_dir)

# Copy each file as <n>.<ext>, numbering sequentially.  os.path handles
# the path splitting; the original str.split('/')/split('.') broke as soon
# as the path had extra components or the file name contained a dot.
for imageno, f in enumerate(allfiles):
    filename = os.path.basename(f)
    extname = os.path.splitext(filename)[1]  # includes the leading '.'
    targetfile = target_dir + '/' + str(imageno) + extname
    shutil.copyfile(f, targetfile)
39750084101 | import calendar
from io import StringIO
import boto3
import discord
from discord.ext import commands, tasks
import numpy as np
import pandas as pd
import datetime
import re
import asyncio
import os
# IDs of the guild and channel where the leaderboard embed gets posted
target_guild_id = 730215239760740353
target_channel_id = 806691390372708362
# Message object of the posted leaderboard embed (assigned in Checking.base_embed)
main_embed = None
# S3 credentials are read from the environment; a KeyError here means missing config
access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
bucket_name = os.environ["S3_BUCKET_NAME"]
s3client = boto3.client('s3', aws_access_key_id=access_key_id,
                        aws_secret_access_key=secret_access_key)
class Checking(commands.Cog):
    """Cog that posts a "DSS Dollars" leaderboard embed and refreshes it with a
    member's balance whenever they react to it.

    Balances live in a CSV on S3 (columns: member id, points) fetched via
    fromBucket().  The two near-identical embed builders of the original are
    collapsed into one helper; their "Earn!" texts differed by a single space,
    which is unified here.
    """

    def __init__(self, client):
        self.client = client
        # self.cycle.start()  # daily auto-refresh loop, currently disabled

    @commands.Cog.listener()
    async def on_ready(self):
        print('CHECKING READY.')
        await self.base_embed()

    # @tasks.loop(seconds=86400)
    # async def cycle(self):
    #     try:
    #         await self.base_embed()
    #     except AttributeError:
    #         pass

    def _leaderboard_embed(self, points, balance_name, balance_value):
        """Build the leaderboard embed.

        points        -- DataFrame with 'id' and 'points' columns
        balance_name  -- title of the "recent checker" field
        balance_value -- text of the "recent checker" field
        """
        top10 = points.sort_values('points', ascending=False)[:10]
        guild = self.client.get_guild(target_guild_id)
        top10string = "".join(
            f"{guild.get_member(top10.iloc[row, 0]).display_name} - {top10.iloc[row, 1]}\n"
            for row in np.arange(len(top10)))
        embed = discord.Embed(title="🤑 DSS DOLLASSSS 🤑",
                              description=f"**Click hyperlink for rewards**\nLast Updated: {datetime.datetime.now().replace(microsecond=0)}",
                              color=0x4e7a27, url="https://docs.google.com/spreadsheets"
                                                  "/d/1e3AyLUqBiZzejdhbBXw3HPL9S7jmf3P4mEE-15nfWrI/edit")
        embed.add_field(name="Top 10 Ballers ⛹️️:", value=top10string, inline=False)
        embed.add_field(name=balance_name, value=balance_value, inline=False)
        embed.add_field(name="Earn!",
                        value="+1 Dollar for Every Minute in Call!\n2x Dollars if you have cam on :0\n0 - 0.5x Points "
                              "if you're AFK/Muted/Deafened :(\n+0.10 Dollars for reacting to exec announcements\n+0.25 "
                              "Dollars for just sending messages\nCulture Com/Exec might randomly give free points at events ;)\n \nHappy Spending :)")
        embed.set_footer(
            text="React to this message to check your own balance!\nDon't spam when its unresponsive🤬, its just a bit slow🤧"
                 "\nMessage Elton when you wanna redeem prizes.\n")
        embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/457065146519388173/805905097979854898/unknown.png")
        return embed

    async def base_embed(self):
        """Post the initial leaderboard message and seed its reaction buttons."""
        points = fromBucket("dssdollars.csv")
        embed = self._leaderboard_embed(points, "Most Recent Checker's Balance:", "N/A")
        global main_embed
        main_embed = await self.client.get_guild(target_guild_id).get_channel(target_channel_id).send(embed=embed)
        for i in ['🤑', '💰', '💲', '💵', '💸', '🧧', '😎']:
            await self.react(main_embed, i)

    @staticmethod
    async def react(message, emoji):
        await message.add_reaction(emoji)

    @commands.Cog.listener()
    async def on_raw_reaction_add(self, payload):
        """On any non-bot reaction to the leaderboard message, re-fetch balances
        and edit the embed to show that member's balance."""
        if not payload.member.bot and payload.message_id == main_embed.id:
            dollar_thesaurus = np.random.choice(['BIG BUCKS', 'DSS Dollars', 'Units of Monetary Currency', 'Dollars of Cold Hard Cash', '$DSS Stonks📈'])
            points = fromBucket("dssdollars.csv")
            if payload.member.id in list(points['id']):
                balance_value = f"{points[points['id'] == payload.member.id].iloc[0, 1]} {dollar_thesaurus}"
            else:
                balance_value = f"You Don't Have Any Points Yet :(\nHang out in calls to get started :)"
            embed = self._leaderboard_embed(
                points,
                f"Most Recent Checker's Balance ({payload.member.display_name}):",
                balance_value)
            await main_embed.edit(embed=embed)
def fromBucket(key):
    """Download a CSV object from the configured S3 bucket and parse it into a
    DataFrame, using the first column as the index."""
    raw = s3client.get_object(Bucket=bucket_name, Key=key)['Body'].read()
    return pd.read_csv(StringIO(raw.decode('utf-8')), index_col=0)
def setup(client):
    """discord.py extension entry point: attach the Checking cog to the bot."""
    client.add_cog(Checking(client))
| chanelton/dssdiscordbot | cogs/checkingPoints.py | checkingPoints.py | py | 5,977 | python | en | code | 1 | github-code | 90 |
21484335652 | #Project Euler Number 25
# Project Euler Problem 25:
# What is the first term in the Fibonacci sequence to contain 1000 digits?
# David Etler
# 22 NOV 2011


def first_fib_term_with_digits(n_digits=1000):
    """Return (term_index, digit_count) for the first Fibonacci term whose
    decimal representation has at least n_digits digits (F1 = F2 = 1).

    The digit count grows by at most one per step, so ">= n_digits" and
    "== n_digits" stop at the same term; >= is the safer condition.
    """
    a, b = 1, 1
    term = 2
    while len(str(b)) < n_digits:
        a, b = b, a + b
        term += 1
    return term, len(str(b))


if __name__ == "__main__":
    # The original used a Python 2 print statement ("print [c, ...]"), which is
    # a syntax error under Python 3; print() the same list shape instead.
    term, digits = first_fib_term_with_digits(1000)
    print([term, digits])
| davidretler/Project-Euler-Solutions | python/p25.py | p25.py | py | 259 | python | en | code | 0 | github-code | 90 |
28066292217 | import pytest
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
driver = None
# from selenium.webdriver.firefox.service import Service
# Parsing switches to the python cmd line
def pytest_addoption(parser):
    """Register the --browser_name command-line option (default: "chrome")."""
    parser.addoption("--browser_name", action="store", default="chrome")
@pytest.fixture(scope ="class")
# Class-scoped fixture: one browser per test class; `request` exposes the CLI
# options and the requesting test class, so nothing needs to be returned.
def setup(request):
    """Start the requested browser, open the app under test, and attach the
    driver to the requesting test class as `remotedriver`."""
    # Rebind the module-level driver so the report hook can take screenshots
    global driver
    # Extracts the value registered by pytest_addoption above
    browser_name = request.config.getoption("--browser_name")
    if browser_name == "chrome":
        s = Service(executable_path=ChromeDriverManager().install())
        #s = Service("C:\\Users\\victo\\Documents\\Browsers\\chromedriver_win32\\chromedriver.exe")
        driver = webdriver.Chrome(service=s)
    elif browser_name == "firefox":
        # NOTE(review): hard-coded local geckodriver path — breaks on other machines
        s = Service("C:\\Users\\victo\\Documents\\Browsers\\geckodriver-v0.31.0-win64\\geckodriver.exe")
        driver = webdriver.Firefox(service=s)
    driver.get("https://rahulshettyacademy.com/angularpractice/")
    driver.maximize_window()
    # Expose the driver on the test class; the attribute name on the class
    # side is free-form, it need not match the local variable name
    request.cls.remotedriver = driver
    # Everything after yield is teardown, run once the test class finishes
    yield
    driver.close()
# Hook wrapper: after each test phase, embed a screenshot into the HTML report
# whenever the test failed (or an expected failure was skipped).
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item):
    """
    Extends the PyTest Plugin to take and embed screenshot in html report, whenever test fails.
    :param item:
    """
    pytest_html = item.config.pluginmanager.getplugin('html')
    outcome = yield  # let pytest build the report first, then inspect it
    report = outcome.get_result()
    extra = getattr(report, 'extra', [])
    if report.when == 'call' or report.when == "setup":
        xfail = hasattr(report, 'wasxfail')
        if (report.skipped and xfail) or (report.failed and not xfail):
            # Derive a file name from the test node id ("::" is not path-safe)
            file_name = report.nodeid.replace("::", "_") + ".png"
            _capture_screenshot(file_name)
            if file_name:
                html = '<div><img src="%s" alt="screenshot" style="width:304px;height:228px;" ' \
                       'onclick="window.open(this.src)" align="right"/></div>' % file_name
                extra.append(pytest_html.extras.html(html))
        report.extra = extra
def _capture_screenshot(name):
    """Save a screenshot of the current page of the global driver to *name*."""
    driver.get_screenshot_as_file(name)
| dechan84/PythonSeleniumFrontEnd | test/conftest.py | conftest.py | py | 2,907 | python | en | code | 1 | github-code | 90 |
2366610636 |
from django.urls import path , include
from django.contrib import admin
from .views import saludo, resgistroPaciente, resgistroMedico, modificar, modificar2,modificarMedico,modificarMedico2,elimindarcli,registroContacto
from .views import saludo2, gestion, servicios, nosotros
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    # Public pages
    path('',saludo, name="incicio"),
    path('saludo/',saludo2, name="paciente"),
    # Patient / doctor registration
    path('pacientesregistro/',resgistroPaciente, name="pacientes"),
    path('registromedico/',resgistroMedico, name="medicos"),
    # Patient edit flow: pick from a list, then edit by id
    path('editar/',modificar, name="modifi"),
    path('editar2/<id>/',modificar2, name="modifi_2"),
    # Doctor edit flow, same two-step shape
    path('editarmedico/',modificarMedico, name="modifiMe"),
    path('editarmedico2/<id>/',modificarMedico2, name="modifi_Me2"),
    path('deleteclie/<id>',elimindarcli,name="elimindar1"),
    path('contacto/',registroContacto,name="contactar"),
    # Built-in Django auth views (login/logout/password management)
    path('accounts/', include('django.contrib.auth.urls')),
    path('gestion/',gestion, name="gestion"),
    path('servicios/',servicios, name="servi"),
    path('nosotros/',nosotros, name="nosotro"),
]
| aj130142/pagina-web | web/usuarios/urls.py | urls.py | py | 1,162 | python | es | code | 0 | github-code | 90 |
34871900749 | from typing import List
from collections import Counter
num_friends = [100.0, 49, 41, 40, 25, 21, 21, 19, 19, 18, 18, 16, 15, 15, 15, 15, 14, 14, 13, 13, 13, 13, 12, 12, 11,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1]
# Arithmetic mean
def mean(xs: List[float]) -> float:
    """Arithmetic mean of xs (raises ZeroDivisionError on an empty list)."""
    total = sum(xs)
    return total / len(xs)
def _median_odd(xs: List[float]) -> float:
"""If len of xs is odd, median is the middle point"""
return sorted(xs)[len(xs) // 2]
def _median_even(xs: List[float]) -> float:
"""If len of xs is even, median is the average of the two middle elements"""
sorted_x = sorted(xs)
hi_midpoint = len(xs) // 2
return (sorted_x[hi_midpoint - 1] + sorted_x[hi_midpoint]) / 2
def median(xs: List[float]) -> float:
    """Middle-most value of xs, dispatching on the parity of its length."""
    if len(xs) % 2 == 1:
        return _median_odd(xs)
    return _median_even(xs)
def quantile(xs: List[float], p: float) -> float:
    """Return the value at the p-th fraction (0 <= p <= 1) of the sorted data.

    The index is clamped to the last element, so p == 1.0 returns the maximum
    instead of raising IndexError (the only behavior change vs. the original,
    which computed int(p * len(xs)) unguarded).
    """
    p_index = min(int(p * len(xs)), len(xs) - 1)  # int() floors for p >= 0
    return sorted(xs)[p_index]
def mode(xs: List[float]) -> List[float]:
    """All values tied for the highest frequency (hence a list, not a scalar)."""
    frequency = Counter(xs)
    highest = max(frequency.values())
    return [value for value, count in frequency.items() if count == highest]
# Smoke-check the summary statistics against the sample friend counts
print(f"The mean of the number of friends is {mean(num_friends)}")
print(f"The median of the number of friends is {median(num_friends)}")
print(f"The 10th percentile in number of friends is {quantile(num_friends, 0.1)}")
print(f"The 25th percentile in number of friends is {quantile(num_friends, 0.25)}")
print(f"The 50th percentile in number of friends is {quantile(num_friends, 0.50)}")
print(f"The 75th percentile in number of friends is {quantile(num_friends, 0.75)}")
print(f"The mode of number of friends is {set(mode(num_friends))}")
| ilirsheraj/DataScienceScratch | chapter_05_Statistics/central_tendency.py | central_tendency.py | py | 2,344 | python | en | code | 0 | github-code | 90 |
18285608389 | #pdf見た
def solve(points):
    """Greedy interval scheduling.

    Each (x, l) pair occupies the interval (x - l, x + l); keep the maximum
    number of pairwise non-overlapping intervals (touching endpoints allowed)
    by always taking the interval that ends first.
    """
    intervals = sorted(((x - l, x + l) for x, l in points), key=lambda iv: iv[1])
    kept = 0
    last_end = -1e+9  # same sentinel as the original; below any feasible coordinate
    for start, end in intervals:
        if last_end <= start:
            kept += 1
            last_end = end
    return kept


def main():
    """Read n then n "x l" lines from stdin, print the answer."""
    n = int(input())
    points = [tuple(map(int, input().split())) for _ in range(n)]
    print(solve(points))


if __name__ == "__main__":
    main()
18487936739 | import math
import sys
def largest_divisor_at_most(m, limit):
    """Return the largest divisor of m that is <= limit (limit >= 1).

    Divisors are enumerated in pairs (i, m // i) up to isqrt(m).  All-integer
    arithmetic fixes the original's float-precision bugs: it compared against
    the float m / n and stored the float quotient m / i, both of which are
    inexact for large m.
    """
    best = 1
    for i in range(1, math.isqrt(m) + 1):
        if m % i:
            continue
        partner = m // i
        if i <= limit:
            best = max(best, i)
        if partner <= limit:
            best = max(best, partner)
    return best


def main():
    """Read "n m" from stdin; print the largest divisor of m not above m // n."""
    n, m = map(int, input().split())
    if n == 1:
        # With a single share, m itself is the answer.
        print(m)
        return
    print(largest_divisor_at_most(m, m // n))


if __name__ == "__main__":
    main()
| Aasthaengg/IBMdataset | Python_codes/p03241/s037429924.py | s037429924.py | py | 319 | python | en | code | 0 | github-code | 90 |
16314392443 | import argparse
from pathlib import Path
import mrcfile
from skimage import exposure
import skimage
import skimage.io
import tifffile
parser = argparse.ArgumentParser(description="Convert MRC files")
parser.add_argument("--raw", default="raw", help="The input location of the raw MRC files")
parser.add_argument("--tif", default="input", help="The output location of the TIFF files")
parser.add_argument("--png", default="filament_picking", help="The output location of the PNG files")
args = parser.parse_args()

# Convert every micrograph: equalized float TIFF plus an 8-bit PNG preview
input_files = list(Path(args.raw).glob("*.mrc"))
for i, f in enumerate(input_files):
    print(f"{i/len(input_files)*100:3.0f}% Reading {f}")
    # permissive=True tolerates slightly malformed MRC headers
    with mrcfile.open(f, permissive=True) as mrc:
        h = mrc.header
        d = mrc.data
    # Spread intensities via histogram equalization before export
    d = exposure.equalize_hist(d)
    tif_output_filename = Path(args.tif) / f"{f.stem}.tif"
    tifffile.imsave(tif_output_filename, d)
    # PNG needs 8-bit data; equalize_hist returns floats in [0, 1]
    normalised = skimage.img_as_ubyte(d)
    png_output_filename = Path(args.png) / f"{f.stem}.png"
    skimage.io.imsave(png_output_filename, normalised)
| milliams/thin_filament_data | convert_tiff.py | convert_tiff.py | py | 1,049 | python | en | code | 0 | github-code | 90 |
43622968401 | import nltk
import string
import pandas as pd
import seaborn as sns
from sklearn import svm
from nltk import tokenize
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from nltk.corpus import stopwords
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
# Tab-separated sample of labelled articles (columns include text, our rating)
data = pd.read_csv("Sample.csv", delimiter='\t', encoding='utf-8')

# Data Preparation
# Removing title and id — they carry no signal for the text classifiers below
data.drop(["title"], axis=1, inplace=True)
data.drop(["public_id"], axis=1, inplace=True)

# Converting to lowercase
data['text'] = data['text'].apply(lambda x: x.lower())
# Removing punctuation signs
def punctuation_removal(text):
    """Strip every ASCII punctuation character from *text*.

    str.translate with a deletion table does the same filtering as the
    original per-character list comprehension, in a single C-level pass.
    """
    return text.translate(str.maketrans('', '', string.punctuation))
data['text'] = data['text'].apply(punctuation_removal)

# Removing Stopwords (words filtered out before natural language processing)
stop = stopwords.words('english')
# NOTE(review): `stop` is a list, so each membership test is O(len(stop));
# a set would be faster, kept as-is here.
data['text'] = data['text'].apply(lambda x: ' '.join([word for word in x.split() if word not in stop]))
# Removing dashes and underscores
def line_removal(text):
    """Strip hyphen, en dash, em dash and underscore characters from *text*.

    Same deletion set as the original ("-–—_"), done in one pass with
    str.translate instead of a per-character comprehension.
    """
    return text.translate(str.maketrans('', '', "-–—_"))
data['text'] = data['text'].apply(line_removal)
# print(data)

# Data Exploration: how many articles carry each rating
data.groupby(['our rating'])['text'].count().plot(kind="bar")
# plt.show()


def _show_word_cloud(rating):
    """Render a word cloud over the text of every article with this rating."""
    subset = data[data["our rating"] == rating]
    all_words = ' '.join([text for text in subset.text])
    word_cloud = WordCloud(width=1000, height=1000, max_font_size=110, collocations=False).generate(all_words)
    plt.figure(figsize=(10, 7))
    plt.imshow(word_cloud, interpolation='bilinear')
    plt.axis("off")
    # plt.show()


# The three rating groups used identical copy-pasted plotting code; they now
# share the helper above.
_show_word_cloud("FALSE")
_show_word_cloud("TRUE")
_show_word_cloud("partially false")
# Separating by White Spaces
token_space = tokenize.WhitespaceTokenizer()

# Most Frequent Words
def counter(text, column_text, quantity):
    """Bar-plot the `quantity` most frequent whitespace-separated tokens.

    text        -- DataFrame holding the articles (parameter name shadows the
                   loop variable below; kept as-is for compatibility)
    column_text -- name of the column containing the article text
    quantity    -- how many top words to show
    """
    words = ' '.join([text for text in text[column_text]])
    token_phrase = token_space.tokenize(words)
    frequency = nltk.FreqDist(token_phrase)
    df_frequency = pd.DataFrame({"Word": list(frequency.keys()), "Frequency": list(frequency.values())})
    df_frequency = df_frequency.nlargest(columns="Frequency", n=quantity)
    plt.figure(figsize=(12, 8))
    ax = sns.barplot(data=df_frequency, x="Word", y="Frequency", color='blue')
    ax.set(ylabel="Count")
    plt.xticks(rotation='vertical')
    plt.show()
# Fake News
counter(data[data["our rating"] == "FALSE"], "text", 20)
# True News
counter(data[data["our rating"] == "TRUE"], "text", 20)
# Partially Fake
counter(data[data["our rating"] == "partially false"], "text", 20)

# Split Data: 80/20 train/test, fixed seed for reproducibility
x_train, x_test, y_train, y_test = train_test_split(data['text'], data['our rating'], test_size=0.2, random_state=42)
def _evaluate(title, model):
    """Fit a CountVectorizer -> TF-IDF -> classifier pipeline on the training
    split and print the confusion matrix and classification report for the
    test split."""
    pipe = Pipeline([('vector', CountVectorizer()),
                     ('tfidf', TfidfTransformer()),
                     ('model', model)])
    prediction = pipe.fit(x_train, y_train).predict(x_test)
    print(title)
    print(confusion_matrix(y_test, prediction))
    print(classification_report(y_test, prediction, zero_division=0))


# The four classifiers shared an identical feature pipeline, so they are
# driven through one helper instead of four copy-pasted blocks.
_evaluate("Decision Tree",
          DecisionTreeClassifier(criterion='entropy', max_depth=20, splitter='best', random_state=42))
_evaluate("K-Nearest Neighbors", KNeighborsClassifier())
_evaluate("Support Vector Machine", svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto'))
_evaluate("Naive-Bayes", MultinomialNB())
| Fake-News-Detection-2B5/ai-1 | Week 1 Run/main.py | main.py | py | 5,982 | python | en | code | 0 | github-code | 90 |
26079305035 | import numpy as np
import matplotlib.pyplot as plt
import logging
import sys
import src.config as config
def get_console_handler():
    """Build a stdout StreamHandler carrying the shared project formatter."""
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(config.LOG_FORMATTER)
    return handler
def get_file_handler(file_path):
    """Build a FileHandler for *file_path* carrying the shared project formatter."""
    handler = logging.FileHandler(file_path)
    handler.setFormatter(config.LOG_FORMATTER)
    return handler
def get_logger(logger_name, file_path):
    """Return a DEBUG-level logger writing to both stdout and *file_path*.

    Handlers are only attached the first time a given name is requested, so
    repeated calls no longer stack duplicate handlers (which made every
    record print once per call in the original).
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)  # better to have too much log than not enough
    if not logger.handlers:
        logger.addHandler(get_console_handler())
        logger.addHandler(get_file_handler(file_path))
    # with this pattern, it's rarely necessary to propagate the error up to parent
    logger.propagate = False
    return logger
# common functions used in the project
def group_mask_tnbc_classes(mask):
    """
    Group the minor mask classes into major ones (in place) per the paper:
      19, 20                -> 1  (tumor cluster)
      10, 11                -> 3  (inflammatory)
      5-9, 12-18, 21        -> 0  (all remaining minor classes)
    Any other label is left untouched.  Returns the same array.

    This is exactly what the original chain of assignments computed: 19/20
    and 10/11 were remapped first (so the later "-> 5" lines for those values
    were dead code), and everything funnelled into class 5 was finally zeroed
    by the trailing mask[mask == 5] = 0.
    :param mask: integer label array (modified in place)
    """
    mask[np.isin(mask, (19, 20))] = 1
    mask[np.isin(mask, (10, 11))] = 3
    mask[np.isin(mask, (5, 6, 7, 8, 9, 12, 13, 14, 15, 16, 17, 18, 21))] = 0
    return mask
def create_mask(pred_mask):
    """Collapse per-class scores into a label map via argmax over the last axis."""
    return np.argmax(pred_mask, axis=-1)
def plot_image_prediction(images, masks, outputs, names, num_of_images, num_of_classes, font_size=14):
    """Plot a 4-column grid (image | mask | predicted mask | overlay) for the
    first num_of_images samples and return the figure.

    Assumes images/outputs arrive channels-first (N, C, H, W) — TODO confirm
    against the caller; they are transposed to (N, H, W, C) for imshow.
    num_of_classes is currently unused.
    """
    # convert to numpy array
    images = np.array(images)
    masks = np.array(masks)
    outputs = np.array(outputs)
    # permute the images from NCHW to NHWC for matplotlib
    images = images.transpose(0, 2, 3, 1)
    outputs = outputs.transpose(0, 2, 3, 1)
    pred_mask = create_mask(outputs)
    # masks = create_mask(masks)
    fig, ax = plt.subplots(num_of_images, 4, figsize=(8, 24))
    ax[0, 0].set_title("Image", fontsize=font_size)
    ax[0, 1].set_title("Mask ", fontsize=font_size)
    ax[0, 2].set_title("Pred Mask", fontsize=font_size)
    ax[0, 3].set_title("Overlay", fontsize=font_size)
    for i in range(num_of_images):
        ax[i, 0].imshow(images[i, ...])
        mask_i = np.copy(masks[i, ...])
        pred_mask_i = np.copy(pred_mask[i, ...])
        # Pin the labels 0..4 into the first row of pixels so every panel is
        # rendered with the same color scale (presumably intentional — confirm)
        mask_i[0, 0] = 0
        mask_i[0, 1] = 1
        mask_i[0, 2] = 2
        mask_i[0, 4] = 3
        mask_i[0, 5] = 4
        ax[i, 1].imshow(mask_i.astype('uint8'))
        pred_mask_i[0, 0] = 0
        pred_mask_i[0, 1] = 1
        pred_mask_i[0, 2] = 2
        pred_mask_i[0, 4] = 3
        pred_mask_i[0, 5] = 4
        ax[i, 2].imshow(pred_mask_i.astype('uint8'))
        # Hide background (label 0) so only predicted classes overlay the image
        pred_mask_i_overlay = np.ma.masked_where(pred_mask_i == 0, pred_mask_i)
        ax[i, 3].imshow(images[i, ...])
        ax[i, 3].imshow(pred_mask_i_overlay.astype('uint8'), interpolation='none', cmap='jet', alpha=0.5)
        ax[i, 0].set_ylabel(names[i], fontsize=6)
        # Strip all tick marks; the panels are images, not charts
        ax[i, 0].set_yticks([])
        ax[i, 0].set_xticks([])
        ax[i, 1].set_yticks([])
        ax[i, 1].set_xticks([])
        ax[i, 2].set_yticks([])
        ax[i, 2].set_xticks([])
        ax[i, 3].set_yticks([])
        ax[i, 3].set_xticks([])
    return fig
def plot_confusion_matrix(confusion_matrix, classes, title=None, cmap=plt.cm.Blues):
    """Render *confusion_matrix* as an annotated heatmap and return the figure.

    confusion_matrix -- square array indexed [true, predicted]
    classes          -- tick labels for both axes
    title            -- plot title, defaults to 'Confusion matrix'
    """
    if not title:
        title = 'Confusion matrix'
    # Print Confusion matrix
    fig, ax = plt.subplots(figsize=(4, 4))
    im = ax.imshow(confusion_matrix, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(confusion_matrix.shape[1]),
           yticks=np.arange(confusion_matrix.shape[0]),
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Annotate each cell; flip text color past half the max for contrast.
    fmt = '.2f'
    thresh = confusion_matrix.max() / 2.
    for i in range(confusion_matrix.shape[0]):
        for j in range(confusion_matrix.shape[1]):
            ax.text(j, i, format(confusion_matrix[i, j], fmt),
                    ha="center", color="white"
                    if confusion_matrix[i, j] > thresh else "black")
    plt.tight_layout()
return fig | isaadbashir/coarse_segmentation | src/utils.py | utils.py | py | 4,726 | python | en | code | 0 | github-code | 90 |
18410074689 | import sys
#input = sys.stdin.buffer.readline
def solve(strings):
    """Maximum number of "AB" substrings obtainable by concatenating the
    given strings in some order.

    Internal "AB" pairs are fixed; extra pairs come from joining a string
    ending in 'A' to one starting with 'B'.  Strings of the form B...A can
    chain among themselves, so they are counted separately.
    """
    internal = 0   # "AB" pairs fully inside a single string
    ends_a = 0     # strings ending in 'A' (excluding B...A strings)
    starts_b = 0   # strings starting with 'B' (excluding B...A strings)
    both = 0       # strings of the form B...A
    for s in strings:
        if s[0] == "B" and s[-1] == "A":
            both += 1
        else:
            if s[0] == "B":
                starts_b += 1
            if s[-1] == "A":
                ends_a += 1
        internal += sum(1 for i in range(len(s) - 1)
                        if s[i] == "A" and s[i + 1] == "B")
    if ends_a + starts_b == 0 and both != 0:
        # Only B...A strings can chain with each other: both - 1 new joints.
        return internal + both - 1
    return internal + min(ends_a, starts_b) + both


def main():
    N = int(input())
    strings = [input() for _ in range(N)]
    print(solve(strings))


if __name__ == "__main__":
    main()
42970484847 | # BH1750 Documentation: https://www.mouser.com/datasheet/2/348/bh1750fvi-e-186247.pdf
# SG90 Documentation : http://www.ee.ic.ac.uk/pcheung/teaching/DE1_EE/stores/sg90_datasheet.pdf
from machine import I2C, Pin, PWM
import time
class BH1750NotFoundError(Exception):
    """Raised when no BH1750 device answers on the I2C bus."""
class BH1750(): # light intensity (lux) sensor on I2C
    def __init__(self, id=0, scl=17, sda=16):
        """
        Initialize the BH1750 light intensity sensor. Use the Raspberry Pi
        Pico Pinout to wire the sensor.
        Input:
        id : id of the I2C connection. Ex: for an I2C0 connection, id = 0
        scl : number of the SCL Pin, for the 22th pin, use 17 (see pinout)
        sda : same as scl
        Raises BH1750NotFoundError when no sensor answers on the bus.
        """
        # Opcode constants from the BH1750 datasheet (continuous modes):
        self.HIGH_RESOLUTION = 0x11 # 0.5 lx precision, measure time: 120 ms
        self.HIGH_RESOLUTION_2 = 0x10 # 1 lx precision, measure time: 120 ms
        self.LOW_RESOLUTION = 0x13 # 4 lx precision, measure time: ~16 ms per datasheet (original comment's "16s" was a typo)
        self.ONE_HIGH = 0x21 # one-time variants: 1 measure then idle
        self.ONE_HIGH_2 = 0x20
        self.ONE_LOW = 0x23
        self.i2c = I2C(id, scl=Pin(scl), sda=Pin(sda))
        self.count = 0
        if self.detect():
            # NOTE(review): the default mode is keyed off the I2C address
            # found by detect() (0x5C vs 0x23) — confirm this mapping is the
            # intended behavior rather than a wiring-dependent accident.
            if self.address == 0x5C:
                self.mode = self.LOW_RESOLUTION
            else:
                self.mode = self.HIGH_RESOLUTION
            self.reset()
        else:
            raise BH1750NotFoundError("Please check the connections of the BH1750 or the scl and sda arguments sent to the class.")
    def detect(self):
        """
        Returns a boolean on whether or not the sensor is detected.
        Stores the first matching I2C address in self.address.
        """
        i2c_peripherals = self.i2c.scan()
        for i2c_peripheral in i2c_peripherals:
            if i2c_peripheral in [0x5C, 0x23]: # the two addresses the chip can use (ADDR pin high/low)
                self.address = i2c_peripheral
                print("BH1750 address " + str(self.address))
                return True
        return False
    def reset(self):
        """Reset the connection. Use it to lower the power consumption once
        you do not use the sensor.
        Returns nothing.
        """
        data = bytearray(1)
        data[0] = 0x01 # Power on
        self.i2c.writeto(self.address, data)
        time.sleep(0.01)
        data[0] = 0x07 # reset the data register
        self.i2c.writeto(self.address, data)
        time.sleep(0.01)
        # Prime the sensor with one throw-away one-shot measurement,
        # waiting out the mode's conversion time before returning.
        if self.mode == self.LOW_RESOLUTION:
            self.measure(self.ONE_LOW)
            time.sleep(0.024)
        else:
            self.measure(self.ONE_HIGH)
            time.sleep(0.18)
    def measure(self, mode=False):
        """Returns the light intensity that the sensor receives.
        Input:
        mode : the measurement mode opcode (see self.__init__); defaults
               to the mode chosen at construction time.
        Returns:
        lux : light intensity, rounded to one decimal place
        """
        if not mode:
            mode = self.mode
        data_mode = bytearray(1)
        lux = bytearray(2)
        delay = 0
        # One-shot modes need an explicit wait for the conversion to finish
        if mode in (self.ONE_HIGH, self.ONE_HIGH_2):
            delay = 0.12
        if mode == self.ONE_LOW:
            delay = 0.016
        data_mode[0] = mode
        self.i2c.writeto(self.address, data_mode)
        time.sleep(delay)
        self.i2c.readfrom_into(self.address, lux)
        # Two raw bytes, big-endian; 1.2 counts per lux per the datasheet
        lux = lux[0] * 256 + lux[1]
        lux = round(lux / 1.2, 1)
        return lux
class SG90(): # hobby servomotor driven by 50 Hz PWM
    def __init__(self, pin=0):
        """
        Initialize the servo motor.
        Input:
        pin: the number in which the PWM wire of the sensor is plugged into
        """
        self.servo = PWM(Pin(pin))
        self.servo.freq(50)
        duty = self.servo.duty_u16()
        # Recover the current angle from the duty cycle (inverse of move());
        # duty 3276..6553 maps linearly onto 0..90 degrees.
        self.position = round((duty-3276) * 90 / 3277)
    def move(self, angle=0):
        """
        Move the servomotor to a certain degree. The range is 0-90 degrees
        because my servomotor doesn't support 180 degrees rotations.
        Input:
        angle: the angle in which the servo motor should be.
        Returns "Wrong angle." (without moving) when angle is out of range.
        """
        if angle < 0 or angle > 90:
            return "Wrong angle."
        self.position = angle
        duty = 3276 + angle * 3277 / 90 # 3277 = 6553 - 3276 (span of the usable duty range)
        self.servo.duty_u16(round(duty))
| Math3mat1x/sloth-curtains | sensors.py | sensors.py | py | 4,145 | python | en | code | 1 | github-code | 90 |
23338482714 | import requests
from bs4 import BeautifulSoup
class GoNaver():
    # Scrapes the Naver DataLab realtime keyword list and prints the top terms
    def sijak(self):
        """Fetch the realtime search-term page and print a ranked list."""
        url = "https://datalab.naver.com/keyword/realtimeList.naver?ahe=all"
        page = requests.get(url, headers={
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'})
        # Without a browser-like User-Agent the server sometimes refuses the
        # request ("cannot access the service") — hence headers= above.
        soup = BeautifulSoup(page.text, 'lxml')
        # print(soup)
        title = soup.select('span.item_title')
        # print(title)
        print('네이버 실시간 검색어')
        count = 0
        for i in title:
            count += 1
            print(str(count) + "위 : " + i.string)

if __name__ == '__main__':
    GoNaver().sijak()
| SolDDAENG/py_pandas | pack2/bs09.py | bs09.py | py | 796 | python | ko | code | 0 | github-code | 90 |
class InputCheck:
    @classmethod
    def check_location_coordinate(cls, request) -> str:
        """
        Validate the 'location' field ("lat,long") of the request's JSON body
        and return it unchanged.

        :params request (flask.request): request instance
        :raises AssertionError: when either coordinate is not a valid float
        """
        payload = request.get_json()
        location = payload.get('location')
        # Unpacking fails loudly (ValueError) when there are not exactly
        # two comma-separated parts, matching the original behavior.
        lat, long = [part.strip() for part in location.split(',')]
        try:
            float(lat)
            float(long)
        except ValueError:
            raise AssertionError('非合法經緯度參數')
        return location
| Real-time-On-street-Parking-System/Backend | src/InputCheck.py | InputCheck.py | py | 565 | python | en | code | 0 | github-code | 90 |
18566355619 | from collections import deque
# Grid dimensions, then the maze rows ('#' marks a wall)
Y, X = map(int, input().split())
Map = list(input() for _ in range(Y))
def bfs(sy, sx, gy, gx):
    """Breadth-first search from (sy, sx); return the distance to (gy, gx),
    or None when the goal is unreachable (the caller maps None to -1)."""
    seen = list([-1]*X for _ in range(Y))  # -1 marks unvisited cells
    queue = deque()
    queue.append((sy, sx))
    seen[sy][sx] = 0
    while queue:
        y, x = queue.popleft()
        search_around(y, x, seen, queue)
    if seen[gy][gx] > 0:
        return seen[gy][gx]
def search_around(y, x, seen, queue):
    """Enqueue every in-bounds, open, unvisited 4-neighbor of (y, x) and
    record its BFS distance in `seen`."""
    dist = seen[y][x] + 1
    for ny, nx in ((y - 1, x), (y + 1, x), (y, x - 1), (y, x + 1)):
        if 0 <= ny < Y and 0 <= nx < X and Map[ny][nx] != '#' and seen[ny][nx] == -1:
            queue.append((ny, nx))
            seen[ny][nx] = dist
# Count wall cells: the answer is (open cells) - (cells on one shortest path)
cnt = 0
for M in Map:
    for m in M:
        if m == '#':
            cnt += 1
b = bfs(0,0,Y-1,X-1)
if b == None:
    print(-1)
else:
print(Y*X - cnt - b - 1) | Aasthaengg/IBMdataset | Python_codes/p03436/s232465155.py | s232465155.py | py | 876 | python | en | code | 0 | github-code | 90 |
5543987452 | # 복습 횟수:0, 00:10:00, 복습필요X
import sys

si = sys.stdin.readline


def count_distinct(values):
    """Number of distinct items in *values*."""
    return len(set(values))


def main():
    """Read N, then N space-separated integers; print how many are distinct."""
    si()  # N is implied by the value line; read it only to consume it
    values = map(int, si().split())
    print(count_distinct(values))


if __name__ == "__main__":
    main()
18428506569 | # D - We Like AGC
from collections import defaultdict
MOD = 10**9+7
N = int(input())
charactors = ['A', 'G', 'C', 'T']
# dp[l][s] := number of length-l strings whose last three characters are s
dp = [defaultdict(int) for _ in range(N+1)]
# Seed with a sentinel suffix of unrelated characters so no forbidden
# pattern can trigger on the first appended characters
dp[0]['ZZZ'] = 1
# Forbidden results of appending: AGC, ACG, GAC, A?GC, AG?C
def check(s, c):
    """Return False iff appending character c after the 3-char suffix s would
    complete one of the forbidden patterns (including one adjacent swap)."""
    if c == 'C':
        tail = s[1:]
        bad = (tail == 'AG' or tail == 'GA'
               or (s[0] == 'A' and 'G' in (s[1], s[2])))
        return not bad
    if c == 'G':
        return s[1:] != 'AC'
    return True
for i in range(N):
    for s in dp[i].keys():
        for c in charactors:
            # Count the extension only when it creates no forbidden pattern
            if check(s, c):
                dp[i+1][s[1:]+c] += dp[i][s]
                dp[i+1][s[1:]+c] %= MOD
print(sum(dp[N].values())%MOD)
| Aasthaengg/IBMdataset | Python_codes/p03088/s394609379.py | s394609379.py | py | 965 | python | ja | code | 0 | github-code | 90 |
73844657576 | import matplotlib.pyplot as plt
def plotData(x, y):
    """Plots the data points x and y into a new figure """
    # NOTE(review): for the classic ex1 dataset x is population and y is
    # profit, so these axis labels look swapped — confirm with the caller.
    training_data_plot = plt.plot(x, y, linestyle='None', color='red', marker='x', markersize=10, label="Training data")
    plt.xlabel('Profit in $10,000')
    plt.ylabel('Population of city in 10,000s')
    return training_data_plot
| hzitoun/machine_learning_from_scratch_matlab_python | algorithms_in_python/week_2/ex1/plotData.py | plotData.py | py | 382 | python | en | code | 30 | github-code | 90 |
28846335793 | import json
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
# from matplotlib.font_manager import _rebuild
#
# _rebuild() #reload the matplotlib font cache (only needed after installing fonts)
mpl.use('agg')
plt.rcParams['font.sans-serif'] = ['SimSun']
plt.rcParams['axes.unicode_minus'] = False  # render '-' correctly with the CJK font
# Per-fold curves and final losses parsed from the k-fold training log
train_final_lozz = []
val_final_lozz = []
test_lozz = []
train_lozzz = []
val_lozzz = []
# Each line of the log file is one JSON object per fold
with open('train_k_fold_AttnRNN_info.txt', 'r', encoding='utf-8') as f:
    for line in f.readlines():
        tmp = json.loads(line)
        train_lozz = tmp['train_lozz']
        train_lozzz.append(train_lozz)
        train_final_lozz.append(train_lozz[-1])
        val_lozz = tmp['val_lozz']
        val_lozzz.append(val_lozz)
        val_final_lozz.append(val_lozz[-1])
        test_loss = tmp['test_loss']
        test_lozz.append(test_loss)
df = pd.DataFrame({'train_lozz': train_final_lozz, 'val_lozz': val_final_lozz, 'test_lozz': test_lozz})
print(df)
print(df.describe())
# Element-wise mean of the per-fold loss curves: zip(*curves) pairs up each
# epoch's values across folds.  Dividing by the actual fold count removes the
# original's hard-coded assumption of exactly 10 folds (identical results
# when there are 10 equal-length folds, as in the accompanying log).
mean_train_lozz = [sum(epoch_values) / len(train_lozzz) for epoch_values in zip(*train_lozzz)]
mean_val_lozz = [sum(epoch_values) / len(val_lozzz) for epoch_values in zip(*val_lozzz)]
# Plot the averaged validation-loss curve (titles/labels are Chinese UI text)
plt.title(u'损失下降曲线')
# plt.plot(mean_train_lozz, color='green', label=u'训练损失')
plt.plot(mean_val_lozz, color='red', label=u'验证损失')
plt.xlabel(u'迭代轮数')
plt.ylabel(u'BCE损失')
plt.show()
plt.savefig('AttnRNN_val.png') # 保存图片
| NileZhou/NHG | summarunner_weather/little/checkpoints/stat.py | stat.py | py | 1,579 | python | en | code | 1 | github-code | 90 |
72596056937 | #link https://leetcode.com/problems/boats-to-save-people/
class Solution:
    """LeetCode 881 — Boats to Save People."""
    def numRescueBoats(self, people, limit):
        """Return the minimum number of boats needed to carry everyone.

        Each boat carries at most two people whose combined weight does not
        exceed ``limit``.  Greedy two-pointer over the sorted weights: the
        heaviest remaining person always departs, taking the lightest one
        along when the pair fits.

        Bug fix: the original only advanced ``right`` and counted a boat
        inside the pairing branch, so any person too heavy to be paired
        left the loop spinning forever.
        """
        people.sort()
        left = 0
        right = len(people) - 1
        boats_number = 0
        while left <= right:
            if left == right:
                # Last person travels alone.
                boats_number += 1
                break
            if people[left] + people[right] <= limit:
                # Lightest rider joins the heaviest one.
                left += 1
            # The heaviest person always leaves on this boat.
            right -= 1
            boats_number += 1
        return boats_number
652577689 | from django.shortcuts import redirect, render
from django.http import HttpResponse
from django.views import View
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.contrib.auth.models import User
from .forms import ContactForm
from .models import Category, Post, Subscriber
class IndexView(View):
    """Landing page with an email-subscription form.

    NOTE(review): this class is shadowed by the second ``IndexView``
    defined later in this module, so it is unreachable unless the later
    definition is removed or renamed.
    """
    def get(self, request):
        # Render the static landing page.
        return render(request, 'blog/index.html')
    def post(self, request):
        """If the email is valid, check if it already exists
        in the database, if it exists, change the secret
        code and send the email, otherwise create a new
        subscriber and send an email.
        Note:
            This code is written like this for better understanding.
        """
        # str() guards against a missing POST value (None).
        email = str(request.POST.get('email'))
        if email and '@' in email and 5 < len(email) <= 128:
            subscriber_in_db = Subscriber.objects.filter(email=email).first()
            if subscriber_in_db:
                # Known address: rotate the secret code and re-send the link.
                subscriber_in_db.generate_new_secret_code()
                subscriber_in_db.save()
                subscriber_in_db.send_subscription_email()
            else:
                new_subscriber = Subscriber(email=email)
                new_subscriber.save()
                new_subscriber.send_subscription_email()
        # NOTE(review): the same response is returned even when validation
        # fails, so the caller cannot tell invalid emails apart.
        return HttpResponse('We have sent you a link, check your email.')
class IndexView(ListView):
    """Paginated post list for the landing page.

    NOTE(review): this redefinition shadows the subscription ``IndexView``
    declared immediately above; only this one is importable.
    """
    model = Post
    paginate_by = 6
class SubscriberView(View):
    """Validates an email-subscription link (email + secret code)."""
    def get(self, request, email, secret_code):
        """Check if the email and secret_code is valid."""
        subscriber = Subscriber.objects.filter(email=email).first()
        if subscriber is None:
            # Fix: an unknown address used to fall through to
            # ``subscriber.delete_subscriber()`` on None and crash with
            # an AttributeError.
            return HttpResponse('Your email could not be validated, try again later.')
        if not subscriber.verified and \
                subscriber.check_secret_code(email, secret_code):
            subscriber.verified = True
            subscriber.save()
            return HttpResponse('Your email has been validated.')
        # Invalid or already-used link: drop the subscriber record.
        subscriber.delete_subscriber()
        return HttpResponse('Your email could not be validated, try again later.')
class PrivacyView(View):
    """Static privacy-policy page."""
    def get(self, request):
        # Breadcrumb data consumed by the template.
        ctx = {
            'section_name': 'Privacy',
            'sections': [{'name': 'Privacy', 'url': '/privacy/'}],
        }
        return render(request, 'blog/privacy.html', ctx)
class TermsAndConditionsView(View):
    """Static terms-and-conditions page."""
    def get(self, request):
        # Breadcrumb data consumed by the template.
        ctx = {
            'section_name': 'Terms & Conditions',
            'sections': [{'name': 'Terms & Conditions', 'url': '/terms-and-conditions/'}],
        }
        return render(request, 'blog/terms_and_conditions.html', ctx)
class ContactView(View):
    """Contact form: render on GET; save, email and reset on valid POST."""
    form = ContactForm
    # Static context template.  Fix: this dict used to be mutated in place
    # from both get() and post(); because class attributes are shared by
    # every request (and thread), that was a race condition.  Handlers now
    # build a per-request copy instead.
    context = {
        'form': None,
        'section_name': 'Contact',
        'sections': [
            {'name': 'Contact', 'url': '/contact/'}
        ]
    }
    def get(self, request):
        # Fresh copy with an unbound form for this request only.
        ctx = {**self.context, 'form': self.form()}
        return render(request, 'blog/contact.html', ctx)
    def post(self, request):
        contact_form = self.form(request.POST)
        if contact_form.is_valid():
            contact_form.save()
            contact_form.instance.send_email()
            contact_form = self.form()  # blank form after a successful send
        ctx = {**self.context, 'form': contact_form}
        return render(request, 'blog/contact.html', ctx)
class CategoryListView(ListView):
    """Paginated list of every category, plus the newest posts."""
    model = Category
    paginate_by = 6
    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx.update(
            section_name='Category',
            sections=[{'name': 'Category', 'url': '/category/'}],
            posts=Post.get_latest_posts(),
        )
        return ctx
class CategoryDetailView(DetailView):
    """Single category page, looked up by its title slug."""
    model = Category
    slug_field = "title"
    slug_url_kwarg = "title"
    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        name = self.object.title
        ctx.update(
            section_name=f'Showing posts from {name.capitalize()}',
            sections=[
                {'name': 'Category', 'url': '/category/'},
                {'name': name, 'url': name},
            ],
            posts=self.object.get_latest_posts(),
        )
        return ctx
class TagListView(ListView):
    """Tag index page with the newest posts and the full tag list."""
    model = Post
    paginate_by = 6
    template_name = 'blog/tag_list.html'
    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx.update(
            section_name='Tags',
            sections=[{'name': 'Tags', 'url': '/tags/'}],
            posts=Post.get_latest_posts(),
            tags=Post.get_all_tags(),
        )
        return ctx
class TagDetailView(View):
    """Posts filtered by a single tag name."""
    def get(self, request, title):
        ctx = {
            'section_name': f'Showing posts from {title.capitalize()}',
            'sections': [
                {'name': 'Tags', 'url': '/tags/'},
                {'name': title, 'url': title},
            ],
            'posts': Post.get_latest_posts_by_tag(str(title)),
        }
        return render(request, 'blog/tag_detail.html', ctx)
class PostDetailView(DetailView):
    """Single post page, looked up by its url slug."""
    model = Post
    slug_field = "url"
    slug_url_kwarg = "url"
    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['posts'] = self.object.get_similar_posts()  # related reading
        return ctx
class AuthorListView(ListView):
    """Paginated author directory, oldest accounts first."""
    model = User
    template_name = 'blog/author_list.html'
    paginate_by = 6
    def get_queryset(self):
        # Order by sign-up date so founding authors come first.
        return User.objects.order_by('date_joined')
    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx.update(
            section_name='Authors',
            sections=[{'name': 'Authors', 'url': '/authors/'}],
        )
        return ctx
class AuthorDetailView(DetailView):
    """Author profile page with that author's five newest posts."""
    model = User
    template_name = 'blog/author_detail.html'
    slug_field = "username"
    slug_url_kwarg = "username"
    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        author = ctx['object']
        ctx['posts'] = author.profile.get_latest_posts()[:5]
        return ctx
class SearchList(View):
    """Description search (?s=...); falls back to the category list."""
    def get(self, request):
        term = request.GET.get('s')
        if not term:
            return redirect('categories')
        matches = Post.objects.filter(description__icontains=term)[:5]
        return render(request, 'blog/search_list.html', {'posts': matches})
| 27b/django-modern-blog | blog/views.py | views.py | py | 6,604 | python | en | code | 0 | github-code | 90 |
def minimum_score(mt1_score, mt2_score, desired_grade, ec_points=0, recovery_points=0):
    """Print and return the minimum final-exam score needed for a grade.

    Args:
        mt1_score: midterm 1 score, out of 30.
        mt2_score: midterm 2 score, out of 60.
        desired_grade: uppercase letter grade, e.g. 'B+'.
        ec_points: extra-credit points earned so far.
        recovery_points: recovery points earned so far.

    Returns:
        int: the minimum final-exam score out of 75 (0 when the grade is
        already secured).  The function previously only printed the result;
        returning it as well is backward-compatible.
    """
    # Max points that can be lost for each letter grade.
    grade_bins = {'A+': 0, 'A': 15, 'A-': 30, 'B+': 50, 'B': 75, 'B-': 95,
                  'C+': 105, 'C': 115, 'C-': 125, 'D+': 130, 'D': 135, 'D-': 140}
    assert desired_grade in grade_bins, "Your desired_grade must be an uppercase string. For example,'b+' should be inputted as 'B+'"
    assert mt1_score <= 30 and mt2_score <= 60, 'mt1_score and mt2_score should be ints or floats that are less than 30 and 60, respectively'
    max_points_missed = grade_bins[desired_grade] + ec_points + recovery_points
    # The four clobber scenarios: each midterm either keeps its real score
    # or is "clobbered" — replaced by the final-exam percentage scaled to
    # that midterm's point total.  (This table replaces four copy-pasted
    # loops that differed only in these two expressions.)
    scenarios = [
        (lambda i: mt1_score, lambda i: mt2_score),           # no clobber
        (lambda i: (i / 75) * 30, lambda i: mt2_score),       # clobber mt1
        (lambda i: mt1_score, lambda i: (i / 75) * 60),       # clobber mt2
        (lambda i: (i / 75) * 30, lambda i: (i / 75) * 60),   # clobber both
    ]
    list_of_scenario_outcomes = []
    for mt1_of, mt2_of in scenarios:
        # Scan final scores from 75 downward and record the first score at
        # which the points missed reach the grade's allowance (or 0).
        for i in reversed(range(76)):
            points_missed = (75 - i) + (30 - mt1_of(i)) + (60 - mt2_of(i))
            if points_missed >= max_points_missed or i == 0:
                list_of_scenario_outcomes.append(i)
                break
    score_needed = min(list_of_scenario_outcomes)
    print('__________________________')
    print('The minimum score you need on the final exam to earn a(n) ' + desired_grade + ' in CS61A is:' )
    print('\n'+str(score_needed) + ' out of 75')
    print('\nKeep in mind that this calculator does not take into account any beneficial grade bin shifts \nthat may occur at the end, and that there are still 4-5 EC points left in the semester.')
    print('__________________________ \nMade by shoumik :D')
    return score_needed
| shoumikc/cs61a-final-exam-calculator | calculator.py | calculator.py | py | 2,166 | python | en | code | 0 | github-code | 90 |
72910589416 | from yogi import read
from turtle import *
# Draws a circle.
def cercle(r):
    """Draw a circle of radius ``r`` with the turtle."""
    circle(r)
# Draws a square.
def quadrat(c):
    """Draw a square of side ``c``: four equal edges with 90-degree turns."""
    for _ in range(4):
        forward(c)
        left(90)
# Draws a rectangle.
def rectangle(a, b):
    """Draw an ``a`` x ``b`` rectangle (two half-perimeters)."""
    for _ in range(2):
        forward(a)
        left(90)
        forward(b)
        left(90)
# Read the shape name and its dimensions from stdin (via yogi),
# then draw the requested figure with the turtle helpers above.
forma = read(str)
if forma == "cercle":
    r = read(int)
    cercle(r)
elif forma == "quadrat":
    c = read(int)
    quadrat(c)
elif forma == "rectangle":
    a = read(int)
    b = read(int)
    rectangle(a, b)
else:
    print("Forma no reconeguda.")
done() | lluc-palou/ap1-jutge | beginning/P33134.py | P33134.py | py | 780 | python | ca | code | 0 | github-code | 90 |
4670558059 | #!/usr/bin/python3
"""a class Student that defines a studen"""
class Student:
    """A student described by first name, last name and age."""
    def __init__(self, first_name, last_name, age):
        """Store the three public attributes exactly as given."""
        self.first_name = first_name
        self.last_name = last_name
        self.age = age
    def to_json(self):
        """Return a JSON-serializable dict copy of this instance's attributes."""
        if hasattr(self, "__dict__"):
            return self.__dict__.copy()
        return {}
| Nadely/holbertonschool-higher_level_programming | python-input_output/9-student.py | 9-student.py | py | 426 | python | en | code | 0 | github-code | 90 |
18242224069 | #!/usr/bin python3
# -*- coding: utf-8 -*-
import sys
input = sys.stdin.readline
def make_divisors(n):
    """Return the divisors of ``n`` strictly between 1 and ``n`` (unsorted).

    Each divisor d up to sqrt(n) is emitted together with its cofactor
    n // d (skipped when d is the exact square root, to avoid duplicates).
    """
    found = []
    limit = int(n ** 0.5)
    for d in range(2, limit + 1):
        if n % d:
            continue
        found.append(d)
        partner = n // d
        if partner != d:
            found.append(partner)
    return found
def main():
    """Read N and count the candidate bases k with a final remainder of 1.

    Candidates are N, N - 1 and every divisor (> 1) of N or N - 1; for each
    k, N is divided by k while divisible and k counts when the leftover is
    congruent to 1 modulo k.

    Fix: removed a dead ``if N==2: ret = 1`` that sat inside the ``else``
    branch, which only runs when N != 2.
    """
    N = int(input())
    if N == 2:
        # Only k = 2 works for N = 2.
        print(1)
        return
    # N and N-1 are coprime, so the two divisor lists never overlap.
    candidates = make_divisors(N) + make_divisors(N - 1)
    candidates.append(N)
    candidates.append(N - 1)
    ret = 0
    for k in candidates:
        n = N
        while n % k == 0:
            n = n // k
        ret += n % k == 1
    print(ret)


if __name__ == '__main__':
    main()
74091629737 | """
csv_to_catalog is used to parse, validate and import item data from a CSV file
into a json peji catalog.
"""
import os
import csv
import json
import sys
import requests
from datetime import date
from peji import buttons
# Environment variable names used to override defaults at runtime.
PUBLISH_DATE_ENV_VAR = 'PUBLISH_DATE'
IMAGE_URL_PREFIX_ENV_VAR = 'IMAGE_URL_PREFIX'
# Placeholder prefix used when IMAGE_URL_PREFIX is not set in the environment.
IMAGE_URL_PREFIX = 'non/existent/image/url/prefix'
def get_data(csvfile):
    """Get data converts the data in a CSV file into the data format of items
    in a catalog. Used with v0.0.3 CSV files.
    """
    # Image URL prefix: the environment override wins over the placeholder.
    prefix = os.environ.get(IMAGE_URL_PREFIX_ENV_VAR, IMAGE_URL_PREFIX)
    today = date.today()
    items = []
    with open(csvfile) as f:
        rows = csv.reader(f)
        next(rows, None)  # skip the header row
        for row in rows:
            entry = {
                'id': row[4],
                'image': f"{prefix}/{row[4]}.jpeg",
                'title': row[0],
                'description': f"{row[3]} - {row[1]} inches",
                'publishDate': today.strftime('%d %B, %Y'),
                'available': True,
                'price': row[2],
                'button': '',
            }
            items.append(validate_and_sanitize(entry))
    return items
def get_data_from_csv(csvfile):
    """Get data from CSV converts the data in a CSV file into the data format of
    items in a catalog. Used with post v0.0.3 CSV files.

    Row layout (0-based), inferred from usage — confirm against the CSV
    header: 0=title, 1=size, 2=price, 3=description text, 4=item id,
    5=catalog id, 6=availability flag ("0"/"1").
    """
    image_url_prefix = IMAGE_URL_PREFIX
    # Get image url prefix from env var.
    if IMAGE_URL_PREFIX_ENV_VAR in os.environ:
        image_url_prefix = os.environ[IMAGE_URL_PREFIX_ENV_VAR]
    custom_date = False
    today = date.today()
    # Check if a custom date is specified in env vars.  A custom date is a
    # plain string and is stored verbatim below (no strftime formatting).
    if PUBLISH_DATE_ENV_VAR in os.environ:
        custom_date = True
        today = os.environ[PUBLISH_DATE_ENV_VAR]
    with open(csvfile) as f:
        reader = csv.reader(f)
        next(reader, None)
        items = []
        for row in reader:
            item = {
                'id': row[4],
                'image': f"{image_url_prefix}/{row[4]}.jpeg",
                'title': row[0],
                'description': f"{row[3]} - {row[1]} inches",
                # 'publishDate': today.strftime('%d %B, %Y'),
                'available': bool(int(row[6])),
                'price': row[2],
                'button': '',
            }
            # Set the date. This is needed in tests.
            if custom_date:
                item['publishDate'] = today
            else:
                item['publishDate'] = today.strftime('%d %B, %Y')
            item = validate_and_sanitize(item)
            items.append(item)
    return items
def validate_and_sanitize(item):
    """Coerce item['price'] to an int; abort the program if it is not numeric."""
    try:
        # TODO: Add float support and ensure paypal accepts decimal values.
        price = int(float(item['price']))
    except ValueError as err:
        sys.exit('data validation failed for ID %s: %r' % (item['id'], err))
    else:
        item['price'] = price
        return item
def update_data(data, csvfile, catalog_id):
    """Used in v0.0.3 for updating an existing config file with a CSV data file,
    given a catalog ID. This updated data of only one catalog because the CSV
    file didn't had the catalog ID info.

    Exits the process when the catalog ID is missing or an item ID is
    duplicated.  Mutates and returns ``data``.
    """
    targetCatalog = None
    # Get the target catalog.
    for cat in data['catalog']:
        if cat['id'] == catalog_id:
            targetCatalog = cat
            break
    if targetCatalog == None:
        sys.exit('catalog id %s not found' % catalog_id)
    updated_data = get_data(csvfile)
    # Go through the items and update or add items.
    for item in updated_data:
        # Get the item with same ID.
        existing_items = [
            i for i in targetCatalog['items'] if i['id'] == item['id']
        ]
        if len(existing_items) > 1:
            sys.exit('found more than one item with same id %s' %
                     item['id'])
        if len(existing_items) > 0:
            # update the data, existing item.
            # NOTE(review): 'image', 'available' and 'publishDate' are not
            # refreshed for existing items — confirm that is intended.
            existing_item = existing_items[0]
            existing_item['title'] = item['title']
            existing_item['description'] = item['description']
            existing_item['price'] = item['price']
        else:
            # add the item, new addition to items.
            targetCatalog['items'].append(item)
    # Return the updated data.
    return data
def update_release_data(repo_name, csvfile):
    """Pull previous release info from the given GitHub repo and merge the
    CSV file into the most recent data, returning the new data file content
    ready to be posted to the latest release.
    """
    releases_endpoint = f"https://api.github.com/repos/{repo_name}/releases"
    response = requests.get(releases_endpoint)
    return update_release_data_with_csv(response.json(), csvfile)
def update_release_data_with_csv(rel_data, csvfile):
    """This processes the release info and updates the latest released data with
    the CSV file and returns the new updated data.

    ``rel_data`` is the parsed GitHub releases API response (newest first);
    the first release asset found is treated as the existing data file.
    """
    # Final updated data.
    data = []
    # Data from the CSV file.
    updated_data = get_data_from_csv(csvfile)
    download_url_key = 'browser_download_url'
    # Analyze the release data and update the existing data if available.
    if len(rel_data) == 0:
        # No previous releases. Use the CSV to create first data file.
        print("no previous release found, generating data from CSV only")
    elif len(rel_data) == 1:
        # Check if there's any data file in the assets of the release. If not,
        # create a data file using the CSV. If there's a data file, download the
        # data file and use it along with the CSV to create a new data file.
        print("found a release")
        assets = rel_data[0]['assets']
        if len(assets) > 0:
            print("found a data file asset in the release, using this as existing data")
            # Asset exists. Download the data file and assign it to the data
            # var.
            dataURL = assets[0][download_url_key]
            r = requests.get(dataURL)
            data = r.json()
        else:
            print("no assets found, generating data from CSV only")
    else:
        # There are more than one releases. Check if the latest release has any
        # data file in assets. If not, use the data file asset from the previous
        # releases along with the CSV file to create a new data file.
        print("found multiple releases")
        # Checking assets of the releases one by one until data file asset is
        # found.
        dataURL = ""
        for rel in rel_data:
            assets = rel['assets']
            if len(assets) > 0:
                print(
                    "found a data file asset in release %r, using this as existing data" % rel['name'])
                dataURL = assets[0][download_url_key]
                break
            else:
                print(
                    "found no data file asset in release %r, checking the previous release" % rel['name'])
        if dataURL:
            r = requests.get(dataURL)
            data = r.json()
        else:
            print("no assets found, generating data from CSV only")
    # Merge the CSV rows into whatever existing data was found (possibly []).
    data = get_catalog_data(data, updated_data)
    return data
def get_catalog_data(cat_data, data):
    """Given an existing catalog data and a list of new data, convert the list
    into catalog data format with items separated based on their catalog ID and
    update any existing items with the CSV data.

    Mutates both ``cat_data`` (catalogs/items are appended or updated) and
    the item dicts in ``data`` (the 'catalog' key is popped).  Exits the
    process on duplicate catalog or item IDs.
    """
    # Parse through each of the items and add them into their own catalog.
    for item in data:
        cat_id = item["catalog"]
        # Remove the catalog info from the item because it'll be placed under
        # a catalog.
        del item["catalog"]
        # Check if there's an existing catalog of the ID found in the item. If
        # not, create the catalog entry and then add the item under that
        # catalog.
        existing_catalog = [
            i for i in cat_data if i["id"] == cat_id
        ]
        if len(existing_catalog) > 1:
            sys.exit('found more than one catalog with same id %s' % cat_id)
        if len(existing_catalog) > 0:
            # Found existing catalog, append the item in the catalog.
            catalog = existing_catalog[0]
            existing_items = [
                i for i in catalog['items'] if i['id'] == item['id']
            ]
            if len(existing_items) > 1:
                sys.exit('found more than one item with same id %s' %
                         item['id'])
            if len(existing_items) > 0:
                # update the data, existing item.
                # NOTE(review): 'image' and 'button' are not refreshed for
                # existing items — confirm that is intended.
                existing_item = existing_items[0]
                existing_item['title'] = item['title']
                existing_item['description'] = item['description']
                existing_item['price'] = item['price']
                existing_item['available'] = item['available']
                existing_item['publishDate'] = item['publishDate']
            else:
                catalog['items'].append(item)
        else:
            # No existing catalog. Create a new catalog and append the item in
            # the catalog.
            cat = {
                "id": cat_id,
                "category": "",
                "items": [],
            }
            cat['items'].append(item)
            cat_data.append(cat)
    return cat_data
| darkowlzz/peji | peji/csv_to_catalog.py | csv_to_catalog.py | py | 9,633 | python | en | code | 0 | github-code | 90 |
72555450858 | import os
import pathlib
import sys
def prepare_plaidml():
    """Point plaidml at its per-platform data files and native library.

    Sets RUNFILES_DIR / PLAIDML_NATIVE_PATH only when the expected install
    location for the current platform actually exists; otherwise it is a
    no-op.
    """
    if sys.platform.startswith("linux"):
        # Linux, if plaidml was installed with `pip3 install --user`.
        share_dir = pathlib.Path("~/.local/share/plaidml/").expanduser().absolute()
        lib_path = pathlib.Path("~/.local/lib/libplaidml.so").expanduser().absolute()
        if share_dir.exists():
            os.environ["RUNFILES_DIR"] = str(share_dir)
            os.environ["PLAIDML_NATIVE_PATH"] = str(lib_path)
    elif sys.platform == "darwin":
        # macOS, if using python3 from homebrew.
        share_dir = pathlib.Path("/usr/local/share/plaidml")
        lib_path = pathlib.Path("/usr/local/lib/libplaidml.dylib").expanduser().absolute()
        if share_dir.exists():
            os.environ["RUNFILES_DIR"] = str(share_dir)
            os.environ["PLAIDML_NATIVE_PATH"] = str(lib_path)
    elif sys.platform == "win32":
        if 'VIRTUAL_ENV' in os.environ:
            venv = pathlib.Path(os.environ["VIRTUAL_ENV"])
            share_dir = venv.joinpath("share/plaidml")
            dll_path = venv.joinpath("library/bin/plaidml.dll")
            # On Windows the two settings are applied independently.
            if share_dir.exists():
                os.environ["RUNFILES_DIR"] = str(share_dir)
            if dll_path.exists():
                os.environ["PLAIDML_NATIVE_PATH"] = str(dll_path)
def prepare_ambient(backend, device_id, use_gpu):
    """Configure the Keras backend through environment variables.

    backend: 'plaidml' or 'theano' (case-insensitive); anything else
        raises TypeError.
    device_id: value stored in PLAIDML_DEVICE_IDS (plaidml only).
    use_gpu: for theano, select the cuda0 device instead of the CPU.
    """
    choice = backend.lower()
    if choice == 'plaidml':
        os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
        os.environ["PLAIDML_DEVICE_IDS"] = device_id
        prepare_plaidml()
    elif choice == 'theano':
        os.environ["KERAS_BACKEND"] = "theano"
        if use_gpu:
            os.environ["THEANO_FLAGS"] = "device=cuda0"
            print("Use GPU theano", os.environ["THEANO_FLAGS"])
        else:
            os.environ["THEANO_FLAGS"] = "device=cpu"
    else:
        raise TypeError("Wrong backend")
def get_plaidml_devices(gpu=False):
    """Return {description: id} for plaidml devices, best-scored first.

    NOTE(review): the ``gpu`` flag is currently unused — confirm intent.
    """
    prepare_plaidml()
    import plaidml
    ctx = plaidml.Context()
    plaidml.settings._setup_for_test(plaidml.settings.user_settings)
    plaidml.settings.experimental = True
    devices, _ = plaidml.devices(ctx, limit=100, return_all=True)
    out_devices = []
    for device in devices:
        # Crude preference score: GPU-ish keywords in the description.
        points = 0
        if b"cuda" in device.description.lower():
            points += 1
        if b"opencl" in device.description.lower():
            points += 1
        if b"nvidia" in device.description.lower():
            points += 1
        if b"amd" in device.description.lower():
            points += 1
        out_devices.append((points, device))
    # Fix: sort on the score only.  Plain tuple sorting falls through to
    # comparing the device objects whenever two scores tie, which raises
    # TypeError for objects without ordering support.
    out_devices.sort(key=lambda scored: scored[0], reverse=True)
    return {device.description.decode("utf8"): device.id.decode("utf8")
            for points, device in out_devices}
| invesalius/invesalius3 | invesalius/segmentation/deep_learning/utils.py | utils.py | py | 2,761 | python | en | code | 536 | github-code | 90 |
10485148994 | import csv
# READ
# with open("data/addresses.csv") as csvfile:
# reader = csv.reader(csvfile, skipinitialspace=True)
# for row in reader:
# print(row[1])
# with open("data/biostats.csv") as csvfile:
# reader = csv.DictReader(csvfile, skipinitialspace=True)
# for row in reader:
# # row is a collections.OrderedDict
# print(row["Name"], row["Sex"], int(row["Age"]))
# WRITE
# Demo: dump a list of dicts to CSV; the header comes from the first record.
beatles = [
    {"first_name": "John", "last_name": "lennon", "instrument": "guitar"},
    {"first_name": "Ringo", "last_name": "Starr", "instrument": "drums"},
]
with open("data/beatles.csv", "w") as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=beatles[0].keys())
    writer.writeheader()
    writer.writerows(beatles)
| trouni/batch-719 | lectures/data-sourcing/csv_demo.py | csv_demo.py | py | 781 | python | en | code | 1 | github-code | 90 |
21034024432 | from tkinter import *
expression = ""
def press(num):
    """Append the pressed key to the pending expression and display it."""
    global expression
    expression = expression + str(num)
    equation.set(expression)
def equalpress():
    """Evaluate the pending expression and show the result (or " error ").

    NOTE(review): eval() on the raw expression string is acceptable for
    button-driven input only; unsafe if the string can come from anywhere
    else.  The bare except also hides the specific failure.
    """
    try:
        global expression
        total = str(eval(expression))
        equation.set(total)
        expression = ""
    except:
        equation.set(" error ")
        expression = ""
def clear():
    """Reset the pending expression and blank the display."""
    global expression
    expression = ""
    equation.set("")
if __name__ == "__main__":
gui = Tk()
gui.configure(background="light green")
| helenamagaldi/projects_python | Calculator/main.py | main.py | py | 528 | python | en | code | 3 | github-code | 90 |
18363674879 | N = int(input())
LP = list(map(int, input().split()))
# Fix: removed the dead `LPS = []` pre-assignment that was immediately
# overwritten by sorted(LP).
LPS = sorted(LP)
# Count positions where LP differs from its sorted version; three or more
# mismatches cannot be repaired by a single swap.
cnt = 0
for i in range(N):
    if LP[i] != LPS[i]:
        cnt += 1
    if cnt == 3:
        print("NO")
        exit()
print("YES")
18302396679 | #!/usr/bin/env python
n = int(input())
# An odd n contributes no factor of 2, so the product has no trailing zeros.
if n%2 == 1:
    print(0)
    exit()
# mp = largest exponent with 5**mp <= n.
mp = tmp = 0
while True:
    if 5**tmp > n:
        break
    mp = tmp
    tmp += 1
# Sum the counts of even multiples of 5**i up to n (n // (2 * 5**i)) —
# presumably the trailing zeros of the double factorial n!!, where factors
# of 5 are scarcer than factors of 2; confirm against the problem statement.
ans = 0
for i in range(1, mp+1):
    ans += n//(2*(5**i))
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p02833/s114723796.py | s114723796.py | py | 237 | python | en | code | 0 | github-code | 90 |
2895246832 | quant=int(input("сколько билетов планируете купить:"))
# Ticket-price calculator: free under 18, 990 RUB for ages 18-25,
# 1390 RUB above 25; 10% discount when more than 3 tickets are bought
# and at least one of them is paid.
count=0
for i in range(1, quant+1):
    # Prompt for the i-th customer's age.
    text='возраст ' + str(i) + ' клиента:'
    age = int(input (text))
    if age >25:
        count += 1390
        print ("стоимость билета 1390 рублей")
    else:
        if age>=18:
            count += 990
            print("стоимость билета 990 рублей")
        else:
            print("бесплатный билет")
print ("сумарная стоимость билетов: %s рублей" %(count))
if quant>3 and count>0:
    # 10% discount, truncated to whole rubles.
    sale=int (count/10)
    count=count-sale
print ("у вас 10% скидка она состовляет {} рублей итого с вас {} рублей".format(sale, count)) | frankenhtejn/origin | main.py | main.py | py | 814 | python | ru | code | 0 | github-code | 90 |
def gcd(a, b):
    """calculate the greatest common divisor of a, b

    >>> gcd(54, 20)
    2
    >>> gcd(147, 105)
    21
    >>> gcd(100000002, 39273000)
    114
    """
    # Euclid's algorithm: recurse on (larger % smaller, smaller).
    small, large = (a, b) if a <= b else (b, a)
    if small == 0:
        return large
    return gcd(large % small, small)
def run():
    """Read two integers from one line of stdin and print their GCD."""
    a, b = [int(i) for i in input().split()]
    print(gcd(a, b))
if __name__ == '__main__':
    run()
| Aasthaengg/IBMdataset | Python_codes/p02256/s422498097.py | s422498097.py | py | 385 | python | en | code | 0 | github-code | 90 |
18360592139 | N = int(input())
H = list(map(int, input().split()))
# flg stays 1 while the sequence can be made non-decreasing by lowering
# each element by at most 1 (greedy left-to-right: lower H[i+1] whenever
# doing so keeps it >= H[i], saving slack for later positions).
flg = 1
for i in range(N-1):
    if H[i+1] - 1 >= H[i]:
        H[i+1] -= 1
    elif H[i+1] < H[i]:
        # Already below the previous element and cannot be raised: fail.
        flg = 0
        break
if flg == 1:
    print('Yes')
elif flg == 0:
print('No') | Aasthaengg/IBMdataset | Python_codes/p02953/s235802736.py | s235802736.py | py | 244 | python | en | code | 0 | github-code | 90 |
29450566289 | #!/usr/bin/env python
from operator import itemgetter
"""
conflates 'objective' into neutral
"""
def conflate(tweetData, trainingExList, conf):
    """Tally sentiment labels over the given training tweets.

    Returns a dict mapping each label to a (count, percentage) tuple.
    When ``conf`` is true, 'objective' and 'objective-OR-neutral' counts
    are folded into the 'neutral' entry (their separate entries are still
    reported as well).
    """
    neg = pos = neutral = obj = obj_neut = 0.0
    for t in trainingExList:
        answers = tweetData['tweets'][t]['answers']
        # Precedence matters: a two-label answer always counts as
        # objective-OR-neutral, then negative beats positive, and so on.
        if len(answers) == 2:
            obj_neut += 1
        elif 'negative' in answers:
            neg += 1
        elif 'positive' in answers:
            pos += 1
        elif 'neutral' in answers:
            neutral += 1
        elif 'objective' in answers:
            obj += 1
    total = neg + pos + neutral + obj + obj_neut
    dictSent = {}
    dictSent['negative'] = (neg, (neg / total) * 100)
    dictSent['positive'] = (pos, (pos / total) * 100)
    if conf:
        merged = neutral + obj + obj_neut
        dictSent['neutral'] = (merged, 100 * merged / total)
    else:
        dictSent['neutral'] = (neutral, (neutral / total) * 100)
    dictSent['objective'] = (obj, (obj / total) * 100)
    dictSent['objective-OR-neutral'] = (obj_neut, (obj_neut / total) * 100)
    return dictSent
# Finds the accuracy achieved by guessing a label uniformly at random.
def randomGuess(trainingExList, dictSent):
    """Return the expected accuracy of a uniform random guess: 1/num_labels.

    Algebraically identical to the old ((1/k) * n) / n computation, minus
    the dead accumulators, and it no longer divides by zero when
    ``trainingExList`` is empty.  ``trainingExList`` is kept for interface
    compatibility.
    """
    return 1.0 / len(dictSent)
"""
finds the most frequent semantic
"""
def MFS(dictSent):
mfsense = max(dictSent.iteritems(), key=itemgetter(1))[0]
return mfsense
| dyelsey/SemEval | helper.py | helper.py | py | 1,518 | python | en | code | 0 | github-code | 90 |
1383961785 | #
# Sorteador de facturas para 'x' día de la semana
# levanta los datos de un json, lee los datos y determina
# de acuerdo al día parametrizado y la cantidad de gente,
# los días que se debe llevar facturas y quien las lleva
import json
# from datetime import datetime
import modules.facturas_classes as fClasses
fApp = fClasses.FacturasApp()
# Load the participants list and the configured weekday from the JSON file.
with open('participantes.json') as json_file:
    participantes = json.load(json_file)
day_to_bring = fApp.get_day_number(participantes.get('bdat'))
persons_lst = participantes.get('persons')
try:
    # Compute the bring-days and assign one participant to each date.
    d2b = fApp.get_bring_days(day_to_bring)
    asigned_dates = fApp.assign_people(persons_lst, d2b)
except ValueError as ex_las_fechas:
    # Invalid dates reported by the FacturasApp helpers.
    print(ex_las_fechas)
| andresj-io/facturas | facturas.py | facturas.py | py | 771 | python | es | code | 0 | github-code | 90 |
495505821 | import requests
from lxml import etree
baseurl = "https://huggingface.co/models?"
if __name__ == '__main__':
#初始化拼接请求url
url = ''
#初始化结果列表
result_list = []
#封装的api前缀
base_api = "https://api-inference.huggingface.co/models/"
model_url = "https://huggingface.co/"
# 定制请求头,加入cookie防止网站出现cookie反爬
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (HTML, like Gecko) '
'Chrome/110.0.0.0 Safari/537.36 ',
'cookie': '_ga=GA1.2.1567546240.1678100568; _gid=GA1.2.2114067746.1678100568; '
'__stripe_mid=4ffa3d97-66a6-4bfd-8561-e456f94e4adac00681 '
}
f = open('./model.txt', mode='w', encoding='utf-8', )
for i in range(1, 4886):
# 生成每页访问的url
if i == 1:
# https://huggingface.co/models?sort=downloads
url = baseurl + "sort=downloads"
else:
url = baseurl + "p=" + str(i) + "?sort=downloads"
# 发送请求
response = requests.get(url, headers)
# 得到响应
content = response.text
# 使用过xpath解析html
html = etree.HTML(content)
# 找到想要的数据
tmp_list = html.xpath("/html/body/div/main/div/div/section/div[2]/div/article/a/header/h4/text()")
for model in tmp_list:
# 保存至model.txt文件,
f.write(base_api + model + '\n')
f.close()
# result_list.append(tmp_list)
# print(result_list)
| xglds99/pythonSpider | spider/huggingface.py | huggingface.py | py | 1,596 | python | en | code | 0 | github-code | 90 |
11254225459 | from nltk.corpus import wordnet as wn
from nltk import pos_tag,ne_chunk
from nltk.tokenize import word_tokenize,wordpunct_tokenize,sent_tokenize
import re, collections
from nltk.stem import WordNetLemmatizer
from nltk.tag import pos_tag
from collections import Counter
from nltk import FreqDist
import nltk
from nltk import ngrams
from operator import itemgetter
# Load the input file and collapse it into a single string.
with open('three.txt', 'r') as f:
    lines = f.readlines()
print("lines", lines)
frm = ''.join(lines)
print(frm)
# Tokenize into words.
frm_word = word_tokenize(frm)
# Lemmatize every (lower-cased) token.
lemmatizer = WordNetLemmatizer()
frm_lemma = [lemmatizer.lemmatize(word.lower()) for word in frm_word]
print("\n_____Lemitization Output ________ ")
print(frm_lemma)
frm_pos = pos_tag(frm_lemma)
print("_____Bigram Output_____")
# All bigrams over the lemmatized token stream.
gram = list(ngrams(frm_lemma, 2))
print(gram)
print("______Word frequency Bigrams_______")
fdist1 = nltk.FreqDist(gram)
top_fiv = fdist1.most_common()
top_5 = fdist1.most_common(5)
top = sorted(top_fiv, key=itemgetter(0))
print(top)
print('_______Top 5 words Bigrams_______')
print(top_5)
sent1 = sent_tokenize(frm)
rep_sent1 = []
for sent in sent1:
    for word, words in gram:
        for ((c, m), l) in top_5:
            # Fix: the original test `(word,words == c,m)` built the tuple
            # (word, words == c, m), which is always truthy, so every
            # sentence matched; compare the bigram pairs instead.
            # NOTE(review): `gram` holds the bigrams of the whole text, not
            # of `sent`, so every sentence is still appended once per
            # top-bigram occurrence — likely the intent was to match the
            # sentence's own bigrams; confirm.
            if (word, words) == (c, m):
                rep_sent1.append(sent)
print("\n____Top 5 Bigrams sentences______")
print(max(rep_sent1, key=len))
18402817069 | N,M,K =map(int, input().split())
mod = 10**9 + 7  # all answers are reported modulo this prime
# cmbが10**10くらいだけど求められるか?って感じ
# 問題読み違えていた。。。N*M <= 2*10**5だ。。。まあ普通だ。
# もうライブラリ使おう。昔nCrのrが小さい時の工夫とかあったけど今回は大丈夫だ。
# https://ikatakos.com/pot/programming_algorithm/number_theory/mod_combination
import numpy as np
def prepare(n, MOD):
    """Precompute factorial tables modulo MOD (assumed prime, e.g. 1e9+7).

    Returns (facts, invs) as plain lists where facts[i] = i! % MOD and
    invs[i] = (i!)^(-1) % MOD, built blockwise with numpy for speed.
    """
    # Work on a nrt x nrt grid covering at least n entries.
    nrt = int(n ** 0.5) + 1
    nsq = nrt * nrt
    # facts starts as consecutive integers; cumulative products along the
    # columns and then across the rows turn row-major cell (i, j) into
    # (i*nrt + j)! modulo MOD.
    facts = np.arange(nsq, dtype=np.int64).reshape(nrt, nrt)
    facts[0, 0] = 1
    for i in range(1, nrt):
        facts[:, i] = facts[:, i] * facts[:, i - 1] % MOD
    for i in range(1, nrt):
        facts[i] = facts[i] * facts[i - 1, -1] % MOD
    facts = facts.ravel().tolist()
    # invs is built the same way but backwards, seeded with the modular
    # inverse of the largest factorial via Fermat's little theorem.
    invs = np.arange(1, nsq + 1, dtype=np.int64).reshape(nrt, nrt)
    invs[-1, -1] = pow(facts[-1], MOD - 2, MOD)
    for i in range(nrt - 2, -1, -1):
        invs[:, i] = invs[:, i] * invs[:, i + 1] % MOD
    for i in range(nrt - 2, -1, -1):
        invs[i] = invs[i] * invs[i + 1, 0] % MOD
    invs = invs.ravel().tolist()
    return facts, invs
# Precompute tables large enough for the cmb(N*M-2, K-2) lookups (+10 slack).
facts, invs = prepare(N*M+10,mod)
def cmb(n, r, MOD):
    """n-choose-r modulo MOD, using the precomputed facts/invs tables.

    Fix: the body previously used the module-level ``mod`` instead of the
    ``MOD`` parameter (harmless for the callers in this file, which pass
    ``mod``, but wrong for any other modulus).
    """
    return (((facts[n] * invs[n-r]) % MOD) * invs[r]) % MOD
# Split the double sum: for each pair of chosen cells (i, j), the answer
# gains dist(i, j) times the number of ways to pick the other K-2 cells,
# i.e. cmb(N*M-2, K-2).  X and Y distances are independent, so columns and
# rows are handled separately: a horizontal gap `dis` occurs for (M-dis)
# column pairs and N**2 row placements of the two endpoints, contributing
# `dis` each.  Every board is seen many times across pairs, but only the
# current pair's term is added, so nothing is double-counted.
ans = 0
for dis in range(M):
    ans += N**2 * (M-dis) * dis * cmb(N*M-2, K-2, mod)
    ans %= mod
for dis in range(N):
    ans += M**2 * (N-dis) * dis * cmb(N*M-2, K-2, mod)
    ans %= mod
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03039/s550189886.py | s550189886.py | py | 2,054 | python | ja | code | 0 | github-code | 90 |
30590851909 | import asyncio
from aiogram import types
from aiogram.dispatcher.filters import Text
from loader import dp, bot
with open("pictures/lyceum.jpg", "rb") as file:
photo = file.read()
@dp.message_handler(Text(equals="TKTI qoshidagi akademik litsey"))
async def lyceum_uz(message: types.Message):
await bot.send_chat_action(message.chat.id, "typing")
await bot.send_photo(
chat_id=message.chat.id,
photo=photo,
caption=f"<b>Toshkent kimyo-texnologiya instituti akademik litseyi\n\n"
f"Ishonch telefonlari:\n"
f"(71) 224-87-17 | (71) 224-88-55 | (71) 221-61-33\n\n"
f"Manzil | <a href='https://bit.ly/tcti_lyceum'>Toshkent shahri, Yunusobod tumani, 3-mavze, 7-uy</a></b>\n\n\n"
)
await asyncio.sleep(2)
await bot.send_chat_action(message.chat.id, "typing")
await message.answer(
text=f"<b>Akademik litsey haqida ba'zi ma'lumotlar:\n\n"
f"<i>Toshkent kimyo-texnologiya instituti akademik litseyiga 2006-yil 2-sentabrda asos solingan.\n\n"
f"O‘qish muddati ikki yildan iborat.\n\n"
f"Akademik litsey direktori - Xolmirzayev Zulfiqor Jo‘rayevich</i></b>"
)
@dp.message_handler(Text(equals="Академический лицей при ТКТИ"))
async def lyceum_ru(message: types.Message):
    """Send the lyceum photo, contact info and a short description (Russian)."""
    await bot.send_chat_action(message.chat.id, "typing")
    await bot.send_photo(
        chat_id=message.chat.id,
        photo=photo,
        # Plain string literals: the original f-strings had no placeholders.
        caption="<b>Академический лицей Ташкентского химико-технологического института\n\n"
                "Телефоны доверия:\n"
                "(71) 224-87-17 | (71) 224-88-55 | (71) 221-61-33\n\n"
                "Адрес | <a href='https://bit.ly/tcti_lyceum'>Город Ташкент, Юнусабадский район, 3 квартал, 7 дом</a></b>\n\n\n"
    )
    await asyncio.sleep(2)
    await bot.send_chat_action(message.chat.id, "typing")
    await message.answer(
        text="<b>Немного информации об академическом лицее:\n\n"
             "<i>Академический лицей Ташкентского химико-технологического института основан 2 сентября 2006 года.\n\n"
             "Срок обучения - два года.\n\n"
             "Директор академического лицея - Холмирзаев Зулфикор Джораевич.</i></b>"
    )
@dp.message_handler(Text(equals="Academic lyceum under TCTI"))
async def lyceum_en(message: types.Message):
    """Send the lyceum photo, contact info and a short description (English)."""
    await bot.send_chat_action(message.chat.id, "typing")
    await bot.send_photo(
        chat_id=message.chat.id,
        photo=photo,
        # Plain string literals: the original f-strings had no placeholders.
        caption="<b>Academic Lyceum of the Tashkent Chemical-Technological Institute\n\n"
                "Helplines:\n"
                "(71) 224-87-17 | (71) 224-88-55 | (71) 221-61-33\n\n"
                "Address | <a href='https://bit.ly/tcti_lyceum'>Tashkent, Yunusabad district, 3rd district, 7th house</a></b>\n\n\n"
    )
    await asyncio.sleep(2)
    await bot.send_chat_action(message.chat.id, "typing")
    await message.answer(
        text="<b>Some information about the academic lyceum:\n\n"
             "<i>The Academic Lyceum of the Tashkent Chemical-Technological Institute was founded on September 2, 2006.\n\n"
             "Duration of study is two years.\n\n"
             "The director of the academic lyceum is Kholmirzaev Zulfiqor Jorayevich</i></b>"
    )
| dostonbokhodirov/tktiuzbot | handlers/structure_handlers/lyceum_handler.py | lyceum_handler.py | py | 3,655 | python | en | code | 1 | github-code | 90 |
18370421248 | # -*- coding: utf-8 -*-
from PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QDialog, QApplication, QHBoxLayout, QListWidgetItem, QListWidget, QMessageBox
from Ui_InputDlg import Ui_InputDlg
from data_handling.material import MaterialListLibrary
from widgetparam import WidgetParam
from data_handling.material import *
from ShowResultDlg import ShowResultDlg
import pickle
import os
import copy
import sys
from PyQt5.QtGui import QIcon
import traceback
class InputDlg(QDialog, Ui_InputDlg):
    """Main dialog of the tool.

    Lets the user browse/search a material library, compose and save
    materials, and open activation-result windows for a material under a
    selected spectrum.
    """

    def __init__(self, parent=None):
        super(InputDlg, self).__init__(parent)
        self.setupUi(self)
        self.matlib = MaterialListLibrary()
        self.init_ui_data()
        # Keep references to opened result dialogs so they are not
        # garbage-collected while visible.
        self.resultdlgs = []
        self.tableWidgetMatComposition.init_mat_table()
        self.setWindowIcon(QIcon('fds.ico'))

    def _refresh_material_list(self, text):
        """Repopulate the material list with names that contain every
        whitespace-separated word of *text* (empty text lists everything)."""
        self.listWidgetMaterialLib.clear()
        words = [a for a in text.split(' ') if len(a) > 0 and a != '\n']
        for mat_name in sorted(self.matlib.materials.keys(), key=lambda a: a):
            if all(word in mat_name for word in words):
                self.listWidgetMaterialLib.addItem(mat_name)

    def init_ui_data(self):
        """(Re)load the material library and refresh the material list and
        the spectra combo box."""
        self.matlib.load_material_list()
        # Fix: this method is called again after saving/deleting a material;
        # clearing the combo box first prevents the spectra entries from
        # being appended (duplicated) on every call.
        self.comboBoxSelectSpectra.clear()
        for spectrum_name in sorted(BasicPath.get_spectra_list()):
            self.comboBoxSelectSpectra.addItem(spectrum_name)
        self._refresh_material_list(self.textMaterialSearch.text())

    def current_material(self):
        """Return the material composed in the edit table, carrying the
        name typed by the user, or None if the table holds no valid data."""
        mat_ret = self.tableWidgetMatComposition.ui_to_mat_info()
        if mat_ret is None:
            return None
        mat_ret.name = self.textMatName.text()
        return mat_ret

    def show_message(self, msg):
        """Display *msg* in a modal message box."""
        msg_box = QMessageBox()
        msg_box.setText(msg)
        msg_box.exec()

    @pyqtSlot()
    def on_listWidgetMaterialLib_itemSelectionChanged(self):
        """Mirror the selected library material into the preview table."""
        selected_row_idx = self.listWidgetMaterialLib.currentRow()
        if selected_row_idx >= 0:
            mat_name = self.listWidgetMaterialLib.item(selected_row_idx).text()
            self.tableWidgetSelectedMatComposition.mat_info_to_ui(self.matlib.materials[mat_name])
        else:
            self.tableWidgetSelectedMatComposition.init_mat_table()

    @pyqtSlot(str)
    def on_textMaterialSearch_textChanged(self, text):
        """Filter the material list as the user types.

        Fix: the original re-added all materials *without clearing* the
        list when the search box became empty, duplicating its contents.
        """
        self._refresh_material_list(text)

    @pyqtSlot()
    def on_pushButtonLoad_clicked(self):
        """Copy the previewed library material into the editable table."""
        if self.tableWidgetSelectedMatComposition.cur_material:
            new_mat = copy.deepcopy(self.tableWidgetSelectedMatComposition.cur_material)
            self.tableWidgetMatComposition.mat_info_to_ui(new_mat)
            self.textMatName.setText(new_mat.name)

    @pyqtSlot()
    def on_pushButtonSave_clicked(self):
        """Add the edited material to the in-memory library and refresh."""
        if self.tableWidgetMatComposition.cur_material:
            material = self.tableWidgetMatComposition.ui_to_mat_info()
            material.name = self.textMatName.text()
            if material:
                try:
                    self.matlib.add_material(material)
                except YSPException as err:
                    self.show_message(err.message)
                    return
                except Exception:
                    # Best-effort: unexpected errors are ignored so the UI
                    # still refreshes (preserves original behaviour).
                    pass
                self.init_ui_data()

    @pyqtSlot()
    def on_pushButtonUpdateMaterialLib_clicked(self):
        """Persist the in-memory material library to disk."""
        self.matlib.save_material_list()

    @pyqtSlot()
    def on_pushButtonDeleteMatFromLib_clicked(self):
        """Delete the selected material from the library after confirmation."""
        selected_item = self.listWidgetMaterialLib.currentItem()
        if selected_item:
            reply = QMessageBox.information(self, "Question", "Do you really want to delete "
                                            "the material \"{}\" from the material library?".format(selected_item.text()),
                                            QMessageBox.Ok | QMessageBox.Cancel)
            if reply == QMessageBox.Ok:
                self.matlib.del_material(selected_item.text())
                self.on_pushButtonUpdateMaterialLib_clicked()
                self.init_ui_data()
                self.matlib.save_material_list()

    @pyqtSlot()
    def on_pushButtonClearCurrentMat_clicked(self):
        """Reset the editable composition table."""
        self.tableWidgetMatComposition.init_mat_table()

    @pyqtSlot()
    def on_pushButtonShowResult_clicked(self):
        """Compute (or fetch cached) activation data for the selected
        material under the selected spectrum and open a result window."""
        cur_mat = copy.deepcopy(self.tableWidgetSelectedMatComposition.cur_material)
        if cur_mat is None:
            self.show_message('Please select the material needed to'
                              ' be displayed from the material libraray')
            return
        cur_spectrum_name = self.comboBoxSelectSpectra.currentText()
        try:
            self.setCursor(Qt.WaitCursor)
            cur_mat_cache = cur_mat.get_cached_spectrum_material(cur_spectrum_name)
            if cur_mat_cache is not None:
                # A cached entry may belong to a *different* material with
                # the same name: compare normalized compositions and drop
                # the cache on any mismatch.
                cur_mat.normalize()
                for elem in cur_mat.elements.keys():
                    if not (elem in cur_mat_cache.elements.keys()):
                        cur_mat_cache = None
                        break
                    elif abs(cur_mat.elements[elem] - cur_mat_cache.elements[elem]) > 1e-5:
                        cur_mat_cache = None
                        break
            if cur_mat_cache is None:
                if cur_mat.calculate_spectrum_activation(cur_spectrum_name):
                    print("calculate activation of material: " + cur_mat.name)
                    cur_mat_cache = cur_mat
                    cur_mat_cache.cache_spectrum_material(cur_spectrum_name)
            else:
                print("loaded cached material: " + cur_mat.name)
            if cur_mat_cache is not None:
                self.resultdlgs.append(ShowResultDlg(cur_mat_cache))
                self.resultdlgs[-1].setWindowTitle('Activation data of material \"{0}\", spectrum \"{1}\"'
                                                   .format(cur_mat_cache.name, cur_spectrum_name))
                self.resultdlgs[-1].show()
            else:
                self.show_message('The activation data of \'{0}\' calculation failed.'.format(cur_mat.name))
            self.setCursor(Qt.ArrowCursor)
        except YSPException as error:
            self.show_message("Error in calculating activation properties for material: "
                              + cur_mat.name + " under spectrum: " + cur_spectrum_name + ", info: " + error.message
                              + "\nPlease check if the files under " + BasicPath.getspectra_dir(cur_spectrum_name))
        except Exception as err:
            self.show_message("Unknow error. Please check the console for more information.")
            print(err)
            traceback.print_exc()
        finally:
            # Always restore the cursor, even on failure.
            self.setCursor(Qt.ArrowCursor)
if __name__ == "__main__":
# try:
app = QApplication(sys.argv)
resultDlg = InputDlg()
resultDlg.show()
sys.exit(app.exec_())
# except Exception as err:
# print(err.args) | theysp/FDSNMH_GUI | src/InputDlg.py | InputDlg.py | py | 8,120 | python | en | code | 1 | github-code | 90 |
29416857899 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import csv
import logging
import os
import re
import sys
from os import path
from typing import List
import h5py
import numpy
# noinspection PyPackageRequirements
import progressbar
_CSV_FILE_PATTERN = r'\.?\d{8}T\d{6}-\d{8}T\d{6}(?:-\d+)?(?:-\d{8}T\d{6,12})?\.csv'
_DATASET_EXTENSION = '.hdf5'
def _main():
    """Command-line entry point: parse the arguments and build the dataset."""
    progressbar.streams.wrap_stderr()
    description = ('Command line utility to create a training dataset in HDF5 format for '
                   'the track recurrent neural network from CSV files created by the '
                   'classifier.')
    arg_parser = argparse.ArgumentParser(prog='dataset-creator', description=description)
    arg_parser.add_argument(
        'input_directory',
        type=_directory_path,
        help='Path of a directory with CSV files to extract tracks.')
    arg_parser.add_argument(
        'output',
        type=_output_file_path,
        help='Path of the output training dataset file with {} extension.'.format(_DATASET_EXTENSION))
    arg_parser.add_argument(
        '-a', '--append',
        action='store_true',
        help='Flag to indicate that an existing HDF5 file can be used and new datasets should be '
             'appended to it.')
    parsed = arg_parser.parse_args()
    creator = DatasetCreator(parsed.input_directory, parsed.output, parsed.append)
    creator.run()
def _directory_path(directory_path: str) -> str:
    """Parse a directory path argument, returning its normalized form."""
    normalized = _normalize_directory_path(directory_path)
    if path.isdir(normalized):
        return normalized
    raise argparse.ArgumentTypeError("'{}' is not a directory".format(directory_path))
def _normalize_directory_path(directory_path: str) -> str:
    """Normalize a directory path.

    Resolve user path (~), transform to absolute path and ensure that it
    ends with a separator."""
    # Joining the empty string appends a trailing path separator.
    return path.join(_normalize_file_path(directory_path), '')
def _normalize_file_path(file_path: str) -> str:
    """Normalize a file path.

    Resolve user path (~) and transform to absolute path."""
    expanded = path.expanduser(file_path) if file_path.startswith('~') else file_path
    return expanded if path.isabs(expanded) else path.abspath(expanded)
def _output_file_path(output_file_path: str) -> str:
    """Parse an output file path argument.

    Raises argparse.ArgumentTypeError if the path exists but is not a
    regular file, or if it does not carry the dataset extension.
    """
    normalized_file_path = _normalize_file_path(output_file_path)
    # Fix: validate the *normalized* path. The original checked the raw
    # argument, so '~'-prefixed or relative paths could dodge the
    # existence/file-type check.
    if path.exists(normalized_file_path) and not path.isfile(normalized_file_path):
        raise argparse.ArgumentTypeError("'{}' already exists and is not a file.".format(output_file_path))
    elif path.splitext(normalized_file_path)[1] != _DATASET_EXTENSION:
        raise argparse.ArgumentTypeError("The output file must have the '{}' extension".format(_DATASET_EXTENSION))
    return normalized_file_path
class DatasetCreator:
    """Builds an HDF5 training dataset from classifier CSV files.

    Each track (sequence of bounding boxes sharing a track id) becomes one
    HDF5 dataset, named by a monotonically increasing integer, with the
    class name stored as an attribute.
    """
    def __init__(self, input_directory_path: str, output_file_path: str, append: bool) -> None:
        super().__init__()
        self._input_directory_path = input_directory_path
        self._output_file_path = output_file_path
        self._append = append
        # Next integer dataset name; initialized in _create_dataset.
        self._dataset_name = None
    def run(self) -> None:
        """Create the dataset."""
        csv_file_paths = self._find_csv_file_paths()
        self._create_dataset(csv_file_paths)
    def _find_csv_file_paths(self) -> List[str]:
        """Return a list of CSV files used as input to create the dataset."""
        print('Buscando archivos CSV para crear el dataset...')
        csv_files = []
        # Recursively collect files whose names match the classifier's
        # timestamped CSV naming pattern.
        for root, _, files in os.walk(self._input_directory_path):
            for file in files:
                if re.fullmatch(_CSV_FILE_PATTERN, file):
                    file_path = path.join(root, file)
                    csv_files.append(file_path)
                    print(' - {}'.format(path.relpath(file_path, self._input_directory_path)))
        print('{} archivos encontrados'.format(len(csv_files)))
        if not csv_files:
            # Nothing to process: abort with a non-zero exit status.
            sys.exit(1)
        return sorted(csv_files)
    def _create_dataset(self, csv_file_paths: List[str]) -> None:
        """Create the dataset in HDF5 format."""
        if path.exists(self._output_file_path) and not self._append:
            logging.error(
                "'{}' already exists. If you want to append new datasets "
                "to this file use the -a or --append option.".format(self._output_file_path))
            sys.exit(1)
        with h5py.File(self._output_file_path, "a") as output_file:
            # Column legend for every per-track dataset.
            output_file.attrs['col0'] = 'left'
            output_file.attrs['col1'] = 'top'
            output_file.attrs['col2'] = 'width'
            output_file.attrs['col3'] = 'height'
            # Continue numbering after the highest existing dataset name
            # when appending; start at 0 for a fresh file.
            if len(output_file) == 0:
                self._dataset_name = 0
            else:
                self._dataset_name = max(map(int, output_file.keys())) + 1
            for csv_file_path in progressbar.progressbar(csv_file_paths):
                self._process_csv(csv_file_path, output_file)
    def _process_csv(self, csv_file_path: str, output_file: h5py.File) -> None:
        """Read a CSV file, group the bounding boxes by track id and write them in the output file."""
        track_id_to_class = {}
        track_id_to_bounding_boxes = {}
        with open(csv_file_path, newline='') as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                track_id = row['Track']
                if track_id not in track_id_to_class:
                    track_id_to_class[track_id] = row['Class']
                    track_id_to_bounding_boxes[track_id] = []
                # Clamp negative left/top to 0 (boxes may extend past the
                # frame edge); values are stored as uint16 below.
                track_id_to_bounding_boxes[track_id].append(
                    (max(0, int(row['Left'])), max(0, int(row['Top'])), int(row['Width']), int(row['Height'])))
        for track_id, class_name in track_id_to_class.items():
            bounding_boxes = track_id_to_bounding_boxes[track_id]
            dataset_name = str(self._dataset_name)
            self._dataset_name += 1
            data = numpy.array(bounding_boxes, dtype='uint16')
            dataset = output_file.create_dataset(dataset_name,
                                                 dtype='uint16',
                                                 data=data)
            dataset.attrs['class'] = class_name
if __name__ == '__main__':
_main() | MarlonCajamarca/Keras-LSTM-Trajectory-Prediction | dataset-creator/dataset-creator.py | dataset-creator.py | py | 6,708 | python | en | code | 101 | github-code | 90 |
11698250227 |
import os
import sys
import itertools
import re
adirProj=os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(adirProj)
from lib.resource.altname_paths import *
from lib.diskgenmem import *
from lib.resource.chem_altnames import *
from lib.resource.fetch_mesh_to_unii import *
from lib.mytime import *
from lib.re_sub import sub_all_plain_string
from lib.reservoir import reservoir
import random
dmem = make_dmem()
mesh_to_unii = dmem.cache(lambda: gather_mesh_unii(), relf_mesh_to_unii)
mesh_to_only_unii = DictMem(lambda:
((meshid_find(x["mesh"]), x) for x in
mesh_to_unii()))()
pubtator_chem_altnames = DictMem(lambda:
dmem.cache(
lambda: gather_pubtator_chem_altnames(absf_chemical2pubtatorcentral, lambda n:n),
relf_pubtator_chem_altnames)())
def mesh_chem_altnames():
    """Yield (unii, unii2, meshid, altnames) for every PubTator chemical
    whose MeSH id appears in the MeSH->UNII mapping.

    ``unii`` is taken from the mapping's "r" key and ``unii2`` from "rr";
    either may be None when the key is absent.
    """
    for (meshid,vs) in pubtator_chem_altnames().items():
        if meshid in mesh_to_only_unii:
            meshinfo = mesh_to_only_unii[meshid]
            unii = meshinfo["r"] if "r" in meshinfo else None
            unii2 = meshinfo["rr"] if "rr" in meshinfo else None
            yield (unii,unii2,meshid,vs)
def main():
    """Print timed sanity-check counts over the cached tables and a
    reproducible random sample of alternate-name rows."""
    print("pubtator chemicals count={}".format(
        mytime(lambda: sum(1 for _ in pubtator_chem_altnames().keys()))))
    print("chemical meshids count={}".format(
        mytime(lambda: sum(1 for _ in mesh_to_only_unii.keys()))
    ))
    print("mesh_chem_altnames count={}".format(
        mytime(lambda: sum(1 for _ in mesh_chem_altnames()))))
    print("mesh_chem_altnames with any unii count={}".format(
        mytime(lambda: sum(1 for (unii,unii2,meshid,vs) in mesh_chem_altnames() if unii is not None or unii2 is not None))))
    print("mesh_chem_altnames with direct unii count={}".format(
        mytime(lambda: sum(1 for (unii,unii2,meshid,vs) in mesh_chem_altnames() if unii is not None))))
    # Fixed seed keeps the reservoir sample reproducible across runs.
    for (unii,unii2,meshid,v) in reservoir(mesh_chem_altnames(), 20, random.Random(123)):
        print("{},{},{} => {}".format(unii,unii2,meshid,v))
if __name__ == '__main__':
main()
| jeffhhk/datriples | bin/benchmarks/mesh_chem_altnames.py | mesh_chem_altnames.py | py | 2,092 | python | en | code | 0 | github-code | 90 |
31429373257 | """
该模块是一个基于 PySide6 的多线程模块,通过创建实例 RunInThread 然后设置函数运行的方式无痛开启多线程
该模块同样可以传递参数给被调用函数,可以直接 return 后在被接受的函数里面定义对应数量的函数参数即可
Examples:
>>> import time
>>> a = RunInThread()
>>> a.set_start_func(lambda: time.sleep(5))
>>> a.set_finished_func(lambda: print('运行结束'))
>>> a.start()
>>> '运行结束' # 非阻塞主线程
"""
import time
from typing import Callable, Optional
import loguru
from PySide6.QtCore import QObject, QThread, Signal
from PySide6.QtWidgets import (QApplication, QLabel, QPushButton, QVBoxLayout,
QWidget)
class WorkThread(QObject):
    """Worker object that runs a stored callable and reports its result.

    Intended to be moved onto a QThread. ``result`` carries the callable's
    return value; ``finished_signal`` fires right after it.
    """
    finished_signal = Signal()
    result = Signal(object)
    def __init__(self):
        super().__init__()
        self.kwargs = None
        self.args = None
        self.func: Optional[Callable] = None
    def set_start_func(self, func, *args, **kwargs):
        """Remember the callable and its arguments for later execution."""
        self.func = func
        self.args = args
        self.kwargs = kwargs
    def start(self):
        """Run the stored callable and emit its result, then the finished
        notification."""
        if self.args or self.kwargs:
            result = self.func(*self.args, **self.kwargs)
        else:
            result = self.func()
        loguru.logger.debug(f'线程函数执行完毕, 返回值为{result}')
        self.result.emit(result)
        self.finished_signal.emit()
    def __del__(self):
        loguru.logger.debug('线程对象被删除了,内存已经释放')
class RunInThread(QObject):
    """Run a callable on a background QThread and invoke a callback when it
    finishes, without blocking the main thread."""
    def __init__(self):
        super().__init__()
        self.finished_func: Optional[Callable] = None
        self.worker = WorkThread()
        self.mythread = QThread()
        self.worker.moveToThread(self.mythread)
        # Thread start triggers the worker; the teardown then cascades:
        # worker -> thread -> self are deleted once the job completes.
        self.mythread.started.connect(self.worker.start)
        self.worker.finished_signal.connect(self.worker.deleteLater)
        self.worker.destroyed.connect(self.mythread.quit)
        self.mythread.finished.connect(self.mythread.deleteLater)
        self.mythread.destroyed.connect(self.deleteLater)
    def start(self):
        """Start the thread once the functions have been configured."""
        self.mythread.start()
    def set_start_func(self, func, *args, **kwargs):
        """Set the function that runs inside the worker thread.

        This is where the threaded work happens: crawlers, other I/O, or
        anything that would otherwise block the main thread.
        """
        self.worker.set_start_func(func, *args, **kwargs)
    def set_finished_func(self, func):
        """Set the callback invoked after the thread finishes."""
        self.finished_func = func
        self.worker.result.connect(self._done_callback)
    def _done_callback(self, *args, **kwargs):
        # Forward the worker's result to the callback; a bare None result
        # means the callback is invoked without arguments.
        if args != (None,) or kwargs:
            self.finished_func(*args, **kwargs)
        else:
            self.finished_func()
class MyWindow(QWidget):
    """
    A small window used for manual testing of RunInThread.
    """
    def __init__(self):
        super().__init__()
        self.btn = QPushButton('按钮')
        self.btn.clicked.connect(self.run)
        self.lb = QLabel('标签')
        self.main_layout = QVBoxLayout()
        self.main_layout.addWidget(self.btn)
        self.main_layout.addWidget(self.lb)
        self.setLayout(self.main_layout)
        loguru.logger.debug('窗口对象被创建了')
    def run(self):
        # Keep a reference on self so the helper is not garbage-collected
        # while the worker thread is still running.
        self.a = RunInThread()
        self.a.set_start_func(self.waste_time_func)
        self.a.set_finished_func(self.over)
        self.a.start()
    def waste_time_func(self):
        # NOTE(review): this body runs on the worker thread but touches GUI
        # widgets (setEnabled/setText); Qt widgets are not thread-safe —
        # confirm, or route updates through signals instead.
        self.btn.setEnabled(False)
        self.lb.setText('开始等待3秒')
        loguru.logger.debug('开始等待3秒')
        time.sleep(3)
        loguru.logger.debug('等待结束')
        # Note: a tuple is returned here and delivered to the callback.
        return '欢迎使用', '你好'
    def over(self, result):
        # Note: the callback declares a `result` parameter to receive the
        # worker's return value.
        loguru.logger.debug(f'slot: {result}')
        self.lb.setText('线程结束')
        self.btn.setEnabled(True)
if __name__ == '__main__':
app = QApplication([])
window = MyWindow()
window.show()
app.exec()
| 271374667/NuitkaGUI | src/utils/run_in_thread.py | run_in_thread.py | py | 4,121 | python | en | code | 12 | github-code | 90 |
15482106525 |
from typing import Union as _Union
from typing import List as _List
from typing import Tuple as _Tuple
from .._network import Network
from .._networks import Networks
from .._population import Population
from .._variableset import VariableSets, VariableSet
from .._outputfiles import OutputFiles
from ._profiler import Profiler
from ._get_functions import MetaFunction
import os as _os
__all__ = ["get_number_of_processes", "run_models"]
def get_number_of_processes(parallel_scheme: str, nprocs: int = None):
    """Work out how many processes have been set by the parallelisation
    system called 'parallel_scheme'.

    When ``nprocs`` is None, multiprocessing defaults to 1 and mpi4py uses
    the MPI communicator size; scoop requires an explicit count.
    """
    if nprocs is None:
        if parallel_scheme == "multiprocessing":
            return 1
        elif parallel_scheme == "mpi4py":
            from mpi4py import MPI
            comm = MPI.COMM_WORLD
            nprocs = comm.Get_size()
            return nprocs
        elif parallel_scheme == "scoop":
            raise ValueError(
                f"You must specify the number of processes for "
                f"scoop to parallelise over")
        else:
            raise ValueError(
                f"You must specify the number of processes to "
                f"use for parallel scheme '{parallel_scheme}'")
    if parallel_scheme == "mpi4py":
        # Never report more processes than the MPI communicator provides.
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        n = comm.Get_size()
        if n < nprocs:
            return n
        else:
            return nprocs
    elif parallel_scheme == "scoop":
        # NOTE(review): scoop always reports 4 regardless of the requested
        # nprocs — looks like a placeholder; confirm intended behaviour.
        return 4
    elif parallel_scheme == "multiprocessing":
        return nprocs
    else:
        raise ValueError(
            f"Unrecognised parallelisation scheme {parallel_scheme}")
def run_models(network: _Union[Network, Networks],
               variables: VariableSets,
               population: Population,
               nprocs: int, nthreads: int, seed: int,
               nsteps: int, output_dir: OutputFiles,
               iterator: MetaFunction = None,
               extractor: MetaFunction = None,
               mixer: MetaFunction = None,
               mover: MetaFunction = None,
               profiler: Profiler = None,
               parallel_scheme: str = "multiprocessing",
               debug_seeds=False) \
        -> _List[_Tuple[VariableSet, Population]]:
    """Run all of the models on the passed Network that are described
       by the passed VariableSets

       Parameters
       ----------
       network: Network or Networks
         The network(s) to model
       variables: VariableSets
         The sets of VariableSet that represent all of the model
         runs to perform
       population: Population
         The initial population for all of the model runs. This also
         contains the starting date and day for the model outbreak
       nprocs: int
         The number of model runs to perform in parallel
       nthreads: int
         The number of threads to parallelise each model run over
       seed: int
         Random number seed which is used to generate random seeds
         for all model runs
       nsteps: int
         The maximum number of steps to perform for each model - this
         will run until the outbreak is over if this is None
       output_dir: OutputFiles
         The OutputFiles that represents the directory in which all
         output should be placed
       iterator: str
         Iterator to load that will be used to iterate the outbreak
       extractor: str
         Extractor to load that will be used to extract information
       mixer: str
         Mixer to load that will be used to mix demographic data
       mover: str
         Mover to load that will be used to move the population between
         different demographics
       profiler: Profiler
         Profiler used to profile the model run
       parallel_scheme: str
         Which parallel scheme (multiprocessing, mpi4py or scoop) to use
         to run multiple model runs in parallel
       debug_seeds: bool (False)
         Set this parameter to force all runs to use the same seed
         (seed) - this is used for debugging and should never be set
         in production runs

       Returns
       -------
       results: List[ tuple(VariableSet, Population)]
         The set of adjustable variables and final population at the
         end of each run
    """
    from ._console import Console

    if len(variables) == 1:
        # no need to do anything complex - just a single run
        if not variables[0].is_empty():
            Console.print(f"* Adjusting {variables[0]}", markdown=True)
            params = network.params.set_variables(variables[0])
            network.update(params, profiler=profiler)

        trajectory = network.run(population=population, seed=seed,
                                 nsteps=nsteps,
                                 output_dir=output_dir,
                                 iterator=iterator,
                                 extractor=extractor,
                                 mixer=mixer,
                                 mover=mover,
                                 profiler=profiler,
                                 nthreads=nthreads)

        results = [(variables[0], trajectory)]

        # perform the final summary
        from ._get_functions import get_summary_functions

        if extractor is None:
            from ..extractors._extract_default import extract_default
            extractor = extract_default
        else:
            from ..extractors._extract_custom import build_custom_extractor
            extractor = build_custom_extractor(extractor)

        funcs = get_summary_functions(network=network, results=results,
                                      output_dir=output_dir,
                                      extractor=extractor,
                                      nthreads=nthreads)

        for func in funcs:
            func(network=network, output_dir=output_dir, results=results)

        return results

    # generate the random number seeds for all of the jobs
    # (for testing, we will use the same seed so that I can check
    # that they are all working)
    seeds = []

    if seed == 0:
        # this is a special mode that a developer can use to force
        # all jobs to use the same random number seed (15324) that
        # is used for comparing outputs. This should NEVER be used
        # for production code
        Console.warning("Using special mode to fix all random number "
                        "seeds to 15324. DO NOT USE IN PRODUCTION!!!")

        for i in range(0, len(variables)):
            seeds.append(15324)

    elif debug_seeds:
        Console.warning(f"Using special model to make all jobs use the "
                        f"Same random number seed {seed}. "
                        f"DO NOT USE IN PRODUCTION!")

        for i in range(0, len(variables)):
            seeds.append(seed)

    else:
        from ._ran_binomial import seed_ran_binomial, ran_int
        rng = seed_ran_binomial(seed)

        # seed the rngs used for the sub-processes using this rng
        for i in range(0, len(variables)):
            seeds.append(ran_int(rng, 10000, 99999999))

    # set the output directories for all of the jobs - this is based
    # on the fingerprint, so should be unique for each job
    outdirs = []

    for v in variables:
        f = v.output_dir()
        d = _os.path.join(output_dir.get_path(), f)

        i = 1
        base = d

        # de-duplicate directory names by appending an 'x%03d' suffix
        while d in outdirs:
            i += 1
            d = base + "x%03d" % i

        outdirs.append(d)

    outputs = []

    Console.print(
        f"Running **{len(variables)}** jobs using **{nprocs}** process(es)",
        markdown=True)

    if nprocs == 1:
        # no need to use a pool, as we will repeat this calculation
        # several times
        save_network = network.copy()

        Console.rule("Running models in serial")

        for i, variable in enumerate(variables):
            seed = seeds[i]
            outdir = outdirs[i]

            with output_dir.open_subdir(outdir) as subdir:
                Console.print(
                    f"Running parameter set {i+1} of {len(variables)} "
                    f"using seed {seed}")
                Console.print(f"All output written to {subdir.get_path()}")

                with Console.redirect_output(subdir.get_path(),
                                             auto_bzip=output_dir.auto_bzip()):
                    Console.print(f"Running variable set {i+1}")
                    Console.print(f"Random seed: {seed}")
                    Console.print(f"nthreads: {nthreads}")

                    # no need to do anything complex - just a single run
                    params = network.params.set_variables(variable)

                    Console.rule("Adjustable parameters to scan")
                    Console.print("\n".join(
                        [f"* {x}" for x in params.adjustments]),
                        markdown=True)
                    Console.rule()

                    network.update(params, profiler=profiler)

                    with Console.spinner("Computing model run") as spinner:
                        try:
                            output = network.run(population=population,
                                                 seed=seed,
                                                 nsteps=nsteps,
                                                 output_dir=subdir,
                                                 iterator=iterator,
                                                 extractor=extractor,
                                                 mixer=mixer,
                                                 mover=mover,
                                                 profiler=profiler,
                                                 nthreads=nthreads)
                            spinner.success()
                        except Exception as e:
                            spinner.failure()
                            Console.print_exception()

                            error = f"FAILED: {e.__class__} {e}"
                            output = None

                    if output is not None:
                        outputs.append((variable, output))
                    else:
                        outputs.append((variable, []))

                    if output is not None:
                        Console.panel(f"Completed job {i+1} of {len(variables)}\n"
                                      f"{variable}\n"
                                      f"{output[-1]}",
                                      style="alternate")
                    else:
                        Console.error(f"Job {i+1} of {len(variables)}\n"
                                      f"{variable}\n"
                                      f"{error}")

            # end of OutputDirs context manager

            if i != len(variables) - 1:
                # still another run to perform, restore the network
                # to the original state
                network = save_network.copy()

        # end of loop over variable sets
    else:
        from ._worker import run_worker

        # create all of the parameters and options to run
        arguments = []

        if isinstance(network, Networks):
            max_nodes = network.overall.nnodes + 1
            max_links = max(network.overall.nlinks, network.overall.nplay) + 1
        else:
            max_nodes = network.nnodes + 1
            max_links = max(network.nlinks, network.nplay) + 1

        try:
            demographics = network.demographics
        except Exception:
            demographics = None

        # give the workers a clean copy of the profiler
        if profiler is None:
            worker_profiler = None
        else:
            worker_profiler = profiler.__class__()

        for i, variable in enumerate(variables):
            seed = seeds[i]
            outdir = outdirs[i]

            arguments.append({
                "params": network.params.set_variables(variable),
                "demographics": demographics,
                "options": {"seed": seed,
                            "output_dir": outdir,
                            "auto_bzip": output_dir.auto_bzip(),
                            "population": population,
                            "nsteps": nsteps,
                            "iterator": iterator,
                            "extractor": extractor,
                            "mixer": mixer,
                            "mover": mover,
                            "profiler": worker_profiler,
                            "nthreads": nthreads,
                            "max_nodes": max_nodes,
                            "max_links": max_links}
            })

        if parallel_scheme == "multiprocessing":
            # run jobs using a multiprocessing pool
            Console.rule("Running models in parallel using multiprocessing")
            from multiprocessing import Pool

            results = []

            with Pool(processes=nprocs) as pool:
                for argument in arguments:
                    results.append(pool.apply_async(run_worker, (argument,)))

                for i, result in enumerate(results):
                    with Console.spinner(
                            "Computing model run") as spinner:
                        try:
                            result.wait()
                            output = result.get()
                            spinner.success()
                        except Exception as e:
                            spinner.failure()
                            error = f"FAILED: {e.__class__} {e}"
                            Console.error(error)
                            output = None

                    if output is not None:
                        Console.panel(
                            f"Completed job {i+1} of {len(variables)}\n"
                            f"{variables[i]}\n"
                            f"{output[-1]}",
                            style="alternate")
                        outputs.append((variables[i], output))
                    else:
                        # fix: report the failed job's own VariableSet
                        # (the original printed the stale loop variable
                        # `variable`, i.e. always the last set)
                        Console.error(f"Job {i+1} of {len(variables)}\n"
                                      f"{variables[i]}\n"
                                      f"{error}")
                        outputs.append((variables[i], []))

        elif parallel_scheme == "mpi4py":
            # run jobs using a mpi4py pool
            Console.rule("Running models in parallel using MPI")
            from mpi4py import futures
            with futures.MPIPoolExecutor(max_workers=nprocs) as pool:
                results = pool.map(run_worker, arguments)

                for i in range(0, len(variables)):
                    with Console.spinner("Computing model run") as spinner:
                        try:
                            output = next(results)
                            spinner.success()
                        except Exception as e:
                            spinner.failure()
                            error = f"FAILED: {e.__class__} {e}"
                            Console.error(error)
                            output = None

                    if output is not None:
                        Console.panel(
                            f"Completed job {i+1} of {len(variables)}\n"
                            f"{variables[i]}\n"
                            f"{output[-1]}",
                            style="alternate")
                        outputs.append((variables[i], output))
                    else:
                        # fix: was the stale loop variable `variable`
                        Console.error(f"Job {i+1} of {len(variables)}\n"
                                      f"{variables[i]}\n"
                                      f"{error}")
                        outputs.append((variables[i], []))

        elif parallel_scheme == "scoop":
            # run jobs using a scoop pool
            Console.rule("Running models in parallel using scoop")
            from scoop import futures

            results = []

            for argument in arguments:
                try:
                    results.append(futures.submit(run_worker, argument))
                except Exception as e:
                    Console.error(
                        f"Error submitting calculation: {e.__class__} {e}\n"
                        f"Trying to submit again...")

                    # try again
                    try:
                        results.append(futures.submit(run_worker, argument))
                    except Exception as e:
                        Console.error(
                            f"No - another error: {e.__class__} {e}\n"
                            f"Skipping this job")
                        results.append(None)

            for i in range(0, len(results)):
                with Console.spinner("Computing model run") as spinner:
                    try:
                        output = results[i].result()
                        spinner.success()
                    except Exception as e:
                        spinner.failure()
                        error = f"FAILED: {e.__class__} {e}"
                        Console.error(error)
                        output = None

                if output is not None:
                    Console.panel(
                        f"Completed job {i+1} of {len(variables)}\n"
                        f"{variables[i]}\n"
                        f"{output[-1]}",
                        style="alternate")
                    outputs.append((variables[i], output))
                else:
                    # fix: was the stale loop variable `variable`
                    Console.error(f"Job {i+1} of {len(variables)}\n"
                                  f"{variables[i]}\n"
                                  f"{error}")
                    outputs.append((variables[i], []))
        else:
            raise ValueError(f"Unrecognised parallelisation scheme "
                             f"{parallel_scheme}.")

    # perform the final summary
    from ._get_functions import get_summary_functions

    if extractor is None:
        from ..extractors._extract_default import extract_default
        extractor = extract_default
    else:
        from ..extractors._extract_custom import build_custom_extractor
        extractor = build_custom_extractor(extractor)

    funcs = get_summary_functions(network=network, results=outputs,
                                  output_dir=output_dir, extractor=extractor,
                                  nthreads=nthreads)

    for func in funcs:
        try:
            func(network=network, output_dir=output_dir,
                 results=outputs, nthreads=nthreads)
        except Exception as e:
            Console.error(f"Error calling {func}: {e.__class__} {e}")

    return outputs
| chryswoods/MetaWards | src/metawards/utils/_run_models.py | _run_models.py | py | 18,892 | python | en | code | null | github-code | 90 |
27314725045 | from requests import Request, Session
import json
from novalabs.utils.helpers import interval_to_oanda_granularity, interval_to_milliseconds
import pandas as pd
from datetime import datetime
import time
class Oanda:
    """Minimal OANDA v20 REST client.

    Exposes pair metadata lookup and historical mid-price candle download,
    returning candles in the library's standardized pandas DataFrame format.
    """

    def __init__(self,
                 key: str = "",
                 secret: str = "",
                 testnet: bool = False
                 ):
        self.api_key = key
        self.api_secret = secret
        # Bug fix: the practice-endpoint URL previously began with a stray
        # leading space (" https://..."), producing an invalid request URL.
        self.based_endpoint = "https://api-fxpractice.oanda.com" if testnet else "https://api-fxtrade.oanda.com"
        self._session = Session()
        # Maximum number of candles requested per paginated API call.
        self.historical_limit = 4500
        self.pairs_info = self.get_pairs_info()

    def _send_request(self, end_point: str, request_type: str, params: dict = None, signed: bool = False):
        """Send one request with auth headers and return the decoded JSON body."""
        url = f'{self.based_endpoint}{end_point}'
        request = Request(request_type, url, data=json.dumps(params))
        prepared = request.prepare()
        prepared.headers['Content-Type'] = 'application/json'
        prepared.headers['OANDA-Agent'] = 'NovaLabs'
        prepared.headers['Authorization'] = f'Bearer {self.api_secret}'
        # Ask the API for UNIX timestamps rather than RFC3339 strings.
        prepared.headers['Accept-Datetime-Format'] = 'UNIX'
        response = self._session.send(prepared)
        return response.json()

    @staticmethod
    def get_server_time() -> int:
        """
        Note: OANDA does not expose a server-time end point, so the local
        clock is used to simulate it.
        Returns:
            the timestamp in milliseconds
        """
        return int(time.time() * 1000)

    def get_pairs_info(self):
        """Fetch the account's CURRENCY instruments.

        Returns:
            dict keyed by pair name with max/min quantity, price precision
            and a fixed quantity precision of 1.
        """
        response = self._send_request(
            end_point=f"/v3/accounts/{self.api_key}/instruments",
            params={"accountID": self.api_key},
            request_type="GET"
        )['instruments']
        pairs_info = {}
        for pair in response:
            if pair['type'] == 'CURRENCY':
                _name = pair['name']
                pairs_info[_name] = {}
                pairs_info[_name]['maxQuantity'] = float(pair['maximumOrderUnits'])
                pairs_info[_name]['minQuantity'] = float(pair['minimumTradeSize'])
                pairs_info[_name]['pricePrecision'] = int(pair['displayPrecision'])
                pairs_info[_name]['quantityPrecision'] = 1
        return pairs_info

    def _get_candles(self, pair: str, interval: str, start_time: int, end_time: int):
        """
        Args:
            pair: pair to get information from
            interval: granularity of the candle ['1m', '1h', ... '1d']
            start_time: timestamp in milliseconds of the starting date
            end_time: timestamp in milliseconds of the end date
        Returns:
            the raw (non-formatted) candle information requested
        """
        gran = interval_to_oanda_granularity(interval=interval)
        # OANDA expects timestamps in seconds.
        _start = start_time/1000
        _end = end_time/1000
        _args = f"?price=M&granularity={gran}&from={_start}&to={_end}"
        return self._send_request(
            end_point=f"/v3/instruments/{pair}/candles{_args}",
            params={
                "price": "M",
                "granularity": gran,
                "from": str(_start),
                "to": str(_end)
            },
            request_type="GET"
        )

    def _get_earliest_timestamp(self, pair: str, interval: str):
        """
        Probe from 2018-01-01 and read the first candle the API returns.
        Args:
            pair: Name of symbol pair
            interval: interval in string
        Returns:
            the earliest valid open timestamp in milliseconds
        """
        start_year = 2018
        starting_date = int(datetime(start_year, 1, 1).timestamp())
        gran = interval_to_oanda_granularity(interval=interval)
        _args = f"?price=M&granularity={gran}&from={starting_date}&count=10"
        response = self._send_request(
            end_point=f"/v3/instruments/{pair}/candles{_args}",
            params={
                "price": "M",
                "granularity": gran,
                "count": 10,
                "from": str(starting_date),
            },
            request_type="GET"
        )['candles'][0]['time']
        # API returns seconds; convert to milliseconds.
        return int(float(response) * 1000)

    def _format_data(self, all_data: list, historical: bool = True) -> pd.DataFrame:
        """
        Args:
            all_data: list of raw candle dicts as returned by the API
            historical: when True, append a 'next_open' column (shifted open)
        Returns:
            standardized pandas dataframe with open/high/low/close/volume,
            open_time and close_time in milliseconds
        """
        final = {
            'open_time': [],
            'open': [],
            'high': [],
            'low': [],
            'close': [],
            'volume': [],
        }
        for info in all_data:
            final['open_time'].append(int(float(info['time']) * 1000))
            final['open'].append(float(info['mid']['o']))
            final['high'].append(float(info['mid']['h']))
            final['low'].append(float(info['mid']['l']))
            final['close'].append(float(info['mid']['c']))
            final['volume'].append(float(info['volume']))
        df = pd.DataFrame(final)
        # Candle duration inferred from the first two rows; close_time is
        # the last millisecond belonging to the candle.
        interval_ms = df['open_time'].iloc[1] - df['open_time'].iloc[0]
        df['close_time'] = df['open_time'] + interval_ms - 1
        for var in ['open_time', 'close_time']:
            df[var] = df[var].astype(int)
        if historical:
            # next_open is NaN on the last row; dropna() removes it below.
            df['next_open'] = df['open'].shift(-1)
        return df.dropna().drop_duplicates()

    def get_historical_data(self, pair: str, interval: str, start_ts: int, end_ts: int) -> pd.DataFrame:
        """
        Download candles by paginating in chunks of `historical_limit`.
        Args:
            pair: pair to get information from
            interval: granularity of the candle ['1m', '1h', ... '1d']
            start_ts: timestamp in milliseconds of the starting date
            end_ts: timestamp in milliseconds of the end date
        Returns:
            historical data requested in a standardized pandas dataframe
        """
        # init our list
        klines = []
        # convert interval to useful value in milliseconds
        timeframe = interval_to_milliseconds(interval)
        first_valid_ts = self._get_earliest_timestamp(
            pair=pair,
            interval=interval
        )
        start_time = max(start_ts, first_valid_ts)
        idx = 0
        while True:
            print(f'Request # {idx}')
            end_t = start_time + timeframe * self.historical_limit
            end_time = min(end_t, end_ts)
            # fetch the klines from start_time up to the chunk end
            temp_data = self._get_candles(
                pair=pair,
                interval=interval,
                start_time=start_time,
                end_time=end_time
            )['candles']
            # append this loop's data to our output data
            if temp_data:
                klines += temp_data
            if len(temp_data) == 0:
                break
            # increment next call by our timeframe past the last candle seen
            start_time = float(temp_data[-1]['time']) * 1000 + timeframe
            # exit loop if we reached end_ts before reaching <limit> klines
            if end_time and start_time >= end_ts:
                break
            # sleep after every 3rd call to be kind to the API
            idx += 1
            if idx % 3 == 0:
                time.sleep(1)
        data = self._format_data(all_data=klines)
        # Trim to the exact requested window.
        return data[(data['open_time'] >= start_ts) & (data['open_time'] <= end_ts)]

    def update_historical(self, pair: str, interval: str, current_df: pd.DataFrame) -> pd.DataFrame:
        """
        Note:
            It will automatically download the latest data points (excluding the candle not yet finished)
        Args:
            pair: pair to get information from
            interval: granularity of the candle ['1m', '1h', ... '1d']
            current_df: pandas dataframe of the current data
        Returns:
            a concatenated dataframe of the current data and the new data
        """
        end_date_data_ts = current_df['open_time'].max()
        df = self.get_historical_data(
            pair=pair,
            interval=interval,
            start_ts=end_date_data_ts,
            end_ts=int(time.time() * 1000)
        )
        return pd.concat([current_df, df], ignore_index=True).drop_duplicates(subset=['open_time'])
| bryansx/nova-backtest-doc | novalabs/clients/oanda.py | oanda.py | py | 8,721 | python | en | code | 0 | github-code | 90 |
18921013212 | from utilities import *
from greedy import greedy
from gym import Env, spaces
import numpy as np
from numpy.random import choice
from copy import deepcopy
def softmax(x):
return np.exp(-x)/np.sum(np.exp(-x))
class MCTS_DAG():
    """Flat search graph for Monte-Carlo Tree Search.

    Nodes are stored in a dict keyed by the stringified observation, so two
    paths reaching the same observation share a single node (hence "DAG").
    """
    class Node:
        # NOTE(review): these are class-level (shared) defaults; most are
        # rebound per instance by initialiseNode()/update(), but 'marked'
        # never is — confirm it is intentionally unused.
        action_indexes = {}
        UCB = []
        N = []
        Q = []
        P = []
        marked = False
        value = None
        def __init__(self, isFinal=False):
            self.isFinal = isFinal
        def initialiseNode(self, legal_actions, P, value):
            """Set up per-action statistics: visit counts N, mean values Q, priors P."""
            legal_actions = np.array(legal_actions)
            n_actions = np.size(legal_actions)
            # Actions are stored as strings so they can serve as dict keys.
            self.action_indexes = {str(action):i for i,action in enumerate(legal_actions)}
            self.actions = [str(action) for action in legal_actions]
            self.N = np.zeros((n_actions,))
            self.Q = np.zeros((n_actions,))
            self.P = P
            self.value = value
            if len(legal_actions) == 0:
                # No legal action: terminal node with a fixed penalty value.
                self.isFinal = True
                self.value = -10
        def update(self, action, value):
            """Record one backup: bump the visit count and move the running
            mean Q toward `value` (incremental-average update)."""
            action_idx = self.action_indexes[str(action)]
            self.N[action_idx] += 1
            self.Q[action_idx] += (value - self.Q[action_idx])/self.N[action_idx]
        def get_UCB_action(self, c=1):
            """Return the action maximizing Q + c*P*sqrt(log(sum N)/(1+N));
            an unvisited node picks uniformly at random."""
            if np.sum(self.N) == 0:
                return self.actions[np.random.choice(np.array(range(self.Q.size)))]
            self.UCB = self.Q + c*self.P*np.sqrt(np.log(np.sum(self.N))/(1+self.N))
            action_idx = np.argmax(self.UCB)
            return self.actions[action_idx]
    def __init__(self):
        # Maps hash(observation) -> Node.
        self.nodes = {}
    def render(self):
        """Debug print of visit counts and Q-values for every node."""
        print({node:(self.nodes[node].N) for node in self.nodes})
        print({node:(self.nodes[node].Q) for node in self.nodes})
    def add_node(self, observation, legal_actions, P=None, value=None):
        """Create and register a node; priors default to uniform (all ones)."""
        n_actions = np.size(legal_actions)
        node = self.Node()
        if P is None:
            P = np.ones((n_actions,))
        node.initialiseNode(legal_actions, P, value)
        self.nodes[self.hash(observation)] = node
    def get_node(self, observation):
        return self.nodes[self.hash(observation)]
    def get_childrens(self, observation):
        # NOTE(review): Node never defines 'childrens'; calling this would
        # raise AttributeError — appears to be dead code.
        return self.nodes[self.hash(observation)].childrens
    def is_in_nodes(self, observation):
        return self.hash(observation) in self.nodes
    def hash(self, observation):
        # Observations (lists) are unhashable; their string form is the key.
        return str(observation)
class MctsEnv():
    """Monte-Carlo Tree Search driver over a deep copy of a gym environment.

    Each _simulate() walks the tree with UCB selection until a leaf,
    expands it, and _backup() propagates the leaf value to the visited
    (observation, action) pairs.
    """
    def __init__(self, gym_env, model=None, cost=None):
        self.env = deepcopy(gym_env)
        self.env.early_end = True
        self.initial_observation = self.env.reset()
        self.tree = MCTS_DAG()
        # Optional prior: cost(prev_node, action) mapped to priors via softmax.
        self.cost = cost
        # Optional value model with a .predict(batch) -> (policy, value) API.
        self.model = model
        # UCB exploration constant.
        self.c = 3
        self.reward, self.done = 0, False
    def _is_leaf(self, observation):
        """Leaf = episode ended, node never visited, or node has no legal actions."""
        return self.done or np.sum(self.tree.get_node(observation).N)==0 or self.tree.get_node(observation).isFinal
    def _simulate(self):
        """Run one selection/expansion episode.

        Returns:
            (history, observation): visited (observation, action) pairs and
            the final observation reached.
        """
        self.done = False
        self.reward = 0
        history = []
        observation = self.env.reset()
        if not self.tree.is_in_nodes(observation):
            # print("Added at the beginning of simulation : ", observation)
            self.add_node(observation)
        while not self._is_leaf(observation):
            # Action selection using upper confidence bound
            action = int(self.tree.get_node(observation).get_UCB_action(c=self.c))
            history.append(deepcopy((observation, action)))
            # Environement step
            # print("Observation : ", observation)
            # print("Action : ", action, self.tree.get_node(observation).Q)
            observation, reward, self.done, _ = self.env.step(action)
            self.reward += reward
            if not self.tree.is_in_nodes(observation):
                # print("Added at the while in simulation : ", observation)
                self.add_node(observation)
        if not (self.done or self.tree.get_node(observation).isFinal):
            # Expansion: take one more UCB step from the leaf and register
            # the newly reached node.
            # Last action selection using upper confidence bound
            action = int(self.tree.get_node(observation).get_UCB_action())
            history.append(deepcopy((observation, action)))
            # Last environement step
            # print("Last step taken ! {} {}".format(observation, self.tree.get_node(observation).N))
            # print("At :", observation, "Action :", action, self.tree.get_node(observation).Q)
            # print(self.env.legal_actions(observation))
            # self.tree.render()
            observation, reward, self.done, _ = self.env.step(action)
            self.reward += reward
            # Add extanded node
            # print("Added by extanding : ", observation)
            self.add_node(observation)
        return history, observation
    def add_node(self, observation):
        """Register `observation` in the tree (no-op if already present),
        with softmax(cost) priors when a cost function was supplied."""
        if self.tree.is_in_nodes(observation):
            # print("{} is already in nodes !".format(observation))
            return
        P, value = None, None
        if self.cost is not None:
            legal_actions = self.env.legal_actions(observation)
            P = np.ones(len(legal_actions))
            for i, action in enumerate(legal_actions):
                P[i] = self.cost(observation[-1], int(action))
            P = softmax(P)
        # NOTE(review): `legal_actions` is only bound inside the branch
        # above — this line raises NameError when self.cost is None.
        self.tree.add_node(observation, legal_actions, P, value)
    def _backup(self, history, last_observation):
        """Propagate the leaf value (stored node value, model prediction,
        or accumulated episode reward) to every node in `history`."""
        # Get last_observation value
        action_space_size = self.env.action_space.n
        if self.tree.get_node(last_observation).value is not None:
            value = self.tree.get_node(last_observation).value
        else:
            if self.model is None:
                _, value = np.ones(action_space_size), self.reward
            else:
                x = np.array([last_observation])
                _, value = self.model.predict(x)
                value = value[0,0]
                # print("After sim : For leaf : ", x[0], " value is ", value)
        # Update nodes in history
        for observation, action in history:
            node = self.tree.get_node(observation)
            node.update(action, value)
    def build_policy(self, temperature=1):
        """Visit-count policy at the root; temperature 0 (or a NaN-producing
        temperature) collapses to a one-hot on the most-visited action."""
        root = self.tree.get_node(self.initial_observation)
        N = np.array(root.N)
        if np.size(N) == 0: return 1
        if temperature > 0:
            P = np.divide(np.power(N, 1/temperature), np.sum(np.power(N, 1/temperature)))
            if np.any(np.isnan(P)):
                P = np.zeros((N.size,))
                P[np.argmax(N)] = 1
        else:
            P = np.zeros((N.size,))
            P[np.argmax(N)] = 1
        return P
    def run_search(self, n_simulation=1600, temperature=1):
        """Run `n_simulation` simulate/backup rounds and sample an action
        from the resulting root policy.

        Fallback: if the root has no recorded actions, pick the unvisited
        node with the earliest time-window deadline ('tf').
        """
        for sim in range(n_simulation):
            if 100*sim/n_simulation % 10 == 0:
                # print("\nSimulation {}/{}".format(sim, n_simulation))
                pass
            history, observation = self._simulate()
            self._backup(history, observation)
        policy = self.build_policy(temperature=temperature)
        actions = self.tree.get_node(self.initial_observation).actions
        if len(actions) == 0:
            print(self.initial_observation, actions)
            actions = [[action, self.env.X_dict[action]['tf']-self.env.time] for action in range(1, len(self.env.X_dict)+1) if action not in self.initial_observation]
            actions = np.array(actions)
            action = actions[actions[:,1].argsort()][0, 0]
            return action, policy
        action = choice(actions, p=policy)
        # print("Choice", self.initial_observation, actions, action)
        return action, policy
    def resetEnv(self, observation):
        """Restart the search from `observation` (kept as new initial state)."""
        # print("Initial state : ", observation)
        self.env.initial_state = observation
        self.initial_observation = self.env.reset()
        self.reward, self.done = 0, False
    def resetTree(self, observation):
        """Throw away all statistics and start a fresh tree rooted at `observation`."""
        self.tree = MCTS_DAG()
        self.add_node(observation)
class TSPTW_Env(Env):
    """Gym environment for the Traveling Salesman Problem with Time Windows.

    The observation is the partial tour (list of visited node ids, always
    starting at node 1); an action is the id of the next node to visit.
    """
    # Shared across instances: helper computing travel times / window
    # violations, with a global distance-evaluation counter (dist_count).
    potential = Potential()
    early_end = False
    def __init__(self, X_dict):
        self.X_dict = X_dict
        # Tours always start at node 1.
        self.initial_state = [1]
        self.path = deepcopy(self.initial_state)
        self.time = 0
        self.action_space = spaces.Discrete(len(self.X_dict)-1)
        self.observation_space = spaces.Discrete(len(self.X_dict))
    def legal_actions(self, observation):
        """Node ids present in X_dict and not yet visited in `observation`."""
        legal_actions = []
        for action in range(len(self.X_dict)+2):
            if (action not in observation) and (action in self.X_dict):
                legal_actions.append(action)
        return legal_actions
    def isLegal(self, action:int):
        """True if `action` is a known, not-yet-visited node."""
        return (action not in self.path) and (int(action) in self.X_dict)
    def step(self, action:int):
        """Visit node `action`; returns (observation, reward, done, info).

        NOTE(review): when a complete tour with zero window errors is found,
        the tour is stored in the global `solution_found` and the method
        returns None (bare `return`); the __main__ driver relies on the
        resulting TypeError when unpacking — confirm this is intentional.
        """
        real_action = int(action)
        try:
            assert(self.isLegal(real_action))
        except AssertionError:
            raise AssertionError('\n Non-Legal action : {}\n'.format(real_action))
        time_to_go = self.potential.get_time(self.X_dict, self.path[-1], real_action, self.time)
        self.path.append(real_action)
        observation = self.path
        self.time += time_to_go
        reward = 0
        done = len(self.path) == len(self.X_dict)
        if done:
            errors = self.potential.evaluate(self.X_dict, self.path)[2]
            if errors == 0:
                global solution_found
                solution_found = deepcopy(self.path)
                # print('Solution found! : {}'.format(solution_found))
                # print('N_distances: {}'.format(self.potential.dist_count))
                return
            else:
                # Penalize finished tours by their count of window violations.
                reward = -errors
        if not self.potential.in_window(self.time, self.X_dict[real_action]['ti'], self.X_dict[real_action]['tf']):
            # Additional penalty proportional to how far outside the window
            # the arrival time falls.
            reward += -self.potential.distance_to_window(self.time, self.X_dict[real_action]['ti'], self.X_dict[real_action]['tf'])
        # if len(self.legal_actions(observation)) == 0 and not done:
        #     if self.early_end:
        #         done = True
        #         reward += -10
        # print(observation, reward, done)
        return observation, reward, done, {}
    def reset(self):
        """Restart from the initial single-node tour."""
        self.path = deepcopy(self.initial_state)
        self.time = 0
        return self.path
# def mc_backup(history, mcts_tree, G, alpha=0.1, decay=0.3):
# for i, observation in enumerate(history):
# print("MC on ", observation[-1], mcts_tree.get_node(observation).value, G*decay**i)
# if mcts_tree.get_node(observation).value is not None:
# mcts_tree.get_node(observation).value += alpha*(G*decay**i - mcts_tree.get_node(observation).value)
# else:
# mcts_tree.get_node(observation).value = G*decay**i
if __name__ == "__main__":
    # Driver: solve one TSPTW benchmark instance with MCTS, re-rooting the
    # search at each chosen node.
    np.set_printoptions(suppress=True, precision=2)
    nodes = 20
    width = 20
    instance = '001'
    data, official_sol = extract_inst("n{}w{}.{}.txt".format(nodes, width, instance))
    def cost(prev_key, key, data=data):
        """Normalized average of the node's window-open ('ti') and
        window-close ('tf') times; used as an action prior (lower = better
        after the softmax over negated values).

        Note: the `key` loop variables below live in comprehension scope,
        so `data[key]` afterwards still refers to the parameter.
        """
        min_ti, max_ti = np.min([data[key]['ti'] for key in data]), np.max([data[key]['ti'] for key in data])
        min_tf, max_tf = np.min([data[key]['tf'] for key in data]), np.max([data[key]['tf'] for key in data])
        alpha = 1/(max_ti - min_ti)
        beta = 1/(max_tf - min_tf)
        return (alpha*(data[key]['ti'] - min_ti) + beta*(data[key]['tf'] - min_tf))/2
    env = TSPTW_Env(data)
    mcts_env = MctsEnv(env, cost=cost)
    observation = env.reset()
    done = False
    hist = [deepcopy(observation)]
    G = 0
    n_simulation = 10000
    while not done:
        # n_simulation = int(10000/np.log(1+len(observation)))
        mcts_env.resetEnv(observation)
        try:
            action, _ = mcts_env.run_search(n_simulation=n_simulation, temperature=0)
            np.set_printoptions(precision=2, suppress=True)
            print(action, mcts_env.tree.get_node(observation).actions, mcts_env.tree.get_node(observation).UCB, mcts_env.tree.get_node(observation).P)
            observation, reward, done, _ = env.step(action)
            if not done:
                hist.append(deepcopy(observation))
            G += reward
        except TypeError:
            # TSPTW_Env.step returns None once a perfect tour is found;
            # unpacking it raises TypeError, which signals success here.
            done = True
            observation = solution_found
            break
        # mc_backup(hist, mcts_env.tree, G)
        n_simulation = 1
    print('Final solution : {}'.format(observation))
    print('Distance evaluations:', TSPTW_Env.potential.dist_count)
    a, b, err = Potential().evaluate(data, observation)
    print(a, b, err)
    draw_animated_solution(data, [observation, official_sol], save=False)
| MathisFederico/Metaheuristiques | tree_search.py | tree_search.py | py | 12,747 | python | en | code | 0 | github-code | 90 |
import numpy as np
import matplotlib.pyplot as plt

# Per-feature histogram counts and bin edges exported as CSV: one column
# per feature, rows are bins (the bins file has one extra row of edges).
Histo_Bg = np.genfromtxt("Histo_Bg.csv", delimiter=',')
Bino_Bg = np.genfromtxt("Bins_Bg.csv", delimiter=',')
Histo_Sig = np.genfromtxt("Histo_Sig.csv", delimiter=',')
Bino_Sig = np.genfromtxt("Bins_Sig.csv", delimiter=',')

Features = ["pt", "eta", 'dphi', 'energy', 'weight']
fea_num = Histo_Sig.shape[1]

# Overlay background vs signal histograms for each feature and save one
# PNG per feature.
for i in range(fea_num):
    plt.figure(i)
    # Step plots against the left bin edges (drop the final right edge).
    plt.step(Bino_Bg[:-1, i], Histo_Bg[:, i], color='red', label='bg')
    plt.step(Bino_Sig[:-1, i], Histo_Sig[:, i], color='blue', label='sig')
    plt.legend()
    plt.suptitle(Features[i])
    plt.xlim(-1, 1)
    plt.savefig(Features[i] + '.png')
    # Bug fix: `plt.close` was previously referenced without calling it,
    # so figures were never closed and accumulated in memory.
    plt.close()
| kaifulam/Hbb_ML | archive/Var_histo_plots/Test-ML_Hbb_feature_plot_rev3.py | Test-ML_Hbb_feature_plot_rev3.py | py | 940 | python | en | code | 0 | github-code | 90 |
45309913208 | from ast import Pass
import numpy as np
import random as rand
import reversi
import math
import copy
MAX = math.inf
MIN = -math.inf
MAX_SEARCH_DEPTH = 4
SCORE_RATIO_NORMALIZER = 100.0 / 63.0
MOBILITY_NORMALIZER = 100.0 / 13.0
class ReversiBot:
    """Reversi player using depth-limited minimax with alpha-beta pruning.

    The evaluation combines disc-count difference, mobility, corner
    occupancy and X/C-square occupancy, each normalized to [-100, 100].
    """

    def __init__(self, move_num):
        self.move_num = move_num

    def make_move(self, state):
        """Return the (row, col) move chosen for the current player.

        `state` is a ReversiGameState: `state.board` is an 8x8 numpy array
        of 0s (empty), 1s (player 1) and 2s (player 2), `state.turn` is the
        player to move, and state.get_valid_moves() lists legal (row, col)
        tuples.
        """
        print("Start of AI making a move")
        score, move = self.minimax(
            copy.deepcopy(state), 0, True, MIN, MAX, MAX_SEARCH_DEPTH
        )
        print("Best Score Found: ", score)
        print("Best Move Found: ", move)
        return move

    # https://www.geeksforgeeks.org/minimax-algorithm-in-game-theory-set-4-alpha-beta-pruning/
    def minimax(self, state, current_depth, maximizing_player, alpha, beta, max_depth):
        """Alpha-beta minimax; returns (score, best_move).

        best_move is None at evaluated leaves. Bug fix: the minimizing
        branch previously never tracked its best move and returned the
        *last* move iterated instead of the one producing the lowest score.
        """
        # Leaf: depth limit reached or no legal moves to explore.
        if current_depth == max_depth or len(state.get_valid_moves()) == 0:
            return self.heuristic(state), None

        best_move = None
        if maximizing_player:
            best = MIN
            for move in state.get_valid_moves():
                score, _ = self.minimax(
                    copy.deepcopy(state).simulate_move(move),
                    current_depth + 1,
                    False,
                    alpha,
                    beta,
                    max_depth,
                )
                if score > best:
                    best = score
                    best_move = move
                alpha = max(alpha, best)
                if beta <= alpha:
                    # Prune: the minimizer above never allows this line.
                    break
            return best, best_move
        else:
            best = MAX
            for move in state.get_valid_moves():
                score, _ = self.minimax(
                    copy.deepcopy(state).simulate_move(move),
                    current_depth + 1,
                    True,
                    alpha,
                    beta,
                    max_depth,
                )
                if score < best:
                    best = score
                    best_move = move
                beta = min(beta, best)
                if beta <= alpha:
                    break
            return best, best_move

    def heuristic(self, state):
        """Weighted sum of the four sub-heuristics."""
        mobility = self.get_mobility(state)
        score = self.get_score_difference(state)
        corner_weight = self.get_corner_heuristic(state)
        x_and_c_weight = self.get_x_and_c_heuristic(state)
        return (
            (1.0) * score
            + (0.375) * mobility
            + (2.0) * corner_weight
            + (0.5) * x_and_c_weight
        )

    def get_mobility(self, state):
        """Relative mobility: 100*(ours - theirs)/(ours + theirs) move counts."""
        number_valid_moves = len(state.get_valid_moves())
        number_valid_enemy_moves = len(state.get_valid_enemy_moves())
        if number_valid_moves + number_valid_enemy_moves == 0:
            return 0
        return (
            100
            * (number_valid_moves - number_valid_enemy_moves)
            / (number_valid_moves + number_valid_enemy_moves)
        )

    def get_score_difference(self, state):
        """Relative disc count; +inf when the opponent has no discs left."""
        player = state.turn
        enemy = state.enemy_turn
        our_score = np.count_nonzero(state.board == player)
        enemy_score = np.count_nonzero(state.board == enemy)
        if enemy_score == 0:  # opponent wiped out; avoid dividing by zero
            return math.inf
        return 100 * (our_score - enemy_score) / (our_score + enemy_score)

    def get_corner_heuristic(self, state):
        """Relative corner occupancy: corners are stable and highly valuable."""
        corner_squares = {(0, 0), (0, 7), (7, 0), (7, 7)}
        board = state.board
        player = state.turn
        enemy = state.enemy_turn
        number_player_corners = 0
        number_enemy_corners = 0
        for square in corner_squares:
            if board[square[0]][square[1]] == player:
                number_player_corners += 1
            elif board[square[0]][square[1]] == enemy:
                number_enemy_corners += 1
        if number_player_corners + number_enemy_corners == 0:
            return 0
        return (
            100
            * (number_player_corners - number_enemy_corners)
            / (number_player_corners + number_enemy_corners)
        )

    def get_x_and_c_heuristic(self, state):
        """Relative occupancy of the X- and C-squares adjacent to corners.

        Sign is inverted relative to the corner heuristic: occupying these
        squares is scored as bad for the occupier (enemy minus player).
        """
        x_and_c_squares = {
            (1, 0),
            (0, 1),
            (1, 1),
            (6, 7),
            (7, 6),
            (6, 6),
            (0, 6),
            (7, 1),
            (6, 1),
            (0, 6),
            (1, 6),
            (1, 7),
        }
        board = state.board
        player = state.turn
        enemy = state.enemy_turn
        number_player_x_and_c = 0
        number_enemy_x_and_c = 0
        for square in x_and_c_squares:
            if board[square[0]][square[1]] == player:
                number_player_x_and_c += 1
            elif board[square[0]][square[1]] == enemy:
                number_enemy_x_and_c += 1
        if number_player_x_and_c + number_enemy_x_and_c == 0:
            return 0
        return (
            100
            * (number_enemy_x_and_c - number_player_x_and_c)
            / (number_player_x_and_c + number_enemy_x_and_c)
        )
| mrchristensen/ReversiAI | ReversiBot_Python3/reversi_bot.py | reversi_bot.py | py | 7,829 | python | en | code | 0 | github-code | 90 |
26163639225 | from math import sqrt
import sys
def getfactor(n):
    """Return the set of distinct prime factors of n (empty set for n <= 1)."""
    factors = set()
    # Strip out all factors of two first so the main loop can skip evens.
    if n % 2 == 0:
        factors.add(2)
        while n % 2 == 0:
            n //= 2
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            factors.add(divisor)
            while n % divisor == 0:
                n //= divisor
        divisor += 2
    # Whatever survives trial division above its square root is prime.
    if n > 2:
        factors.add(n)
    return factors
# Interactive solver: the judge hides a prime p and answers each query
# "1 x" with (x*x) % p; the final answer is submitted as "2 p".
rem=set()
s=set()
val=0
x=31627
for _ in range(int(input())):
    x=31627
    s.clear()
    # First query: since m = x^2 mod p, the hidden prime p divides x^2 - m,
    # so p must be among the prime factors of x^2 - m.
    print(1, x)
    sys.stdout.flush()
    m=int(input())
    s=getfactor((x*x)-m)
    l=list(s)
    l.sort()
    last=l[-1]
    #for i in s:
    #    print(i, end=' ')
    # Search (from the largest candidate upward) for a second query value
    # whose squared remainders are pairwise distinct over all candidates,
    # so the judge's next answer identifies p uniquely.
    x=last
    f=0
    for i in range(x, x+11, 1):
        rem.clear()
        for k in s:
            val=(i*i)%k
            if val in s:
                # NOTE(review): this tests membership in the candidate set
                # `s`, not the remainder set `rem` — confirm it is not meant
                # to be `val in rem` (duplicate remainders are still caught
                # by the len(s) == len(rem) check below).
                f=1
                break
            rem.add(val)
        if f==1:
            f=0
            continue
        if len(s)==len(rem):
            # All remainders distinct: i is a usable second query value.
            x=i
            f=1
            break
    print(1, x)
    sys.stdout.flush()
    m=int(input())
    # The candidate whose squared remainder matches the answer is p.
    ans=0
    for i in s:
        if((x*x)%i==m):
            ans=i
    print(2, ans)
    sys.stdout.flush()
    tp=input()
    if tp=="No":
        break
| smitgajjar/Competitive-Programming | codechef/GUESSPRM.py | GUESSPRM.py | py | 1,090 | python | en | code | 0 | github-code | 90 |
33062760156 | import re
import pytest
import sly
import mckit.parser.common as cmn
from mckit.parser.common.Lexer import Lexer as LexerBase, LexError
# noinspection PyUnboundLocalVariable,PyPep8Naming,PyUnresolvedReferences
class DerivedLexer(LexerBase):
    """Lexer subclass exercising LexerBase with sly: a plain regex rule
    (FRACTION), two decorated token methods and a literal ZERO rule.

    The `_` decorator and the bare token names below are injected by sly's
    metaclass and only look undefined to static analysis.
    """
    tokens = {FRACTION, FLOAT, INTEGER, ZERO}
    # Digits with a mandatory fractional part followed by one letter, e.g. "3.14c".
    FRACTION = r"\d+(?:\.\d+[a-z])"
    @_(cmn.FLOAT)
    def FLOAT(self, token):
        return self.on_float(token)
    @_(cmn.INTEGER)
    def INTEGER(self, token):
        # NOTE(review): delegates to on_float rather than on_integer; the
        # tests still pass because 1 == 1.0 — confirm whether intended.
        return self.on_float(token)
    ZERO = r'0'
@pytest.mark.parametrize("text, expected_types, expected_values", [
    ("1 0 3.14", ['INTEGER', 'ZERO', 'FLOAT'], [1, 0, 3.14]),
    ("3.14 3.14c", ['FLOAT', 'FRACTION'], [3.14, '3.14c']),
])
def test_derived_lexer(text, expected_types, expected_values):
    """Tokens from DerivedLexer carry the expected types and values."""
    tokens = list(DerivedLexer().tokenize(text))
    assert [tok.type for tok in tokens] == expected_types
    assert [tok.value for tok in tokens] == expected_values
@pytest.mark.parametrize("text, msg_contains", [
    ("1~ 0 3.14", "column 2"),
    ("\n1 0 3.14 ~", "at line 2, column 10"),
    ("\n1 0 3.14 ~", r"\s{9}\^"),
    ("\n1 0 3.14 0~", r"\s{10}\^"),
    ("~", "column 1\n~\n\\^"),
])
def test_bad_path(text, msg_contains):
    """Illegal characters raise LexError with a message locating the error."""
    lexer = DerivedLexer()
    with pytest.raises(LexError, match=msg_contains):
        list(lexer.tokenize(text))
# noinspection PyUnboundLocalVariable,PyPep8Naming,PyUnresolvedReferences
class MyLexer(LexerBase):
    """Lexer exercising LexerBase together with sly literals, ignored
    whitespace and custom re flags.

    Token names in `tokens` and the `_` decorator are injected by sly's
    metaclass and only look undefined to static analysis.
    """
    literals = {':', '(', ')'}
    ignore = ' \t'
    reflags = re.IGNORECASE | re.MULTILINE
    tokens = {NAME, FLOAT, INTEGER, ZERO}
    # Identifier optionally prefixed by a single digit (e.g. "1B").
    NAME = r'\d?[A-Za-z-]+'
    @_(cmn.FLOAT)
    def FLOAT(self, token):
        return self.on_float(token)
    @_(cmn.INTEGER)
    def INTEGER(self, token):
        return self.on_integer(token)
    ZERO = r'0'
@pytest.mark.parametrize("text, expected_types, expected_values", [
    ("AAA 1 0 3.14", ['NAME', 'INTEGER', 'ZERO', 'FLOAT'], ['AAA', 1, 0, 3.14]),
    ("1B 1 0 3.14", ['NAME', 'INTEGER', 'ZERO', 'FLOAT'], ['1B', 1, 0, 3.14]),
])
def test_good_path(text, expected_types, expected_values):
    """MyLexer tokenizes names and numbers into the expected streams."""
    tokens = list(MyLexer().tokenize(text))
    assert [tok.type for tok in tokens] == expected_types
    assert [tok.value for tok in tokens] == expected_values
# noinspection PyUnresolvedReferences
class MyParser(sly.Parser):
    """Grammar: expression := NAME number parameters | NAME ZERO.

    sly dispatches on the rule string passed to `_`, so the repeated method
    names below are the intended way to declare alternative productions.
    """
    tokens = MyLexer.tokens
    @_("NAME number parameters")
    def expression(self, p):
        return p.NAME, p.number, p.parameters
    @_("NAME ZERO")
    def expression(self, p):
        # "NAME 0" form: number is 0 and there are no parameters.
        return p.NAME, 0, None
    @_("INTEGER")
    def number(self, p):
        return p.INTEGER
    @_("parameters FLOAT")
    def parameters(self, p):
        # Left-recursive accumulation of the float list.
        res = p.parameters
        res.append(p.FLOAT)
        return res
    @_("FLOAT")
    def parameters(self, p):
        return [p.FLOAT]
@pytest.mark.parametrize("text, expected", [
    ("AAA 1 1.2 3.4", ("AAA", 1, [1.2, 3.4])),
    ("A-Z 0", ("A-Z", 0, None)),
])
def test_parser_with_derived_lexer(text, expected):
    """MyParser consumes MyLexer's token stream into the expected tuple."""
    assert MyParser().parse(MyLexer().tokenize(text)) == expected
if __name__ == '__main__':
    # Allow running this test module directly, without the pytest CLI.
    pytest.main()
| rorni/mckit | tests/parser/common/test_common_lexer.py | test_common_lexer.py | py | 3,284 | python | en | code | 3 | github-code | 90 |
27228324932 | # File: Work.py
# Description: This program
# Student Name: Michel Gonzalez
# Student UT EID: Mag9989
# Course Name: CS 313E
# Unique Number: 86610
# Date Created: 06/30/2021
# Date Last Modified: 06/30/2021
import sys
import time
def sum_series (v, k):
    """Sum of the integer series v + v//k + v//k**2 + ...

    v is the minimum lines of code and k the productivity factor; terms
    are accumulated until integer division drives them to zero.
    """
    total = 0
    divisor = 1
    while v // divisor > 0:
        total += v // divisor
        divisor *= k
    return total
# Input: n an integer representing the total number of lines of code
#        k an integer representing the productivity factor
# Output: returns v the minimum lines of code to write using linear search
def linear_search (n, k):
    """Return the minimum v with sum_series(v, k) >= n, scanning v upward.

    Improvement: the previous version materialized a list of all n
    candidate integers before scanning it; iterating the range directly
    gives the same answers in O(1) memory.
    """
    # When n <= k the series collapses to v itself, so the answer is n.
    if n <= k:
        return n
    # sum_series is nondecreasing in v, so the first v reaching n is minimal.
    for v in range(1, n + 1):
        if sum_series(v, k) >= n:
            return v
# Input: n an integer representing the total number of lines of code
#        k an integer representing the productivity factor
# Output: returns v the minimum lines of code to write using binary search
def binary_search (n, k):
    """Return the minimum v with sum_series(v, k) >= n via binary search.

    Improvements: searches the integer range [1, n] directly instead of
    materializing an n-element list (O(1) memory), and handles the v == 1
    boundary explicitly instead of relying on lst[mid - 1] — which, at
    mid == 0, wrapped around to the last list element.
    """
    # When n <= k the series collapses to v itself, so the answer is n.
    if n <= k:
        return n
    low, high = 1, n
    while low <= high:
        mid = (low + high) // 2
        if sum_series(mid, k) < n:
            low = mid + 1
        elif sum_series(mid - 1, k) < n:
            # mid reaches n but mid - 1 does not: mid is the minimum
            # (sum_series(0, k) == 0, so mid == 1 is handled correctly).
            return mid
        else:
            high = mid - 1
def test_cases():
    """Exercise sum_series, linear_search and binary_search on known
    inputs; returns a success message when every assertion holds."""
    series_cases = [((1, 2), 1), ((50, 5), 62), ((200, 100), 202), ((5, 50), 5)]
    for (v, k), expected in series_cases:
        assert sum_series(v, k) == expected
    linear_cases = [((2, 5), 2), ((30, 5), 25), ((300, 2), 152), ((500, 500), 500)]
    for (n, k), expected in linear_cases:
        assert linear_search(n, k) == expected
    binary_cases = [((5, 2), 4), ((30, 25), 29), ((300, 2), 152), ((1, 2), 1)]
    for (n, k), expected in binary_cases:
        assert binary_search(n, k) == expected
    # Both search strategies must agree on the same input.
    assert linear_search(200, 10) == binary_search(200, 10)
    return "all test cases passed"
def main():
    """Read test cases from stdin (first line: case count; then one
    "n k" pair per line) and time binary vs linear search on each."""
    # read number of cases
    line = sys.stdin.readline()
    line = line.strip()
    num_cases = int (line)
    for i in range (num_cases):
        line = sys.stdin.readline()
        line = line.strip()
        inp = line.split()
        n = int(inp[0])
        k = int(inp[1])
        # Time the binary search first...
        start = time.time()
        print("Binary Search: " + str(binary_search(n, k)))
        finish = time.time()
        print("Time: " + str(finish - start))
        print()
        # ...then the linear search on the same input, for comparison.
        start = time.time()
        print("Linear Search: " + str(linear_search(n, k)))
        finish = time.time()
        print("Time: " + str(finish - start))
        print()
        print()
if __name__ == "__main__":
    # Run the stdin-driven benchmark only when executed as a script.
    main()
| Michel-A-Gonzalez/Coursework-Python | Data Structures and Algorithms/Python Code/Work.py | Work.py | py | 4,743 | python | en | code | 0 | github-code | 90 |
import setuptools

# The long description shown on PyPI is taken verbatim from the README.
with open("README.md", "r", encoding='utf-8') as fh:
    long_description = fh.read()

setuptools.setup(
    name="xiaoxiao_lhy",
    version="0.0.1",
    author="Lhy",
    author_email="lhuaye@163.com",
    description="IC hardware design tools",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/belang/xiaoxiao",
    packages=setuptools.find_packages(),
    classifiers=[
        # Trove classifiers must match the official PyPI list exactly;
        # unknown strings are rejected at upload time.
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",  # was "License :: BSD-3" (not a valid classifier)
        "Operating System :: OS Independent",      # was misspelled "Operation System"
    ],
    python_requires='>=3.6',
)
| belang/blackbean | xiaoxiao/setup.py | setup.py | py | 693 | python | en | code | 0 | github-code | 90 |
19734899610 | import numpy as np
import argparse
import math
import os
import sys
def find_nearest(array, value):
    """Return the element of *array* closest to *value*, plus its index.

    *array* may be any array-like; the comparison uses absolute distance.
    """
    arr = np.asarray(array)
    distances = np.abs(arr - value)
    nearest_idx = distances.argmin()
    return arr[nearest_idx], nearest_idx
def cfar_din_generator(N):
    """Build a normalized N-point magnitude spectrum for CFAR testing.

    The signal holds two complex tones in white noise plus two extra
    band-limited clutter zones; the returned magnitude is scaled so its
    peak equals 1.0.  NOTE: output is random — no seed is set here.
    """
    # Strong tone at 83 % of the band, 3 dB SNR, plus unit white noise.
    tone_bin = N // 100 * 83
    snr_db = 3
    sig = 10**(snr_db/20) * np.exp(2*1j*np.pi*tone_bin*np.arange(N)/N) + np.random.randn(N)

    # Weak tone at 6 % of the band, -6 dB SNR, with its own noise draw.
    tone_bin = N // 100 * 6
    snr_db = -6
    sig += 10**(snr_db/20) * np.exp(2*1j*np.pi*tone_bin*np.arange(N)/N) + np.random.randn(N)
    spectrum = np.fft.fft(sig) / N

    # 2nd clutter zone: 20 dB noise kept only in the top 30 % of bins.
    clutter = 10**(20/20) * np.random.randn(N)
    clutter_spec = np.fft.fft(clutter) / N
    clutter_spec[:(N // 100) * 70] = 0
    spectrum += clutter_spec

    # 3rd clutter zone: 30 dB noise kept only between 30 % and 45 %.
    clutter = 10**(30/20) * np.random.randn(N)
    clutter_spec = np.fft.fft(clutter) / N
    clutter_spec[:(N // 100) * 30] = 0
    clutter_spec[(N // 100) * 45:] = 0
    spectrum += clutter_spec

    magnitude = np.abs(spectrum)
    # Normalize so downstream fixed-point scaling can assume a unit peak.
    magnitude /= np.max(magnitude)
    return magnitude
# Command-line interface.  The three parameters are optional positionals
# (nargs='?'): when omitted they stay None and main() falls back to its
# built-in defaults (32 points, 4-cell window, Pfa = 0.1).  With the
# original required positionals those None-checks were dead code.
parser = argparse.ArgumentParser(description='Input signal size and CFAR parameters')
parser.add_argument('NPOINTS', nargs='?', help='Number of points', default=None, type=int)
parser.add_argument('REFWIND', nargs='?', help='Number of cells in CFAR window', default=None, type=int)
parser.add_argument('PFA', nargs='?', help='Probability of false alarm', default=None, type=float)
args = parser.parse_args()
def main():
    """Generate CFAR stimulus files and the matching HLS parameter header.

    Reads NPOINTS / REFWIND / PFA from the parsed CLI args (falling back
    to 32 / 4 / 0.1 when omitted), synthesizes the test spectrum,
    computes the CFAR scaling factor for the requested false-alarm
    probability, and writes the simulation files plus ``parameters.h``.
    Must be run from the 'radar-hls-python' directory.
    """
    print('<< Generating Input Data for CFAR')
    print('<< aleksei.rostov@protonmail.com')

    # CLI fallbacks (the positionals are optional).
    NPOINTS = 32 if args.NPOINTS is None else args.NPOINTS
    REFWIND = 4 if args.REFWIND is None else args.REFWIND
    PFA = 1e-1 if args.PFA is None else args.PFA

    # Relative paths below only resolve from the repository root.
    curr_path = os.getcwd()
    if curr_path[-16:] != 'radar-hls-python':
        print("<< Error! Please change directory!")
        sys.exit(1)  # nonzero status: this is an error, not a normal exit

    if not os.path.exists(curr_path + '/sim_files'):
        os.makedirs(curr_path + '/sim_files')

    sF = cfar_din_generator(NPOINTS)

    # The cell under test sits at 75 % of the sliding window.
    KTH_CELL = (REFWIND * 75) // 100

    # Scan candidate scaling factors alpha = 1..REFWIND and pick the one
    # whose theoretical Pfa lands nearest the requested value.
    # math.factorial replaces np.math.factorial: the np.math alias was
    # never public API and was removed in NumPy 2.0.
    N = REFWIND
    dPfa_0 = np.zeros(REFWIND)
    for k in range(REFWIND):
        alpha = k + 1
        dPfa_0[k] = (math.factorial(N) * math.factorial(alpha + N - KTH_CELL)) / (
            math.factorial(N - KTH_CELL) * math.factorial(alpha + N))
    # NOTE(review): SCALING is the *index* into dPfa_0, while the table was
    # built with alpha = index + 1 — possible off-by-one; confirm intent.
    val, SCALING = find_nearest(dPfa_0, PFA)

    # Bundle the run parameters for the Python-side consumers.
    py_param = np.zeros(5)
    py_param[0] = NPOINTS
    py_param[1] = REFWIND
    py_param[2] = KTH_CELL
    py_param[3] = SCALING
    py_param[4] = PFA

    # Fixed-point threshold coefficient with 10 fractional bits.
    T_u16 = np.round(SCALING * 2**10)

    np.savetxt(curr_path + '/sim_files/cfarIn_u16.txt', np.round(2**16 * sF), fmt='%d')
    np.savetxt(curr_path + '/sim_files/cfarIn_float.txt', sF, fmt='%f')
    np.savetxt(curr_path + '/sim_files/cfarPy_param.txt', py_param, fmt='%f')

    # Emit the matching C header for the HLS build.
    with open(curr_path + '/hls_src/cfar/parameters.h', 'w') as fp:
        fp.write("\n")
        fp.write("#define NPOINTS ")
        fp.write(str(NPOINTS))
        fp.write("\n#define REFWIND ")
        fp.write(str(REFWIND))
        fp.write("\n#define KTH_CELL ")
        fp.write(str(KTH_CELL))
        fp.write("\n#define Z_COEF ")
        fp.write(str(int(T_u16)))
        fp.write("\n")
        fp.write("\n")
        fp.write("\n")

    print('<< Successfully Done')
| sumitdarak/radar-hls-python | py_scripts/cfar_generator.py | cfar_generator.py | py | 3,627 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.