seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
41104474805 | import requests
from bs4 import BeautifulSoup
from flask import Flask, render_template, jsonify, request
app = Flask(__name__)

from pymongo import MongoClient  # import pymongo (the package must be installed first)
client = MongoClient('localhost', 27017)  # MongoDB listens on port 27017
db = client.dbsparta  # create/use a database named 'dbsparta'


## Serves the HTML page
@app.route('/')
def home():
    # NOTE(review): template filename contains spaces — confirm the file exists under templates/
    return render_template('project_fake review_200226_rev2.html')
# Read the URL, fetch the HTML,
# pretending to be a desktop browser so Naver serves the normal page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}

# Sentiment word lists used to colour review text (blue/red spans below).
# Fixed: the originals were missing commas at the line breaks, which silently
# concatenated adjacent literals ('쪽팔린다' '거짓' -> '쪽팔린다거짓' and
# '능청' '대박' -> '능청대박'), so neither word ever matched.
# NOTE(review): the contents look swapped — `positive_word` holds mostly
# negative sentiment words and vice versa; confirm intent before renaming.
positive_word = ['최고다', '인정', '뻔한', '억지연기', '억지', '빈약', '별로', '돈아까운', '떨어진', '아깝다', '엉망', '속았다', '쪽팔린다',
                 '거짓', '후회', '피식', '알바', '노잼', '억지', '아쉽', '욕', '엉망진창', '속음', '뻔한', '짜증', '삼류영화', '재미없', '뻔한', '지겹다', '졸려', '별로', '아깝다', '아까운', '억지스러', '어처구니', '상술', '지루', '엉성', '재미없다', '딱히']
negative_word = ['손색', '엄청', '유쾌', '상쾌', '통쾌', '잘봤습니다', '오졌다', 'ㅋㅋ', '레알', '존잼', '완전', '스트레스', '웰메이드', '케미', '다한,영화', '믿고보는', '빵빵', '오진다', '두마리', '두마리', '토끼', '능청',
                 '대박', '원탑', '킬링타임', '스크린', '오져', 'ㅎㅎ', '믿고', '정통', '연기', '재밌', '감동', '다했', '꿀잼', '경지', '매력', '코믹', '배꼽', '몰입', '강추', '존잼']
## Receives a keyword, scrapes Naver movie data, stores reviews in the DB,
## and returns them with sentiment words highlighted for the front end.
@app.route('/keyword', methods=['POST'])
def find_moviecode():
    ## the search keyword posted by the client form
    keyword_receive = request.form['keyword_give']
    # NOTE(review): '§ion' looks like '&section' mangled by an HTML-entity decode — confirm the URL.
    website1 = 'https://movie.naver.com/movie/search/result.nhn?query={}§ion=all&ie=utf8'.format(keyword_receive)
    data1 = requests.get(website1, headers=headers)
    # Parse the HTML with BeautifulSoup so it is easy to query
    soup1 = BeautifulSoup(data1.text, 'html.parser')
    # Use a CSS selector to grab the first search result link and pull the code out of its href
    moviecode = soup1.select_one('#old_content > ul.search_list_1 > li > dl > dt > a')['href'].split('code=')[1]
    ## 1. movie code obtained from the keyword
    website3 = 'https://movie.naver.com/movie/bi/mi/basic.nhn?code={}'.format(moviecode)
    data3 = requests.get(website3, headers=headers)
    # Parse the movie's detail page
    soup3 = BeautifulSoup(data3.text, 'html.parser')
    # Grab the poster image URL
    # print(soup3)
    picture = soup3.select_one('#content > div.article > div.mv_info_area > div.poster > a > img')['src']
    ## 2. poster image for this movie code
    print(picture)
    reviews = list(db.fakereview.find({'code': moviecode}, {'_id': 0}))
    ## 2-1 if reviews for this movie code are already cached, skip scraping and reuse them.
    if len(reviews) == 0:
        iterations = 11  ## number of review pages to scrape (pages 1..10)
        rank = 1
        ## 2-2 no cached reviews: scrape the review pages and store the results.
        for page_num in range(1, iterations):
            print(page_num)
            website2 = 'http://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?code={}&type=after&isActualPointWriteExecute=false&isMileageSubscriptionAlready=false&isMileageSubscriptionReject=false&page={}'.format(
                moviecode, page_num)
            data2 = requests.get(website2, headers=headers)
            # Parse the review-list page
            soup2 = BeautifulSoup(data2.text, 'html.parser')
            # Each <li> is one review entry
            contents = soup2.select('body > div > div > div.score_result > ul > li')
            # Iterate over the review entries
            for content in contents:
                # Only process entries that actually contain a star score
                li_1 = content.select_one('div.star_score')
                if li_1 is not None:
                    score = li_1.text.strip()
                    review = content.select_one(' li > div.score_reple > p').text.strip()
                    # NOTE(review): str.strip('관람객') removes any of those *characters*
                    # from both ends, not the literal prefix — confirm this is intended.
                    review = review.strip('관람객').strip()
                    print(rank, score, review, moviecode)
                    doc = {
                        'rank': rank,
                        'score': score,
                        'review': review,
                        'code': moviecode
                    }
                    db.fakereview.insert_one(doc)
                    rank += 1
        # Re-read the freshly inserted reviews so the response path below is uniform.
        reviews = list(db.fakereview.find({'code': moviecode}, {'_id': 0}))
    # Wrap matched sentiment words in coloured <span> tags for the front end.
    for idx, r in enumerate(reviews):
        review = reviews[idx]['review']
        for word in positive_word:
            if word in review:
                reviews[idx]['review'] = reviews[idx]['review'].replace(word, '<span class="blue">{}</span>'.format(word))
        for word in negative_word:
            if word in review:
                reviews[idx]['review'] = reviews[idx]['review'].replace(word, '<span class="red">{}</span>'.format(word))
    return jsonify({'result': 'success', 'reviews': reviews, 'picture': picture})
if __name__ == '__main__':
    # Listen on all interfaces; debug=True enables the dev reloader (dev only).
    app.run('0.0.0.0', port=5000, debug=True)
| Woogil90/MY_PROJECT | My_project_rev2.py | My_project_rev2.py | py | 5,588 | python | ko | code | 0 | github-code | 90 |
26685569164 | import base64
from django.core.files.base import ContentFile
from django.db import transaction
from django.shortcuts import get_object_or_404
from djoser.serializers import UserCreateSerializer, UserSerializer
from recipes.models import (Favorite, Ingredient, IngredientRecipe, Recipe,
ShoppingCart, Tag)
from rest_framework import serializers, status
from users.models import Subscription, User
class Base64ImageField(serializers.ImageField):
    """Image field that accepts a base64-encoded data URI and decodes it to a file."""

    def to_internal_value(self, data):
        # Accept "data:image/<ext>;base64,<payload>" strings; anything else
        # falls through to the default ImageField handling.
        if isinstance(data, str) and data.startswith('data:image'):
            format, imgstr = data.split(';base64,')
            ext = format.split('/')[-1]  # e.g. "png" from "data:image/png"
            data = ContentFile(base64.b64decode(imgstr), name='temp.' + ext)
        return super().to_internal_value(data)
class UserSignUpSerializer(UserCreateSerializer):
    """Handles user registration (POST api/users/)."""

    class Meta:
        model = User
        fields = ('email', 'username', 'first_name',
                  'last_name', 'password', 'id')
class UserSerializer(UserSerializer):
    """Serializes user data for the endpoints:
    GET api/users/
    GET api/users/{id}/
    GET api/users/me/
    """

    # Whether the requesting user follows this user.
    is_subscribed = serializers.SerializerMethodField()

    def get_is_subscribed(self, obj):
        request = self.context.get('request')
        # Anonymous users are never subscribed; short-circuits before the query.
        return (request.user.is_authenticated
                and obj.following.filter(user=request.user,
                                         author=obj).exists())

    class Meta:
        model = User
        fields = ('email', 'id', 'username', 'first_name',
                  'last_name', 'is_subscribed')
class TagSerialiser(serializers.ModelSerializer):
    """Serializes the Tag model for the endpoints:
    GET api/tags/, GET api/tags/{id}/."""

    class Meta:
        model = Tag
        fields = ('id', 'name', 'color', 'slug')
class IngredientSerializer(serializers.ModelSerializer):
    """Serializes the Ingredient model for the endpoints:
    GET api/ingredients/, GET api/ingredients/{id}/."""

    class Meta:
        model = Ingredient
        fields = ('id', 'name', 'measurement_unit')
class IngredientReadSerializer(serializers.ModelSerializer):
    """Read-only serializer for one ingredient row inside a recipe."""

    # id/name/measurement_unit come from the related Ingredient; `amount`
    # lives on the through model (IngredientRecipe) itself.
    id = serializers.ReadOnlyField(source='ingredient.id')
    name = serializers.ReadOnlyField(source='ingredient.name')
    amount = serializers.ReadOnlyField()
    measurement_unit = serializers.ReadOnlyField(
        source='ingredient.measurement_unit')

    class Meta:
        model = IngredientRecipe
        fields = ('id', 'name', 'amount', 'measurement_unit')
class IngredientWriteSerializer(serializers.ModelSerializer):
    """Write serializer for recipe ingredients: ingredient PK plus amount."""

    id = serializers.PrimaryKeyRelatedField(queryset=Ingredient.objects.all())
    amount = serializers.IntegerField()

    class Meta:
        model = IngredientRecipe
        fields = ('id', 'amount')
class RecipeReadSerializer(serializers.ModelSerializer):
    """Read-only serializer for recipes."""

    tags = TagSerialiser(read_only=True, many=True)
    author = UserSerializer(read_only=True)
    ingredients = IngredientReadSerializer(read_only=True, many=True,
                                           source='ingredient_recipe')
    # Per-request flags computed for the current user.
    is_favorited = serializers.SerializerMethodField()
    is_in_shopping_cart = serializers.SerializerMethodField()
    image = Base64ImageField(required=False)

    def get_is_favorited(self, obj):
        """True if the requesting user has favourited this recipe."""
        user = self.context.get('request').user
        return (not user.is_anonymous
                and obj.favorites.filter(user=user, recipe=obj).exists())

    def get_is_in_shopping_cart(self, obj):
        """True if this recipe is in the requesting user's shopping cart."""
        user = self.context.get('request').user
        return (not user.is_anonymous
                and obj.shopping_cart.filter(user=user, recipe=obj).exists())

    class Meta:
        model = Recipe
        fields = ('id', 'tags', 'author', 'ingredients',
                  'is_favorited', 'is_in_shopping_cart',
                  'image', 'name', 'text', 'cooking_time')
class RecipeWriteSerializer(serializers.ModelSerializer):
    """Base serializer for writing (creating/updating) recipes."""

    tags = serializers.PrimaryKeyRelatedField(queryset=Tag.objects.all(),
                                              many=True)
    ingredients = IngredientWriteSerializer(many=True)
    image = Base64ImageField()

    def validate_tags(self, value):
        """Require at least one tag and forbid duplicates."""
        if not value:
            raise serializers.ValidationError(
                # Fixed: the error dict was keyed 'ingredients' for a tags error.
                {'tags': 'Выбери хотя бы один тег!'})
        tag_list = []
        for tag in value:
            if tag in tag_list:
                raise serializers.ValidationError(
                    {'tags': 'Теги не должны повторяться!'})
            tag_list.append(tag)
        return value

    def validate_ingredients(self, value):
        """Require at least one ingredient with present keys, positive
        amounts and no duplicates."""
        if not value:
            raise serializers.ValidationError(
                {'ingredients': 'Нужно выбрать ингредиент!'})
        ingredients_list = []
        for item in value:  # renamed from `dict`, which shadowed the builtin
            # Fixed: the original checked `if not ('id' and 'amount')`, which
            # is always False (truthy literals); verify the keys for real.
            if 'id' not in item or 'amount' not in item:
                raise KeyError('Отсутствует обязательное поле')
            # NOTE(review): looking up by name= with the PK-related value
            # looks suspicious — confirm the intended lookup field.
            ingredient = get_object_or_404(Ingredient, name=item['id'])
            if ingredient in ingredients_list:
                raise serializers.ValidationError(
                    {'ingredients': 'Ингридиенты повторяются!'})
            if item['amount'] <= 0:
                raise serializers.ValidationError(
                    {'amount': 'Количество должно быть больше 0!'})
            ingredients_list.append(ingredient)
        return value

    def _create_ingredient(self, ingredients, recipe):
        """Bulk-create the through-model rows linking ingredients to a recipe."""
        IngredientRecipe.objects.bulk_create([
            IngredientRecipe(
                recipe=recipe,
                ingredient=ingredient['id'],
                amount=ingredient['amount']
            ) for ingredient in ingredients])

    def to_representation(self, instance):
        """Render write responses through the read serializer."""
        request = self.context.get('request')
        return RecipeReadSerializer(instance,
                                    context={'request': request}).data

    class Meta:
        model = Recipe
        fields = ('tags', 'ingredients', 'image',
                  'name', 'text', 'cooking_time')
class RecipeCreateSerializer(RecipeWriteSerializer):
    """Serializer for creating recipes."""

    def validate(self, data):
        tags = data['tags']
        ingredients = data['ingredients']
        name = data['name']
        text = data['text']
        # NOTE(review): this only fires when BOTH tags and ingredients are
        # empty; the per-field validators cover the individual cases.
        if not (tags or ingredients):
            raise serializers.ValidationError(
                'Выберите хотя бы одно значение!')
        # Reject exact duplicates by name + description.
        if Recipe.objects.filter(name=name,
                                 text=text).exists():
            raise serializers.ValidationError(
                'Такой рецепт уже есть, измените название или описание!')
        return data

    @transaction.atomic
    def create(self, validated_data):
        """Create the recipe plus its tag and ingredient links atomically."""
        tags = validated_data.pop('tags')
        ingredients = validated_data.pop('ingredients')
        recipe = Recipe.objects.create(**validated_data)
        recipe.tags.set(tags)
        self._create_ingredient(ingredients, recipe)
        return recipe
class RecipeUpdateSerializer(RecipeWriteSerializer):
    """Serializer for updating recipes."""

    @transaction.atomic
    def update(self, instance, validated_data):
        """Replace the recipe's tags and ingredients atomically."""
        tags = validated_data.pop('tags')
        instance.tags.clear()  # NOTE(review): redundant before set(), which replaces
        instance.tags.set(tags)
        ingredients = validated_data.pop('ingredients')
        instance.ingredients.clear()
        self._create_ingredient(ingredients, instance)
        instance.save()
        # NOTE(review): the remaining validated fields (name, text, image,
        # cooking_time) are never applied here — confirm whether a call to
        # super().update() was intended.
        return instance
class SubscritionRecipeSerializer(serializers.ModelSerializer):
    """Compact recipe payload used inside subscription/favourite responses."""

    # NOTE(review): class name is missing an 's' ("Subscrition") — renaming
    # would break callers, so it is only flagged here.
    class Meta:
        model = Recipe
        fields = ('id', 'name', 'image', 'cooking_time')
class SubscriptionSerializer(serializers.ModelSerializer):
    """Serializes the Subscription model for POST and DELETE."""

    # Author fields are flattened onto the subscription payload.
    id = serializers.ReadOnlyField(source='author.id')
    email = serializers.ReadOnlyField(source='author.email')
    username = serializers.ReadOnlyField(source='author.username')
    first_name = serializers.ReadOnlyField(source='author.first_name')
    last_name = serializers.ReadOnlyField(source='author.last_name')
    is_subscribed = serializers.SerializerMethodField()
    recipes = serializers.SerializerMethodField()
    recipes_count = serializers.SerializerMethodField()

    def get_is_subscribed(self, obj):
        """True if the requesting user follows this author."""
        user = self.context.get('request').user
        return Subscription.objects.filter(user=user,
                                           author=obj.author).exists()

    def get_recipes(self, obj):
        """Author's recipes, optionally truncated by ?recipes_limit=N."""
        request = self.context.get('request')
        recipes_limit = request.query_params.get('recipes_limit')
        recipes = obj.author.recipes.all()
        if recipes_limit:
            recipes = recipes[:int(recipes_limit)]
        serializer = SubscritionRecipeSerializer(recipes,
                                                 many=True)
        return serializer.data

    def get_recipes_count(self, obj):
        """Total number of recipes by this author."""
        return obj.author.recipes.count()

    def validate(self, data):
        """Reject duplicate subscriptions and self-subscription."""
        user = self.context.get('request').user
        author = self.context.get('author')
        if user.follower.filter(author=author).exists():
            raise serializers.ValidationError(
                detail='Вы уже подписаны на этого автора!',
                code=status.HTTP_400_BAD_REQUEST,)
        if user == author:
            raise serializers.ValidationError(
                detail='Нельзя подписаться на себя!',
                code=status.HTTP_400_BAD_REQUEST,)
        return data

    class Meta:
        model = Subscription
        fields = ('id', 'email', 'username', 'first_name', 'last_name',
                  'recipes', 'is_subscribed', 'recipes_count')
class SubscriptionsSerializer(serializers.ModelSerializer):
    """Lists the authors the requesting user is subscribed to."""

    is_subscribed = serializers.SerializerMethodField()
    recipes = serializers.SerializerMethodField()
    recipes_count = serializers.SerializerMethodField()

    def get_is_subscribed(self, obj):
        """True if the requesting user follows this author (obj is a User)."""
        user = self.context.get('request').user
        return Subscription.objects.filter(user=user,
                                           author=obj).exists()

    def get_recipes_count(self, obj):
        """Total number of recipes by this author."""
        return obj.recipes.count()

    def get_recipes(self, obj):
        """Author's recipes, optionally truncated by ?recipes_limit=N."""
        request = self.context.get('request')
        recipes_limit = request.query_params.get('recipes_limit')
        recipes = obj.recipes.all()
        if recipes_limit:
            recipes = recipes[:int(recipes_limit)]
        serializer = SubscritionRecipeSerializer(recipes,
                                                 many=True)
        return serializer.data

    class Meta:
        model = User
        fields = ('email', 'id', 'username', 'first_name',
                  'last_name', 'is_subscribed', 'recipes', 'recipes_count')
class ShoppingCartSerializer(serializers.ModelSerializer):
    """Serializes the ShoppingCart model."""

    def to_representation(self, instance):
        """Represent a cart entry as its compact recipe payload."""
        request = self.context.get('request')
        return SubscritionRecipeSerializer(instance.recipe,
                                           context={'request': request}).data

    def validate(self, data):
        """Reject adding the same recipe to the cart twice."""
        user = self.context.get('request').user
        recipe = self.context.get('recipe')
        if user.shopping_cart.filter(recipe=recipe).exists():
            raise serializers.ValidationError(
                'Рецепт уже добавлен в корзину'
            )
        return data

    class Meta:
        model = ShoppingCart
        fields = ('user', 'recipe')
        read_only_fields = ('user', 'recipe')
class FavoriteSerializer(serializers.ModelSerializer):
    """Serializes the Favorite model."""

    def to_representation(self, instance):
        """Represent a favourite as its compact recipe payload."""
        request = self.context.get('request')
        return SubscritionRecipeSerializer(instance.recipe,
                                           context={'request': request}).data

    def validate(self, data):
        """Reject favouriting the same recipe twice."""
        user = self.context.get('request').user
        recipe = self.context.get('recipe')
        if user.favorites.filter(recipe=recipe).exists():
            raise serializers.ValidationError(
                'Рецепт уже добавлен в избранное')
        return data

    class Meta:
        model = Favorite
        fields = ('user', 'recipe')
        read_only_fields = ('user', 'recipe')
| LinaArtmv/foodgram-project-react | backend/foodgram/api/serializers.py | serializers.py | py | 13,491 | python | en | code | 0 | github-code | 90 |
72996257258 | # This program calculates the area of a region of interest in a spectra file.
# Spacing: TABS
import os
import time
import glob
def get_start_end():
    """Prompt the user for an ROI and return it as an (start, end) int pair."""
    start_text = input("Select the starting channel, from 0 to 2047: ")
    end_text = input("Select the end channel, from 0 to 2047: ")
    return int(start_text), int(end_text)
def calc_area(counts_dict, start, end):
    """Return the background-subtracted (net) area of the ROI [start, end].

    The background under the peak is estimated from the average of six edge
    channels (start, start-1, start-2, end, end+1, end+2) scaled by the ROI
    width (Covell-style net area).

    Fixed: the original subtracted the unscaled edge average from the gross
    sum and then multiplied the result by the ROI width — a flat spectrum did
    not produce a net area of 0 as it should.

    counts_dict maps channel number -> integer count and must contain the
    channels start-2 .. end+2.
    """
    width = end - start + 1
    gross = sum(counts_dict[i] for i in range(start, end + 1))
    edge_sum = (counts_dict[start] + counts_dict[start - 1] + counts_dict[start - 2]
                + counts_dict[end] + counts_dict[end + 1] + counts_dict[end + 2])
    background = (edge_sum / 6) * width
    return gross - background
def main():
    """Load a fixed spectrum (.Spe) file, prompt for an ROI, and print its area."""
    # set path to the spectra directory
    path = "Y:/roof_1958_spectra/"
    print('is the path right?', path)
    files = glob.glob(path+'*')
    # NOTE(review): `files` is unused and `newest` is hardcoded — presumably the
    # newest file was meant to be selected from `files`; confirm.
    newest = "Y:/roof_1958_spectra/ROOF_2017_10_12_T10_59.Spe"
    file_contents = ''
    with open(newest) as file:
        # Skip the 12 header lines and 14 footer lines of the .Spe format.
        file_contents = file.readlines()[12:-14]
    counts = []
    for content in file_contents:
        content = content.replace('\n', '')
        content = content.replace(' ', '')
        counts.append(content)
    # Map channel number -> integer count.
    counts_dict = {}
    for i in range(len(counts)):
        counts_dict[i] = int(counts[i])
    print('counts_dict', counts_dict)
    start, end = get_start_end()
    print('start', start, 'end', end)
    print(type(start))
    # Re-prompt until start < end.
    while (start >= end):
        print("Please select a starting value that is"
              " lower than the ending value.")
        start, end = get_start_end()
    area = calc_area(counts_dict, start, end)
    print("The area of the region of interest from channels", start, "to", end,
          "is", area)
    # Keep the console window open briefly before exiting.
    time.sleep(5)


if __name__ == '__main__':
    main()
| fangbo-yuan/RWS | calc_roi_area.py | calc_roi_area.py | py | 1,672 | python | en | code | 0 | github-code | 90 |
36295295577 | import sys
#import numpy
def del_last_digit(m):
    """Drop the last decimal digit of m (floor division by 10)."""
    quotient, _ = divmod(m, 10)
    return quotient
def qdigit(m, counter):
    """Return `counter` plus the number of decimal digits of m (exercise 4).

    Iterative equivalent of the original tail recursion; intended for
    natural numbers.
    """
    while m // 10 != 0:
        m //= 10
        counter += 1
    return counter + 1
def del_last_digitv2(m, digitos):
    """Strip leading digits until one remains, i.e. the last digit (exercise 3).

    `digitos` is the digit count of m; uses the same float-power arithmetic
    as the original, rewritten iteratively.
    """
    while digitos > 1:
        leading = int(m * 10 ** -(digitos - 1))
        m -= leading * 10 ** (digitos - 1)
        digitos -= 1
    return m
def main():
    """Prompt for a natural number and print its digit count and last digit."""
    x = int(input("Ingrese número natural para otener el último dígito: "))
    digitos = qdigit(x, 0)
    print("Numero de dígitos: {}".format(digitos))
    print("Resultado 'ultimo digito: ", del_last_digitv2(x, digitos))
    # NOTE(review): exit code 1 conventionally signals failure — confirm 0 wasn't intended.
    sys.exit(1)


if __name__ == "__main__":
    # Run only when executed as a script.
    main()
| RosanaR2017/PYTHON | del_last_digit.py | del_last_digit.py | py | 752 | python | es | code | 0 | github-code | 90 |
# Tutorial demo (Turkish): building and updating a nested dict of students.
ogrenciler = {
    "190509026": {
        "ad": "Aytaç",
        "soyad": "Kaşoğlu",
        "telefon": "5442903647"
    },
    "190509010": {
        "ad": "Mehmet Kadir",
        "soyad": "Cırık",
        "telefon": "5387984501"
    },
    "190543014": {
        "ad": "Safa",
        "soyad": "Çubuk",
        "telefon": "5442516479"
    }
}

# Start over from an empty dict and fill it from user input.
# NOTE(review): this discards the literal above — presumably intentional in a demo.
ogrenciler = {}
number = input("Öğrenci No: ")
first_name = input("Öğrenci adı: ")
last_name = input("Öğrenci soy adı: ")
phone_number = input("Telefon: ")

# Two equivalent ways to add the record: item assignment ...
ogrenciler[number] = {
    "ad": first_name,
    "soyad": last_name,
    "telefon": phone_number
}
# ... and dict.update() (same key, same data; shown for demonstration).
ogrenciler.update({
    number: {
        "ad": first_name,
        "soyad": last_name,
        "telefon": phone_number
    }
})
print(ogrenciler)

ogrNo = input("NO giriniz: ")
# Fixed: the original looked up `number` here, silently ignoring the freshly
# entered `ogrNo` (raises KeyError for an unknown number, which is the
# correct signal in this demo).
ogrenci = ogrenciler[ogrNo]
print(ogrenci)
10876121667 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class Attention(nn.Module):
    """Attention pooling over a padded batch of sequences.

    Each timestep is scored by a learned linear projection; padded positions
    are masked out and the weighted sum of the inputs is returned together
    with the attention weights.

    config must provide 'device' and 'hidden_size'.
    """

    def __init__(self, config):
        super(Attention, self).__init__()
        self.device = config['device']
        self.attention_size = config['hidden_size']
        # Projects each hidden state (H,) to a scalar attention logit.
        self.attn_vector = nn.Linear(self.attention_size, 1)

    def get_mask(self, scores, lengths):
        """Return a (B, T) float mask: 1.0 for valid timesteps, 0.0 for padding."""
        max_len = scores.size(1)
        positions = torch.arange(0, max_len, device=self.device).unsqueeze(0)
        # Dropped the original's redundant .clone().detach() on a fresh tensor.
        mask = (positions < lengths.unsqueeze(1)).float()
        return mask

    def forward(self, input, lengths):
        # input: (B, T, H); lengths: (B,)
        logits = self.attn_vector(input).squeeze(-1)  # (B, T)
        mask = self.get_mask(logits, lengths)
        # Fixed: mask the *logits* before softmax. The original zeroed the
        # probabilities after softmax, so the weights over the valid
        # positions no longer summed to 1.
        logits = logits.masked_fill(mask == 0, float('-inf'))
        scores = F.softmax(logits, dim=-1)  # (B, T), rows sum to 1 over valid steps
        output = input.mul(scores.unsqueeze(-1).expand_as(input))
        output = output.sum(dim=1)  # (B, H)
        return output, scores
| raja-1996/Pytorch_TextClassification | Attention_Classification/Attention.py | Attention.py | py | 1,231 | python | en | code | 0 | github-code | 90 |
4696863569 | from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth.models import User
from .models import Notification
from likes.models import Like
from comments.models import Comment
from follows.models import Follow
from sounds.models import Sound
from reports.models import Report
def create_notification(**kwargs):
    """Persist a Notification built from the given keyword fields.

    Expects exactly the keys: owner, sender, category, item_id, title,
    content; raises KeyError if any is missing.
    """
    field_names = ("owner", "sender", "category", "item_id", "title", "content")
    Notification.objects.create(**{name: kwargs[name] for name in field_names})
# Instructions for signals from:
# https://www.geeksforgeeks.org/how-to-create-and-use-signals-in-django/
@receiver(post_save, sender=Like)
def create_like_notification(sender, instance, created, **kwargs):
    """Notify the sound's owner when their sound receives a new like."""
    if created:
        data = {
            "owner": instance.sound.owner,
            "sender": instance.owner,
            "category": "like",
            "item_id": instance.sound.id,
            "title": "You have a new like!",
            "content": f"{instance.owner.username} liked your sound "
            f"{instance.sound.title}",
        }
        create_notification(**data)
@receiver(post_save, sender=Comment)
def create_comment_notification(sender, instance, created, **kwargs):
    """Notify the sound's owner when their sound receives a new comment."""
    if created:
        data = {
            "owner": instance.sound.owner,
            "sender": instance.owner,
            "category": "comment",
            "item_id": instance.sound.id,
            "title": "You have a new comment!",
            "content": f"{instance.owner.username} commented on your sound "
            f"{instance.sound.title}",
        }
        create_notification(**data)
@receiver(post_save, sender=Follow)
def create_follow_notification(sender, instance, created, **kwargs):
    """Notify a user when someone starts following them."""
    if created:
        data = {
            "owner": instance.followed,
            "sender": instance.owner,
            "category": "follow",
            "item_id": instance.id,
            "title": "You have a new follower!",
            "content": f"{instance.owner.username} is now following you.",
        }
        create_notification(**data)
@receiver(post_save, sender=Sound)
def create_new_sound_notification(sender, instance, created, **kwargs):
    """Notify every follower of the uploader when a new sound is created."""
    if created:
        # All users following the sound's owner.
        recipients = User.objects.filter(following__followed=instance.owner)
        for recipient in recipients:
            data = {
                "owner": recipient,
                "sender": instance.owner,
                "category": "new_sound",
                "item_id": instance.id,
                "title": "You have a new sound!",
                "content": f"{instance.owner.username} uploaded a new sound.",
            }
            create_notification(**data)
@receiver(post_save, sender=Report)
def create_report_notification(sender, instance, created, **kwargs):
    """Notify all staff users when a sound is reported."""
    if created:
        recipients = User.objects.filter(is_staff=True)
        sound = instance.sound
        for recipient in recipients:
            data = {
                "owner": recipient,
                "sender": instance.owner,
                "category": "report",
                "item_id": sound.id,
                "title": f"New report for sound '{sound.title}'",
                "content": f"{instance.owner} flagged {sound.owner}'s sound "
                f"'{sound.title}' as '{instance.get_flag_display()}'.",
            }
            create_notification(**data)
| nacht-falter/sonic-explorers-api | notifications/signals.py | signals.py | py | 3,468 | python | en | code | 0 | github-code | 90 |
71956027817 | import torch.nn as nn
import torch
from torch import Tensor
from typing import *
import numpy as np
from scipy import stats
class CosineSimilarityMatrix(nn.Module):
    """nn.Module wrapper around :func:`cosine_similarity_matrix`."""

    # Identifier used to look this metric up by name.
    name = 'cosine_matrix'

    def __init__(self, dim: int = 1, eps: float = 1e-8) -> None:
        super(CosineSimilarityMatrix, self).__init__()
        self.dim = dim
        self.eps = eps

    def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
        return cosine_similarity_matrix(x1, x2, self.dim, self.eps)
class EuclideanSimilarityMatrix(nn.Module):
    """nn.Module wrapper around :func:`euclidean_similarity_matrix`."""

    # Identifier used to look this metric up by name.
    name = 'euclidean_matrix'

    def __init__(self, dim: int = 1, eps: float = 1) -> None:
        # NOTE(review): eps defaults to 1 (not a tiny value) and `dim` is
        # stored but unused by euclidean_similarity_matrix — confirm intent.
        super(EuclideanSimilarityMatrix, self).__init__()
        self.dim = dim
        self.eps = eps

    def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
        return euclidean_similarity_matrix(x1, x2, self.eps)
class EuclideanDistanceMatrix(nn.Module):
    """nn.Module wrapper around :func:`euclidean_distance_matrix`."""

    # Identifier used to look this metric up by name.
    name = 'euclidean_distance_matrix'

    def __init__(self,) -> None:
        super(EuclideanDistanceMatrix, self).__init__()

    def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
        return euclidean_distance_matrix(x1, x2,)
def cosine_similarity_matrix(x1: Tensor, x2: Tensor, dim: int = 1, eps: float = 1e-8) -> Tensor:
    """Pairwise cosine similarity between rows of x1 (N, D) and x2 (M, D).

    Row norms are clamped at `eps` to avoid division by zero. Singleton
    dimensions are squeezed out of the (N, M) result, matching the original
    implementation. Values lie in [-1, 1].
    """
    norm1 = torch.clamp(torch.norm(x1, dim=dim), min=eps).unsqueeze(dim)
    norm2 = torch.clamp(torch.norm(x2, dim=dim), min=eps).unsqueeze(dim)
    unit1 = x1 / norm1
    unit2 = x2 / norm2
    sim = torch.matmul(unit1, unit2.transpose(0, 1))
    return sim.squeeze()
def euclidean_similarity_matrix(x1, x2, eps):
    """Pairwise similarity: 1 / (1 + euclidean_distance + eps)."""
    pairwise_dist = torch.cdist(x1, x2)
    return (1 + pairwise_dist + eps).reciprocal()
def euclidean_distance_matrix(x1, x2):
    """Pairwise Euclidean distances between rows of x1 (N, D) and x2 (M, D)."""
    diff = x1.unsqueeze(1) - x2.unsqueeze(0)  # (N, M, D)
    return diff.pow(2).sum(-1).sqrt()
def sigmoid(x, k=1.0):
    """Logistic function with temperature k; the exponent is clamped to
    [-50, 50] for numerical stability."""
    z = torch.clamp(-x / k, min=-50, max=50)
    return torch.exp(z).add(1.0).reciprocal()
def knn(distances, k=1):
    """Boolean mask marking, for each row, its k-th entry in sorted order.

    With k=1 and a zero self-distance on the diagonal this marks each
    point's first non-self neighbour.
    """
    n = distances.shape[0]
    kth_neighbour = distances.argsort(dim=-1)[:, k]
    mask = torch.zeros_like(distances, dtype=torch.bool)
    mask[torch.arange(n), kth_neighbour] = True
    return mask
def mutual_knn(distances, k):
    """Symmetric KNN adjacency matrix from an (n, n) distance matrix.

    Fixed: the original shadowed the `k` parameter with a hardcoded ``k = 3``
    (the in-code TODO flagged this as broken).

    NOTE(review): despite the name, an edge is added when *either* point has
    the other among its k nearest (a union, not a strict mutual
    intersection) — confirm the intended semantics. Also note argsort
    includes each point itself (distance 0), so every row's first
    "neighbour" is itself; the diagonal is cleared afterwards.
    """
    n = distances.shape[0]
    # Indices of the k nearest entries for each row (self included).
    nearest_neighbors = np.argsort(distances, axis=1)[:, :k]
    mask = np.zeros(distances.shape, dtype=bool)
    mask[np.arange(n)[:, np.newaxis], nearest_neighbors] = True
    mask[nearest_neighbors, np.arange(n)[:, np.newaxis]] = True
    adjacency_matrix = mask.astype(int)
    np.fill_diagonal(adjacency_matrix, 0)
    return adjacency_matrix
def batched_spearman_rank(h_rank, gt_rank):
    """Per-batch Spearman rank correlation.

    Returns two lists (correlations, p-values), one entry per batch row.
    """
    results = [stats.spearmanr(h_rank[b].cpu(), gt_rank[b].cpu())
               for b in range(h_rank.shape[0])]
    correlations = [r.correlation for r in results]
    pvalues = [r.pvalue for r in results]
    return correlations, pvalues
def cov(m):
    """Batched covariance of (B, D, N) samples, with 1/(N-1) normalisation.

    NOTE(review): the data is centred on the *scalar* per-batch mean
    (over dims 1 and 2), not per-row means as textbook covariance would be —
    preserved as in the original; confirm intent.
    Singleton dimensions are squeezed from the result.
    """
    n_samples = m.shape[-1]
    centred = m - m.mean(dim=(1, 2), keepdim=True)
    outer = centred.matmul(centred.transpose(1, 2))
    return (outer / (n_samples - 1)).squeeze()
def corrcoef(x, y):
    """Batched Pearson correlation between x and y, both (B, N).

    Stacks the two series, takes their batched covariance, normalises by the
    standard deviations, and returns the off-diagonal entry per batch.
    (Idea from https://discuss.pytorch.org/t/spearmans-correlation/91931/2)
    """
    batch_size = x.shape[0]
    stacked = torch.stack((x, y), 1)  # (B, 2, N)
    covariance = cov(stacked)
    variances = torch.diagonal(covariance, dim1=1, dim2=2)
    std = variances.pow(0.5).repeat(1, 2).view(batch_size, 2, 2)
    normalised = covariance.div(std).div(torch.transpose(std, 1, 2))
    return normalised[:, 1, 0]
26370559324 | #
# File: MessageSubscriberBase.py
# Date: 21-Mar-2023 J. Smith
#
# Updates:
##
"""
Async message consumer -
This software was developed as part of the World Wide Protein Data Bank
Common Deposition and Annotation System Project
"""
from __future__ import division, absolute_import, print_function
__docformat__ = "restructuredtext en"
__author__ = "James Smith"
__email__ = "james_smithrcsb.org"
__license__ = "Creative Commons Attribution 3.0 Unported"
__version__ = "V0.07"
import logging
import threading
import pika
try:
import exceptions
except ImportError:
import builtins as exceptions
logger = logging.getLogger()
"""
direct or exchange consumer with temporary queue, one for each consumer, which deletes on closing
hence, the subscriber must start before the publisher begins producing messages, since a queue must exist to store messages
each consumer may bind to multiple exchanges, hence the add_exchange function
the publisher and subscriber must use unique names for the exchanges that are not used by other queues
the routing keys of each exchange published to by the producer must match the routing keys used by the consumer
hence, a default key has been set so that the producer and consumer must only coordinate their exchange names
the publishDirect method has been implemented in the MessagePublisher class for the purpose of publishing to a subscriber
"""
class MessageSubscriberBase(object):
    """Blocking RabbitMQ consumer with an exclusive, auto-deleting queue.

    Each subscriber declares its own server-named queue and may bind it to
    several direct exchanges via :meth:`add_exchange`. Because messages are
    only stored once the queue exists, the subscriber must start before the
    publisher. Subclasses implement :meth:`workerMethod`.
    """

    def __init__(self, amqpUrl, local=False):
        # local=True connects to localhost instead of parsing the AMQP URL.
        self._url = amqpUrl
        self._closing = False
        self._consumerTag = None
        self.local = local
        self.__exchange_type = "direct"
        self.__routing_key = "subscriber_routing_key"
        self.__exchanges = []
        self._connection = self.connect()
        self._channel = self._connection.channel()
        try:
            # Exclusive server-named queue, deleted when this consumer closes.
            result = self._channel.queue_declare(queue="", exclusive=True, durable=True)
        except:  # noqa: E722 pylint: disable=bare-except
            # NOTE(review): returning here leaves the object half-constructed
            # (no __queue_name); later method calls will fail — confirm.
            self._connection.close()
            logger.critical("error - mixing of priority queues and non-priority queues")
            return
        self.__queue_name = result.method.queue
        # Deliver at most one unacknowledged message at a time.
        self._channel.basic_qos(prefetch_count=1)

    def add_exchange(self, exchange):
        """Declare `exchange` and bind this consumer's queue to it."""
        self.__exchanges.append(exchange)
        self._channel.exchange_declare(exchange=exchange, exchange_type=self.__exchange_type, passive=False, durable=True)
        self._channel.queue_bind(exchange=exchange, queue=self.__queue_name, routing_key=self.__routing_key)

    def run(self):
        """Start consuming; blocks until consuming stops."""
        if len(self.__exchanges) == 0:
            logger.info("error - no exchanges")
            return
        self._channel.basic_consume(queue=self.__queue_name, on_message_callback=self.onMessage)
        #
        self._channel.start_consuming()

    def workerMethod(self, msgBody, deliveryTag=None):
        """Process one message body; must be overridden by subclasses."""
        raise exceptions.NotImplementedError

    def connect(self):
        """Open a blocking connection (localhost when self.local is set)."""
        logger.info("Connecting to %s", self._url)
        if self.local:
            return pika.BlockingConnection(pika.ConnectionParameters("localhost"))
        return pika.BlockingConnection(
            pika.URLParameters(self._url),
        )

    def onChannelOpen(self, channel):
        """Channel-open callback: store the channel, set QoS, hook close events."""
        logger.info("Channel opened")
        self._channel = channel
        self._channel.basic_qos(prefetch_count=1)
        self._channel.add_on_close_callback(self.onChannelClosed)

    def startConsuming(self):
        """Register cancel handling and begin consuming from the queue."""
        logger.info("Issuing consumer related RPC commands")
        self._channel.add_on_cancel_callback(self.onConsumerCancelled)
        self._consumerTag = self._channel.basic_consume(queue=self.__queue_name, on_message_callback=self.onMessage)

    def onMessage(self, unused_channel, basic_deliver, properties, body):
        """Run workerMethod in a thread (servicing heartbeats meanwhile), then ack."""
        logger.info("Received message # %s from %s: %s", basic_deliver.delivery_tag, properties.app_id, body)
        try:
            thread = threading.Thread(target=self.workerMethod, args=(body, basic_deliver.delivery_tag))
            thread.start()
            # Sleep on the connection so heartbeats keep flowing while the worker runs.
            while thread.is_alive():
                self._channel._connection.sleep(1.0)  # pylint: disable=protected-access
        except Exception as e:
            logger.exception("Worker failing with exception")
            logger.exception(e)
        #
        logging.info("Done task")
        # NOTE(review): the message is acknowledged even when the worker
        # raised — confirm at-most-once delivery is intended.
        self.acknowledgeMessage(basic_deliver.delivery_tag)

    def acknowledgeMessage(self, deliveryTag):
        """Acknowledge the given delivery tag."""
        logger.info("Acknowledging message %s", deliveryTag)
        self._channel.basic_ack(deliveryTag)

    def onConnectionOpenError(self, *args, **kw):  # pylint: disable=unused-argument
        """Connection-error callback: surface as AMQPConnectionError."""
        logger.info("Catching connection error - ")
        raise pika.exceptions.AMQPConnectionError

    def onConsumerCancelled(self, method_frame):
        """The server cancelled the consumer remotely: close the channel."""
        logger.info("Consumer was cancelled remotely, shutting down: %r", method_frame)
        if self._channel:
            self._channel.close()

    def stopConsuming(self):
        """Ask RabbitMQ to cancel this consumer."""
        if self._channel:
            logger.info("Sending a Basic.Cancel command to RabbitMQ")
            self._channel.basic_cancel(callback=self.onCancelOk, consumer_tag=self._consumerTag)

    def onCancelOk(self, unused_frame):
        """Cancellation acknowledged: close the channel."""
        logger.info("RabbitMQ acknowledged the cancellation of the consumer")
        self.closeChannel()

    def onChannelClosed(self, channel, reply_code, reply_text):
        """Channel-closed callback: close the connection as well.

        NOTE(review): pika 1.x passes (channel, exception) to close
        callbacks; this 3-argument signature matches pika 0.x — confirm
        the pinned pika version.
        """
        logger.warning("Channel %i was closed: (%s) %s", channel, reply_code, reply_text)
        self._connection.close()

    def closeChannel(self):
        """Close the channel."""
        logger.info("Closing the channel")
        self._channel.close()

    def stop(self):
        """Cleanly stop consuming."""
        logger.info("Clean stop")
        self._closing = True
        self.stopConsuming()
        logger.info("Cleanly stopped")

    def closeConnection(self):
        """Close the connection."""
        logger.info("Closing connection")
        self._connection.close()
| wwPDB/py-wwpdb_utils_message_queue | wwpdb/utils/message_queue/MessageSubscriberBase.py | MessageSubscriberBase.py | py | 5,840 | python | en | code | 0 | github-code | 90 |
36700172704 | #!/usr/bin/env python3
import rospy
from std_msgs.msg import ColorRGBA, Float64
rospy.init_node("battery_led")
pub = rospy.Publisher("/led2", ColorRGBA, queue_size=1)
def _make_led(r, g, b):
    """Build a fully opaque ColorRGBA with the given channel values."""
    led = ColorRGBA()
    led.a = 1.0
    led.r = r
    led.g = g
    led.b = b
    return led


# Battery-level indicator colours (thresholds applied in vbat_cb).
led_full = _make_led(0, 0, 1)  # blue: voltage > 16 V
led_mid = _make_led(0, 1, 0)   # green: voltage > 14 V
led_low = _make_led(1, 0, 0)   # red: voltage > 13 V
led_no = _make_led(0, 0, 0)    # off: below cutoff
def vbat_cb(msg: Float64):
    """Map the measured battery voltage (volts) to an LED colour and publish it."""
    if msg.data > 16:
        pub.publish(led_full)
    elif msg.data > 14:
        pub.publish(led_mid)
    elif msg.data > 13:
        pub.publish(led_low)
    else:
        pub.publish(led_no)


# Subscribe to the battery voltage topic and block until node shutdown.
sub = rospy.Subscriber("/core/vbat", Float64, vbat_cb, queue_size=1, tcp_nodelay=True)
rospy.spin()
| MosHumanoid/bitbots_thmos_meta | bitbots_lowlevel/bitbots_ros_control/scripts/battery_led.py | battery_led.py | py | 821 | python | en | code | 3 | github-code | 90 |
18440737998 | #!/usr/bin/python3
'''
Demo of custom Vector Quantiser layer written in tf.keras:
  $ ./vq_kmeans_demo.py
'''
import logging
import os
import numpy as np
from matplotlib import pyplot as plt
# Give TF "a bit of shoosh" - needs to be placed _before_ "import tensorflow as tf"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
from vq_kmeans import *
# constants
dim = 2  # dimensionality of each training vector
nb_samples = 1000  # number of training vectors
nb_embedding = 4  # VQ codebook size (one entry per QPSK symbol)
# Simple test model: the input is passed straight through the VQ layer
inputs = tf.keras.layers.Input(shape=(dim,))
outputs = VQ_kmeans(dim,nb_embedding,name="vq")(inputs)
model = tf.keras.Model(inputs, outputs)
# note we do our own training (no trainable weights) so choices here don't matter much
model.compile(loss='mse',optimizer='adam')
model.summary()
# training data - a QPSK constellation with noise
bits = np.random.randint(2,size=nb_samples*dim).reshape(nb_samples, dim)
x_train = 2*bits-1 + 0.1*np.random.randn(nb_samples, dim)
# Set up initial VQ table to something we know should converge
# (a scaled-down copy of the ideal QPSK constellation)
vq_initial = np.array([[1.,1.],[-1.,1.],[-1.,-1.],[1.,-1.]])/10
model.get_layer('vq').set_vq(vq_initial)
print(model.get_layer('vq').get_vq().numpy())
model.fit(x_train, x_train, batch_size=2, epochs=2)
# Plot the training data with the learned codebook entries marked by crosses.
vq_entries = model.get_layer('vq').get_vq().numpy()
print(vq_entries)
plt.scatter(x_train[:,0],x_train[:,1])
plt.scatter(vq_entries[:,0],vq_entries[:,1], marker='x')
plt.show()
| drowe67/ampnn | vq_kmeans_demo.py | vq_kmeans_demo.py | py | 1,429 | python | en | code | 3 | github-code | 90 |
73983799976 | import os
import argparse
from PIL import Image
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torch.optim import lr_scheduler
from torchvision import models
from FaceMaskClassificationUtils import imshow, DEVICE, CPU_DEVICE, split_prepare_dataset, CNN, train_model, evaluation, test_transform, print_labeled_samples
# Class names of the natural-images dataset, in the index order the model's
# final layer is trained to emit.
natural_image_class_names_list = ['airplane', 'car', 'cat', 'dog', 'flower', 'fruit', 'motorbike', 'person']
NUM_OF_NATURAL_IMAGE_CLASSES = len(natural_image_class_names_list)
# Training hyper-parameters.
batch_size = 8
num_workers = 4
_num_epochs = 10
# Train/validation/test fractions.
# NOTE(review): these splits appear unused in this module — confirm whether
# split_prepare_dataset applies its own split.
_train_size, _validation_size, _test_size = 0.7, 0.15, 0.15
def classify_natural_image(image_path, model):
    """
    Classify a natural image into one of the eight known classes.

    (Bug fix: the previous docstring and log message were copied from a
    face-mask classifier and described the wrong behaviour.)

    :param image_path: path of the image file to classify
    :param model: torch model whose output logits index into
                  natural_image_class_names_list
    :return: the predicted class name (str), e.g. 'cat'
    """
    img = test_transform(Image.open(image_path))
    with torch.no_grad():
        # Run inference on the CPU so no GPU is required for classification.
        img = img.to(CPU_DEVICE)
        model.to(CPU_DEVICE)
        class_prediction = torch.argmax(model(img.unsqueeze(0))).item()
    label = natural_image_class_names_list[class_prediction]
    print("classify_natural_image: this is a {}.".format(label))
    return label
def train_natural_image_detection(model_path, data_dir):
    """
    Fine-tune a pretrained ResNet-18 on the natural-images dataset.

    :param model_path: file the trained checkpoint is written to (overwritten
        if it already exists)
    :param data_dir: root directory of the image dataset
    :return: the trained model (not a path; the checkpoint is also saved
        to model_path)
    """
    dataloaders, total_batch_sizes, class_names = split_prepare_dataset(data_dir, num_workers, batch_size)
    # Transfer learning: replace the final FC layer to emit one logit per class.
    model = models.resnet18(pretrained=True)
    num_ftrs = model.fc.in_features
    model.fc = nn.Linear(num_ftrs, NUM_OF_NATURAL_IMAGE_CLASSES)
    criterion = nn.CrossEntropyLoss()
    optimizer_ft = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    # Decay the learning rate by 10x every 7 epochs.
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
    model = train_model(model, criterion, optimizer_ft, exp_lr_scheduler, _num_epochs, dataloaders, total_batch_sizes, batch_size, "Natural")
    model.eval()
    evaluation(dataloaders, model, class_names)
    # save model
    # NOTE(review): the stored 'model' entry is a fresh untrained resnet18 used
    # only as an architecture placeholder; the weights live in 'state_dict'.
    checkpoint = {'model': models.resnet18(),
                  'state_dict': model.state_dict()}
    if os.path.exists(model_path):
        os.remove(model_path)
    torch.save(checkpoint, model_path)
    return model
def load_natural_image_model(model_path, class_num):
    """Rebuild a classifier from a saved checkpoint.

    The checkpoint stores the bare architecture under 'model' and the trained
    weights under 'state_dict'; the final fully connected layer is replaced to
    emit class_num logits before the weights are loaded.
    """
    ckpt = torch.load(model_path, map_location=DEVICE)
    net = ckpt['model']
    net.fc = nn.Linear(net.fc.in_features, class_num)
    net.load_state_dict(ckpt['state_dict'])
    net.eval()
    return net
if __name__ == "__main__":
    # Command-line entry point: optionally train, then classify one image.
    parser = argparse.ArgumentParser(description='Description of your program')
    parser.add_argument('-M', '--model_path', help='Model Path', required=True)
    parser.add_argument('-D', '--data_path', help='Data Path', required=False)
    parser.add_argument('-F', '--image_path', help='image to classify', required=True)
    parser.add_argument('--train', dest='train', action='store_true')
    parser.set_defaults(train=False)
    args = parser.parse_args()
    print(args)
    # train if necessary
    if args.train:
        # Bug fix: train_natural_image_detection takes (model_path, data_dir);
        # the class count is a module-level constant, not a parameter, so the
        # previous 3-argument call raised TypeError.
        train_natural_image_detection(args.model_path, args.data_path)
    # load the model
    model = load_natural_image_model(args.model_path, NUM_OF_NATURAL_IMAGE_CLASSES)
    # classify the requested image
    classify_natural_image(args.image_path, model)
| amird1234/FaceMaskDetection | src/NaturalImages.py | NaturalImages.py | py | 3,470 | python | en | code | 0 | github-code | 90 |
20146445477 | import pytest
from some_project.utils import fib
def test_invalid_value():
    """fib must reject a negative index with ValueError."""
    value = -1
    with pytest.raises(ValueError):
        fib(value)
def test_invalid_type():
    """fib must reject a non-integer argument with TypeError."""
    value = 'aaa'
    with pytest.raises(TypeError):
        fib(value)
@pytest.mark.parametrize(
    'n,expected',
    [(0, 0), (1, 1), (2, 1), (7, 13), (10, 55), (15, 610)]
)
def test_fib(n, expected):
    # Known Fibonacci values, including both base cases F(0)=0 and F(1)=1.
    assert fib(n) == expected
| inna-tuzhikova/some_project | tests/test_fib.py | test_fib.py | py | 411 | python | en | code | 0 | github-code | 90 |
40315965864 | '''
Input: a List of integers as well as an integer `k` representing the size of the sliding window
Returns: a List of integers
'''
def sliding_window_max(nums, k):
    '''
    Return the maximum of every contiguous window of size k in nums.

    Uses a monotonically decreasing deque of indices, so each element is
    pushed and popped at most once — O(n) overall, instead of sorting every
    window (O(n * k * log k)) as the original did.

    Input: a List of integers and an integer `k` (window size)
    Returns: a List of integers; empty when k <= 0 or k > len(nums)
    '''
    from collections import deque  # local import keeps module deps unchanged
    if k <= 0:
        return []
    result = []
    window = deque()  # indices of max candidates; their values decrease left-to-right
    for i, value in enumerate(nums):
        # Drop candidates that can never again be a window maximum.
        while window and nums[window[-1]] <= value:
            window.pop()
        window.append(i)
        # Evict the index that slid out of the current window.
        if window[0] <= i - k:
            window.popleft()
        if i >= k - 1:
            result.append(nums[window[0]])
    return result
if __name__ == '__main__':
    # Use the main function here to test out your implementation
    arr = [1, 3, -1, -3, 5, 3, 6, 7]
    k = 3
    # Expected output: [3, 3, 5, 5, 6, 7]
    print(f"Output of sliding_window_max function is: {sliding_window_max(arr, k)}")
| IanCarreras/first-pass-solution | sliding_window_max/sliding_window_max.py | sliding_window_max.py | py | 783 | python | en | code | 0 | github-code | 90 |
29517714071 | import os
import random
import numpy as np
import pandas as pd
import pickle as pickle
from load_data import *
random.seed(42)  # reproducible augmentation runs
PUNCTUATIONS = ['.', ',', '!', '?', ';', ':']  # marks AEDA may insert
PUNC_RATIO = 0.3  # upper bound on inserted marks relative to sentence length (per the AEDA paper)
# Insert punction words into a given sentence with the given ratio "punc_ratio"
def insert_punctuation_marks(sentence, punc_ratio=PUNC_RATIO):
    """Insert random punctuation marks at random positions (AEDA augmentation).

    NOTE(review): an empty sentence makes random.sample draw from an empty
    range and raise ValueError — confirm callers only pass non-empty text.
    """
    words = sentence.split()
    # words = okt.morphs(sentence) # alternative: Okt morphological analyzer
    new_line = []
    q = random.randint(1, int(punc_ratio * len(words) + 1)) # how many marks to insert (min=1, max≈len/3) — per the AEDA paper
    qs = random.sample(range(0, len(words)), q) # pick q distinct word positions to prefix with a mark
    for j, word in enumerate(words):
        if j in qs:
            new_line.append(PUNCTUATIONS[random.randint(0, len(PUNCTUATIONS)-1)]) # pick a random punctuation mark
            new_line.append(word)
        else:
            new_line.append(word)
    new_line = ' '.join(new_line)
    return new_line
def iter_punc(num_iter, sentence, punc_ratio=PUNC_RATIO):
    """Return [sentence] followed by num_iter punctuation-augmented variants.

    :param num_iter: number of augmented copies to generate
    :param sentence: original sentence, kept as the first list element
    :param punc_ratio: forwarded to insert_punctuation_marks.  Bug fix: this
        parameter was previously ignored — the call always passed the module
        constant PUNC_RATIO instead.
    """
    new_lines = [sentence]
    for _ in range(num_iter):
        new_lines.append(insert_punctuation_marks(sentence, punc_ratio=punc_ratio))
    return new_lines
def count_label(train_dataset):
    """Count rows per label and return quartile counts over the minor classes.

    The most frequent class (assumed to be 'no_relation') is excluded from
    the quartile computation, matching the original behaviour.

    Improvements: the O(L*N) per-label boolean filtering is replaced with a
    single value_counts() pass, and the unused `avg` computation is dropped.

    :param train_dataset: DataFrame with a 'label' column
    :return: (per-label count dict, 1st-quartile count, median count,
              3rd-quartile count)
    """
    num_dict = train_dataset['label'].value_counts().to_dict()
    nums = sorted(num_dict.values())           # class sizes, ascending
    trimmed = nums[:-1]                        # drop the dominant class
    quarter1_idx = len(trimmed) // 4
    median_idx = quarter1_idx * 2
    quarter3_idx = quarter1_idx * 3
    return num_dict, trimmed[quarter1_idx], trimmed[median_idx], trimmed[quarter3_idx]
def get_num_iter(train_dataset, label):
    """Return how many augmented copies to create for the given label.

    Rarer labels receive more copies to balance the class distribution.
    train_dataset is unused but kept for interface compatibility.

    Bug fix: the original tested `label == iter1` (a string compared to a
    list), which is always False and silently disabled the 4x augmentation.
    """
    iter4_labels = ['per:product', 'per:place_of_residence', 'per:origin']
    iter2_labels = ['org:members', 'per:other_family', 'org:political/religious_affiliation', 'per:place_of_birth']
    if label in iter4_labels:
        return 4
    if label in iter2_labels:
        return 2
    return 0
def aeda(dataset, undersamp=False):
    """Apply AEDA punctuation-insertion augmentation to a labelled DataFrame.

    Each row expands into 1 + get_num_iter(label) rows (the original sentence
    plus augmented variants).  When undersamp is True the dominant
    'no_relation' class is additionally downsampled to 70% of its rows.

    Improvements: a dead count_label() call (its results were never used) is
    removed, and `list(dataset['sentence'])[i]` — which re-materialised the
    whole column on every iteration (O(n^2)) — is replaced with .iloc.

    :param dataset: DataFrame with id/sentence/entity/label/type columns
    :param undersamp: whether to undersample 'no_relation'
    :return: the augmented (and optionally undersampled) DataFrame
    """
    total_id = []
    total_sent = []
    total_sub = []
    total_obj = []
    total_label = []
    total_sub_type = []
    total_obj_type = []
    for i in range(len(dataset)):
        label = dataset['label'].iloc[i]
        num_iter = get_num_iter(dataset, label)
        new_id = [dataset['id'].iloc[i]] * (num_iter+1)
        new_sentence = iter_punc(num_iter, dataset['sentence'].iloc[i], PUNC_RATIO)
        new_sub = [dataset['subject_entity'].iloc[i]] * (num_iter+1)
        new_obj = [dataset['object_entity'].iloc[i]] * (num_iter+1)
        new_label = [label] * (num_iter+1)
        new_sub_type = [dataset['subject_type'].iloc[i]] * (num_iter+1)
        new_obj_type = [dataset['object_type'].iloc[i]] * (num_iter+1)
        total_id += new_id
        total_sent += new_sentence
        total_sub += new_sub
        total_obj += new_obj
        total_label += new_label
        total_sub_type += new_sub_type
        total_obj_type += new_obj_type
    aug_df = pd.DataFrame({'id':total_id, 'sentence':total_sent, 'subject_entity':total_sub,
                        'object_entity':total_obj, 'label':total_label,
                        'subject_type':total_sub_type,'object_type':total_obj_type,})
    if undersamp:
        # undersampling 'no_relation'
        no_relation_df = aug_df[aug_df['label'] == 'no_relation']
        print("no_relation undersampling 이전: ", len(no_relation_df))
        no_relation_df = no_relation_df.sample(frac=0.7, random_state=42).reset_index(drop=True)
        print("no_relation undersampling 이후: ", len(no_relation_df))
        res_df = aug_df[aug_df['label'] != 'no_relation']
        aug_df = pd.concat([no_relation_df, res_df])
    return aug_df
def swap_entity(sent, sub_s, sub_e, obj_s, obj_e):
    """Return sent with the subject and object spans swapped in place.

    Indices are inclusive character positions: [sub_s, sub_e] marks the
    subject entity and [obj_s, obj_e] the object entity.
    """
    # Normalise to "first span" / "second span" so one rebuild covers both orders.
    if sub_s < obj_s:
        first_s, first_e, second_s, second_e = sub_s, sub_e, obj_s, obj_e
    else:
        first_s, first_e, second_s, second_e = obj_s, obj_e, sub_s, sub_e
    return ''.join((
        sent[:first_s],                 # prefix
        sent[second_s:second_e + 1],    # second entity, moved forward
        sent[first_e + 1:second_s],     # text between the two entities
        sent[first_s:first_e + 1],      # first entity, moved back
        sent[second_e + 1:],            # suffix
    ))
def preprocessing_swap(dataset, filter):
    """Convert the freshly loaded CSV DataFrame into the shape swap needs.

    Parses the subject/object entity dicts and adds word and inclusive
    start/end index columns for each.

    NOTE(review): filtered_sentence only drives the zip iteration and is never
    stored in the output — confirm whether the filtered text was meant to
    replace the 'sentence' column.
    """
    subs = []
    objs = []
    sub_s = []
    sub_e = []
    obj_s = []
    obj_e = []
    filtered_sentence = sentence_filter(dataset['sentence'], filter) # sentence filter
    for sub,obj,sentence in zip(dataset['subject_entity'], dataset['object_entity'], filtered_sentence):
        # NOTE(review): eval() on CSV fields executes arbitrary code —
        # prefer ast.literal_eval for untrusted data.
        sub = eval(sub)
        obj = eval(obj)
        subs.append(sub['word'])
        objs.append(obj['word'])
        sub_s.append(sub['start_idx'])
        sub_e.append(sub['end_idx'])
        obj_s.append(obj['start_idx'])
        obj_e.append(obj['end_idx'])
    out_dataset = pd.DataFrame({'id':dataset['id'], 'sentence':dataset['sentence'],
                    'subject_entity':dataset['subject_entity'],'object_entity':dataset['object_entity'], 'label':dataset['label'],
                    'sub':subs, 'obj':objs, 'sub_s':sub_s, 'sub_e':sub_e, 'obj_s':obj_s, 'obj_e':obj_e})
    return out_dataset
def apply_swap(train_df):
    """Augment symmetric relations by swapping subject/object spans.

    Only relations whose label is unchanged under subject/object swap are
    augmented; per:siblings and per:colleagues are omitted because the
    training data contains no rows for them (as in the original).  The
    swapped rows are concatenated in front of the original rows.

    Improvements: the four copy-pasted filter/apply stanzas are folded into a
    loop, and .copy() avoids pandas' SettingWithCopyWarning when overwriting
    the 'sentence' column of a filtered slice.

    :param train_df: output of preprocessing_swap (needs sub_s/sub_e/obj_s/obj_e)
    :return: DataFrame with columns id/sentence/subject_entity/object_entity/label
    """
    symmetric_labels = ['org:alternate_names', 'per:alternate_names', 'per:spouse', 'per:other_family']
    swapped_parts = []
    for sym_label in symmetric_labels:
        part = train_df[train_df['label'] == sym_label].copy()
        part['sentence'] = part.apply(
            lambda x: swap_entity(x['sentence'], x['sub_s'], x['sub_e'], x['obj_s'], x['obj_e']), axis=1)
        swapped_parts.append(part)
    res_df = pd.concat(swapped_parts)
    swap_df = res_df[["id", "sentence", "subject_entity", "object_entity", "label"]]
    train_df = train_df[["id", "sentence", "subject_entity", "object_entity", "label"]]
    total_df = pd.concat([swap_df, train_df])
    return total_df
def main_aeda(dataset_dir, marking_mode="normal", undersamp=False):
    """Run AEDA augmentation over the training split and save it as CSV."""
    save_dir = "aeda_augmentation.csv"
    train_dataset, eval_dataset = load_aug_data(dataset_dir, train=True, filter=False, marking_mode=marking_mode) # use the already-preprocessed dataframe
    aug_dataset = aeda(train_dataset, undersamp=undersamp)
    # utf-8-sig keeps the Korean text readable in Excel.
    aug_dataset.to_csv(save_dir, index=False, encoding="utf-8-sig")
    print("현재 사용중인 marking_mode: ", marking_mode)
    print("aug 이전 데이터 개수: ", len(train_dataset))
    print("문장부호 추가로 증강한 데이터 개수: ", len(aug_dataset) - len(train_dataset))
    print("aug 이후 데이터 개수: ", len(aug_dataset))
    print('@@@@@@@@@@@@@@@@ Done @@@@@@@@@@@@@@@@')
def main_swap(dataset_dir):
    """Augment symmetric relations via entity swapping and save original+swapped CSV."""
    pd_dataset = pd.read_csv(dataset_dir)
    train_df, val_df = stratified_choice_train_test_split(pd_dataset, test_size=0.2)
    train_df = preprocessing_swap(train_df, False)
    swap_dataset = apply_swap(train_df)
    # utf-8-sig keeps the Korean text readable in Excel.
    swap_dataset.to_csv("swap_and_original.csv", index=False, encoding="utf-8-sig")
    print('원본 데이터 개수: ', len(train_df))
    print('swap으로 증강한 데이터 개수: ', len(swap_dataset) - len(train_df))
    print('원본+swap 데이터 개수: ', len(swap_dataset))
if __name__ == "__main__":
    # Set this to your own original train-dataset CSV path.
    dataset_dir = "train_df_v1.csv"
    main_aeda(dataset_dir, marking_mode="normal", undersamp=True) # aeda usage
    # main_swap(dataset_dir) # swap entity usage
36121085796 | import io
from pathlib import Path
import click
from openpyxl import load_workbook
from rich.console import Console, Group
from rich.padding import Padding
from rich.panel import Panel
from core.models import AnonymousEtabRows
console = Console()
def load_xlsx(file):
wb = load_workbook(filename=file)
return wb
help_txt = ""
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
class RichHelp(click.Command):
"""Custom help formatter"""
def format_help(self, ctx, formatter):
sio = io.StringIO()
h_console = Console(file=sio, force_terminal=True)
h_console.print(help_txt)
formatter.write(sio.getvalue())
@click.command(cls=RichHelp, context_settings=CONTEXT_SETTINGS)
@click.argument("file", default="file.xlsx")
def parse_validate_convert(file):
export_file_name = "anonymous.csv"
wb = load_xlsx(file)
ws_etablissements = wb.worksheets[0]
anon_rows = AnonymousEtabRows.from_worksheet(ws_etablissements)
anon_rows.validate()
if not anon_rows.is_valid:
console.print()
console.print(
Padding(
":thumbs_down: [red]This file as errors and can't be imported",
(1, 0),
),
)
anon_rows.make_messages()
return
current_path = Path(".")
console.print(":weight_lifter:[green] Exporting")
csv_dir = current_path / "csv"
csv_dir.mkdir(exist_ok=True)
with open(f"csv/{export_file_name}", "w", newline="") as csvfile:
for row in anon_rows.as_csv():
csvfile.write(row)
console.print(f":thumbs_up:[green] Done - Your file is ./csv/{export_file_name}")
panel_group = Group(
Padding(
f"[green] scalingo --app trackdechets-production-api --region osc-secnum-fr1 run --file ./csv/{export_file_name} bash",
(1, 4),
),
Padding("[yellow] then : ", (1, 0)),
Padding(
f"[green] node ./dist/src/scripts/bin/importAnonymousCompany.js /tmp/uploads/{export_file_name}",
(1, 4),
),
)
console.print(
Panel(
panel_group,
title="[bold green]Now you may run these commands to import csv",
)
)
if __name__ == "__main__":
parse_validate_convert()
| MTES-MCT/trackdechets-xslx2csv | src/anonymous.py | anonymous.py | py | 2,299 | python | en | code | 1 | github-code | 90 |
37564762909 | import fileinput
def parse():
tape = []
for line in fileinput.input():
words = line.strip().split(" ")
cmd = words[0]
reg = words[1].strip(",")
args = [reg]
if len(words) > 2:
args.append(int(words[2]))
tape.append((cmd, args))
return tape
def run(tape, regs):
idx = 0
while idx < len(tape):
cmd, args = tape[idx]
reg = args[0]
if cmd == "hlf":
regs[reg] //= 2
elif cmd == "tpl":
regs[reg] *= 3
elif cmd == "inc":
regs[reg] += 1
elif cmd == "jmp":
offset = int(reg)
idx += offset
continue
elif cmd == "jie":
offset = args[1]
if regs[reg] % 2 == 0:
idx += offset
continue
elif cmd == "jio":
offset = args[1]
if regs[reg] == 1:
idx += offset
continue
idx += 1
return regs
def main():
tape = parse()
regs = run(tape, {"a": 0, "b": 0})
print(f"Part 1: {regs['b']}")
regs = run(tape, {"a": 1, "b": 0})
print(f"Part 2: {regs['b']}")
if __name__ == "__main__":
main()
| rodrigorahal/advent-of-code-2015 | 23/turing_lock.py | turing_lock.py | py | 1,241 | python | en | code | 0 | github-code | 90 |
20341096567 | # Hash Table, String, Breath-First Search
from collections import deque
class Solution:
def minMutation(self, startGene: str, endGene: str, bank: List[str]) -> int:
que = deque([startGene])
info = deque(bank)
count = 0
gene = {0: 'A', 1: 'C', 2: 'G', 3: 'T'}
visited = set()
# bfs
while que:
for _ in range(len(que)):
curr = que.popleft()
if curr == endGene: # endGene 까지 변형이 완료
return count
for idx, char in enumerate(curr): # 현재 유전자 문자열을 idx, char 로 순회
for mutate_gene in gene.values(): # 유전자 리스트 gene 의 값들 (A, C, G, T) 중에서 현재 char 과 다른 유전자로 변형
compare = list(curr) # mutation 을 위해 문자열에서 리스트로 변환
if mutate_gene != char: # 현재 유전자중 해당 idx 에서의 char 을 다른 유전자로 변형한다면
compare[idx] = mutate_gene # 해당 idx 의 유전자만 변형한 compare
next = ''.join(compare) # 다시 비교를 위해 리스트에서 문자열로 join
if next in info and next not in visited: # 만약 변형한 next 가 bank 에 존재하고 변형된 이력이 없는 유전자라면
que.append(next) # bfs 순회. 큐에 다음 변형된 유전자 next 를 append
visited.add(next) # 변형된 해당 유전자 방문처리
count += 1
return -1
| Stendhalsynd/algorithm-library | 0433-minimum-genetic-mutation/0433-minimum-genetic-mutation.py | 0433-minimum-genetic-mutation.py | py | 1,867 | python | ko | code | 0 | github-code | 90 |
38715945722 | import os
import sys
import math
from enum import Enum, unique
import shlex
import shutil
import signal
import time
from collections import namedtuple
import tempfile
import traceback
import configparser
import urllib.parse
import urllib.request
import gi
gi.require_version('Gtk', '3.0') # noqa: E402
gi.require_version('Gdk', '3.0') # noqa: E402
gi.require_version('Pango', '1.0') # noqa: E402
gi.require_version('GdkPixbuf', '2.0') # noqa: E402
from gi.repository import GObject, GLib, Gio, GdkPixbuf, Pango, Gdk, Gtk
# time.struct_time with nanoseconds
struct_time_ns = namedtuple('struct_time_ns',
['tm_year', 'tm_mon', 'tm_mday', 'tm_hour', 'tm_min', 'tm_sec', 'tm_wday',
'tm_yday', 'tm_isdst', 'tm_ns'])
class Configuration:
"""Set default configuration values and load/save configuration values."""
def __init__(self):
self._video_extensions = ('mp4', 'avi', 'mkv')
self._remove_subtitles = False
self._volume_increase = 3
self._audio_encoder = 'mp3' # Key for _audio_encoder_quality
self._audio_quality = 3 # Index for _audio_encoder_quality[_audio_encoder]
self._keep_original = False
self._output_prefix = '' # Only used if _keep_original == True
self._output_suffix = '_Vol-inc' # Only used if _keep_original == True
self._use_all_cpus = True
self._max_jobs = os.cpu_count() # Only used if _use_all_cpus == False
self._file_expl_show_hidden_files = False
self._file_expl_case_sensitive_sort = False
self._file_expl_activate_on_single_click = True
self._temp_file_prefix = 'ffmpeg_temp_'
self._ignore_temp_files = True
self._show_milliseconds = False
# Do not change configuration options below this line
self._file = os.path.join(os.path.expanduser("~"), '.config', 'increasevol') # FIXME: This is not portable.
self._required_cmd = ('ffprobe', 'ffmpeg')
self._cwd = GLib.get_home_dir()
self._win_maximized = False
self._win_width = 783
self._win_height = 309
self._file_expl_undo_size = 100 # Stack size for "back" button
self._paned_file_expl_position = 400
""" audio_encoder_quality:
Key: ffmpeg audio encoder (audio_encoder in _ffmpeg_increase_audio_cmd)
Value: list with valid quality values for the audio encoder, from lowest to highest quality
(audio_quality in _ffmpeg_increase_audio_cmd) """
self._audio_encoder_quality = {
'mp3': [9.9, 8, 5, 3, 0], # https://trac.ffmpeg.org/wiki/Encode/MP3
'aac': [0.1, 0.5, 1, 1.5, 2], # https://trac.ffmpeg.org/wiki/Encode/AAC
'libvorbis': [0, 2.5, 5, 7.5, 10], # https://ffmpeg.org/ffmpeg-codecs.html#libvorbis
'flac': [0, 0, 0, 0, 0], # https://ffmpeg.org/ffmpeg-codecs.html#flac-2 (flac doesn't have -q)
# 'libopus': [],
}
self._n_qualities = len(self._audio_encoder_quality[self._audio_encoder])
self._ffprobe_get_duration_cmd = 'ffprobe -v error -show_entries format=duration ' \
'-of default=noprint_wrappers=1:nokey=1 "{video_file_name}"'
self._ffmpeg_increase_audio_cmd = 'ffmpeg -hide_banner -y -i "{video_file_name_input}" -map 0 -c:v copy ' \
'{remove_subtitles_param} -c:s copy ' \
'-c:a {audio_encoder} -q:a {audio_quality} ' \
'-filter:a volume={volume_increase} ' \
'"{video_file_name_output}"'
self._load()
def _load(self):
"""Load configuration values from configuration file"""
temp_conf = configparser.ConfigParser()
temp_conf.read(self._file)
tmp = temp_conf.get('DEFAULT', 'video_extensions', fallback=self._video_extensions)
if isinstance(tmp, str):
tmp_tuple = tuple(tmp.split(','))
if len(tmp_tuple) > 0:
self._video_extensions = tmp_tuple
self._remove_subtitles = temp_conf.getboolean('DEFAULT', 'remove_subtitles', fallback=self._remove_subtitles)
self._cwd = temp_conf.get('DEFAULT', 'directory', fallback=self._cwd)
self._volume_increase = temp_conf.getfloat('DEFAULT', 'volume_increase', fallback=self._volume_increase)
self._audio_encoder = temp_conf.get('DEFAULT', 'audio_encoder', fallback=self._audio_encoder)
self._audio_quality = temp_conf.getint('DEFAULT', 'audio_quality', fallback=self._audio_quality)
self._keep_original = temp_conf.getboolean('DEFAULT', 'keep_original', fallback=self._keep_original)
self._output_prefix = temp_conf.get('DEFAULT', 'output_prefix', fallback=self._output_prefix)
self._output_suffix = temp_conf.get('DEFAULT', 'output_suffix', fallback=self._output_suffix)
self._use_all_cpus = temp_conf.getboolean('DEFAULT', 'use_all_cpus', fallback=self._use_all_cpus)
self._max_jobs = temp_conf.getint('DEFAULT', 'max_jobs', fallback=self._max_jobs)
self._file_expl_show_hidden_files = temp_conf.getboolean('DEFAULT', 'file_explorer_show_hidden_files',
fallback=self._file_expl_show_hidden_files)
self._file_expl_case_sensitive_sort = temp_conf.getboolean('DEFAULT',
'file_explorer_case_sensitive_sort',
fallback=self._file_expl_case_sensitive_sort)
self._file_expl_activate_on_single_click = temp_conf.getboolean('DEFAULT',
'file_explorer_activate_on_single_click',
fallback=
self._file_expl_activate_on_single_click)
self._temp_file_prefix = temp_conf.get('DEFAULT', 'temp_file_prefix', fallback=self._temp_file_prefix)
self._ignore_temp_files = temp_conf.getboolean('DEFAULT', 'ignore_temp_files', fallback=self._ignore_temp_files)
self._show_milliseconds = temp_conf.getboolean('DEFAULT', 'show_milliseconds', fallback=self._show_milliseconds)
self._paned_file_expl_position = temp_conf.getint('DEFAULT', 'paned_file_explorer_position',
fallback=self._paned_file_expl_position)
self._win_maximized = temp_conf.getboolean('DEFAULT', 'win_maximized', fallback=self._win_maximized)
self._win_width = temp_conf.getint('DEFAULT', 'win_width', fallback=self._win_width)
self._win_height = temp_conf.getint('DEFAULT', 'win_height', fallback=self._win_height)
def save(self):
"""Save configuration values to configuration file"""
temp_conf = configparser.ConfigParser()
temp_conf['DEFAULT'] = {
'directory': self._cwd,
'video_extensions': ','.join(self._video_extensions),
'remove_subtitles': self._remove_subtitles,
'volume_increase': self._volume_increase,
'audio_encoder': self._audio_encoder,
'audio_quality': self._audio_quality,
'keep_original': self._keep_original,
'output_prefix': self._output_prefix,
'output_suffix': self._output_suffix,
'use_all_cpus': self._use_all_cpus,
'max_jobs': self._max_jobs,
'file_explorer_show_hidden_files': self._file_expl_show_hidden_files,
'file_explorer_case_sensitive_sort': self._file_expl_case_sensitive_sort,
'file_explorer_activate_on_single_click': self._file_expl_activate_on_single_click,
'temp_file_prefix': self._temp_file_prefix,
'ignore_temp_files': self._ignore_temp_files,
'show_milliseconds': self._show_milliseconds,
'paned_file_explorer_position': self._paned_file_expl_position,
'win_maximized': self._win_maximized,
'win_width': self._win_width,
'win_height': self._win_height
}
try:
with open(self._file, 'w') as configfile:
temp_conf.write(configfile)
except OSError:
traceback.print_exc()
error_message(text='Error',
secondary_text='Error saving configuration',
modal=True)
@property
def file(self) -> str:
return self._file
@property
def required_cmd(self) -> tuple:
return self._required_cmd
@property
def cwd(self) -> str:
return self._cwd
@cwd.setter
def cwd(self, val: str):
self._cwd = val
@property
def video_extensions(self) -> tuple:
return self._video_extensions
@video_extensions.setter
def video_extensions(self, val: tuple):
self._video_extensions = val
@property
def remove_subtitles(self) -> bool:
return self._remove_subtitles
@remove_subtitles.setter
def remove_subtitles(self, val: bool):
self._remove_subtitles = val
@property
def volume_increase(self) -> float:
return self._volume_increase
@volume_increase.setter
def volume_increase(self, val: float):
self._volume_increase = val
@property
def audio_encoder(self) -> str:
return self._audio_encoder
@audio_encoder.setter
def audio_encoder(self, val: str):
self._audio_encoder = val
@property
def audio_encoders(self) -> list:
return list(self._audio_encoder_quality)
@property
def audio_quality(self) -> int:
return self._audio_quality
@audio_quality.setter
def audio_quality(self, val: int):
self._audio_quality = val
@property
def audio_encoder_quality(self) -> int:
return self._audio_encoder_quality[self._audio_encoder][self._audio_quality]
@property
def n_qualities(self) -> int:
return self._n_qualities
@property
def keep_original(self) -> bool:
return self._keep_original
@keep_original.setter
def keep_original(self, val: bool):
self._keep_original = val
@property
def output_prefix(self) -> str:
return self._output_prefix
@output_prefix.setter
def output_prefix(self, val: str):
self._output_prefix = val
@property
def output_suffix(self) -> str:
return self._output_suffix
@output_suffix.setter
def output_suffix(self, val: str):
self._output_suffix = val
@property
def use_all_cpus(self) -> bool:
return self._use_all_cpus
@use_all_cpus.setter
def use_all_cpus(self, val: bool):
self._use_all_cpus = val
@property
def max_jobs(self) -> int:
return self._max_jobs
@max_jobs.setter
def max_jobs(self, val: int):
self._max_jobs = val
@property
def paned_file_expl_position(self) -> int:
return self._paned_file_expl_position
@paned_file_expl_position.setter
def paned_file_expl_position(self, val: int):
self._paned_file_expl_position = val
@property
def file_expl_show_hidden_files(self) -> bool:
return self._file_expl_show_hidden_files
@file_expl_show_hidden_files.setter
def file_expl_show_hidden_files(self, val: bool):
self._file_expl_show_hidden_files = val
@property
def file_expl_case_sensitive_sort(self) -> bool:
return self._file_expl_case_sensitive_sort
@file_expl_case_sensitive_sort.setter
def file_expl_case_sensitive_sort(self, val: bool):
self._file_expl_case_sensitive_sort = val
@property
def file_expl_undo_size(self) -> int:
return self._file_expl_undo_size
@property
def file_expl_activate_on_single_click(self) -> bool:
return self._file_expl_activate_on_single_click
@file_expl_activate_on_single_click.setter
def file_expl_activate_on_single_click(self, val: bool):
self._file_expl_activate_on_single_click = val
@property
def temp_file_prefix(self) -> str:
return self._temp_file_prefix
@temp_file_prefix.setter
def temp_file_prefix(self, val: str):
self._temp_file_prefix = val
@property
def ignore_temp_files(self) -> bool:
return self._ignore_temp_files
@ignore_temp_files.setter
def ignore_temp_files(self, val: bool):
self._ignore_temp_files = val
@property
def show_milliseconds(self) -> bool:
return self._show_milliseconds
@show_milliseconds.setter
def show_milliseconds(self, val: bool):
self._show_milliseconds = val
@property
def ffprobe_get_duration_cmd(self) -> str:
return self._ffprobe_get_duration_cmd
@property
def ffmpeg_increase_audio_cmd(self) -> str:
return self._ffmpeg_increase_audio_cmd
@property
def win_maximized(self) -> bool:
return self._win_maximized
@win_maximized.setter
def win_maximized(self, val: bool):
self._win_maximized = val
@property
def win_width(self) -> int:
return self._win_width
@win_width.setter
def win_width(self, val: int):
self._win_width = val
@property
def win_height(self) -> int:
return self._win_height
@win_height.setter
def win_height(self, val: int):
self._win_height = val
class FileExplorer(Gtk.VBox):
"""
The central panel of the main window is a file explorer.
Copied from:
https://github.com/GNOME/pygobject/blob/master/examples/demo/demos/IconView/iconviewbasics.py
"""
(COL_PATH,
COL_DISPLAY_NAME,
COL_PIXBUF,
COL_IS_DIRECTORY,
NUM_COLS) = range(5)
@GObject.Signal(arg_types=[str, ])
def video_selected(self, path):
pass
def __init__(self):
super().__init__()
if not os.path.isdir(config.cwd):
config.cwd = GLib.get_home_dir()
if not os.path.isdir(config.cwd):
config.cwd = '/' # FIXME: This is not portable.
self._parent_dir = config.cwd
self._locations = []
self._locations_showed_element = 0
self._locations_init(self._parent_dir)
# create the store and fill it with content
self._pixbuf_lookup = {}
self._store = self._create_store()
self._fill_store()
self._tool_bar = Gtk.Toolbar()
self.pack_start(self._tool_bar, False, False, 0)
self._back_button = Gtk.ToolButton(icon_name='go-previous')
self._back_button.set_sensitive(False)
self._tool_bar.insert(self._back_button, -1)
self._forward_button = Gtk.ToolButton(icon_name='go-next')
self._forward_button.set_sensitive(False)
self._tool_bar.insert(self._forward_button, -1)
self._up_button = Gtk.ToolButton(icon_name='go-up')
self._up_button.set_sensitive(self._parent_dir != '/') # FIXME: This is not portable.
self._tool_bar.insert(self._up_button, -1)
self._refresh_button = Gtk.ToolButton(icon_name='view-refresh')
self._tool_bar.insert(self._refresh_button, -1)
self._home_button = Gtk.ToolButton(icon_name='go-home')
self._tool_bar.insert(self._home_button, -1)
self._back_button.connect('clicked', self._back_clicked)
self._forward_button.connect('clicked', self._forward_clicked)
self._up_button.connect('clicked', self._up_clicked)
self._refresh_button.connect('clicked', self.refresh_clicked)
self._home_button.connect('clicked', self._home_clicked)
self._separator = Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL)
self.pack_start(self._separator, False, False, 0)
self._location_label = Gtk.Label(label=self._parent_dir)
self._location_label.set_selectable(True)
self._location_label.set_xalign(0)
self._location_label.set_ellipsize(Pango.EllipsizeMode.START)
self._location_label.set_margin_top(5)
self._location_label.set_margin_bottom(5)
self.pack_start(self._location_label, False, False, 0)
self._sw = Gtk.ScrolledWindow()
self._sw.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
self._sw.set_policy(Gtk.PolicyType.AUTOMATIC,
Gtk.PolicyType.AUTOMATIC)
self.pack_start(self._sw, True, True, 0)
self._icon_view = Gtk.IconView(model=self._store)
self._icon_view.set_selection_mode(Gtk.SelectionMode.MULTIPLE)
self._icon_view.set_activate_on_single_click(config.file_expl_activate_on_single_click)
self._icon_view.set_text_column(self.COL_DISPLAY_NAME)
self._icon_view.set_pixbuf_column(self.COL_PIXBUF)
self._icon_view.connect('item-activated', self._item_activated, self._store)
self._icon_view.enable_model_drag_source(Gdk.ModifierType.BUTTON1_MASK, [], Gdk.DragAction.COPY)
self._icon_view.drag_source_add_uri_targets()
self._icon_view.connect("drag-data-get", self._on_drag_data_get)
self._sw.add(self._icon_view)
def _on_drag_data_get(self, _widget, _drag_context, data, _info, _time):
uris = []
for selected_path in self._icon_view.get_selected_items():
selected_iter = self._store.get_iter(selected_path)
uri = self._store.get_value(selected_iter, self.COL_PATH)
uri = urllib.parse.urljoin('file:', urllib.request.pathname2url(uri))
uris.append(uri)
data.set_uris(uris)
    @property
    def cwd(self):
        """The directory currently shown by the widget (absolute path string)."""
        return self._parent_dir
    def set_single_click(self, val: bool):
        """Enable or disable item activation on a single click (vs. double click)."""
        self._icon_view.set_activate_on_single_click(val)
def open_location_from_place_sidebar(self, _places_sidebar, location, _open_flags):
if location.get_path() is None:
return
self._parent_dir = location.get_path()
self._locations_push(self._parent_dir)
self._refresh(self._parent_dir)
    def refresh_clicked(self, _item):
        """Re-read the current directory. It is used in AppWindow."""
        self._fill_store()
    def _back_clicked(self, _item):
        """Go one step back in the navigation history."""
        self._parent_dir = self._locations_pop()
        self._refresh(self._parent_dir)
    def _forward_clicked(self, _item):
        """Go one step forward in the navigation history."""
        self._parent_dir = self._locations_forward()
        self._refresh(self._parent_dir)
    def _up_clicked(self, _item):
        """Navigate to the parent of the current directory."""
        self._parent_dir = os.path.dirname(self._parent_dir)
        self._locations_push(self._parent_dir)
        self._refresh(self._parent_dir)
    def _home_clicked(self, _item):
        """Navigate to the user's home directory."""
        self._parent_dir = GLib.get_home_dir()
        self._locations_push(self._parent_dir)
        self._refresh(self._parent_dir)
def _item_activated(self, _icon_view, tree_path, store):
iter_ = store.get_iter(tree_path)
(path, is_dir) = store.get(iter_, self.COL_PATH, self.COL_IS_DIRECTORY)
if not is_dir:
self.emit('video_selected', path)
else:
self._parent_dir = path
self._locations_push(self._parent_dir)
self._refresh(self._parent_dir)
    def _refresh(self, path: str):
        """Reload the store and update the location label and 'up' button for *path*."""
        self._fill_store()
        self._location_label.set_label(path)
        self._up_button.set_sensitive(path != '/')  # FIXME: This is not portable.
# Methods for the locations stack for "back" and "forward" buttons
    def _locations_init(self, path: str):
        """Seed the navigation history stack with *path* as its first entry."""
        self._locations_showed_element = 1
        self._locations.append(path)
    def _locations_push(self, path: str):
        """Push *path* onto the history stack, discarding any 'forward' tail.

        ``_locations_showed_element`` is the 1-based count of history entries
        up to (and including) the one currently shown.
        """
        self._back_button.set_sensitive(True)
        if self._locations_showed_element < len(self._locations):
            # We had navigated back; drop the now-invalid forward history.
            self._locations = self._locations[:self._locations_showed_element]
            self._forward_button.set_sensitive(False)
        if self._locations_showed_element == config.file_expl_undo_size:
            # History is full: drop the oldest entry instead of growing.
            self._locations.pop(0)
        else:
            self._locations_showed_element += 1
        self._locations.append(path)
    def _locations_pop(self) -> str:
        """Step one entry back in the history and return the new current path."""
        self._forward_button.set_sensitive(True)
        self._locations_showed_element -= 1
        if self._locations_showed_element == 1:
            # Reached the oldest entry; nothing further back.
            self._back_button.set_sensitive(False)
        return self._locations[self._locations_showed_element - 1]
    def _locations_forward(self) -> str:
        """Step one entry forward in the history and return that path."""
        if self._locations_showed_element == len(self._locations) - 1:
            # About to show the newest entry; no further forward after this.
            self._forward_button.set_sensitive(False)
        if self._locations_showed_element == 1:
            self._back_button.set_sensitive(True)
        path = self._locations[self._locations_showed_element]
        self._locations_showed_element += 1
        return path
def _sort_func(self, store, a_iter, b_iter, _user_data):
(a_name, a_is_dir) = store.get(a_iter,
self.COL_DISPLAY_NAME,
self.COL_IS_DIRECTORY)
(b_name, b_is_dir) = store.get(b_iter,
self.COL_DISPLAY_NAME,
self.COL_IS_DIRECTORY)
if a_name is None:
a_name = ''
if b_name is None:
b_name = ''
if (not a_is_dir) and b_is_dir:
return 1
elif a_is_dir and (not b_is_dir):
return -1
else:
if not config.file_expl_case_sensitive_sort:
a_name = a_name.lower()
b_name = b_name.lower()
if a_name > b_name:
return 1
elif a_name < b_name:
return -1
else:
return 0
    def _create_store(self):
        """Build the icon-view model: (path, display name, pixbuf, is-directory)."""
        store = Gtk.ListStore(str, str, GdkPixbuf.Pixbuf, bool)
        # set sort column and function
        store.set_default_sort_func(self._sort_func)
        store.set_sort_column_id(-1, Gtk.SortType.ASCENDING)
        return store
    def _file_to_icon_pixbuf(self, path):
        """Return a GdkPixbuf icon for *path*, caching per GIcon.

        May return None when no theme icon could be loaded.
        """
        pixbuf = None
        # get the theme icon
        f = Gio.file_new_for_path(path)
        info = f.query_info(Gio.FILE_ATTRIBUTE_STANDARD_ICON,
                            Gio.FileQueryInfoFlags.NONE,
                            None)
        gicon = info.get_icon()
        # check to see if it is an image format we support
        # NOTE(review): `f` is rebound here from the Gio.File above to a
        # GdkPixbuf.PixbufFormat — intentional in the original demo code.
        for f in GdkPixbuf.Pixbuf.get_formats():
            for mime_type in f.get_mime_types():
                content_type = Gio.content_type_from_mime_type(mime_type)
                if content_type is not None:
                    break
            format_gicon = Gio.content_type_get_icon(content_type)
            if format_gicon.equal(gicon):
                # gicon = f.icon_new()
                # gicon = info.set_icon(format_gicon)
                info.set_icon(format_gicon)
                break
        # NOTE(review): `info.set_icon(...)` above does not rebind the local
        # `gicon`, so the cache below still keys on the original icon —
        # confirm this is the intended behavior.
        if gicon in self._pixbuf_lookup:
            return self._pixbuf_lookup[gicon]
        if isinstance(gicon, Gio.ThemedIcon):
            # Try each candidate theme name until one loads.
            names = gicon.get_names()
            icon_theme = Gtk.IconTheme.get_default()
            for name in names:
                try:
                    pixbuf = icon_theme.load_icon(name, 64, 0)
                    break
                except GLib.GError:
                    pass
            self._pixbuf_lookup[gicon] = pixbuf
        elif isinstance(gicon, Gio.FileIcon):
            icon_file = gicon.get_file()
            path = icon_file.get_path()
            pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(path, 72, 72)
            self._pixbuf_lookup[gicon] = pixbuf
        return pixbuf
def _fill_store(self):
self._store.clear()
for name in os.listdir(self._parent_dir):
if ((config.ignore_temp_files and name.startswith(config.temp_file_prefix)) or
(not config.file_expl_show_hidden_files and name.startswith('.'))): # FIXME: This is not portable.
continue
path = os.path.join(self._parent_dir, name)
is_dir = os.path.isdir(path)
pixbuf = self._file_to_icon_pixbuf(path)
self._store.append([path, name, pixbuf, is_dir])
@unique
class JobStatus(Enum):
    """Job statuses. It is used in Job and JobsQueue."""
    # Plain int values. The original had trailing commas on the first three
    # members ("RUNNING = 1,"), which silently made their values the tuples
    # (1,), (2,), (3,) while FINISHED stayed the int 4.
    RUNNING = 1
    QUEUED = 2
    FAILED = 3
    FINISHED = 4
# Icon displayed in JobsListWidget for each job status. Assumes Adwaita theme:
# https://gitlab.gnome.org/GNOME/adwaita-icon-theme/-/tree/master/Adwaita
job_status_pixbuf = {
    JobStatus.QUEUED: 'document-open-recent-symbolic',
    JobStatus.RUNNING: 'emblem-system-symbolic',
    JobStatus.FAILED: 'computer-fail-symbolic',
    JobStatus.FINISHED: 'emblem-ok-symbolic'
}
# Column names for the Gtk.ListStore model in JobsListWidget
# Used in classes: Job, JobsQueue and JobsListWidget
# Only fields with COLUMN are displayed
# NOTE: the order of the first 12 names must match the column types passed
# to Gtk.ListStore() in JobsListWidget.__init__; JOB_LIST_NUM_COLUMNS is the
# total column count.
(JOB_LIST_ID,
 JOB_LIST_COLUMN_FILENAME,
 JOB_LIST_COLUMN_STATUS,
 JOB_LIST_COLUMN_PROGRESS,
 JOB_LIST_COLUMN_ESTTIME,
 JOB_LIST_START_TIME,
 JOB_LIST_END_TIME,
 JOB_LIST_ERROR_STRING,
 JOB_LIST_VOLUME_INC,
 JOB_LIST_AUDIO_ENC,
 JOB_LIST_KEEP_ORIGINAL,
 JOB_LIST_OUTPUT_FILE,
 JOB_LIST_NUM_COLUMNS
 ) = range(13)
class Job(GObject.GObject):
    """
    There is a Job instance for every job showed in JobsListWidget.
    It is responsible for executing the ffprobe and ffmpeg commands:
    - ffprobe returns the duration of the video in seconds.
    - ffmpeg increases the audio volume of the video. For every ffmpeg
    output line, this class update the job state showed in JobsListWidget.
    """

    # Emitted with the input file path when the job completes successfully.
    @GObject.Signal(arg_types=[str, ])
    def job_finished(self, path):
        pass

    # Emitted with the input file path and an error text when the job fails.
    @GObject.Signal(arg_types=[str, str, ])
    def job_finished_with_error(self, path, error):
        pass

    def __init__(self,
                 id_: int,
                 file_name: str = '',
                 model: Gtk.ListStore = None):
        """Create a job in QUEUED state and append its row to *model*."""
        super().__init__()
        self.id_ = id_
        self.file_name = file_name
        self._output_file_name = ''
        self._model = model
        self._ffprobe = None
        self._ffmpeg = None
        self._row = self._model.append()
        self._duration = 0
        self._tempOutput = None
        # Snapshot the relevant config values at creation time, so later
        # preference changes do not affect an already-created job.
        self._volume_increase = config.volume_increase
        self._audio_encoder = config.audio_encoder
        self._audio_quality = config.audio_encoder_quality
        self._remove_subtitles = config.remove_subtitles
        self._keep_original = config.keep_original
        self._output_prefix = config.output_prefix
        self._output_suffix = config.output_suffix
        self._temp_file_prefix = config.temp_file_prefix
        # Update the jobs list widget
        self._model[self._row][JOB_LIST_COLUMN_FILENAME] = self.file_name
        self._model[self._row][JOB_LIST_COLUMN_STATUS] = job_status_pixbuf[JobStatus.QUEUED]
        self._model[self._row][JOB_LIST_COLUMN_PROGRESS] = 0
        self._model[self._row][JOB_LIST_COLUMN_ESTTIME] = ''
        # Update hidden model fields
        self._model[self._row][JOB_LIST_ID] = self.id_
        self._model[self._row][JOB_LIST_START_TIME] = 0
        self._model[self._row][JOB_LIST_END_TIME] = 0
        self._model[self._row][JOB_LIST_AUDIO_ENC] = self._audio_encoder
        self._model[self._row][JOB_LIST_VOLUME_INC] = self._volume_increase
        self._model[self._row][JOB_LIST_KEEP_ORIGINAL] = self._keep_original
        self._model[self._row][JOB_LIST_OUTPUT_FILE] = self._output_file_name
        self._model[self._row][JOB_LIST_ERROR_STRING] = ''

    # def __del__(self):
    #     print(f'__del__ Job id: {self.id_} file: {self.file_name}')

    def get_duration(self):
        """Launch ffprobe to get video duration. This method is the first step of the chain."""
        self._model[self._row][JOB_LIST_COLUMN_STATUS] = job_status_pixbuf[JobStatus.RUNNING]
        self._model[self._row][JOB_LIST_START_TIME] = time.time_ns()
        self._ffprobe = FfprobeLauncher(self.file_name)
        self._ffprobe.connect('finished', self._increase_volume)
        self._ffprobe.connect('finished_with_error', self._manage_error)
        GLib.idle_add(self._ffprobe.run)

    def _increase_volume(self, _object, duration: float):
        """Launch ffmpeg to increase the volume."""
        self._ffprobe = None
        self._duration = duration
        # NOTE(review): a zero duration returns silently, leaving the row in
        # RUNNING state — confirm this is intended.
        if self._duration == 0:
            return
        # Check output name doesn't exist if user wants to keep the original file
        if self._keep_original:
            directory, name = os.path.split(self.file_name)
            name, ext = os.path.splitext(name)
            self._output_file_name = directory + os.sep + self._output_prefix + name + self._output_suffix + ext
            self._model[self._row][JOB_LIST_OUTPUT_FILE] = self._output_file_name
            if os.path.exists(self._output_file_name):
                self._manage_error(None, f'Output file "{self._output_file_name}" exists.', False)
                return
        # Choose temporary output file
        suffix = os.path.splitext(self.file_name)[1]
        directory = os.path.dirname(self.file_name)
        try:
            handle, self._tempOutput = tempfile.mkstemp(dir=directory,
                                                        suffix=suffix,
                                                        prefix=self._temp_file_prefix)
        except Exception as e:
            self._manage_error(None, 'Error creating temporal file:\n' + str(e), True)
            return
        else:
            # ffmpeg writes the file itself; only the name was needed.
            os.close(handle)
        self._ffmpeg = FfmpegLauncher(self.file_name, self._tempOutput, self._volume_increase, self._audio_encoder,
                                      self._audio_quality, self._remove_subtitles, self._duration)
        self._ffmpeg.connect('update_state', self._update_conversion_state)
        self._ffmpeg.connect('finished', self._conversion_finished)
        self._ffmpeg.connect('finished_with_error', self._manage_error)
        GLib.idle_add(self._ffmpeg.run)

    def _update_conversion_state(self, _object, progress_percent: float):
        """Update the progress bar and the estimated remaining time in the model."""
        if progress_percent == 0:
            est_remaining = 0
        else:
            # Linear extrapolation from time spent so far.
            spent_time = time.time_ns() - self._model[self._row][JOB_LIST_START_TIME]
            est_remaining = spent_time * (100 - progress_percent) / progress_percent
        self._model[self._row][JOB_LIST_COLUMN_PROGRESS] = progress_percent
        self._model[self._row][JOB_LIST_COLUMN_ESTTIME] = format_time_ns(est_remaining)

    def _conversion_finished(self, _object):
        """Mark the job FINISHED and move the temp output into its final place."""
        self._ffmpeg = None
        self._model[self._row][JOB_LIST_COLUMN_STATUS] = job_status_pixbuf[JobStatus.FINISHED]
        self._model[self._row][JOB_LIST_COLUMN_PROGRESS] = 100
        self._model[self._row][JOB_LIST_END_TIME] = time.time_ns()
        spent_time = self._model[self._row][JOB_LIST_END_TIME] - self._model[self._row][JOB_LIST_START_TIME]
        self._model[self._row][JOB_LIST_COLUMN_ESTTIME] = f'Total: {format_time_ns(spent_time)}'
        if self._keep_original:
            # Rename temp output to the new (prefixed/suffixed) file name.
            if os.path.exists(self._output_file_name):
                self._manage_error(None, f'File "{self._output_file_name}" exists.\n'
                                         f'Not renaming "{self._tempOutput}" to\n"{self._output_file_name}"',
                                   False)
            else:
                try:
                    os.rename(self._tempOutput, self._output_file_name)
                except Exception as e:
                    self._manage_error(None, f'Error renaming "{self._tempOutput}" to\n'
                                             f'"{self._output_file_name}":\n\n{str(e)}', False)
                finally:
                    self.emit('job_finished', self.file_name)
        else:
            # self._keep_original == False
            # Replace the original file with the temp output.
            try:
                os.remove(self.file_name)
            except Exception as e:
                self._manage_error(None, f'Error removing "{self.file_name}"\n'
                                         f'Preserving temporal output file:\n'
                                         f'"{self._tempOutput}"\n\n'
                                         f'{str(e)}',
                                   False)
            else:
                try:
                    os.rename(self._tempOutput, self.file_name)
                except Exception as e:
                    self._manage_error(None, f'Error renaming "{self._tempOutput}" to\n'
                                             f'"{self.file_name}":\n\n'
                                             f'{str(e)}',
                                       False)
                finally:
                    self.emit('job_finished', self.file_name)

    def _manage_error(self, _object, error: str, remove_temp_output: bool):
        """Mark the job FAILED, record *error*, optionally delete the temp output."""
        self._ffprobe = None
        self._ffmpeg = None
        self._model[self._row][JOB_LIST_COLUMN_STATUS] = job_status_pixbuf[JobStatus.FAILED]
        self._model[self._row][JOB_LIST_COLUMN_ESTTIME] = '--:--:--'
        self._model[self._row][JOB_LIST_END_TIME] = time.time_ns()
        self._model[self._row][JOB_LIST_ERROR_STRING] = error
        if remove_temp_output and self._tempOutput is not None and os.path.exists(self._tempOutput):
            try:
                os.remove(self._tempOutput)
            except OSError:
                pass
        self.emit('job_finished_with_error', self.file_name, error)

    def kill(self):
        """Kill whichever child process (ffprobe or ffmpeg) is currently running."""
        if self._ffprobe is not None:
            self._ffprobe.kill()
        if self._ffmpeg is not None:
            self._ffmpeg.kill()
class JobsQueue(GObject.GObject):
    """
    Class instantiated in main. Controls the number of jobs running at
    once based on config.max_jobs. If a job ends with error shows a
    window with the error text.
    """

    # Emitted every time a job finishes (successfully or not).
    @GObject.Signal()
    def job_finished(self):
        pass

    def __init__(self):
        super().__init__()
        self._model = None          # Gtk.ListStore shared with JobsListWidget
        self._job_id = 0            # monotonically increasing id generator
        self._job_queue = []        # jobs waiting for a free slot
        self._running_jobs = []     # jobs currently being processed

    def set_model(self, model: Gtk.ListStore):
        """Attach the Gtk.ListStore that backs the jobs list widget."""
        self._model = model

    def add_job(self, path: str):
        """Launch a job for *path*, or queue it when all slots are busy.

        Duplicated paths (already queued or running) are rejected with an
        error dialog. Does nothing until a model has been set.
        """
        if self._model is None:
            return
        if self._is_queued_or_running(path):
            error_message(text='Duplicated entry',
                          secondary_text=f'Processing:\n{path}\n\n'
                          'There is already a queued or running entry with this path.')
        elif len(self._running_jobs) >= config.max_jobs:
            self._queue_job(path)
        else:
            self._launch_job(path)

    def remove_jobs(self, job_id_list: list):
        """Remove finished/failed/queued jobs with the given ids from the model.

        Running jobs are left untouched (they must be stopped first).
        """
        wanted_ids = set(job_id_list)
        # Collect the rows first: removing rows while iterating the model can
        # skip entries.
        iters_to_remove = []
        for row in self._model:
            id_ = row[JOB_LIST_ID]
            if id_ not in wanted_ids:
                continue
            status = row[JOB_LIST_COLUMN_STATUS]
            if status in (job_status_pixbuf[JobStatus.FAILED],
                          job_status_pixbuf[JobStatus.FINISHED]):
                iters_to_remove.append(row.iter)
            elif status == job_status_pixbuf[JobStatus.QUEUED]:
                # Also drop the pending Job object from the queue.
                self._job_queue = [j for j in self._job_queue if j.id_ != id_]
                iters_to_remove.append(row.iter)
            elif status == job_status_pixbuf[JobStatus.RUNNING]:
                pass
                # self._kill_job(id_)
                # self._model.remove(row.iter)
            else:
                raise ValueError('Unexpected status')
        for iter_ in iters_to_remove:
            self._model.remove(iter_)

    def force_launch_queued_jobs(self, job_id_list: list):
        """Start the given queued jobs now, ignoring the config.max_jobs limit."""
        for job in [j for j in self._job_queue if j.id_ in job_id_list]:
            self._job_queue.remove(job)
            self._running_jobs.append(job)
            job.get_duration()

    def launch_again_failed_jobs(self, job_id_list: list):
        """Re-submit the files of the given (failed) jobs as brand-new jobs."""
        for row in self._model:
            if row[JOB_LIST_ID] in job_id_list:
                self.add_job(row[JOB_LIST_COLUMN_FILENAME])

    def kill_jobs(self, job_id_list: list):
        """Kill the running jobs whose ids are in *job_id_list*."""
        for job in self._running_jobs:
            if job.id_ in job_id_list:
                job.kill()

    def check_queue(self):
        """Start queued jobs while there are free running slots."""
        while self._job_queue and len(self._running_jobs) < config.max_jobs:
            self._dequeue_job()

    def _is_queued_or_running(self, path: str) -> bool:
        """Return True if *path* already has a queued or running job."""
        return any(job.file_name == path
                   for job in self._running_jobs + self._job_queue)

    def _next_job_id(self) -> int:
        """Return a fresh, unique job id."""
        id_ = self._job_id
        self._job_id += 1
        return id_

    def _new_job(self, path: str):
        """Create a Job bound to the model and wire up its completion signals."""
        job = Job(id_=self._next_job_id(), file_name=path, model=self._model)
        job.connect('job_finished', self._finished_job)
        job.connect('job_finished_with_error', self._finished_with_error_job)
        return job

    def _launch_job(self, path: str):
        """Create a job for *path* and start it immediately."""
        job = self._new_job(path)
        self._running_jobs.append(job)
        job.get_duration()

    def _queue_job(self, path: str):
        """Create a job for *path* and park it in the waiting queue."""
        self._job_queue.append(self._new_job(path))

    def _dequeue_job(self):
        """Move the oldest queued job to the running list and start it."""
        job = self._job_queue.pop(0)
        self._running_jobs.append(job)
        job.get_duration()

    def _finished_job(self, job, _path: str):
        """Handler for Job 'job_finished': free the slot and refill from the queue."""
        self._running_jobs.remove(job)
        self.check_queue()
        self.emit('job_finished')

    def _finished_with_error_job(self, job, path: str, error: str):
        """Handler for Job 'job_finished_with_error': free the slot and show the error."""
        self._finished_job(job, path)
        error_message(text='Error processing file',
                      secondary_text=f'Processing:\n{path}\n\nError:\n{error}')
class JobsListWidget(Gtk.ScrolledWindow):
    """
    The right panel is a jobs list made with Gtk.TreeView.
    Modified from:
    https://github.com/GNOME/pygobject/blob/master/examples/demo/demos/TreeView/liststore.py
    Tooltip general idea:
    https://athenajc.gitbooks.io/python-gtk-3-api/content/gtk-group/gtktooltip.html
    Context menu idea:
    https://docs.gtk.org/gtk3/treeview-tutorial.html#context-menus-on-right-click
    How to make dynamic menus using Gio.Menu (Gtk.Menu is deprecated):
    https://discourse.gnome.org/t/how-to-create-menus-for-apps-using-python/2413/22
    """

    def __init__(self):
        super().__init__()
        self.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
        self.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        # Column types in JOB_LIST_* order (see the constants above).
        self._model = Gtk.ListStore(int,      # JOB_LIST_ID
                                    str,      # JOB_LIST_COLUMN_FILENAME
                                    str,      # JOB_LIST_COLUMN_STATUS (icon name)
                                    int,      # JOB_LIST_COLUMN_PROGRESS
                                    str,      # JOB_LIST_COLUMN_ESTTIME
                                    float,    # JOB_LIST_START_TIME (ns)
                                    float,    # JOB_LIST_END_TIME (ns)
                                    str,      # JOB_LIST_ERROR_STRING
                                    float,    # JOB_LIST_VOLUME_INC
                                    str,      # JOB_LIST_AUDIO_ENC
                                    bool,     # JOB_LIST_KEEP_ORIGINAL
                                    str)      # JOB_LIST_OUTPUT_FILE
        self._treeview = Gtk.TreeView(model=self._model, rubber_banding=True, has_tooltip=True)
        self._treeview.set_search_column(JOB_LIST_COLUMN_FILENAME)
        self._add_columns(self._treeview)
        self.add(self._treeview)
        self._tv_selection = self._treeview.get_selection()
        self._tv_selection.set_mode(Gtk.SelectionMode.MULTIPLE)
        # Context-menu actions. Their "activate" handlers are (re)connected on
        # every right click, capturing the jobs under the pointer/selection.
        self._remove_queued_action = Gio.SimpleAction(name="remove_queued", parameter_type=None, enabled=True)
        self._remove_failed_action = Gio.SimpleAction(name="remove_failed", parameter_type=None, enabled=True)
        self._remove_finished_action = Gio.SimpleAction(name="remove_finished", parameter_type=None, enabled=True)
        self._launch_queued_action = Gio.SimpleAction(name="launch_queued", parameter_type=None, enabled=True)
        self._launch_failed_action = Gio.SimpleAction(name="launch_failed", parameter_type=None, enabled=True)
        self._stop_action = Gio.SimpleAction(name="stop", parameter_type=None, enabled=True)
        # Handler ids of the current "activate" connections (None = not connected).
        self._remove_queued_action_handler = None
        self._remove_failed_action_handler = None
        self._remove_finished_action_handler = None
        self._launch_queued_action_handler = None
        self._launch_failed_action_handler = None
        self._stop_action_handler = None
        self._action_group = Gio.SimpleActionGroup()
        self._action_group.add_action(self._remove_queued_action)
        self._action_group.add_action(self._remove_failed_action)
        self._action_group.add_action(self._remove_finished_action)
        self._action_group.add_action(self._launch_queued_action)
        self._action_group.add_action(self._launch_failed_action)
        self._action_group.add_action(self._stop_action)
        self._treeview.insert_action_group('app', self._action_group)
        self._treeview.connect('button-press-event', self._on_button_press)
        self._treeview.connect('query-tooltip', self._on_query_tooltip)
        # Accept file URIs dropped onto the list.
        self._treeview.enable_model_drag_dest([], Gdk.DragAction.COPY)
        self._treeview.drag_dest_add_uri_targets()
        self._treeview.connect("drag-data-received", self._on_drag_data_received)
        jq.set_model(self._model)

    def _on_button_press(self, widget, event):
        """Open the context menu on right click; propagate everything else."""
        # single click with the right mouse button?
        if event.type == Gdk.EventType.BUTTON_PRESS and event.button == 3:
            self._view_popup_menu(widget, event)
            return Gdk.EVENT_STOP
        return Gdk.EVENT_PROPAGATE

    def _view_popup_menu(self, _widget, event):
        """Build and show the right-click popover for the row(s) under the mouse.

        Single-row mode targets the row under the pointer; multi-row mode (the
        pointer is over a selected row of a multiple selection) groups actions
        by job status.
        """
        # path, column, cell_x, cell_y = self._treeview.get_path_at_pos(event.x, event.y)
        returned_tuple = self._treeview.get_path_at_pos(event.x, event.y)
        if returned_tuple is None:
            return
        path, *_ = returned_tuple
        n_selected = self._tv_selection.count_selected_rows()
        popover_menu = Gio.Menu()
        # If one or no rows are selected or
        # multiple rows are selected but the mouse is not over a selected row
        if (n_selected <= 1 or
                (n_selected > 1 and not self._tv_selection.path_is_selected(path))):
            id_, file_name, status = self._model.get(self._model.get_iter(path),
                                                     JOB_LIST_ID,
                                                     JOB_LIST_COLUMN_FILENAME,
                                                     JOB_LIST_COLUMN_STATUS)
            file_name = os.path.basename(file_name)
            file_name = file_name.replace('_', '__')  # Avoid use _ as menu accelerator mark
            if status == job_status_pixbuf[JobStatus.QUEUED]:
                popover_menu.append(f'Remove from queue {file_name}', 'app.remove_queued')
                popover_menu.append(f'Launch now {file_name}', 'app.launch_queued')
            elif status == job_status_pixbuf[JobStatus.FAILED]:
                popover_menu.append(f'Remove from list {file_name}', 'app.remove_queued')  # jq.remove_jobs works
                popover_menu.append(f'Launch again {file_name}', 'app.launch_failed')
            elif status == job_status_pixbuf[JobStatus.FINISHED]:
                popover_menu.append(f'Remove from list {file_name}', 'app.remove_queued')
            elif status == job_status_pixbuf[JobStatus.RUNNING]:
                popover_menu.append(f'Stop processing {file_name}', 'app.stop')
            else:
                raise ValueError('Unexpected status')
            # Reconnect each action's handler so it captures this row's id.
            if self._remove_queued_action_handler is not None:
                self._remove_queued_action.disconnect(self._remove_queued_action_handler)
            self._remove_queued_action_handler = self._remove_queued_action.connect("activate",
                                                                                    lambda a, p: jq.remove_jobs([id_]))
            if self._launch_queued_action_handler is not None:
                self._launch_queued_action.disconnect(self._launch_queued_action_handler)
            self._launch_queued_action_handler = self._launch_queued_action.connect("activate",
                                                                                    lambda a, p: jq.force_launch_queued_jobs([id_]))
            if self._launch_failed_action_handler is not None:
                self._launch_failed_action.disconnect(self._launch_failed_action_handler)
            self._launch_failed_action_handler = self._launch_failed_action.connect("activate",
                                                                                    lambda a, p: jq.launch_again_failed_jobs([id_]))
            if self._stop_action_handler is not None:
                self._stop_action.disconnect(self._stop_action_handler)
            self._stop_action_handler = self._stop_action.connect("activate", lambda a, p: jq.kill_jobs([id_]))
        else:
            # Several rows are selected and the mouse is over a selected row.
            # Group the selected job ids by status.
            queued_jobs = []
            running_jobs = []
            failed_jobs = []
            finished_jobs = []
            for row_path in self._tv_selection.get_selected_rows()[1]:
                id_, status = self._model.get(self._model.get_iter(row_path), JOB_LIST_ID, JOB_LIST_COLUMN_STATUS)
                if status == job_status_pixbuf[JobStatus.QUEUED]:
                    queued_jobs.append(id_)
                elif status == job_status_pixbuf[JobStatus.RUNNING]:
                    running_jobs.append(id_)
                elif status == job_status_pixbuf[JobStatus.FAILED]:
                    failed_jobs.append(id_)
                elif status == job_status_pixbuf[JobStatus.FINISHED]:
                    finished_jobs.append(id_)
                else:
                    raise ValueError('Unexpected status')
            # One menu section per status present in the selection.
            if len(queued_jobs) > 0:
                section = Gio.Menu()
                section.append('Remove queued jobs from queue', 'app.remove_queued')
                section.append('Launch queued jobs now', 'app.launch_queued')
                popover_menu.append_section(label='Queued jobs', section=section)
            if len(failed_jobs) > 0:
                section = Gio.Menu()
                section.append('Remove failed jobs from list', 'app.remove_failed')
                section.append('Launch failed jobs again', 'app.launch_failed')
                popover_menu.append_section(label='Failed jobs', section=section)
            if len(finished_jobs) > 0:
                section = Gio.Menu()
                section.append('Remove finished jobs from list', 'app.remove_finished')
                popover_menu.append_section(label='Finished jobs', section=section)
            if len(running_jobs) > 0:
                section = Gio.Menu()
                section.append('Stop processing running jobs', 'app.stop')
                popover_menu.append_section(label='Running jobs', section=section)
            # Reconnect each action's handler so it captures the grouped ids.
            if self._remove_queued_action_handler is not None:
                self._remove_queued_action.disconnect(self._remove_queued_action_handler)
            self._remove_queued_action_handler = self._remove_queued_action.connect("activate",
                                                                                    lambda a, p: jq.remove_jobs(queued_jobs))
            if self._remove_failed_action_handler is not None:
                self._remove_failed_action.disconnect(self._remove_failed_action_handler)
            self._remove_failed_action_handler = self._remove_failed_action.connect("activate",
                                                                                    lambda a, p: jq.remove_jobs(failed_jobs))
            if self._remove_finished_action_handler is not None:
                self._remove_finished_action.disconnect(self._remove_finished_action_handler)
            self._remove_finished_action_handler = self._remove_finished_action.connect("activate",
                                                                                        lambda a, p: jq.remove_jobs(finished_jobs))
            if self._launch_queued_action_handler is not None:
                self._launch_queued_action.disconnect(self._launch_queued_action_handler)
            self._launch_queued_action_handler = self._launch_queued_action.connect("activate",
                                                                                    lambda a, p: jq.force_launch_queued_jobs(queued_jobs))
            if self._launch_failed_action_handler is not None:
                self._launch_failed_action.disconnect(self._launch_failed_action_handler)
            self._launch_failed_action_handler = self._launch_failed_action.connect("activate",
                                                                                    lambda a, p: jq.launch_again_failed_jobs(failed_jobs))
            if self._stop_action_handler is not None:
                self._stop_action.disconnect(self._stop_action_handler)
            self._stop_action_handler = self._stop_action.connect("activate", lambda a, p: jq.kill_jobs(running_jobs))
        popover = Gtk.Popover.new_from_model(relative_to=self._treeview, model=popover_menu)
        popover.set_position(Gtk.PositionType.BOTTOM)
        rect = Gdk.Rectangle()
        rect.x = event.x
        rect.y = event.y + 20
        rect.width = rect.height = 1
        popover.set_pointing_to(rect)
        popover.popup()

    def _on_query_tooltip(self, widget, x, y, keyboard_mode, tooltip):
        """Fill the tooltip: per-job details, or aggregate statistics when the
        mouse hovers a multi-row selection. Returns True when a tooltip is set."""
        # success, cellx, celly, model, path, iter_ = widget.get_tooltip_context(x, y, keyboard_mode)
        success, *_, iter_ = widget.get_tooltip_context(x, y, keyboard_mode)
        if not success:
            return False
        n_selected = self._tv_selection.count_selected_rows()
        # If one or no rows are selected or
        # multiple rows are selected but the mouse is not over a selected row
        if (n_selected <= 1 or
                (n_selected > 1 and not self._tv_selection.iter_is_selected(iter_))):
            (file_name, audio_enc, volume_inc,
             keep_original, output_file, status,
             start_time, end_time, est_time,
             error_string) = self._model.get(iter_,
                                             JOB_LIST_COLUMN_FILENAME, JOB_LIST_AUDIO_ENC, JOB_LIST_VOLUME_INC,
                                             JOB_LIST_KEEP_ORIGINAL, JOB_LIST_OUTPUT_FILE, JOB_LIST_COLUMN_STATUS,
                                             JOB_LIST_START_TIME, JOB_LIST_END_TIME, JOB_LIST_COLUMN_ESTTIME,
                                             JOB_LIST_ERROR_STRING)
            file_name = os.path.basename(file_name)
            if not keep_original:
                output_file_name_str = ''
            else:
                output_file_name = os.path.basename(output_file)
                output_file_name_str = f'Output file:\t\t\t{output_file_name}\n'
            # Map the status icon name back to a human-readable label.
            if status == job_status_pixbuf[JobStatus.QUEUED]:
                status_str = "Queued"
            elif status == job_status_pixbuf[JobStatus.RUNNING]:
                status_str = "Running"
            elif status == job_status_pixbuf[JobStatus.FAILED]:
                status_str = "Failed"
            elif status == job_status_pixbuf[JobStatus.FINISHED]:
                status_str = "Finished"
            else:
                raise ValueError('Unexpected status')
            start_time_str = ''
            end_time_str = ''
            estimated_end_time_str = ''
            elapsed_time_str = ''
            if start_time != 0:
                status_str += '\n'
                temp = localtime_ns(start_time)
                start_time_str = f'Start time:\t\t\t{format_localtime_ns(temp)}\n'
            if end_time != 0:
                temp = localtime_ns(end_time)
                end_time_str = f'End time:\t\t\t{format_localtime_ns(temp)}\n'
                elapsed_time = end_time - start_time
                elapsed_time_str = f'Elapsed time:\t\t{format_time_ns(elapsed_time)}'
            elif est_time != '':  # and end_time == 0
                # est_time is an "H:M:S" string; 10e8 == 1e9 ns per second.
                h, m, s = est_time.split(':')
                est_time_ns = time.time_ns() + int((int(h) * 3600 + int(m) * 60 + float(s)) * 10e8)
                estimated_end_time_str = f'Estimated end time:\t{format_localtime_ns(localtime_ns(est_time_ns))}'
            if error_string != '':
                error_string = f'\nError:\n{error_string}'
                if elapsed_time_str != '':
                    elapsed_time_str += '\n'
                if estimated_end_time_str != '':
                    estimated_end_time_str += '\n'
            tooltip.set_text(f'File:\t\t\t\t{file_name}\n'
                             f'Keep Original:\t\t{keep_original}\n'
                             f'{output_file_name_str}'
                             f'Audio encoder:\t\t{audio_enc}\n'
                             f'Volume increase:\t{volume_inc}\n'
                             f'Status:\t\t\t\t{status_str}'
                             f'{start_time_str}{end_time_str}{estimated_end_time_str}{elapsed_time_str}{error_string}')
            return True
        else:
            # Several rows are selected and the mouse is over a selected row.
            total_queued_jobs = 0
            total_finished_jobs = 0
            total_running_jobs = 0
            total_time = 0
            start_time = float('+Infinity')
            end_time = 0
            est_time = 0
            for row_path in self._tv_selection.get_selected_rows()[1]:
                status, est_time_str, start, end = self._model.get(self._model.get_iter(row_path),
                                                                   JOB_LIST_COLUMN_STATUS,
                                                                   JOB_LIST_COLUMN_ESTTIME,
                                                                   JOB_LIST_START_TIME,
                                                                   JOB_LIST_END_TIME)
                if status == job_status_pixbuf[JobStatus.FINISHED]:
                    total_finished_jobs += 1
                    total_time += end - start
                    # Track the earliest start and the latest end of the batch.
                    if start < start_time:
                        start_time = start
                    if end > end_time:
                        end_time = end
                if status == job_status_pixbuf[JobStatus.RUNNING] and est_time_str != '':
                    total_running_jobs += 1
                    h, m, s = est_time_str.split(':')
                    t = int((int(h) * 3600 + int(m) * 60 + float(s)) * 10e8)
                    if t > est_time:
                        est_time = t
                if status == job_status_pixbuf[JobStatus.QUEUED]:
                    total_queued_jobs += 1
            if total_finished_jobs == 0:
                start_time_str = '00:00:00'
                end_time_str = '00:00:00'
                elapsed_time_str = '00:00:00'
                avg_elapsed_time_str = ''
                accumulated_time_str = '00:00:00'
                avg_time_str = '00:00:00'
                est_end_time_queued_str = ''
            else:
                elapsed_time = end_time - start_time
                start_time = localtime_ns(start_time)
                end_time = localtime_ns(end_time)
                avg_time = total_time // total_finished_jobs
                avg_elapsed_time = elapsed_time // total_finished_jobs
                elapsed_time_str = format_time_ns(elapsed_time)
                start_time_str = format_localtime_ns(start_time)
                end_time_str = format_localtime_ns(end_time)
                accumulated_time_str = format_time_ns(total_time)
                avg_time_str = format_time_ns(avg_time)
                avg_elapsed_time_str = f'Average elapsed time:\t\t{format_time_ns(avg_elapsed_time)}\n'
                if total_queued_jobs == 0 and total_running_jobs == 0:
                    est_end_time_queued_str = ''
                else:
                    # Longest running estimate plus the queued jobs served in
                    # batches of config.max_jobs at the average job time.
                    est_end_time_queued = (time.time_ns() + est_time +
                                           (avg_time * math.ceil(total_queued_jobs / config.max_jobs)))
                    est_end_time_queued = localtime_ns(est_end_time_queued)
                    est_end_time_queued_str = f'\n\nEstimated end time:\t\t{format_localtime_ns(est_end_time_queued)}'
            tooltip.set_text(f'Statistics of {total_finished_jobs} completed jobs:\n'
                             f'Start time:\t\t\t\t\t{start_time_str}\n'
                             f'End time:\t\t\t\t\t{end_time_str}\n'
                             f'Elapsed time:\t\t\t\t{elapsed_time_str}\n'
                             f'{avg_elapsed_time_str}'
                             f'Total accumulated time:\t\t{accumulated_time_str}\n'
                             f'Average completion time:\t{avg_time_str}'
                             f'{est_end_time_queued_str}')
            return True

    def _on_drag_data_received(self, _widget, _drag_context, _x, _y, data, _info, _time_str):
        """Add jobs for dropped local video files; recurse into dropped folders."""
        for uri in data.get_uris():
            # continue if it isn't a local file
            if not uri.startswith('file:///'):  # FIXME: This is not portable.
                continue
            path = urllib.request.url2pathname(uri)
            # NOTE(review): url2pathname was applied to the full URI, so the
            # scheme prefix survives and is stripped by the slice below.
            path = path[7:]  # removes 'file://'
            if os.path.isfile(path):
                if config.ignore_temp_files and os.path.basename(path).startswith(config.temp_file_prefix):
                    continue
                if path.lower().endswith(config.video_extensions):
                    jq.add_job(path)
                else:
                    error_message(text='Not a video file', secondary_text=f'File:\n{path}')
            elif os.path.isdir(path):
                for root, dirs, files in os.walk(path, topdown=False, followlinks=True):
                    for name in files:
                        if config.ignore_temp_files and name.startswith(config.temp_file_prefix):
                            continue
                        if name.lower().endswith(config.video_extensions):
                            jq.add_job(os.path.join(root, name))

    def add_job_from_path(self, _object, path: str):
        """Add a job for *path* if it has a known video extension; else show an error."""
        if path.lower().endswith(config.video_extensions):
            jq.add_job(path)
        else:
            error_message(text='Not a video file', secondary_text=f'File:\n{path}')

    def _add_columns(self, treeview):
        """Create the File / Status / Progress / Remaining time tree-view columns."""
        # column for file names
        renderer = Gtk.CellRendererText(ellipsize=Pango.EllipsizeMode.START)
        column = Gtk.TreeViewColumn("File", renderer,
                                    text=JOB_LIST_COLUMN_FILENAME)
        column.set_expand(True)
        column.set_sort_column_id(JOB_LIST_COLUMN_FILENAME)
        treeview.append_column(column)
        # column for job status
        renderer = Gtk.CellRendererPixbuf()
        column = Gtk.TreeViewColumn("Status", renderer,
                                    icon_name=JOB_LIST_COLUMN_STATUS)
        column.set_sort_column_id(JOB_LIST_COLUMN_STATUS)
        treeview.append_column(column)
        # column for progress bar
        renderer = Gtk.CellRendererProgress()
        column = Gtk.TreeViewColumn("Progress", renderer, value=JOB_LIST_COLUMN_PROGRESS)
        column.set_sort_column_id(JOB_LIST_COLUMN_PROGRESS)
        treeview.append_column(column)
        # column for estimated remaining time
        renderer = Gtk.CellRendererText()
        column = Gtk.TreeViewColumn("Remaining time", renderer,
                                    text=JOB_LIST_COLUMN_ESTTIME)
        column.set_sort_column_id(JOB_LIST_COLUMN_ESTTIME)
        treeview.append_column(column)
class ProcessLauncher(GObject.GObject):
    """
    Process launcher with async IO.
    Modified from:
    https://gist.github.com/fthiery/da43365ceeefff8a9e3d0dd83ec24af9
    This class is not used. Instead, the child classes FfprobeLauncher and
    FfmpegLauncher are used.
    To use the ProcessLauncher class it is necessary to create a derived class
    and override at least the methods __init__, for_each_line, at_finalization
    and at_finalization_with_error.
    The super().__init__(command) call in the derived class must include the
    command executed, like super().__init__('/bin/ls').
    The for_each_line method is called for each line of the command output. The
    end of the line character set is specified in the read_upto_async call of
    the _queue_read method. In the current implementation the end of line
    character list is '\r\n' which is the correct one for ffmpeg output on Linux
    For a common Linux command the '\n' character should be sufficient.
    The at_finalization and at_finalization_with_error methods are called when
    the command is over without error and with error respectively
    """
    def __init__(self, cmd: str = None):
        """Store the command line; derived classes must supply *cmd*."""
        super().__init__()
        if cmd is None:
            raise NotImplementedError()
        else:
            self._cmd = cmd
        self._process = None        # Gio.Subprocess once run() is called
        self._data_stream = None    # Gio.DataInputStream over merged stdout/stderr
        self._cancellable = None    # Gio.Cancellable used to abort pending reads
    def run(self):
        """Spawn the subprocess and start the async read loop over its output."""
        self._cancellable = Gio.Cancellable()
        try:
            # stderr is merged into stdout so one stream carries all output.
            flags = Gio.SubprocessFlags.STDOUT_PIPE | Gio.SubprocessFlags.STDERR_MERGE
            args = shlex.split(self._cmd)
            self._process = p = Gio.Subprocess.new(args, flags)
            p.wait_check_async(
                cancellable=self._cancellable,
                callback=self._on_finished
            )
            # print('Started')
            stream = p.get_stdout_pipe()
            self._data_stream = Gio.DataInputStream.new(stream)
            self._queue_read()
        except GLib.GError as e:
            traceback.print_exc()
            self.at_finalization_with_error(f'{str(e)}\n\nCommand:\n{self._cmd}')
            return
    def _queue_read(self):
        """Schedule an async read of the next output line (up to '\r' or '\n')."""
        self._data_stream.read_upto_async(
            stop_chars='\r\n',  # FIXME: This is not portable.
            stop_chars_len=2,
            io_priority=GLib.PRIORITY_DEFAULT,
            cancellable=self._cancellable,
            callback=self._on_data
        )
    def _cancel_read(self):
        """Abort any pending async read."""
        # print('Cancelling read')
        self._cancellable.cancel()
    def _on_finished(self, proc, results):
        """Callback fired when the subprocess exits; dispatches to the hooks."""
        # print('Process finished')
        try:
            proc.wait_check_finish(results)
        except Exception as e:
            traceback.print_exc()
            self.at_finalization_with_error(f'{str(e)}\n\nCommand:\n{self._cmd}')
        else:
            self.at_finalization()
        # Stop the read loop in either case: the process is gone.
        self._cancel_read()
    def _on_data(self, source, result):
        """Callback for each completed async read; re-queues the next read."""
        # FIXME: sometimes this method is executed even when the task is cancelled
        if result.had_error():
            return
        try:
            line, length = source.read_upto_finish(result)
            if line:
                # consume the stop character
                source.read_byte(self._cancellable)
                self.for_each_line(line)
        except GLib.GError as e:
            traceback.print_exc()
            self.at_finalization_with_error(f'{str(e)}\n\nCommand:\n{self._cmd}')
            return
        # read_upto_finish() returns None on error without raise any exception
        if line is not None:
            self._queue_read()
    def stop(self):
        """Ask the subprocess to terminate gracefully (SIGTERM)."""
        # print('Stop')
        self._process.send_signal(signal.SIGTERM)
    def kill(self):
        """Force-kill the subprocess (SIGKILL) and abort pending reads."""
        # print('Kill')
        self._cancel_read()
        self._process.send_signal(signal.SIGKILL)
    def for_each_line(self, line: str):
        """Hook: called with each output line. Must be overridden."""
        raise NotImplementedError()
    def at_finalization(self):
        """Hook: called when the command exits successfully. Must be overridden."""
        raise NotImplementedError()
    def at_finalization_with_error(self, error: str):
        """Hook: called on launch/exit error with a message. Must be overridden."""
        raise NotImplementedError()
class FfprobeLauncher(ProcessLauncher):
    """Run ffprobe to obtain a video's duration in seconds.

    Emits 'finished' with the duration (float) on success, or
    'finished_with_error' with an error message on failure.
    """
    @GObject.Signal(arg_types=[float, ])
    def finished(self, duration):
        pass
    @GObject.Signal(arg_types=[str, bool, ])
    def finished_with_error(self, error, remove_temp_output):
        pass
    def __init__(self, file_name: str):
        self._error = False    # guards against emitting the error signal twice
        self._duration = 0     # seconds, parsed from ffprobe's single output line
        self._n_lines = 0      # ffprobe is expected to print exactly one line
        self._cmd = config.ffprobe_get_duration_cmd.format(video_file_name=file_name)
        super().__init__(self._cmd)
    def for_each_line(self, line: str):
        """Parse the single duration line printed by ffprobe."""
        if self._n_lines > 0:
            self.at_finalization_with_error(f'Error executing ffprobe:\nExpected just one line, got more:\n{line}')
            return  # BUG FIX: don't also try to parse the unexpected extra line
        self._n_lines += 1
        try:
            self._duration = float(line)
        except ValueError as e:
            self.at_finalization_with_error(f'Error executing ffprobe:\n'
                                            f'Trying to read a float got error:\n{str(e)}\n'
                                            f'in line:\n{line}')
    def at_finalization(self):
        self.emit('finished', self._duration)
    def at_finalization_with_error(self, error):
        # Emit the error signal only once even if several errors occur.
        if not self._error:
            self._error = True
            self.emit('finished_with_error', error, True)
class FfmpegLauncher(ProcessLauncher):
    """Run ffmpeg to re-encode a video's audio with increased volume.

    Emits 'update_state' with the progress percentage while encoding,
    'finished' on success and 'finished_with_error' on failure.
    """
    @GObject.Signal(arg_types=[float, ])
    def update_state(self, progress):
        pass
    @GObject.Signal()
    def finished(self):
        pass
    @GObject.Signal(arg_types=[str, bool, ])
    def finished_with_error(self, error, remove_temp_output):
        pass
    def __init__(self, file_name: str, temp_output: str, volume_increase: float, audio_encoder: str,
                 audio_quality: float, remove_subtitles: bool, duration: float):
        self._error = False          # guards against emitting the error signal twice
        self._duration = duration    # total duration in seconds; used for the progress %
        self._cmd = config.ffmpeg_increase_audio_cmd.format(video_file_name_input=file_name,
                                                            video_file_name_output=temp_output,
                                                            volume_increase=volume_increase,
                                                            audio_encoder=audio_encoder,
                                                            audio_quality=audio_quality,
                                                            remove_subtitles_param='-sn' if remove_subtitles else '')
        super().__init__(self._cmd)
    def for_each_line(self, line: str):
        """Parse ffmpeg progress lines like 'frame=... time=HH:MM:SS.cc ...'."""
        if not line.startswith('frame='):
            return
        time_beg = line.find(' time=')
        if time_beg == -1:
            return  # BUG FIX: progress line without a time field used to mis-parse
        time_beg += 6
        time_end = line.find(' ', time_beg)
        time_str = line[time_beg:time_end]
        try:
            h, m, s = time_str.split(':')
            progress = int(h) * 3600 + int(m) * 60 + float(s)
        except ValueError:
            return  # BUG FIX: ffmpeg prints 'time=N/A' before the first timestamp
        if self._duration > 0:  # BUG FIX: avoid ZeroDivisionError for zero-length inputs
            self.emit('update_state', progress * 100 / self._duration)
    def at_finalization(self):
        self.emit('finished')
    def at_finalization_with_error(self, error):
        # Emit the error signal only once even if several errors occur.
        if not self._error:
            self._error = True
            self.emit('finished_with_error', error, True)
class Preferences(Gtk.Window):
    """Preferences window.

    Builds a grid of label/widget rows for every configurable option and
    writes the values back into the global ``config`` when the window is
    closed (see _on_destroy).
    """
    def __init__(self):
        super().__init__()
        self._vol_increase_decimals = 1   # decimals shown by the volume spin button
        self._separator_margin = 5        # vertical margin around section separators
        self.set_title('Preferences')
        self.set_border_width(10)
        self._grid = Gtk.Grid()
        # Row: video file extensions recognized by the file explorer / drag&drop.
        self._video_ext_label = Gtk.Label(label='Video extensions: ')
        self._video_ext_entry = Gtk.Entry(text=','.join(config.video_extensions))
        self._grid.add(self._video_ext_label)
        self._grid.attach_next_to(self._video_ext_entry, self._video_ext_label, Gtk.PositionType.RIGHT, 1, 1)
        self._sep1 = Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL)
        self._sep1.set_margin_top(self._separator_margin)
        self._sep1.set_margin_bottom(self._separator_margin)
        self._grid.attach_next_to(self._sep1, self._video_ext_label, Gtk.PositionType.BOTTOM, 2, 1)
        # Row: volume multiplier passed to ffmpeg.
        self._vol_increase_label = Gtk.Label(label='Volume increase: ')
        self._vol_increase_spin = Gtk.SpinButton(climb_rate=1.0,
                                                 digits=self._vol_increase_decimals,
                                                 adjustment=Gtk.Adjustment(value=float(config.volume_increase),
                                                                           lower=0.1,
                                                                           upper=10.0,
                                                                           step_increment=0.1,
                                                                           page_increment=0.5,
                                                                           page_size=0.0))
        self._grid.attach_next_to(self._vol_increase_label, self._sep1, Gtk.PositionType.BOTTOM, 1, 1)
        self._grid.attach_next_to(self._vol_increase_spin, self._vol_increase_label, Gtk.PositionType.RIGHT, 1, 1)
        # Row: audio encoder selection.
        self._audio_encoder_label = Gtk.Label(label='Audio encoder: ')
        self._audio_encoder_combo = Gtk.ComboBoxText()
        for i, enc in enumerate(config.audio_encoders):
            self._audio_encoder_combo.append_text(enc)
            if enc == config.audio_encoder:
                self._audio_encoder_combo.set_active(i)
        self._grid.attach_next_to(self._audio_encoder_label, self._vol_increase_label, Gtk.PositionType.BOTTOM, 1, 1)
        self._grid.attach_next_to(self._audio_encoder_combo, self._audio_encoder_label, Gtk.PositionType.RIGHT, 1, 1)
        # Row: audio quality as a discrete 0..n_qualities-1 slider.
        self._audio_quality_label = Gtk.Label(label='Audio quality: ')
        self._audio_quality_label.set_margin_bottom(27)  # FIXME. How to center the label?
        self._audio_quality_scale = Gtk.Scale(orientation=Gtk.Orientation.HORIZONTAL,
                                              adjustment=Gtk.Adjustment(value=config.audio_quality,
                                                                        lower=0,
                                                                        upper=config.n_qualities - 1,
                                                                        step_increment=1,
                                                                        page_increment=1,
                                                                        page_size=0))
        self._audio_quality_scale.set_draw_value(False)
        self._audio_quality_scale.add_mark(value=0, position=Gtk.PositionType.BOTTOM, markup='Min')
        for i in range(1, config.n_qualities - 1):
            self._audio_quality_scale.add_mark(value=i, position=Gtk.PositionType.BOTTOM)
        self._audio_quality_scale.add_mark(value=config.n_qualities - 1, position=Gtk.PositionType.BOTTOM, markup='Max')
        self._grid.attach_next_to(self._audio_quality_label, self._audio_encoder_label, Gtk.PositionType.BOTTOM, 1, 1)
        self._grid.attach_next_to(self._audio_quality_scale, self._audio_quality_label, Gtk.PositionType.RIGHT, 1, 1)
        self._sep2 = Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL)
        self._sep2.set_margin_top(self._separator_margin)
        self._sep2.set_margin_bottom(self._separator_margin)
        self._grid.attach_next_to(self._sep2, self._audio_quality_label, Gtk.PositionType.BOTTOM, 2, 1)
        # Row: remove subtitle streams during re-encoding.
        self._remove_subtitles_label = Gtk.Label(label='Remove subtitles: ')
        self._remove_subtitles_toggle = Gtk.CheckButton(active=config.remove_subtitles)
        self._grid.attach_next_to(self._remove_subtitles_label, self._sep2, Gtk.PositionType.BOTTOM, 1, 1)
        self._grid.attach_next_to(self._remove_subtitles_toggle, self._remove_subtitles_label, Gtk.PositionType.RIGHT,
                                  1, 1)
        self._sep3 = Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL)
        self._sep3.set_margin_top(self._separator_margin)
        self._sep3.set_margin_bottom(self._separator_margin)
        self._grid.attach_next_to(self._sep3, self._remove_subtitles_label, Gtk.PositionType.BOTTOM, 2, 1)
        # Rows: concurrent-jobs limit, optionally tied to the CPU count.
        self._max_jobs_label = Gtk.Label(label='Number of jobs: ')
        self._max_jobs_spin = Gtk.SpinButton(adjustment=Gtk.Adjustment(value=config.max_jobs,
                                                                       lower=1,
                                                                       upper=os.cpu_count(),
                                                                       step_increment=1,
                                                                       page_increment=1,
                                                                       page_size=0))
        self._grid.attach_next_to(self._max_jobs_label, self._sep3, Gtk.PositionType.BOTTOM, 1, 1)
        self._grid.attach_next_to(self._max_jobs_spin, self._max_jobs_label, Gtk.PositionType.RIGHT, 1, 1)
        self._use_all_cpus_label = Gtk.Label(label='Use all CPUs: ')
        self._use_all_cpus_toggle = Gtk.CheckButton(active=False)
        self._grid.attach_next_to(self._use_all_cpus_label, self._max_jobs_label, Gtk.PositionType.BOTTOM, 1, 1)
        self._grid.attach_next_to(self._use_all_cpus_toggle, self._use_all_cpus_label, Gtk.PositionType.RIGHT, 1, 1)
        self._sep4 = Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL)
        self._sep4.set_margin_top(self._separator_margin)
        self._sep4.set_margin_bottom(self._separator_margin)
        self._grid.attach_next_to(self._sep4, self._use_all_cpus_label, Gtk.PositionType.BOTTOM, 2, 1)
        # Rows: output-file naming (only used when the original is kept).
        self._keep_original_label = Gtk.Label(label='Keep Original: ')
        self._keep_original_toggle = Gtk.CheckButton(active=config.keep_original)
        self._grid.attach_next_to(self._keep_original_label, self._sep4, Gtk.PositionType.BOTTOM, 1, 1)
        self._grid.attach_next_to(self._keep_original_toggle, self._keep_original_label, Gtk.PositionType.RIGHT, 1, 1)
        self._output_prefix_label = Gtk.Label(label='Output prefix: ')
        self._output_prefix_entry = Gtk.Entry(text=config.output_prefix)
        self._grid.attach_next_to(self._output_prefix_label, self._keep_original_label, Gtk.PositionType.BOTTOM, 1, 1)
        self._grid.attach_next_to(self._output_prefix_entry, self._output_prefix_label, Gtk.PositionType.RIGHT, 1, 1)
        self._output_suffix_label = Gtk.Label(label='Output suffix: ')
        self._output_suffix_entry = Gtk.Entry(text=config.output_suffix)
        self._grid.attach_next_to(self._output_suffix_label, self._output_prefix_label, Gtk.PositionType.BOTTOM, 1, 1)
        self._grid.attach_next_to(self._output_suffix_entry, self._output_suffix_label, Gtk.PositionType.RIGHT, 1, 1)
        self._sep5 = Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL)
        self._sep5.set_margin_top(self._separator_margin)
        self._sep5.set_margin_bottom(self._separator_margin)
        self._grid.attach_next_to(self._sep5, self._output_suffix_label, Gtk.PositionType.BOTTOM, 2, 1)
        # Rows: temporary-file handling and time display.
        self._ignore_temp_files_label = Gtk.Label(label='Ignore temporal files: ')
        self._ignore_temp_files_toggle = Gtk.CheckButton(active=config.ignore_temp_files)
        self._grid.attach_next_to(self._ignore_temp_files_label, self._sep5, Gtk.PositionType.BOTTOM, 1, 1)
        self._grid.attach_next_to(self._ignore_temp_files_toggle, self._ignore_temp_files_label, Gtk.PositionType.RIGHT, 1, 1)
        self._temp_file_prefix_label = Gtk.Label(label='Temporal file prefix: ')
        self._temp_file_prefix_entry = Gtk.Entry(text=config.temp_file_prefix)
        self._grid.attach_next_to(self._temp_file_prefix_label, self._ignore_temp_files_label, Gtk.PositionType.BOTTOM, 1, 1)
        self._grid.attach_next_to(self._temp_file_prefix_entry, self._temp_file_prefix_label, Gtk.PositionType.RIGHT, 1, 1)
        self._show_milliseconds_label = Gtk.Label(label='Show milliseconds: ')
        self._show_milliseconds_toggle = Gtk.CheckButton(active=config.show_milliseconds)
        self._grid.attach_next_to(self._show_milliseconds_label, self._temp_file_prefix_label, Gtk.PositionType.BOTTOM, 1, 1)
        self._grid.attach_next_to(self._show_milliseconds_toggle, self._show_milliseconds_label, Gtk.PositionType.RIGHT, 1, 1)
        # Avoid selection of _video_ext_entry text
        self._use_all_cpus_toggle.grab_focus()
        # Kubuntu GTK theme doesn't show scale ticks
        provider = Gtk.CssProvider()
        provider.load_from_data(b"scale marks mark indicator {min-height: 5px; min-width: 5px;}")
        self._audio_quality_scale.get_style_context().add_provider(provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
        # Reflect the current configuration in the dependent widgets.
        if config.use_all_cpus:
            self._use_all_cpus_toggle.set_active(True)
            self._max_jobs_spin.set_sensitive(False)
        if not config.keep_original:
            self._output_prefix_entry.set_sensitive(False)
            self._output_suffix_entry.set_sensitive(False)
        self._changed_value_id = self._audio_quality_scale.connect('change-value', self._on_audio_quality_change_value)
        self._use_all_cpus_toggle.connect('toggled', self._on_max_jobs_toggled)
        self._keep_original_toggle.connect('toggled', self._on_keep_original_toggled)
        self.add(self._grid)
        self.show_all()
        self.connect('destroy', self._on_destroy)
    def _on_audio_quality_change_value(self, _scale, scroll_type, value):
        """
        Allows only discrete values for the Gtk.Scale widget.
        https://stackoverflow.com/questions/39013193/is-there-an-official-way-to-create-discrete-valued-range-widget-in-gtk
        """
        # find the closest valid value
        # value = min(range(config.n_qualities), key=lambda v: abs(value-v))
        value = round(value)
        # emit a new signal with the new value
        self._audio_quality_scale.handler_block(self._changed_value_id)
        self._audio_quality_scale.emit('change-value', scroll_type, value)
        self._audio_quality_scale.handler_unblock(self._changed_value_id)
        return True  # prevent the signal from escalating
    def _on_max_jobs_toggled(self, toggle: Gtk.ToggleButton):
        """'Use all CPUs' toggle: lock the jobs spin button to the CPU count."""
        if toggle.get_active():
            self._max_jobs_spin.set_sensitive(False)
            self._max_jobs_spin.set_value(os.cpu_count())
        else:
            self._max_jobs_spin.set_sensitive(True)
    def _on_keep_original_toggled(self, toggle: Gtk.ToggleButton):
        """'Keep Original' toggle: prefix/suffix entries only apply when keeping."""
        if toggle.get_active():
            self._output_prefix_entry.set_sensitive(True)
            self._output_suffix_entry.set_sensitive(True)
        else:
            self._output_prefix_entry.set_sensitive(False)
            self._output_suffix_entry.set_sensitive(False)
    def _on_destroy(self, _w):
        """Persist every widget's value into the global config on close."""
        # Changing the job limit may allow queued jobs to start right away.
        if config.max_jobs != int(self._max_jobs_spin.get_value()):
            config.max_jobs = int(self._max_jobs_spin.get_value())
            jq.check_queue()
        config.video_extensions = tuple(self._video_ext_entry.get_text().split(','))
        config.remove_subtitles = self._remove_subtitles_toggle.get_active()
        config.volume_increase = round(self._vol_increase_spin.get_value(), self._vol_increase_decimals)
        config.audio_encoder = self._audio_encoder_combo.get_active_text()
        config.audio_quality = int(self._audio_quality_scale.get_value())
        config.use_all_cpus = self._use_all_cpus_toggle.get_active()
        config.keep_original = self._keep_original_toggle.get_active()
        config.output_prefix = self._output_prefix_entry.get_text()
        config.output_suffix = self._output_suffix_entry.get_text()
        config.ignore_temp_files = self._ignore_temp_files_toggle.get_active()
        config.temp_file_prefix = self._temp_file_prefix_entry.get_text()
        config.show_milliseconds = self._show_milliseconds_toggle.get_active()
        self.destroy()
# This XML is here to avoid another file. This is a "one file application" with no installation instructions.
# NOTE: accelerator strings such as "<Primary>p" must be XML-escaped as
# "&lt;Primary&gt;p"; a literal '<Primary>' is an unclosed tag and makes
# Gtk.Builder fail to parse the menu definition.
MENU_XML = """
<?xml version="1.0" encoding="UTF-8"?>
<interface>
  <menu id="app-menu">
    <section>
      <item>
        <attribute name="action">app.preferences</attribute>
        <attribute name="label" translatable="yes">_Preferences</attribute>
        <attribute name="accel">&lt;Primary&gt;p</attribute>
      </item>
    </section>
    <section>
      <item>
        <attribute name="action">win.file_expl_show_hidden_files</attribute>
        <attribute name="label" translatable="yes">Show hidden files</attribute>
      </item>
      <item>
        <attribute name="action">win.file_expl_case_sensitive_sort</attribute>
        <attribute name="label" translatable="yes">Case sensitive sort</attribute>
      </item>
      <item>
        <attribute name="action">win.file_expl_single_click</attribute>
        <attribute name="label" translatable="yes">Single click</attribute>
      </item>
    </section>
    <section>
      <item>
        <attribute name="action">app.about</attribute>
        <attribute name="label" translatable="yes">_About</attribute>
      </item>
      <item>
        <attribute name="action">app.quit</attribute>
        <attribute name="label" translatable="yes">_Quit</attribute>
        <attribute name="accel">&lt;Primary&gt;q</attribute>
      </item>
    </section>
  </menu>
</interface>
"""
class AppWindow(Gtk.ApplicationWindow):
    """Main window: places sidebar | file explorer | jobs list, plus menu actions."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Window size
        if config.win_maximized:
            self.maximize()
        else:
            self.unmaximize()
        self.set_default_size(config.win_width, config.win_height)
        # https://wiki.gnome.org/HowDoI/SaveWindowState
        self.connect('size-allocate', self._on_size_allocate_change)
        self.connect('window-state-event', self._on_state_event)
        # Menu actions (the "win." actions referenced by MENU_XML)
        file_exp_hidden_files_action = Gio.SimpleAction.new_stateful(
            "file_expl_show_hidden_files", None, GLib.Variant.new_boolean(config.file_expl_show_hidden_files)
        )
        file_exp_hidden_files_action.connect("change-state", self._on_hidden_files_toggle)
        self.add_action(file_exp_hidden_files_action)
        file_exp_case_sort_action = Gio.SimpleAction.new_stateful(
            "file_expl_case_sensitive_sort", None, GLib.Variant.new_boolean(config.file_expl_case_sensitive_sort)
        )
        file_exp_case_sort_action.connect("change-state", self._on_case_sort_toggle)
        self.add_action(file_exp_case_sort_action)
        file_exp_single_click_action = Gio.SimpleAction.new_stateful(
            "file_expl_single_click", None, GLib.Variant.new_boolean(config.file_expl_activate_on_single_click)
        )
        file_exp_single_click_action.connect("change-state", self._on_single_click_toggle)
        self.add_action(file_exp_single_click_action)
        # Build main window
        menubutton = Gtk.MenuButton(direction=Gtk.ArrowType.NONE)
        builder = Gtk.Builder.new_from_string(MENU_XML, -1)
        menubutton.set_menu_model(builder.get_object("app-menu"))
        headerbar = Gtk.HeaderBar(title='Increase video audio volume with ffmpeg')
        headerbar.set_show_close_button(True)
        headerbar.pack_start(menubutton)
        self.set_titlebar(headerbar)
        self._places = Gtk.PlacesSidebar()
        self.file_exp = FileExplorer()
        self._jobsListWidget = JobsListWidget()
        # Wire the three panes together: sidebar drives the explorer,
        # the explorer feeds jobs to the list, finished jobs refresh the view.
        self._places.connect('open-location', self.file_exp.open_location_from_place_sidebar)
        self.file_exp.connect('video_selected', self._jobsListWidget.add_job_from_path)
        jq.connect('job_finished', self.file_exp.refresh_clicked)
        self._paned_file_exp = Gtk.Paned(orientation=Gtk.Orientation.HORIZONTAL)
        self._paned_file_exp.add1(self.file_exp)
        self._paned_file_exp.add2(self._jobsListWidget)
        # FIXME: if the window is maximized this does not work
        self._paned_file_exp.set_position(config.paned_file_expl_position)
        self._paned_file_exp.connect('notify::position', self._on_paned_file_exp_position)
        self._paned_places = Gtk.Paned(orientation=Gtk.Orientation.HORIZONTAL)
        self._paned_places.add1(self._places)
        self._paned_places.add2(self._paned_file_exp)
        self.add(self._paned_places)
    def _on_hidden_files_toggle(self, action: Gio.SimpleAction, value: bool):
        # NOTE(review): 'change-state' handlers receive a GLib.Variant rather
        # than a plain bool; this stores the value directly in config — confirm
        # the config layer converts/serializes it as expected.
        action.set_state(value)
        config.file_expl_show_hidden_files = value
        self.file_exp.refresh_clicked(None)
    def _on_case_sort_toggle(self, action: Gio.SimpleAction, value: bool):
        # See the Variant note in _on_hidden_files_toggle.
        action.set_state(value)
        config.file_expl_case_sensitive_sort = value
        self.file_exp.refresh_clicked(None)
    def _on_single_click_toggle(self, action: Gio.SimpleAction, value: bool):
        # See the Variant note in _on_hidden_files_toggle.
        action.set_state(value)
        config.file_expl_activate_on_single_click = value
        self.file_exp.set_single_click(value)
    def _on_paned_file_exp_position(self, paned: Gtk.Paned, _pspec):
        # Remember the splitter position so it can be restored on next start.
        config.paned_file_expl_position = paned.get_position()
    def _on_size_allocate_change(self, w: Gtk.Window, _allocation: Gdk.Rectangle):
        # Only track the size while unmaximized; the maximized size is implicit.
        if not w.is_maximized():
            width, height = w.get_size()
            config.win_width = width
            config.win_height = height
    def _on_state_event(self, _w: Gtk.Window, event: Gdk.EventWindowState):
        is_maximized = (event.new_window_state & Gdk.WindowState.MAXIMIZED != 0)
        config.win_maximized = is_maximized
class Application(Gtk.Application):
    """Gtk application: owns the single main window and the app-level actions."""
    def __init__(self):
        super().__init__(application_id="org.example.myapp")
        self.window = None  # created lazily in do_activate
    def do_startup(self):
        Gtk.Application.do_startup(self)
        # App-level actions referenced by MENU_XML with the "app." prefix.
        preferences_action = Gio.SimpleAction(name="preferences", parameter_type=None, enabled=True)
        preferences_action.connect("activate", lambda action, param: Preferences())
        self.add_action(preferences_action)
        about_action = Gio.SimpleAction(name="about", parameter_type=None, enabled=True)
        about_action.connect("activate", self._on_about)
        self.add_action(about_action)
        quit_action = Gio.SimpleAction(name="quit", parameter_type=None, enabled=True)
        quit_action.connect("activate", lambda action, param: self.quit())
        self.add_action(quit_action)
    def do_activate(self):
        # Single-window application: re-activation just presents the window.
        if not self.window:
            self.window = AppWindow(application=self)
        self.window.show_all()
        self.window.present()
    def do_shutdown(self):
        # Persist the current explorer directory and the rest of the config.
        config.cwd = self.window.file_exp.cwd
        config.save()
        Gtk.Application.do_shutdown(self)
    def _on_about(self, _action, _param):
        """Show the About dialog, loading the GPL text when available."""
        about_dialog = Gtk.AboutDialog(transient_for=self.window, modal=True)
        about_dialog.set_title('About')
        about_dialog.set_program_name('increasevol')
        about_dialog.set_comments('Increase video audio volume with ffmpeg')
        about_dialog.set_website('https://github.com/eduardoposadas/increasevol')
        about_dialog.set_website_label('Source Code at GitHub')
        about_dialog.set_logo_icon_name(None)
        try:
            # Debian-family systems ship the license text here; on other
            # systems the open() fails and the license is simply omitted.
            with open('/usr/share/common-licenses/GPL-3', encoding="utf-8") as h:
                # Read the whole file at once instead of the former
                # line-by-line string accumulation.
                about_dialog.set_license(h.read())
        except Exception as e:
            print(e)
        about_dialog.connect('response', lambda w, res: w.destroy())
        about_dialog.present()
def error_message(text: str, secondary_text: str, modal: bool = False):
    """Show an error dialog; when *modal* is True, block until it is closed."""
    dialog = Gtk.MessageDialog(message_type=Gtk.MessageType.ERROR,
                               buttons=Gtk.ButtonsType.CLOSE,
                               title='Error',
                               text=text,
                               secondary_text=secondary_text)
    dialog.connect('response', lambda *_args: dialog.destroy())
    # Make the dialog's labels selectable so the user can copy the message.
    dialog.get_message_area().foreach(lambda label: label.set_selectable(True))
    if modal:
        dialog.run()
    else:
        dialog.show_all()
def localtime_ns(ns_: int) -> struct_time_ns:
    """time.localtime() with nanoseconds.

    Splits *ns_* into whole seconds (converted with time.localtime) and the
    nanosecond remainder.
    """
    # 1_000_000_000 ns per second. The previous literal `int(10e8)` evaluated
    # to the same value but read as if it were 10**8.
    s, ns = divmod(int(ns_), 1_000_000_000)
    st = time.localtime(s)
    return struct_time_ns(st.tm_year, st.tm_mon, st.tm_mday, st.tm_hour, st.tm_min, st.tm_sec,
                          st.tm_wday, st.tm_yday, st.tm_isdst, ns)
def format_localtime_ns(st: struct_time_ns) -> str:
    """Format a struct_time_ns as HH:MM:SS[.mmm] depending on config.show_milliseconds."""
    if config.show_milliseconds:
        # BUG FIX: zero-pad the nanoseconds to 9 digits before taking the
        # first three; `str(st.tm_ns)[0:3]` printed e.g. 5_000 ns as '.500'
        # instead of '.000'.
        ms = f'{st.tm_ns:09d}'[:3]
        return f'{st.tm_hour:02d}:{st.tm_min:02d}:{st.tm_sec:02d}.{ms}'
    else:
        return f'{st.tm_hour:02d}:{st.tm_min:02d}:{st.tm_sec:02d}'
def format_time_ns(ns_: int) -> str:
    """Format a duration given in nanoseconds as HH:MM:SS[.mmm]."""
    s, ns = divmod(int(ns_), 1_000_000_000)  # was `int(10e8)`: same value, misleading literal
    m, s = divmod(s, 60)
    h, m = divmod(m, 60)
    if config.show_milliseconds:
        # BUG FIX: zero-pad before truncating so small ns remainders don't
        # print wrong millisecond digits (e.g. 5_000 ns used to show '.500').
        ms = f'{ns:09d}'[:3]
        return f'{h:02d}:{m:02d}:{s:02d}.{ms}'
    else:
        return f'{h:02d}:{m:02d}:{s:02d}'
def check_prerequisites():
    """Check commands in config.required_cmd tuple are in PATH.

    Shows a modal error dialog listing any missing command and aborts.
    """
    missing = [cmd for cmd in config.required_cmd if shutil.which(cmd) is None]
    if missing:
        missing_commands = ''.join(f'\n{cmd}' for cmd in missing)
        error_message(text='Does not meet the prerequisites. Install:',
                      secondary_text=missing_commands,
                      modal=True)
        raise SystemExit(missing_commands)
if __name__ == '__main__':
    # Global singletons used throughout the module.
    config = Configuration()
    check_prerequisites()  # aborts with a dialog if ffmpeg/ffprobe are missing
    jq = JobsQueue()
    app = Application()
    app.run(sys.argv)
| eduardoposadas/increasevol | increasevol.py | increasevol.py | py | 89,937 | python | en | code | 0 | github-code | 90 |
17422992642 | import os
from datasets import load_dataset
# GLUE tasks taking a single input sentence vs. a sentence pair.
one_sentence_tasks = ['cola', 'sst2']
two_sentence_tasks = ['mrpc', 'stsb', 'qqp', 'mnli', 'mnli-mm', 'qnli', 'rte', 'wnli']
# Per-task column names holding the input sentence(s); the second element is
# None for single-sentence tasks.  'mnli-mm' reuses the 'mnli' entry.
task_to_keys = {
    "cola": ("sentence", None),
    "mnli": ("premise", "hypothesis"),
    "mrpc": ("sentence1", "sentence2"),
    "qnli": ("question", "sentence"),
    "qqp": ("question1", "question2"),
    "rte": ("sentence1", "sentence2"),
    "sst2": ("sentence", None),
    "stsb": ("sentence1", "sentence2"),
    "wnli": ("sentence1", "sentence2"),
}
# Output folder name -> Hugging Face split name.
folder_to_split = {'train': 'train', 'dev': 'validation', 'test': 'test'}
# All per-task folders are created under this root.
dataset_root = './data/glue'
os.makedirs(dataset_root, exist_ok=True)
if __name__ == '__main__':
    # Export every GLUE task as tab-separated text files:
    # <dataset_root>/<task>/{train,dev,test}/data with one example per line.
    for task in one_sentence_tasks + two_sentence_tasks:
        # 'mnli-mm' is not a separate Hugging Face config: it is 'mnli'
        # read through the mismatched validation/test splits.
        glue_config = 'mnli' if task != 'mnli-mm' else 'mnli'
        glue_config = task if task != 'mnli-mm' else 'mnli'
        dataset = load_dataset('glue', glue_config)
        sentence1_key, sentence2_key = task_to_keys[glue_config]
        for folder in ('train', 'dev', 'test'):
            split = folder_to_split[folder]
            # MNLI has matched/mismatched variants of the dev and test splits.
            if folder != 'train':
                if task == 'mnli':
                    split += '_matched'
                elif task == 'mnli-mm':
                    split += '_mismatched'
            data_path = os.path.join(dataset_root, task, folder)
            os.makedirs(data_path, exist_ok=True)
            lines = []
            for item in dataset[split]:
                fields = [item[sentence1_key]]
                if sentence2_key is not None:
                    fields.append(item[sentence2_key])
                fields.append(str(item['label']))
                lines.append('\t'.join(fields) + '\n')
            file_name = os.path.join(data_path, 'data')
            # Explicit UTF-8 so non-ASCII examples don't depend on the locale.
            with open(file_name, 'w', encoding='utf-8') as f:
                f.writelines(lines)
| AI-secure/adversarial-glue | training_script/ERNIE/download_glue_data.py | download_glue_data.py | py | 2,144 | python | en | code | 4 | github-code | 90 |
30722164184 | # imporrt packages
# Multiple linear regression on the ToyotaCorolla dataset.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.formula.api as smf
import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.compat import lzip
import pylab
import scipy.stats as st
from sklearn.model_selection import train_test_split

# Load the dataset
toyo = pd.read_csv("C:\\Users\\ACER\\Desktop\\scrap\\Multi linear regression\\ToyotaCorolla1.csv", encoding='latin1')
print(toyo.head())

# Keep the relevant numeric columns and give them shorter names
toyota1 = toyo.iloc[:, [2, 3, 6, 8, 12, 13, 15, 16, 17]]
toyota1 = toyota1.rename(columns={"Age_08_04": "Age", "Quarterly_Tax": "QuarterlyTax"})

# Correlation between variables (BUG FIX: was `startup.corr()`, an undefined name)
print(toyota1.corr())

# Full model: Price on all predictors
to1 = smf.ols('Price~Age+KM+HP+cc+Doors+Gears+QuarterlyTax+Weight', data=toyota1).fit()
print(to1.params)
print(to1.summary())

# Simple regressions for individual predictors
to2_v = smf.ols('Price~Age', data=toyota1).fit()
print(to2_v.summary())
to3_w = smf.ols('Price~KM', data=toyota1).fit()
print(to3_w.summary())
to4_wv = smf.ols('Price~HP', data=toyota1).fit()
print(to4_wv.summary())
to5_pq = smf.ols('Price~QuarterlyTax', data=toyota1).fit()
print(to5_pq.summary())  # BUG FIX: was `to5_wv.summary()` — undefined name
to6_pw = smf.ols('Price~Weight', data=toyota1).fit()
print(to6_pw.summary())  # BUG FIX: was `to6_wv.summary()` — undefined name

# Influence plot of the full model to identify influential observations
sm.graphics.influence_plot(to1)

# Drop the influential rows found above, then refit
# (BUG FIX: toyota2 was used before it was created in the original script)
toyota2 = toyota1.drop(toyota1.index[[80, 221, 960, 601]], axis=0)
print(toyota2.head())

toyo2 = smf.ols('Price~Age+KM+HP+cc+Doors+Gears+QuarterlyTax+Weight', data=toyota2).fit()
print(toyo2.params)
print(toyo2.summary())
print(toyo2.conf_int(0.01))

final_t = smf.ols('Price~Age+KM+HP+cc+Doors+Gears+QuarterlyTax+Weight', data=toyota2).fit()
print(final_t.params)
print(final_t.summary())

# Predicted values of Price
Profit1_pred = toyo2.predict(toyota2[['Age', 'KM', 'HP', 'cc', 'Doors', 'Gears', 'QuarterlyTax', 'Weight']])
Profit2_pred = final_t.predict(toyota2)

# Variance Inflation Factors of the independent variables
rsq_Age = smf.ols('Age~KM+HP+cc+Weight', data=toyota2).fit().rsquared
vif_Age = 1 / (1 - rsq_Age)
rsq_QuarterlyTax = smf.ols('QuarterlyTax~Age+HP+KM+Weight', data=toyota2).fit().rsquared
vif_QuarterlyTax = 1 / (1 - rsq_QuarterlyTax)
rsq_Weight = smf.ols('Weight~KM+HP+Doors+Gears', data=toyota2).fit().rsquared
vif_Weight = 1 / (1 - rsq_Weight)
rsq_KM = smf.ols('KM~Age+HP+Gears+Weight', data=toyota2).fit().rsquared
vif_KM = 1 / (1 - rsq_KM)
t1 = {'Variables': ['Age', 'QuarterlyTax', 'Weight', 'KM'],
      'VIF': [vif_Age, vif_QuarterlyTax, vif_Weight, vif_KM]}
Vif_frame = pd.DataFrame(t1)
print(Vif_frame)

# Train/test split and RMSE evaluation
# (BUG FIX: the split now happens *before* fitting model_train; the original
# used startup_train/startup_test before they were defined)
startup_train, startup_test = train_test_split(toyota2, test_size=0.2)
model_train = smf.ols("Price~Age+KM+HP+cc+Doors+Gears+QuarterlyTax+Weight", data=startup_train).fit()
train_pred = model_train.predict(startup_train)
train_resid = train_pred - startup_train.Price
train_rmse = np.sqrt(np.mean(train_resid * train_resid))
test_pred = model_train.predict(startup_test)
test_resid = test_pred - startup_test.Price
test_rmse = np.sqrt(np.mean(test_resid * test_resid))
print('train RMSE:', train_rmse, 'test RMSE:', test_rmse)

# Diagnostic visualizations
sm.graphics.plot_partregress_grid(toyo2)
sm.graphics.plot_partregress_grid(final_t)
plt.scatter(toyota2.Price, Profit1_pred, c="r"); plt.xlabel("observed_values"); plt.ylabel("fitted_values")
plt.scatter(Profit2_pred, final_t.resid_pearson, c="r"); plt.axhline(y=0, color='blue'); plt.xlabel("fitted_values"); plt.ylabel("residuals")
plt.hist(final_t.resid_pearson)
st.probplot(final_t.resid_pearson, dist="norm", plot=pylab)
| Pushpendra7767/Multilinear-regression | ToyotaCorolla final.py | ToyotaCorolla final.py | py | 3,433 | python | en | code | 0 | github-code | 90 |
3391129414 | import time
from src import utils
async def notice(ctx, userId, title='Example', content='Testing', penaltyType=0, adminOpNote=None, noticeType=0):
    """Send an admin notice to *userId* through the client's 'notice' endpoint."""
    payload = {
        "uid": userId,
        "title": title,
        "content": content,
        "attachedObject": {
            "objectId": userId,
            "objectType": 0
        },
        "penaltyType": penaltyType,
        # Default to an empty note when none (or a falsy value) was given.
        "adminOpNote": adminOpNote or {},
        "noticeType": noticeType,
        "timestamp": int(time.time() * 1000)
    }
    await ctx.client.request("POST", "notice", json=payload)
    return
@utils.isStaff
@utils.userTracker("notice")
async def giveNotice(ctx):
    """Chat command handler: `notice [type] [content...]` sends a notice to the sender."""
    parts = ctx.msg.content.split(" ")
    notice_type = -1
    if len(parts) > 1:
        # A non-numeric first argument falls back to notice type 0.
        try:
            notice_type = int(parts[1])
        except ValueError:
            notice_type = 0
    body = " ".join(parts[2:]) if len(parts) > 2 else 'testing'
    await notice(ctx, userId=ctx.msg.author.uid, title='Example', content=body, penaltyType=0, noticeType=notice_type)
    return
| leafylemontree/nati_amino-bot | src/admin/notices.py | notices.py | py | 1,065 | python | en | code | 5 | github-code | 90 |
6034619242 | from django.shortcuts import render, redirect
from django.http import JsonResponse
from django.contrib import messages
from management.Constant import *
import datetime
from .stockForm import AddStock
from .models import Stock
def addStock(request):
    """Create or top up the stock record for a product.

    POST: adds `new_stock` to the product's existing stock record (creating
    one if necessary) and redirects to the stock list.
    GET: renders the add-stock form.
    """
    today_date = datetime.datetime.now().strftime("%Y-%m-%d")
    if request.method == "POST":
        product_id = request.POST['product']
        added = int(request.POST['new_stock'])
        # BUG FIX: use a single .first() query instead of .count() + .get(),
        # and stop shadowing the module-level check_stock() view with a local.
        existing = Stock.objects.filter(product_id=product_id).first()
        if existing:
            current_stock = int(existing.current_stock) + added
            Stock.objects.filter(product_id=product_id).update(current_stock=current_stock, modified_at=today_date)
            messages.success(request, success_update)
        else:
            stock = Stock(product_id=product_id, current_stock=added, new_stock=request.POST['new_stock'],
                          created_at=today_date, modified_at=today_date)
            stock.save()
            messages.success(request, success_saved)
        return redirect('/dashboard/stock/list')
    else:
        form = AddStock()
        return render(request, 'add_stock.html', {'form': form})
def check_stock(request):
    """AJAX endpoint: return {'stock': n} for the product id in the query string.

    Responds with 0 when the product has no stock record.
    """
    product_id = request.GET.get('product_id')
    records = list(Stock.objects.filter(product_id=product_id).values('current_stock'))
    payload = {'stock': records[0]['current_stock'] if records else 0}
    return JsonResponse(payload)
def stockList(request):
stockList = Stock.objects.all()
return render(request, 'stocklist.html',{'stockList': stockList, 'flag': '1'}) | multisanjeev/Django-management-system | default/stockView.py | stockView.py | py | 1,760 | python | en | code | 0 | github-code | 90 |
22834495144 | import queue
import itertools
import threading
import Process
class MainMemory:
    """Simulated 512 MB main memory with a frame table and a process queue.

    Thread safety: a single per-instance lock guards the memory counters.
    (The original allocated a brand-new Lock on every call, which provided
    no mutual exclusion at all between concurrent callers.)
    """
    mainMemory = 512       # total physical memory, MB
    usedMemory = 0         # MB currently allocated
    frameSize = 32 / 1000  # MB per frame (i.e. 32 KB)
    tableSize = 35         # number of physical frames tracked
    q = queue.Queue()      # shared FIFO of queued processes

    def __init__(self):
        self.frameTable = [None] * self.tableSize
        self.processes = []
        # One lock per instance, shared by all memory operations.
        self._lock = threading.Lock()

    def getMemoryLeft(self):
        """Return the amount of free main memory in MB."""
        return self.mainMemory - self.usedMemory

    def useMemory(self, used, process):
        """Allocate `used` MB to `process`; raise ValueError on overcommit."""
        with self._lock:
            if self.usedMemory + used > self.mainMemory:
                raise ValueError('Used memory cannot exceed main memory!')
            self.usedMemory = self.usedMemory + used
            self.processes.append(process)
            print(f'Main memory remaining: {self.getMemoryLeft()} MB')

    def relieveMemory(self, used):
        """Free `used` MB; raise ValueError if freeing more than allocated."""
        with self._lock:
            if self.usedMemory - used < 0:
                raise ValueError('Cannot relieve more memory than was available!')
            self.usedMemory = self.usedMemory - used
            print(f'Main memory remaining: {self.getMemoryLeft()} MB')

    def addToQueue(self, process):
        """Append a process to the shared FIFO queue."""
        self.q.put(process)

    def printQueue(self):
        # NOTE(review): prints the Queue object's repr, not its contents --
        # kept as-is to preserve behavior.
        print(self.q)
class VirtualMemory:
    """Simulated 512 MB logical address space backed by 32 KB pages.

    NOTE(review): pageNumIter is a class attribute, so page numbers are
    shared across all VirtualMemory instances -- confirm that is intended.
    """
    virtualMemory = 512 # MB aka 2^29 logical address space
    usedMemory = 0 # MB
    pageSize = 32 / 1000 # MB or 32 KB
    pageNumIter = itertools.count()
    def __init__(self):
        self.logicalMemory = []  # list of (process id, page number) pairs
        self.disk = []           # victims evicted by the page-replacement branch
    def getMemoryLeft(self):
        """Return the amount of unused virtual memory in MB."""
        return self.virtualMemory - self.usedMemory
    def useMemory(self, used):
        """Account for `used` MB of virtual memory; raises on overcommit."""
        # NOTE(review): this Lock is created fresh on every call, so it
        # provides no mutual exclusion between threads -- confirm intent.
        lock = threading.Lock()
        with lock:
            if (self.usedMemory + used > self.virtualMemory):
                raise ValueError('Used memory cannot exceed virtual memory!')
            else:
                self.usedMemory = self.usedMemory + used
                print(f'Virtual memory remaining: {self.getMemoryLeft()} MB')
    def addPages(self, process): # Upon Process creation, pages for each Process operation are added to logical memory in order to move to ready state
        if (len(self.logicalMemory) < (self.virtualMemory / self.pageSize)):
            opList = process.operationsList
            count = 0
            for op in opList:
                # One page per operation; numbers come from the shared class counter.
                self.logicalMemory.append((process.id, next(self.pageNumIter))) # references page number
                self.useMemory(self.pageSize)
            #print(self.logicalMemory)
            self.toPageTable(process)
    def toPageTable(self, process):
        """Copy this process's (page number, offset) pairs into its page table."""
        count = 0
        for page in self.logicalMemory:
            if (page[0] == process.id):
                pageNum = page[1]
                offset = count # offset stays 0: count is never incremented (see commented line below)
                #count += 1
                process.pageTable.append((pageNum, offset))
        #print(process.pageTable)
    def toPhysical(self, process, frameTable):
        """Map the process's pages into frameTable, evicting a victim on overflow.

        NOTE(review): in the eviction branch `pageIndex` stays -1 unless the
        inner scan matches, and `index = count` is never used afterwards --
        behavior kept exactly as-is; verify the replacement bookkeeping.
        """
        # NOTE(review): per-call Lock, so no real mutual exclusion -- confirm intent.
        lock = threading.Lock()
        with lock:
            for page in process.pageTable:
                index = page[0] + page[1]
                if (len(frameTable) > index and frameTable[index] == None and frameTable.count(None) > 0):
                    frameTable[index] = f'Page {page[0]}'
                elif (len(frameTable) > index and frameTable.count(None) > 0):
                    emptyIndex = frameTable.index(None)
                    pageIndex = process.pageTable.index(page[0])
                    frameTable[emptyIndex] = f'Page {page[0]}'
                    process.pageTable[pageIndex] = (page[0], emptyIndex-page[1])
                else: # Basic page replacement algorithm
                    newIndex = 0 # Victim selection (first frame) - essentially a FIFO queue because processes arrive at same time in order
                    #print(process.pageTable)
                    count = 0
                    pageIndex = -1
                    for element in page:
                        if (element == page[0]):
                            index = count
                        count += 1
                    victim = frameTable[newIndex]
                    self.disk.append(victim)
                    frameTable[newIndex] = f'Page {page[0]}'
                    process.pageTable[pageIndex] = (page[0], page[1]*-1)
                    #print (frameTable)
| untermanlm/CMSC312 | src/Memory.py | Memory.py | py | 4,335 | python | en | code | 0 | github-code | 90 |
36040391020 | # SM3
import math
from typing import ByteString
# Initial state vector (IV) and the two round constants (T) of SM3.
IV = [0x7380166F, 0x4914B2B9, 0x172442D7, 0xDA8A0600, 0xA96F30BC, 0x163138AA, 0xE38DEE4D, 0xB0FB0E4E]
T = [0x79cc4519, 0x7a879d8a]  # T[0] for rounds 0-15, T[1] for rounds 16-63
def AsToByte(string):
    """Encode an ASCII string as a hex string, two digits per character.

    Fix: the original used hex(ord(c))[2:], which drops the leading zero
    for characters below 0x10 and corrupts the encoding.
    """
    return ''.join('{:02x}'.format(ord(ch)) for ch in string)
def FF(X, Y, Z, j):
    """SM3 boolean function FF_j: XOR for rounds 0-15, majority otherwise."""
    if 0 <= j <= 15:
        return X ^ Y ^ Z
    return (X & Y) | (X & Z) | (Y & Z)
def RoundS(X, i):
    """Rotate the 32-bit value X left by i bits (i is taken modulo 32)."""
    r = i % 32
    masked = X & 0xFFFFFFFF
    return ((masked << r) | (masked >> (32 - r))) & 0xFFFFFFFF
def GG(X, Y, Z, j):
    """SM3 boolean function GG_j: XOR for rounds 0-15, choose otherwise."""
    if 0 <= j <= 15:
        return X ^ Y ^ Z
    return (X & Y) | (~X & Z)
def P0(X):
    """Linear permutation P0(X) = X xor (X <<< 9) xor (X <<< 17)."""
    mixed = X ^ RoundS(X, 9)
    return mixed ^ RoundS(X, 17)
def P1(X):
    """Linear permutation P1(X) = X xor (X <<< 15) xor (X <<< 23)."""
    mixed = X ^ RoundS(X, 15)
    return mixed ^ RoundS(X, 23)
def T_(j):
    """Round constant T_j: T[0] for rounds 0-15, T[1] for rounds 16-63."""
    return T[0] if 0 <= j <= 15 else T[1]
def Pad(message):
    """Pad a hex-encoded message per SM3: append a 1 bit, zeros, and the
    64-bit message length, to a multiple of 512 bits.

    Fix: the original returned hex(int(m, 2))[2:], which silently drops
    leading zero nibbles, producing a short string that misaligns the
    512-bit blocks in Group(). The result is now left-padded back to the
    exact bit length / 4 hex characters.
    """
    m = bin(int(message, 16))[2:]
    # Restore leading zero bits lost by the int round-trip.
    if len(m) != len(message) * 4:
        m = '0' * (len(message) * 4 - len(m)) + m
    l = len(m)
    Pad_len = bin(l)[2:].rjust(64, '0')  # 64-bit big-endian length field
    m = m + '1'
    if len(m) % 512 > 448:
        m = m + '0' * (512 - len(m) % 512 + 448) + Pad_len
    else:
        m = m + '0' * (448 - len(m) % 512) + Pad_len
    bit_len = len(m)
    # Keep leading zero nibbles so every 512-bit block stays intact.
    return hex(int(m, 2))[2:].rjust(bit_len // 4, '0')
def Group(m):
    """Split the padded hex string into 512-bit (128 hex char) blocks."""
    block_count = len(m) // 128
    return [m[i * 128:(i + 1) * 128] for i in range(block_count)]
def Ex_msg(M, n):
    """Message expansion for block n: derive W[0..67] and W'[0..63].

    W[0..15] come straight from the 512-bit block (8 hex chars per word);
    W[16..67] follow the SM3 expansion recurrence; W'[j] = W[j] xor W[j+4].
    """
    W = []
    _W = []
    for j in range(16):
        # Each 32-bit word is 8 hex characters of the current block.
        W.append(int(M[n][0 + 8 * j:8 + 8 * j], 16))
    for j in range(16, 68):
        W.append(P1(W[j - 16] ^ W[j - 9] ^ RoundS(W[j - 3], 15)) ^ RoundS(W[j - 13], 7) ^ W[j - 6])
    for j in range(64):
        _W.append(W[j] ^ W[j + 4])
    return W, _W
def CF(V, M, i):
    """SM3 compression function: absorb block i into state V[i].

    Runs 64 rounds over the working registers A..H, then XORs the result
    into the incoming state words (Davies-Meyer style) and returns the
    new eight-word state.
    """
    A, B, C, D, E, F, G, H = V[i]
    W, _W = Ex_msg(M, i)
    for j in range(64):
        SS1 = RoundS((RoundS(A, 12) + E + RoundS(T_(j), j % 32)) % (2 ** 32), 7)
        SS2 = SS1 ^ RoundS(A, 12)
        TT1 = (FF(A, B, C, j) + D + SS2 + _W[j]) % (2 ** 32)
        TT2 = (GG(E, F, G, j) + H + SS1 + W[j]) % (2 ** 32)
        D = C
        C = RoundS(B, 9)
        B = A
        A = TT1
        H = G
        G = RoundS(F, 19)
        F = E
        E = P0(TT2)
    a, b, c, d, e, f, g, h = V[i]
    # Final XOR with the input state chains the blocks together.
    V_ = [a ^ A, b ^ B, c ^ C, d ^ D, e ^ E, f ^ F, g ^ G, h ^ H]
    return V_
def Round_iter(M):
    """Run the compression function over every block, starting from IV."""
    states = [IV]
    for block_index in range(len(M)):
        states.append(CF(states, M, block_index))
    return states[len(M)]
def SM3(message):
    """Compute the SM3 digest of a hex-encoded message as a 64-char hex string.

    Fix: each 32-bit state word is now zero-padded to exactly 8 hex digits.
    The original concatenated hex(x)[2:], which drops leading zero nibbles
    and corrupts any digest whose words start with a zero nibble.
    """
    m = Pad(message)
    M = Group(m)
    Vn = Round_iter(M)
    return ''.join('{:08x}'.format(word) for word in Vn)
def KDF(z, klen):
    """SM2-style key derivation: concatenate SM3(z || counter) until
    klen bytes (2*klen hex chars) of output are available."""
    klen = int(klen)
    rounds = math.ceil(klen / 32)  # SM3 yields 32 bytes per round
    ct = 0x00000001
    derived = ""
    for _ in range(rounds):
        derived += SM3(z + hex(ct)[2:].rjust(8, '0'))
        ct += 1
    return derived[0:klen * 2]
if __name__ == "__main__":
    # Demo: hash the ASCII string "abc".
    # (Fix: removed dataset-dump junk fused onto the final print line.)
    content = AsToByte('abc')
    res = SM3(content)
    print(res)
1994026885 | from django.core.cache import cache
from gpsDatingApp.redis.RedisInterface import RedisInterface
from gpsDatingApp.dao.ReservationDaoSingleton import ReservationDaoSingleton
from gpsDatingApp.otherConfig.ListInitConfig import ListInitConfig
from gpsDatingApp.otherConfig.LifeCycleConfig import LifeCycleConfig
import threading
lock = threading.Lock()
class ReservationCounterRedisSingleton(RedisInterface):
    """Thread-safe singleton caching per-city reservation counts in Redis.

    Uses double-checked locking (module-level `lock`) both for instance
    creation and for the lazy build of the cached counter dict.
    """
    __instance: RedisInterface = None
    __isFirstInit: bool = False
    prefix: str = "rcrs"  # cache key under which the counter dict is stored
    def __new__(cls):
        if not cls.__instance:
            with lock:
                if not cls.__instance:
                    ReservationCounterRedisSingleton.__instance = super().__new__(cls)
        return cls.__instance
    def __init__(self):
        # Guard so re-running __init__ on the shared instance is a no-op.
        if not self.__isFirstInit:
            ReservationCounterRedisSingleton.__isFirstInit = True
    def set(self, reservationCounterInfo: dict) -> None:
        """Store the whole counter dict under the class prefix with a TTL."""
        cache.set(self.prefix, reservationCounterInfo, timeout = LifeCycleConfig.INFO_TYPE_REDIS_DATA_LIFE_CYCLE)
    def count(self, city: str) -> None:
        """Increment the cached reservation count for `city`."""
        # Avoid a deadlock: get() takes the lock itself on a cache miss,
        # so warm the cache before acquiring the lock here.
        if self.has() == False:
            self.get()
        with lock:
            reservationCounterInfo: dict = self.get()
            reservationCounterInfo[city] = reservationCounterInfo[city] + 1
            self.set(reservationCounterInfo)
    def get(self) -> dict:
        """Return the counter dict, rebuilding it from the DB on a cache miss."""
        if self.has() == False:
            with lock:
                if self.has() == False:
                    value: dict = dict(ListInitConfig().REZ_CNTR_INFO_INIT_FORMAT)
                    unitList: list = ReservationDaoSingleton().getAll()
                    for unit in unitList:
                        value[unit.reserveCity] = value[unit.reserveCity] + 1
                    self.set(value)
        reservationCounterInfo: dict = cache.get(self.prefix)
        return reservationCounterInfo
    def has(self) -> bool:
        """True if the counter dict is currently cached."""
        return cache.has_key(self.prefix)
    def delete(self) -> None:
        # Interface method; not needed for this counter yet.
        pass
    def ttl(self) -> None:
        # Interface method; not needed for this counter yet.
        pass
| GitHub-WeiChiang/main | GpsDatingApp/Back-End/DjangoEnv/project/gpsDatingApp/redis/ReservationCounterRedisSingleton.py | ReservationCounterRedisSingleton.py | py | 2,054 | python | en | code | 7 | github-code | 90 |
class BikeRental:
    """
    A bike rental shop: tracks available bikes, active rentals and billing.
    Rates: 5$/hour, 20$/day, 60$/week.
    """

    def __init__(self, name, no_of_bikes) -> None:
        # Fix: the constructor was named `_init_` (single underscores), so
        # it never ran and BikeRental(name, n) raised TypeError.
        self.name = name
        self.no_of_bikes = no_of_bikes
        self.users = {}  # renter name -> {"hourly"|"daily"|"weekly": bike count}

    def rent_bike_hourly(self, name: str, bikes: int):
        """
        Allots bikes to a person on hourly basis.
        Takes name of the person and number of bikes to rent as inputs.
        """
        if bikes > self.no_of_bikes:
            print(f"Bike shortage! You can have max {self.no_of_bikes} bikes.")
        else:
            self.users.update({name: {"hourly": bikes}})
            print("You rented", bikes, "on the hourly basis")
            self.no_of_bikes -= bikes

    def rent_bike_daily(self, name: str, bikes: int):
        """
        Allots bikes to a person on daily basis.
        Takes name of the person and number of bikes to rent as inputs.
        """
        if bikes > self.no_of_bikes:
            print(f"Bike shortage! You can have max {self.no_of_bikes} bikes.")
        else:
            self.users.update({name: {"daily": bikes}})
            print("You rented", bikes, "on the daily basis")
            self.no_of_bikes -= bikes

    def rent_bike_weekly(self, name: str, bikes: int):
        """
        Allots bikes to a person on weekly basis.
        Takes name of the person and number of bikes to rent as inputs.
        """
        if bikes > self.no_of_bikes:
            print(f"Bike shortage! You can have max {self.no_of_bikes} bikes.")
        else:
            self.users.update({name: {"weekly": bikes}})
            # Fix: previously printed self.no_of_bikes (the remaining pool)
            # instead of the rented count, and said "weekly 1basis".
            print("You rented", bikes, "on the weekly basis")
            self.no_of_bikes -= bikes

    def issue_bill(self, name: str):
        """
        Issues a bill to the customer identified by `name`, returns the
        rented bikes to the pool and removes the rental record.
        """
        if name in self.users.keys():
            for i in self.users[name].keys():
                if i == 'hourly':
                    print(f"{name.capitalize()}, you rented {self.users.get(name).get('hourly')} bikes on hourly basis.")
                    # NOTE(review): the hourly discount applies to 1-5 bikes
                    # while daily/weekly require 4-5 -- behavior kept as-is;
                    # confirm the intended policy.
                    if self.users[name]['hourly'] > 0 and self.users[name]['hourly'] < 6:
                        print("You also got the family discount of 30%!")
                        bill = 0.7 * (5 * self.users[name]['hourly'])
                    else:
                        bill = 5 * self.users[name]['hourly']
                    print(f"Therefore, total amount payable: {bill}$")
                    self.no_of_bikes += self.users[name]['hourly']
                    check = input("Press any key to continue. ")  # interactive pause
                    print("Bike returned successfully!")
                    self.users.pop(name)
                elif i == 'daily':
                    print(f"{name.capitalize()}, you rented {self.users.get(name).get('daily')} bikes on daily basis.")
                    if self.users[name]['daily'] > 3 and self.users[name]['daily'] < 6:
                        print("You also got the family discount of 30%!")
                        bill = 0.7 * (20 * self.users[name]['daily'])
                    else:
                        bill = 20 * self.users[name]['daily']
                    print(f"Therefore, total amount payable: {bill}$")
                    self.no_of_bikes += self.users[name]['daily']
                    print("Bike returned successfully!")
                    self.users.pop(name)
                elif i == 'weekly':
                    print(f"{name.capitalize()}, you rented {self.users.get(name).get('weekly')} bikes on weekly basis.")
                    if self.users[name]['weekly'] > 3 and self.users[name]['weekly'] < 6:
                        print("You also got the family discount of 30%!")
                        bill = 0.7 * (60 * self.users[name]['weekly'])
                    else:
                        bill = 60 * self.users[name]['weekly']
                    print(f"Therefore, total amount payable: {bill}$")
                    self.no_of_bikes += self.users[name]['weekly']
                    print("Bike returned successfully!")
                    self.users.pop(name)
                else:
                    print("ERROR")
        else:
            print("Put proper name!")
if __name__ == "__main__":
    # Fix: was `if _name_ == "_main_":` (single underscores), which raised
    # NameError instead of running the CLI loop.
    Nagpur_Bikes = BikeRental("Nagpur Bike Rental Service", 100)
    while 1:
        task = int(input("What to do:\n 1. See available bikes\n 2. Rent a bike\n 3. Return a bike\n "))
        if task == 1:
            print(Nagpur_Bikes.no_of_bikes)
        elif task == 2:
            type = int(input("On what basis would you like to rent a bike:\n 1. Hourly basis - 5$ per hour\n 2. Daily basis - 20$ per day\n 3. Weekly basis - 60$ per week\n"))
            name = str(input("Enter your name: "))
            if name in Nagpur_Bikes.users.keys():
                print("This name not available! Try with a slight adaption.")
                continue
            bikes = int(input("How many bikes do you want to rent? "))
            if type == 1:
                Nagpur_Bikes.rent_bike_hourly(name, bikes)
            elif type == 2:
                Nagpur_Bikes.rent_bike_daily(name, bikes)
            elif type == 3:
                Nagpur_Bikes.rent_bike_weekly(name, bikes)
            else:
                print("Invalid input!")
        elif task == 3:
            if len(Nagpur_Bikes.users) != 0:
                for i in Nagpur_Bikes.users.keys():
                    print(f"Customer name: {i}")
                name = input("Who are you from the above list? ")
                Nagpur_Bikes.issue_bill(name)
            else:
                print("No one has rented a bike yet!")
        else:
            print("Invalid input!")
        print("\n----------------------------------------------------\n")
1506811319 |
# coding: utf-8
# # Self-Driving Car Engineer Nanodegree
#
# ## Deep Learning
#
# ## Project: Build a Traffic Sign Recognition Classifier
#
# In this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary.
#
# > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \n",
# "**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
#
# In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/481/view) for this project.
#
# The [rubric](https://review.udacity.com/#!/rubrics/481/view) contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the "stand out suggestions", you can include the code in this Ipython notebook and also discuss the results in the writeup file.
#
#
# >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
# ---
# ## Step 0: Load The Data
# In[1]:
# Load pickled data
import pickle
import os
# TODO: Fill this in based on where you saved the training and testing data
training_file = "train.p"
validation_file="valid.p"
testing_file = "test.p"
with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(validation_file, mode='rb') as f:
    valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)
# Each pickle holds a dict; 'features' is the raw image tensor and
# 'labels' the matching class-id array (per the dataset description).
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
# Checking the dimensions of the training/valid/test data
# In[2]:
print(X_train.shape)
print(X_valid.shape)
print(X_test.shape)
# ---
#
# ## Step 1: Dataset Summary & Exploration
#
# The pickled data is a dictionary with 4 key/value pairs:
#
# - `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).
# - `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id.
# - `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image.
# - `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES**
#
# Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results.
# ### Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas
# In[3]:
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
# TODO: Number of training examples
n_train = X_train.shape[0]
# TODO: Number of validation examples
n_validation = X_valid.shape[0]
# TODO: Number of testing examples.
n_test = X_test.shape[0]
# TODO: What's the shape of an traffic sign image?
image_shape = X_train.shape[1:3]
# TODO: How many unique classes/labels there are in the dataset.
# n_classes = distinct(y_train)
# Union of the label sets across all three splits.
n_classes = len(set(y_train)|set(y_valid)|set(y_test))
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
# ### Include an exploratory visualization of the dataset
# Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc.
#
# The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python.
#
# **NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others?
# In[4]:
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
import random
# Visualizations will be shown in the notebook.
get_ipython().magic('matplotlib inline')
# In[5]:
# Show one randomly chosen training image with its class id.
# NOTE(review): randint's upper bound is inclusive, so
# index == len(X_train) is possible and would raise IndexError.
index = random.randint(0, len(X_train))
image = X_train[index].squeeze()
plt.figure(figsize=(1,1))
plt.imshow(image)
print(y_train[index])
# In[6]:
# look at the distribution of classes in the training, validation and test
import pandas as pd
df = pd.DataFrame({"y_train":y_train})
df['y_train'].value_counts()
# It can be seen that the distribution of data is skewed. Some of the class (i.e., class 2) has more than 10 times samples than others (i.e., class 0). Data generation might be needed.
# ----
#
# ## Step 2: Design and Test a Model Architecture
#
# Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).
#
# The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play!
#
# With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission.
#
# There are various aspects to consider when thinking about this problem:
#
# - Neural network architecture (is the network over or underfitting?)
# - Play around preprocessing techniques (normalization, rgb to grayscale, etc)
# - Number of examples per label (some have more than others).
# - Generate fake data.
#
# Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these.
# ### Pre-process the Data Set (normalization, grayscale, etc.)
# Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project.
#
# Other pre-processing steps are optional. You can try different techniques to see if it improves performance.
#
# Use the code cell (or multiple code cells, if necessary) to implement the first step of your project.
# In[7]:
import cv2
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.image as mpimgb
import numpy as np
# #### Data generation
#
# To address the imbalanced dataset, we use the following data generationt techniques:
# 1. For class with less than 500 counts, generate three rotated images with rotation angles between -10 and 10 degrees.
# 2. For class with more than 500 but less than 1000 counts, generate on rotated copy with rotation angles between -10 and 10 degrees.
# 3. For all data, generate one additional translated copy with translation in -4 and 4 pixels (exclude 0). Dataset is divided into three batches for faster processing.
# In[8]:
from scipy.ndimage import rotate
from sklearn.utils import shuffle
def generate_additional_data_rotate(x, y):
    """Augment under-represented classes with randomly rotated copies.

    Classes with fewer than 500 samples get three rotated copies; classes
    with 500-999 samples get one; larger classes are left alone. Rotation
    angles are drawn uniformly from [-10, 10) degrees. Returns the grown
    (x, y); callers should reshuffle afterwards.

    Fix: the three near-identical rotate-and-concatenate stanzas are
    collapsed into one loop over the drawn angles.
    """
    for cls in range(43):
        mask = y == cls
        count = np.sum(mask)
        if count >= 1000:
            continue
        x_cls = x[mask]
        y_cls = y[mask]
        # 3 copies for rare classes, 1 for mid-frequency classes.
        num_copies = 3 if count < 500 else 1
        angles = 20.0 * np.random.random_sample(num_copies) - 10.0
        rotated = [rotate(x_cls, angle, axes=(1, 2), reshape=False)
                   for angle in angles]
        x = np.concatenate([x] + rotated)
        y = np.concatenate([y] + [y_cls] * num_copies)
    return x, y
# Augment rare classes with rotated copies, then reshuffle.
X_train, y_train = generate_additional_data_rotate(X_train, y_train)
# Shuffle the data
X_train, y_train = shuffle(X_train, y_train)
def generate_additional_data_translate(x, y):
    """Double the data set by appending horizontally shifted copies.

    The data is processed in three equal slices; each slice is rolled
    along the width axis by a random non-zero offset in [-4, 3].
    Returns the grown (x, y); callers should reshuffle afterwards.

    Fix: the thrice-repeated "draw a non-zero random shift" while-loop is
    factored into a single helper.
    """
    def _random_shift():
        # Draw a non-zero shift; 0 would just duplicate the image unchanged.
        shift = 0
        while shift == 0:
            shift = np.random.randint(-4, 4)
        return shift

    total = y.shape[0]
    first = int(total / 3)
    second = 2 * first
    slices = [(x[0:first], y[0:first]),
              (x[first:second], y[first:second]),
              (x[second:], y[second:])]
    shifted = [np.roll(xs, _random_shift(), axis=2) for xs, _ in slices]
    x = np.concatenate([x] + shifted)
    y = np.concatenate([y] + [ys for _, ys in slices])
    return x, y
# Double the training set with shifted copies, then reshuffle.
X_train, y_train = generate_additional_data_translate(X_train, y_train)
# Shuffle the data
X_train, y_train = shuffle(X_train, y_train)
# Plotting number of samples v/s sign id
counts = []
for i in range(43):
    counts.append(np.sum(y_train == i))
count_plot = plt.bar(range(43),counts)
axes = plt.gca()
axes.set_ylim([0,5000])
# #### Convert to grey scale and normalize
#
# In this step:
# 1. First convert the data into grey scale by taking the average of three channels.
# 2. Normalize the data to centered at zero with small variance.
# In[9]:
def normalize(x):
    """Convert RGB images to grayscale and scale pixels to roughly [-1, 1).

    Averages the channel axis, then applies (p - 128) / 128 in a single
    vectorized expression (the original looped image-by-image in Python).
    Input: (N, H, W, 3) array; output: (N, H, W, 1) float array.
    """
    x_grey = np.average(x, axis=3)
    return ((x_grey - 128.0) / 128.0).reshape(x_grey.shape + (1,))
# In[10]:
### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include
### converting to grayscale, etc.
### Feel free to use as many code cells as needed.
# import numpy as np
# Cast to float before normalization (original arrays are uint8).
# NOTE(review): X_valid is not cast to float32 like the others --
# normalize() still works, but the dtypes are inconsistent.
X_train = X_train.astype(np.float32)
X_test = X_test.astype(np.float32)
X_train_normalized = normalize(X_train)
X_valid_normalized = normalize(X_valid)
X_test_normalized = normalize(X_test)
# In[11]:
print(X_train_normalized.shape)
print(X_valid_normalized.shape)
print(X_test_normalized.shape)
# ### Model Architecture
# The following model architecture is used:
# 1. LeNet architecture with two conv layers, two pooling layers, and three fully connected layers.
# 2. A dropout probablity = 0.7 is used.
# 3. 10 epochs with batch_size = 128.
# 4. Use tanh as activation function.
# In[12]:
### Define your architecture here.
### Feel free to use as many code cells as needed.
import tensorflow as tf
EPOCHS = 15       # number of passes over the training set
BATCH_SIZE = 128  # minibatch size for SGD and for evaluation
# In[13]:
from tensorflow.contrib.layers import flatten
def conv2d(x, W, b, strides=1, padding = 'SAME'):
    """2-D convolution followed by bias add and tanh activation (TF1 graph op)."""
    conv = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding=padding)
    return tf.nn.tanh(tf.nn.bias_add(conv, b))
def maxpool2d(x, k=2):
    """k-by-k max pooling with stride k and SAME padding (TF1 graph op)."""
    window = [1, k, k, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def LeNet(x):
    """LeNet-5 style classifier: 32x32x1 input -> n_classes logits.

    Two conv+tanh+maxpool stages followed by three fully connected layers;
    dropout with keep prob 0.7 is applied after fc1 and fc2.
    NOTE(review): dropout is hard-wired (no keep-prob placeholder), so it
    also fires at evaluation time -- confirm that is intended.
    """
    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
    mu = 0
    sigma = 0.1
    dropout = 0.7
    # Store layers weight & bias
    weights = {
        'wc1': tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6),mean=mu,stddev=sigma)),
        'wc2': tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16),mean=mu,stddev=sigma)),
        'wd1': tf.Variable(tf.truncated_normal(shape=(5*5*16, 120),mean=mu,stddev=sigma)),
        'wd2': tf.Variable(tf.truncated_normal(shape=(120,84),mean=mu,stddev=sigma)),
        'out': tf.Variable(tf.truncated_normal(shape=(84,n_classes),mean=mu,stddev=sigma))}
    biases = {
        'bc1': tf.Variable(tf.zeros([6])),
        'bc2': tf.Variable(tf.zeros([16])),
        'bd1': tf.Variable(tf.zeros([120])),
        'bd2': tf.Variable(tf.zeros([84])),
        'out': tf.Variable(tf.zeros([n_classes]))}
    # TODO: Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    conv1 = conv2d(x,weights['wc1'],biases['bc1'],1,'VALID')
    # TODO: Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = maxpool2d(conv1,k=2)
    # TODO: Layer 2: Convolutional. Output = 10x10x16.
    conv2 = conv2d(conv1,weights['wc2'],biases['bc2'],1,'VALID')
    # TODO: Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = maxpool2d(conv2,k=2)
    # TODO: Flatten. Input = 5x5x16. Output = 400.
    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
    # TODO: Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.tanh(fc1)
    fc1 = tf.nn.dropout(fc1, dropout)
    # TODO: Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2 = tf.add(tf.matmul(fc1, weights['wd2']), biases['bd2'])
    fc2 = tf.nn.tanh(fc2)
    fc2 = tf.nn.dropout(fc2, dropout)
    # TODO: Layer 5: Fully Connected. Input = 84. Output = 43.
    logits = tf.add(tf.matmul(fc2, weights['out']), biases['out'])
    return logits
# ### Train, Validate and Test the Model
# A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation
# sets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting.
# The following paratmeters are used after trial and error:
# 1. rate = 0.002
# 2. epoch = 15
# In[14]:
# Graph inputs: batch of normalized grayscale images and integer labels.
x = tf.placeholder(tf.float32, (None, 32, 32, 1))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, n_classes)
# In[15]:
### Train your model here.
rate = 0.002
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
# In[16]:
### Calculate and report the accuracy on the training and validation set.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
# return both accuracy and loss
def evaluate(X_data, y_data):
    """Run the graph over X_data/y_data in batches; return (avg loss, avg accuracy).

    Uses the module-level BATCH_SIZE, placeholders/ops and the current
    default TF session.
    """
    num_examples = len(X_data)
    total_accuracy = 0
    total_loss = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        # NOTE(review): two sess.run calls recompute the forward pass twice;
        # a single run fetching both tensors would halve the work.
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        loss = sess.run(loss_operation,feed_dict={x: batch_x, y:batch_y})
        total_accuracy += (accuracy * len(batch_x))
        total_loss += (loss * len(batch_x))
    return total_loss/num_examples, total_accuracy / num_examples
# In[17]:
from sklearn.utils import shuffle
# Train for EPOCHS passes, logging loss/accuracy per epoch and saving
# the final checkpoint to ./lenet.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # NOTE(review): len(X_train) is used although batches come from
    # X_train_normalized -- the two have equal length, so this is safe.
    num_examples = len(X_train)
    training_loss_history = []
    validation_loss_history = []
    training_accuracy_history = []
    validation_accuracy_history = []
    print("Training...")
    print()
    for i in range(EPOCHS):
        # Reshuffle each epoch so minibatches differ between epochs.
        X_train_normalized, y_train = shuffle(X_train_normalized, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train_normalized[offset:end], y_train[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
        # Log accuracy and loss every epoch
        training_loss, training_accuracy = evaluate(X_train_normalized,y_train)
        training_loss_history.append(training_loss)
        training_accuracy_history.append(training_accuracy)
        validation_loss, validation_accuracy = evaluate(X_valid_normalized, y_valid)
        validation_loss_history.append(validation_loss)
        validation_accuracy_history.append(validation_accuracy)
        print("EPOCH {} ...".format(i+1))
        print("Training loss = {:.3f}".format(training_loss))
        print("Training accuracy = {:.3f}".format(training_accuracy))
        print("Validation loss = {:.3f}".format(validation_loss))
        print("Validation accuracy = {:.3f}".format(validation_accuracy))
        print()
    saver.save(sess, './lenet')
    print("Model saved")
# The loss plot as a function of epoch is depicted as below:
# In[20]:
# Plot training vs validation loss over the logged epochs.
loss_plot = plt.subplot(2,1,1)
loss_plot.set_title('Loss')
loss_plot.plot(training_loss_history, 'r', label='Training Loss')
loss_plot.plot(validation_loss_history, 'b', label='Validation Loss')
loss_plot.set_xlim([0, EPOCHS])
loss_plot.legend(loc=1)
# In[40]:
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.
# Restore the latest checkpoint and report accuracy on the held-out test set.
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    test_loss, test_accuracy = evaluate(X_test_normalized, y_test)
    print("Test Accuracy = {:.3f}".format(test_accuracy))
# In[22]:
# save the data for later use
# Save the preprocessed splits for later use.
# Fix: the original passed open("save.p","wb") directly to pickle.dump,
# leaking the file handle; a with-block guarantees it is closed.
import pickle
with open("save.p", "wb") as save_file:
    pickle.dump((X_train_normalized, y_train, X_valid_normalized, y_valid,
                 X_test_normalized, y_test), save_file)
# ---
#
# ## Step 3: Test a Model on New Images
#
# To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.
#
# You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name.
# ### Load and Output the Images
# Download a dataset of 10 images and put it under directory 'traffic_test'. Randomly choose five images and plot them.
# In[46]:
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
import matplotlib.image as mpimg
plt.rcParams['figure.figsize'] = (8, 10)
# Directory holding the five web-downloaded German traffic sign photos.
directory = 'test_images/'
image_names = ['test0.jpg','test1.jpg','test2.jpg', 'test3.jpg', 'test4.jpg']
# Pre-allocated image batch; assumes each file is already 32x32 RGB so it
# fits this buffer without resizing -- TODO confirm the source images.
test_images = np.zeros((5,32,32,3), dtype=np.uint8)
for i in range(5):
    image = mpimg.imread(directory + image_names[i])
    test_images[i] = image
    # Show each image in its own row of a 5x1 figure, axes hidden.
    plt.subplot(5, 1, i+1)
    plt.imshow(image)
    plt.axis('off')
# ### Predict the Sign Type for Each Image
# In[48]:
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
test_images = test_images.astype(np.float32)
test_images = normalize(test_images)
labels = ['Speed limit (20km/h)', 'Speed limit (30km/h)', 'Speed limit (50km/h)', 'Speed limit (60km/h)',
'Speed limit (70km/h)', 'Speed limit (80km/h)', 'End of speed limit (80km/h)', 'Speed limit (100km/h)',
'Speed limit (120km/h)', 'No passing', 'No passing for vechiles over 3.5 metric tons',
'Right-of-way at the next intersection', 'Priority road', 'Yield', 'Stop', 'No vechiles',
'Vechiles over 3.5 metric tons prohibited', 'No entry', 'General caution', 'Dangerous curve to the left',
'Dangerous curve to the right', 'Double curve', 'Bumpy road', 'Slippery road', 'Road narrows on the right',
'Road work', 'Traffic signals', 'Pedestrians', 'Children crossing', 'Bicycles crossing',
'Beware of ice/snow', 'Wild animals crossing', 'End of all speed and passing limits', 'Turn right ahead',
'Turn left ahead', 'Ahead only', 'Go straight or right', 'Go straight or left', 'Keep right', 'Keep left',
'Roundabout mandatory', 'End of no passing', 'End of no passing by vechiles over 3.5 metric tons']
# Run the trained network on the five new images and display each photo
# with its predicted class name as the title.
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    # Raw (un-normalized) class scores, shape (5, 43).
    predicted_logits = sess.run(logits, feed_dict={x:test_images})
    # argmax over classes gives the predicted class id per image.
    predicted_labels = np.argmax(predicted_logits, axis=1)
    for i in range(5):
        # Re-read the original file so the display is not the normalized copy.
        image = mpimg.imread(directory + image_names[i])
        plt.subplot(5, 1, i+1)
        plt.imshow(image)
        plt.axis('off')
        plt.title("Predicted Label: " + labels[predicted_labels[i]], fontsize=7)
# 1. The 40km/h speed limit is misclassified as 50km/h.
# 2. The 50km/h speed limit is misclassified as 80km/h.
# 3. The 80km/h speed limit is correctly classified as 80km/h.
# 4. The stop sign is correctly classified.
# 5. The slippery road is misclassified as dangerours curve to the right.
# ### Analyze Performance
# In[51]:
### Calculate the accuracy for these 5 new images.
### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.
correct_labeled = 2
all_signs = 5
accuracy = correct_labeled/all_signs
print('the accuracy running model against these 5 new images are: ', accuracy)
# ### Output Top 5 Softmax Probabilities For Each Image Found on the Web
# For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#top_k) could prove helpful here.
#
# The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image.
#
# `tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the correspoding class ids.
#
# Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tk.nn.top_k` is used to choose the three classes with the highest probability:
#
# ```
# # (5, 6) array
# a = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497,
# 0.12789202],
# [ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401,
# 0.15899337],
# [ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 ,
# 0.23892179],
# [ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 ,
# 0.16505091],
# [ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137,
# 0.09155967]])
# ```
#
# Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces:
#
# ```
# TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202],
# [ 0.28086119, 0.27569815, 0.18063401],
# [ 0.26076848, 0.23892179, 0.23664738],
# [ 0.29198961, 0.26234032, 0.16505091],
# [ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5],
# [0, 1, 4],
# [0, 5, 1],
# [1, 3, 5],
# [1, 4, 3]], dtype=int32))
# ```
#
# Looking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices.
# Calculate the top 5 softmax probability for each new images. Also bar chart is depicted for better illustration.
# In[50]:
### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.
### Feel free to use as many code cells as needed.
### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.
### Feel free to use as many code cells as needed.
def softmax(x):
    """Compute row-wise softmax probabilities for a batch of logit vectors.

    Args:
        x: 2-D array of shape (n_samples, n_classes) holding raw logits.

    Returns:
        Array of the same shape where each row is non-negative and sums to 1.
    """
    # Subtract the per-row maximum before exponentiating.  Mathematically a
    # no-op (the factor cancels in the ratio), but it prevents np.exp from
    # overflowing to inf for large logit values.
    shifted = x - np.max(x, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=1, keepdims=True)
# Convert the logits for the 5 new images into probabilities, then report
# the 5 most likely classes per image via tf.nn.top_k.
softmax_probabilites = softmax(predicted_logits)
with tf.Session() as sess:
    top5_prob = sess.run(tf.nn.top_k(tf.constant(softmax_probabilites),k=5))
    print(top5_prob)
# Bar chart of the full 43-class probability distribution for each image,
# one subplot per image.
fig = plt.figure()
width = 0.5
x_vals = range(43)
for i in range(5):
    ax = fig.add_subplot(5,1,i+1)
    ax.bar(x_vals, softmax_probabilites[i], width)
plt.show()
# ### Project Writeup
#
# Once you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file.
# > **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to \n",
# "**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
# ---
#
# ## Step 4 (Optional): Visualize the Neural Network's State with Test Images
#
# This Section is not required to complete but acts as an additional excersise for understaning the output of a neural network's weights. While neural networks can be a great learning device they are often referred to as a black box. We can understand what the weights of a neural network look like better by plotting their feature maps. After successfully training your neural network you can see what it's feature maps look like by plotting the output of the network's weight layers in response to a test stimuli image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol.
#
# Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimuli image, one used during training or a new one you provided, and then the tensorflow variable name that represents the layer's state during the training process, for instance if you wanted to see what the [LeNet lab's](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) feature maps looked like for it's second convolutional layer you could enter conv2 as the tf_activation variable.
#
# For an example of what feature map outputs look like, check out NVIDIA's results in their paper [End-to-End Deep Learning for Self-Driving Cars](https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/) in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image.
#
# <figure>
# <img src="visualize_cnn.png" width="380" alt="Combined Image" />
# <figcaption>
# <p></p>
# <p style="text-align: center;"> Your output should look something like this (above)</p>
# </figcaption>
# </figure>
# <p></p>
#
# In[ ]:
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):
    """Plot the feature maps a network layer produces for a stimulus image.

    Args:
        image_input: pre-processed image batch to feed the placeholder `x`.
        tf_activation: the layer tensor whose activations should be shown.
        activation_min / activation_max: optional fixed color-scale limits
            (-1 means "use matplotlib's automatic scaling").
        plt_num: figure number, so multiple layers can be plotted side by side.

    Note: relies on the module-level session `sess` and placeholder `x`.
    """
    activation = tf_activation.eval(session=sess,feed_dict={x : image_input})
    # Number of feature maps = channel dimension of the activation tensor.
    featuremaps = activation.shape[3]
    plt.figure(plt_num, figsize=(15,15))
    for featuremap in range(featuremaps):
        plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column
        plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number
        # Bug fix: the original condition was
        #     activation_min != -1 & activation_max != -1
        # `&` binds tighter than `!=`, so that parsed as
        #     activation_min != (-1 & activation_max)
        # which is wrong whenever activation_max is set.  Use `and`.
        if activation_min != -1 and activation_max != -1:
            plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin =activation_min, vmax=activation_max, cmap="gray")
        elif activation_max != -1:
            plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
        elif activation_min !=-1:
            plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
        else:
            plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray")
| taoyang1/CarND-Term1-Project2-TrafficSignClassifier | Traffic_Sign_Classifier.py | Traffic_Sign_Classifier.py | py | 32,295 | python | en | code | 0 | github-code | 90 |
12989856415 | #Animal population game
#V0.7
#Credits: Atkin, Sir.burton, Leo
import time, random, sys
# Last main-menu selection (0 = none yet); also read by death().
option = 0
#Item stuff
# Single shared game-state dict: animal populations, food supply, the
# player's hunger/thirst meters, the growing menu sleep delay, and the
# elapsed day counter.
items = {'bunnyPopulation':10,
         'foxPopulation':3,
         'plants':20,
         'thirst':40,
         'hunger':40,
         'sleep':0,
         'day':0
         }
#Creating a function to make sure any inputs are numbers.
#I take a lot of number inputs, so this will be helpful
def numberInput(text=""):
cow = input(text)
while not cow.isdigit():
print("Please enter a number:")
cow = input(text)
return int(cow)
#function for plants
def plantPopulation():
    """Daily grazing check: ~30% chance the bunnies eat plants.

    When it triggers, the shared plant count drops by half the current
    bunny population.
    """
    plantChance = random.randint(0,100)
    if plantChance <= 30:
        # Bug fix: the original read an undefined global `bunnyPopulation`,
        # which raised NameError at runtime -- the population lives in `items`.
        items ['plants'] -= (items ['bunnyPopulation'] / 2)
#player
def hunger():
    """Daily hunger tick: raise the player's hunger meter by 10 points."""
    items ['hunger'] += 10
def thirst():
    """Daily thirst tick: raise the player's thirst meter by 10 points."""
    items ['thirst'] += 10
#main menu
def menu():
    """Top-level species-selection menu; recurses to re-show itself.

    NOTE(review): the `else` below pairs with `if option == 0`, so choosing
    1 or 2 also falls into the else branch once the sub-menu returns; the
    sleep delay grows by one second on every pass, and the trailing
    `menu()` call recurses without bound -- confirm this is intended.
    """
    print(' 1) be a bunny')
    print(' 2) be a fox')
    print(' 0) do something else')
    option = numberInput()
    print()
    if option == 1:
        menuBunny()
    if option == 2:
        menuFox()
    if option == 0:
        print("you cant")
        items ['sleep'] += 1
        time.sleep(items ['sleep'])
    else:
        # Reached for any option other than 0 (including 1 and 2).
        items ['sleep'] += 1
        time.sleep(items ['sleep'])
    print()
    menu()
def randomEvents():
    """Roll a once-per-day random world event.

    Roughly: 0-30 fox boom, 31-60 bunny boom, 61-75 drought,
    76-105 plant scarcity; anything above 105 (out of 600) is a quiet day.
    All effects are applied to the shared `items` dict, so the old
    `global` declaration (which only shadowed the hunger/thirst helper
    names and assigned nothing) has been removed.
    """
    event = random.randint(0,600)
    #boom of foxes
    if event <= 30:
        print ("A boom of the fox population went up")
        # Bug fix: this branch announced a fox boom but incremented
        # bunnyPopulation, silently growing the wrong species.
        items ['foxPopulation']+=4
    #boom of bunnies
    elif event <= 60:
        print("A boom of the bunny population went up")
        items ['bunnyPopulation']+=4
    #drought
    elif event <= 75:
        print("a drought acoured!")
        items ['thirst'] += 17
    #scarcity of plants
    elif event <=105:
        print("the population of plants have gone down")
        items ['plants'] -= 6
#death functions
def death():
    """Check every lose condition; terminate the program if one is met.

    Returns the module-level `option` (NOTE(review): this global stays 0
    because menuBunny/menuFox keep their selection in a local variable --
    confirm the intended wiring of the species checks).
    """
    if option == 1:
        # Use <= 0: populations move in fractional steps (+0.35), so they
        # can pass through zero without ever equalling it exactly.
        if items ['bunnyPopulation'] <= 0:
            print('your species failed')
            print()
            time.sleep(1)
            print('you lose')
            sys.exit()
    if option == 2:
        if items ['foxPopulation'] <= 0:
            print('your species failed')
            print()
            time.sleep(1)
            print('you lose')
            sys.exit()
    # Bug fix: these checks used to sit *after* an unconditional
    # `return option`, so the player could never die of thirst or hunger.
    # Also use >= -- the meters change in steps of 10/15/17 and can jump
    # straight past 100.
    if items ['thirst'] >= 100:
        print('you died of thirst')
        sys.exit()
    if items ['hunger'] >= 100:
        print('you starved to death')
        sys.exit()
    return option
#bunny menu
def menuBunny():
    """Daily turn loop for the bunny player.

    Shows the world state, rolls a random event, takes one action, may
    trigger a fox encounter, advances the day, then recurses.
    NOTE(review): the `global` line assigns nothing here (hunger/thirst are
    the tick *functions*), and the tail call recurses once per day, so a
    long game will eventually hit Python's recursion limit -- confirm.
    """
    global hunger, thirst, bunnyPopulation
    # Apply the daily hunger/thirst ticks before showing the dashboard.
    hunger()
    thirst()
    print('Day = ',items ['day'])
    print('Hunger = ',items ['hunger'])
    print('Thirst = ',items ['thirst'])
    print('amount of plants =',items ['plants'])
    print('Fox population = ', items ['foxPopulation'])
    print('Bunny population = ', items ['bunnyPopulation'])
    randomEvents() #Initiate possible random events
    print(' 1) eat')
    print(' 2) drink')
    print(' 3) add population')
    option = numberInput()
    print()
    if option == 1:
        # NOTE(review): == 0 can be skipped if randomEvents drives the plant
        # count negative.
        if items ['plants'] == 0:
            print('there are no more plants so you can not eat')
        else:
            items ['hunger'] -= 15
            items ['plants'] -= 1
    elif option == 2:
        items ['thirst'] -= 15
    elif option == 3:
        # Fractional growth: the population rises by 0.35 per breeding day.
        items ['bunnyPopulation'] += 0.35
    # The more foxes there are, the likelier a random encounter.
    if random.randint(0,40) < items ["foxPopulation"]:
        bunnySeeFox()
    items ['day'] += 1
    death()
    menuBunny()
    return option
#fox menu
def menuFox():
    """Daily turn loop for the fox player.

    Mirrors menuBunny: dashboard, random event, one action, day advance,
    then tail-recursion into the next day.
    NOTE(review): unlike menuBunny, this never calls the hunger()/thirst()
    daily ticks -- confirm whether that is intentional.
    """
    global hunger, thirst, foxPopulation
    print('Day = ',items ['day'])
    print('Hunger = ',items ['hunger'])
    print('Thirst = ',items ['thirst'])
    # Bug fix: this line printed items['thirst'] under the "amount of
    # plants" label; it now reports the actual plant count.
    print('amount of plants =',items ['plants'])
    print('Fox population = ', items ['foxPopulation'])
    print('Bunny population = ', items ['bunnyPopulation'])
    randomEvents() #Initiate possible random events
    print(' 1) hunt')
    print(' 2) drink')
    print(' 3) add population')
    option = numberInput()
    print()
    if option == 1:
        if items ['bunnyPopulation'] <= 0:
            print('there are no more bunnies so you can not eat')
        else:
            foxSeeBunny()
    elif option == 2:
        items ['thirst'] -= 15
    elif option == 3:
        items ['foxPopulation'] += 0.35
    elif option == 0:
        print("no u")
    else:
        print("You wasted a day.")
    items ['day'] += 1
    death()
    menuFox()
    return option
#animals seeing each other
def bunnySeeFox():
    """Random fox encounter for the bunny player.

    Following the fox is always fatal; running away carries a 20% chance
    of being caught anyway.
    """
    print("You see a fox.")
    print(" 1) Follow it")
    print(" 2) run away")
    choice = numberInput()
    if choice == 1:
        print('You follow the fox and you die')
        # Bug fix: sys.quit() does not exist (AttributeError at runtime);
        # sys.exit() matches every other death path in the game.
        sys.exit()
    elif choice == 2:
        print("You run away")
        if random.randint(0,100) < 20:
            print("the fox caught up to you and ate you")
            sys.exit()
def foxSeeBunny():
    """Hunting encounter for the fox player.

    Ignoring the bunny does nothing; hunting it succeeds ~60% of the time,
    and only a successful catch feeds the fox.
    """
    print("You see a bunny.")
    print(" 1) Ignore")
    print(" 2) Hunt it")
    choice = numberInput()
    if choice == 1:
        print('You ignore the bunny')
    if choice == 2:
        print("you follow the bunny")
        event = random.randint(0,100)
        if event <= 40:
            print("the bunny ran away")
        elif event <= 100:
            print("you caught the bunny and you eat it")
            # Bug fix: hunger used to drop by 18 *before* the roll, so the
            # fox was fed even when the bunny escaped.  Only eating feeds.
            items ['hunger'] -= 18
#ending of the game stuff
# Run up to 250 in-game days (each menu() call recurses through one or
# more days; death paths call sys.exit() before we ever get here).
for i in range(250):
    menu()
# Bug fix: this epilogue used to be `while day <= 250:` -- `day` was never
# defined (the counter lives in items['day']), so it raised NameError, and
# even with the right name it would have printed the ending forever.
# Print the hibernation ending once instead.
print()
print('CONGRATS! you survived till hibernation time!')
time.sleep(1)
print('you go into your bunny hole and hibernate')
time.sleep(2)
print('Thanks for playing!')
| geedo7/AnimalgameV0.7 | AnimalgameV0.7.py | AnimalgameV0.7.py | py | 5,865 | python | en | code | 0 | github-code | 90 |
4769063010 | #!/user/bin/python
__author__ = 'hsenid'
from random import randint
#get number or rows
rows = int(input('Enter number of rows: '))
#get number of columns
columns = int(input('Enter number of columns: '))
output = open("output.csv", "w+")
for rw in range(0,rows):
yy=""
for cl in range(0,columns):
x = randint(0, 99)
if(cl == columns-1):
yy += str(x) + "\n"
else:
yy += str(x) + ","
output.write(yy)
| MashiW/pythonAssignment | assignment1/create_file.py | create_file.py | py | 468 | python | en | code | 0 | github-code | 90 |
# Greedy partition: split the input string into the maximum number of
# consecutive pieces such that no piece equals the one immediately before
# it, and print how many pieces that yields.
text = input()
prev_piece = text[0]
buffer = ''
pieces = 1
for ch in text[1:]:
    buffer += ch
    # Close the current piece as soon as it differs from the previous one.
    if buffer != prev_piece:
        prev_piece, buffer = buffer, ''
        pieces += 1
print(pieces)
| Aasthaengg/IBMdataset | Python_codes/p02939/s980841685.py | s980841685.py | py | 189 | python | en | code | 0 | github-code | 90 |
36225261428 | import cmath
def QuadraticFuncPlus(a, b, c):
    """Return the '+' root of a*x**2 + b*x + c = 0 via the quadratic formula.

    Uses cmath so a negative discriminant yields a complex root instead of
    raising.  (The old bare try/except printed "Error" and then crashed
    with NameError on the undefined result, so it has been removed.)
    """
    # Bug fix: the original wrote `... / 2*a`, which divides by 2 and then
    # *multiplies* by a; the denominator must be the full (2*a).
    return (-b + cmath.sqrt(b**2 - 4*a*c)) / (2*a)
def QuadraticFuncMinus(a, b, c):
    """Return the '-' root of a*x**2 + b*x + c = 0 via the quadratic formula.

    Uses cmath so a negative discriminant yields a complex root instead of
    raising.  (The old bare try/except printed "Error" and then crashed
    with NameError on the undefined result, so it has been removed.)
    """
    # Bug fix: `... / 2*a` divided by 2 and then multiplied by a; the
    # denominator must be the full (2*a).
    return (-b - cmath.sqrt(b**2 - 4*a*c)) / (2*a)
print("Find the solution in the quadratic equation (a*x + b)*(c*x + d)= 0")
print("Enter the values for a, b, c, and d in that order.")
aPre1=input()
aPre2=float(aPre1)
bPre1=input()
bPre2=float(bPre1)
cPre1=input()
cPre2=float(cPre1)
dPre1=input()
dPre2=float(dPre1)
a = aPre2*cPre2
b = aPre2*dPre2 + bPre2*cPre2
c = bPre2*dPre2
x1ans = QuadraticFuncPlus(a, b, c)
x2ans = QuadraticFuncMinus(a, b, c)
print("There are two solutions for x:")
print(x1ans)
print(x2ans)
| GL-Kageyama/QuadraticEquation | QuadraticEquationCheck3.py | QuadraticEquationCheck3.py | py | 740 | python | en | code | 0 | github-code | 90 |
74338810536 | #! /usr/bin/python
import os
import re
import sys
import subprocess
from datetime import datetime
# Connection/configuration values pulled from the container environment.
TEGOLA_DB_HOST = os.environ.get("TEGOLA_DB_HOST")
TEGOLA_TEGOLA_PASSWORD = os.environ.get("TEGOLA_TEGOLA_PASSWORD")
# osm2pgsql column/style definition used for every incremental import.
OSM2PGSQL_STYLE = "/container/config/tegola/osm2pgsql.style"
# GCS bucket holding the planet extract and daily .osc change files.
EXTRACT_BUCKET = "gs://%s" % os.environ.get("EDITOR_DB_DUMP_BUCKET")
# Remote log of which pbf/osc files have already been applied, plus the
# local scratch copy used when rewriting it.
UPDATES_FILE = "%s/000/updates.csv" % EXTRACT_BUCKET
UPDATES_TMP = "/tmp/updates.csv"
# When True (via -d/--dryrun), log what would happen but change nothing.
dryrun = False
def Log(msg):
    """Print a log line tagged with script name, pid and wall-clock time, then flush stdout."""
    script = os.path.basename(__file__)
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S:%f")
    print("[%s:%d] %26s: %s" % (script, os.getpid(), stamp, msg))
    sys.stdout.flush()
sys.stdout.flush()
# Class for maintaining a csv file that contains a log of data files
# that have been loaded. The log file should contain an initial
# line which is the full path of a *.pdf file, followed by some number
# of additional lines containing *.osc (or *.osc.gz) files.
#
# Example log file contents:
# /mnt/editor-db/extract/2020-11-13/planet-2020-11-13.pbf
# /mnt/editor-db/extract/2020-11-14/2020-11-13--2020-11-14.osc.gz
# /mnt/editor-db/extract/2020-11-15/2020-11-14--2020-11-15.osc.gz
# /mnt/editor-db/extract/2020-11-16/2020-11-15--2020-11-16.osc.gz
#
# The path names for these files should be such that lexical ordering
# equals chronological ordering.
#
# This class contains methods for:
# * checking if an osc file (either one that's in the current log file,
# or a new one) is chronologically after the pbf file
# * checking if a given osc file is in the current log file or not
# * adding a new osc file to the log file
class UpdatesFile:
    """In-memory view of the remote updates.csv log (one .pbf line followed
    by the .osc/.osc.gz files already applied), with helpers to query and
    append entries."""
    def __init__(self, path):
        # NOTE(review): subprocess.check_output returns bytes under Python 3,
        # so .split("\n") here assumes a Python 2 interpreter -- confirm.
        self.path = path
        self.pbf = None
        self.oscs = []
        lines = subprocess.check_output(["gsutil", "cat", path]).strip().split("\n")
        for line in lines:
            line = line.strip()
            if len(line) == 0:
                continue
            if line.endswith(".pbf"):
                # Exactly one base .pbf is allowed; a second one is fatal.
                if self.pbf != None:
                    print("Error: updates file '%s' contains multiple pbfs" % path)
                    sys.exit(-1)
                self.pbf = line
                continue
            if not (line.endswith(".osc") or line.endswith(".osc.gz")):
                print("Error: updates file '%s' contains something other than .pbf, .osc, or .osc.gz" % path)
                print(line)
                sys.exit(-1)
            self.oscs.append(line)
    def oscIsAfterPbf(self, osc):
        """True if `osc` sorts after the base pbf (paths sort chronologically)."""
        return osc > self.pbf
    def containsOsc(self, osc):
        """True if `osc` has already been logged as applied."""
        return osc in self.oscs
    def appendOsc(self, osc):
        """Record `osc` as applied: rewrite the log locally and push it back
        to the bucket with gsutil."""
        self.oscs.append(osc)
        with open(UPDATES_TMP, "w") as f:
            f.write("%s\n" % self.pbf)
            # NOTE: this loop variable shadows the `osc` parameter; harmless
            # here because the parameter is not used again afterwards.
            for osc in self.oscs:
                f.write("%s\n" % osc)
        system("gsutil cp %s %s" % (UPDATES_TMP, UPDATES_FILE))
        #with open(self.path, "a") as f:
        #    f.write("%s\n" % osc)
# Returns the osc file from a directory.
# parent = directory path (e.g. /mnt/editor-db/extract)
# dir = directory name (e.g. "2020-11-05")
# Looks in parent/dir directory for a file whose name matches "--DIR.osc" or "--DIR.osc.gz", and returns
# the full path of that file if there is exactly one such file, and if there is also a
# file named DONE in the directory. Otherwise returns None.
# Example:
# parent="/mnt/editor-db/extract"
# dir="2020-11-05"
# Returns "/mnt/editor-db/extract/2020-11-05/2020-11-04--2020-11-05.osc"
# if such a file exists, and there are no other files in that dir ending
# with "--2020-11-05.osc", and if "/mnt/editor-db/extract/2020-11-05/DONE" exists.
def GetDoneOscPath(parent, dir):
    """Return the single finished osc file for one extract day, or None.

    Looks inside parent/dir for exactly one file matching "*--DIR.osc" or
    "*--DIR.osc.gz" AND a DONE marker file; returns the osc's full path
    when both conditions hold, otherwise None (including when multiple
    candidate osc files are present).
    """
    oscs = [os.path.join(parent, dir, entry)
            for entry in os.listdir(os.path.join(parent,dir))
            if entry.endswith("--" + dir + ".osc") or entry.endswith("--" + dir + ".osc.gz")]
    if len(oscs) > 1:
        # Fixed typo in the warning text ("multple" -> "multiple").
        print("Warning: ignoring multiple .osc files in directory %s" % os.path.join(parent,dir))
        return None
    done = os.path.exists(os.path.join(parent, dir, "DONE"))
    if done and len(oscs) == 1:
        return oscs[0]
    return None
def GetAllDoneOscs():
    """Collect all finished osc files from date-named local extract dirs.

    NOTE(review): EXTRACT_DIR is not defined anywhere in this module, so
    calling this raises NameError; it also hard-codes
    "/mnt/editor-db/extract" in the inner call.  It appears unused (main()
    uses GetAllDoneOscsBucket) -- confirm before deleting or fixing.
    """
    doneOscs = []
    for entry in os.listdir(EXTRACT_DIR):
        # Only consider date-named directories like "2020-11-05".
        if re.match(r'20\d\d-\d\d-\d\d', entry):
            osc = GetDoneOscPath("/mnt/editor-db/extract", entry)
            if osc:
                doneOscs.append(osc)
    # Path names sort chronologically, so this orders the updates in time.
    doneOscs.sort()
    return doneOscs
def ListBucket(bucket):
    """Return `gsutil ls` output for `bucket` as a list of path strings.
    (Assumes Python 2 str output from check_output -- under Python 3 this
    would need text decoding; verify the runtime.)"""
    return subprocess.check_output(["gsutil", "ls", bucket]).strip().split("\n")
def BucketDir(bucket):
    """Strip the last path component: 'gs://b/a/x.osc' -> 'gs://b/a'.

    Returns None when the path has no slash-separated final component.
    """
    match = re.match(r'^(.*)/[^/]+$', bucket)
    return match.group(1) if match else None
def GetAllDoneOscsBucket(bucket):
    """Recursively list `bucket` and return every .osc/.osc.gz file whose
    directory also contains a DONE marker object."""
    raw = subprocess.check_output(["gsutil", "ls", "%s/**" % bucket])
    done_dirs = set()
    osc_files = set()
    for entry in raw.strip().split("\n"):
        if entry.endswith("DONE"):
            done_dirs.add(BucketDir(entry))
        elif entry.endswith((".osc.gz", ".osc")):
            osc_files.add(entry)
    # Keep only osc files that live in a directory marked DONE.
    return [osc for osc in osc_files if BucketDir(osc) in done_dirs]
def osm2pgsqlCmd(osc):
    """Build the shell pipeline that streams one gzipped osc file from GCS
    into osm2pgsql in append (-a) mode against the tegola database.

    SECURITY NOTE(review): the database password is interpolated into the
    shell command line, where it is visible to other local users via the
    process list; `osc` is also interpolated unquoted, so this must only be
    called with trusted bucket paths.  Consider subprocess + env instead.
    """
    return "gsutil cat %s | gunzip | PGPASSWORD=%s osm2pgsql -a -S %s -C 30000 --slim -d antique -U tegola -H %s -r xml -" % (osc, TEGOLA_TEGOLA_PASSWORD, OSM2PGSQL_STYLE, TEGOLA_DB_HOST)
def system(cmd):
    """Thin wrapper over os.system; returns its exit status.  Kept as a
    single seam so command execution can be stubbed or logged in one place."""
    return os.system(cmd)
def ParseArgs():
    """Parse command-line flags into module globals.

    Recognizes -d/--dryrun; any other argument prints an error and exits
    with status -1.
    """
    global dryrun
    for arg in sys.argv[1:]:
        if arg in ['-d', '--dryrun']:
            dryrun = True
        else:
            print("unrecognized option: %s" % arg)
            sys.exit(-1)
def main():
    """Apply every new, finished osc update to the tegola database.

    Loads the remote updates.csv log, finds all DONE-marked osc files in the
    extract bucket, and for each one that post-dates the base pbf and is not
    yet logged: records it in the log and pipes it through osm2pgsql.
    With --dryrun, only the log messages are emitted.
    """
    ParseArgs()
    Log("starting")
    updates = UpdatesFile(UPDATES_FILE)
    doneOscs = GetAllDoneOscsBucket(EXTRACT_BUCKET)
    # sorted() => chronological order, since path names sort by date.
    for osc in sorted(doneOscs):
        if updates.oscIsAfterPbf(osc) and not updates.containsOsc(osc):
            Log("appending %s to updates.csv" % osc)
            if not dryrun:
                updates.appendOsc(osc)
            cmd = osm2pgsqlCmd(osc)
            Log("executing: %s" % cmd)
            if not dryrun:
                system(cmd)
    Log("done")
if __name__ == "__main__":
main()
| kartta-labs/Project | k8s/cronjobs/tegola-update.py | tegola-update.py | py | 6,133 | python | en | code | 47 | github-code | 90 |
26254367549 | fname = input("enter the file name: ")
fhand = open(fname)
count = 0
for word in fhand:
#checking whether line has more than two elements space seperated
if word.startswith("From") and len(word.split()) > 2:
temp = word.split()
print(temp[1])
count = count + 1
print("There were", count, "lines in the file with From as the first word")
| utkarshtambe10/Python-for-Everybody-Course | Course2: Python Data Structures/week4/assignment8.5.py | assignment8.5.py | py | 371 | python | en | code | 1 | github-code | 90 |
2797739706 | import argparse, random
class Piece:
def __init__(self, color):
self.color = color
def __repr__(self):
return self.color[0].upper()
def __eq__(self, other):
if type(other) is Piece:
if self.color == other.color:
return True
class GameBoard:
    """6x7 Connect Four grid.  Empty cells hold the string 'O'; occupied
    cells hold Piece objects.  Row 0 is the top of the board."""
    DEPTH = 6
    WIDTH = 7
    def __init__(self):
        self.board = [['O' for col in range(self.WIDTH)] for row in range(self.DEPTH)]
    def _get_height(self, position):
        """Lowest empty row index in `position`'s column, or False if full.
        (Callers must test `type(height) is bool` because row 0 is valid.)"""
        for i in range(self.DEPTH-1, -1, -1):
            if self.board[i][position] == 'O':
                return i
        return False # column is full
    def place_piece(self, piece_color, position):
        """Drop a piece of `piece_color` into column `position`.
        NOTE(review): returns False when the column is full but None on
        success -- callers cannot use a simple truthiness check."""
        height = self._get_height(position)
        if type(height) is bool:
            print('Column full. Select another.')
            return False
        self.board[height][position] = Piece(piece_color)
    def print_board(self):
        """Print the grid row by row, cells separated by '|'."""
        for row in range(len(self.board)):
            for cell in self.board[row]:
                print(cell, end='|')
            print()
        print()
    def check_win(self):
        """Return the winning Piece if any four-in-a-row exists, else None.
        Scans bottom-up; the 'and item != O' guards exclude empty cells."""
        for i in range(self.DEPTH-1, -1, -1):
            for j in range(self.WIDTH):
                # check horizontal win
                if j < self.WIDTH-3:
                    chunk = self.board[i][j:j+4]
                    if all(item == self.board[i][j] and item != 'O' for item in chunk):
                        return self.board[i][j]
                # check vertical win
                if i < self.DEPTH-3:
                    chunk = []
                    chunk.append(self.board[i][j])
                    chunk.append(self.board[i+1][j])
                    chunk.append(self.board[i+2][j])
                    chunk.append(self.board[i+3][j])
                    if all(item == self.board[i][j] and item != 'O' for item in chunk):
                        return self.board[i][j]
                # check diagonal wins
                if i < self.DEPTH-3 and j < self.WIDTH-3:
                    # 4x4 window; main diagonal is chunk[k][k], the
                    # anti-diagonal is chunk[3-k][k].
                    chunk = []
                    chunk.append(self.board[i][j:j+4])
                    chunk.append(self.board[i+1][j:j+4])
                    chunk.append(self.board[i+2][j:j+4])
                    chunk.append(self.board[i+3][j:j+4])
                    if chunk[0][0] == chunk[1][1] == chunk[2][2] == chunk[3][3] and chunk[0][0] != 'O':
                        return chunk[0][0]
                    elif chunk[3][0] == chunk[1][2] == chunk[2][1] == chunk[0][3] and chunk[3][0] != 'O':
                        return chunk[3][0]
    def is_full(self):
        """True when no cell anywhere still holds the 'O' marker."""
        for row in self.board:
            if any(item=='O' for item in row):
                return False
        return True
    def game_over(self):
        """True on a win or a completely full (drawn) board."""
        return self.check_win() or self.is_full()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Connect Four module')
    parser.add_argument('-f', '--file',
                        help='File path of Connect Four moves you want to evaluate.')
    args = parser.parse_args()
    # File input version: replay a whitespace-separated list of 1-based
    # column moves (yellow moves first) and report the winner.
    if args.file:
        with open(args.file, 'r') as f:
            moves = f.read().split()
        board = GameBoard()
        for i, move in enumerate(moves):
            current_turn = 'yellow' if i % 2 == 0 else 'red'
            board.place_piece(current_turn, int(move) - 1)
        board.print_board()
        print(board.check_win())
    # 2 player game version
    else:
        while True:
            board = GameBoard()
            player_one = 'yellow'
            player_two = 'red'
            game_round = 1
            while not board.game_over():
                current_player = player_one if game_round % 2 else player_two
                while True:
                    move = input(f"{current_player}: Enter your move: ").strip().lower()
                    try:
                        move = int(move)
                        if 1 <= move <= 7:
                            move = board.place_piece(current_player, move-1)
                            # NOTE(review): place_piece never returns a str
                            # (None on success, False when the column is
                            # full), so this check is dead code and a move
                            # into a full column silently consumes the turn.
                            if type(move) is str:
                                raise IndexError('Invalid column.')
                            board.print_board()
                            break
                        else:
                            raise IndexError('Invalid column.')
                    except (ValueError, IndexError):
                        print("Invalid move. Please choose a column [1-7] that isn't full.")
                game_round += 1
            # A full board with no winner is a draw.
            if not board.is_full():
                print(f"Game over! Winner: {board.check_win()}")
            else:
                print(f"Game over! No one wins!")
            while True:
                play_ag = input("Do you want to play again: ").strip().lower()
                if play_ag in ['yes', 'y', 'no', 'n']:
                    break
            if play_ag in ['no', 'n']:
                break
10586976188 | from microbit import pin16, sleep, button_b, display
from utime import ticks_ms, ticks_diff
from utime import sleep_us
# A pin-state change must persist this many milliseconds to count as real.
_DEBOUNCE_DELAY = 50
# Digital pin levels: 1 = pressed (pull-down wiring), 0 = released.
_HIGH = 1
_LOW = 0
class Button:
    """Software-debounced push button read from a micro:bit digital pin."""
    def __init__(self, pin):
        # The micro:bit pin object to sample (must support read_digital()).
        self._pin = pin
        # Last *accepted* (debounced) level; None until the first stable read.
        self._button_state = None
        # Raw level seen on the previous sample.
        self._last_button_state = None
        # Timestamp (ms ticks) of the most recent raw-level change.
        self._last_debounce_time = None
    def is_pressed(self):
        """Sample the pin and return True if the debounced state is HIGH.

        A raw change restarts the debounce timer; only a level that has
        stayed unchanged for more than _DEBOUNCE_DELAY ms is accepted.
        """
        reading = self._pin.read_digital()
        if reading != self._last_button_state:
            self._last_debounce_time = ticks_ms()
        if ticks_diff(ticks_ms(), self._last_debounce_time) > _DEBOUNCE_DELAY:
            if reading != self._button_state:
                self._button_state = reading
        self._last_button_state = reading
        return (self._button_state == _HIGH)
def main():
    """Poll the external button twice a second, printing its state, until
    the on-board B button is pressed (which clears the display and exits)."""
    button = Button(pin16)
    display.clear()
    # '>' indicates the monitor loop is running.
    display.show('>')
    while True:
        if button_b.was_pressed():
            display.clear()
            break
        if button.is_pressed():
            print('Button Pressed')
        else:
            print('Button Not Pressed')
        sleep(500)
if __name__ == '__main__':
main()
| dooley-ch/microbit-grove | src/button.py | button.py | py | 1,205 | python | en | code | 0 | github-code | 90 |
class EmptyError(Exception):
    """Raised when a required proxy record or collection turns out empty.

    Bug fix: the class body used to execute `print(Exception)` at class
    creation time, spamming "<class 'Exception'>" to stdout whenever this
    module was imported.
    """
    pass
#==========================================================================================
#
#
#==========================================================================================
def read_proxy_metadata_S1csv(datadir_proxy, datafile_proxy, proxy_region, proxy_resolution, \
                              proxy_definition):
    """Read the 'Metadata' sheet of the PAGES2K_DatabaseS1 workbook and
    return a dict mapping each configured proxy type to the list of PAGES
    site IDs that match the requested region, archive type, resolution and
    measurement.

    NOTE(review): imports xlrd from a hard-coded user path appended to
    sys.path -- verify that path on the target machine.  sites_assim /
    sites_eval are built but never used or returned here.
    """
    #==========================================================================================
    #
    # ... reads metadata worksheet from PAGES2K_DatabaseS1 dataset ...
    #
    #==========================================================================================
    import sys
    import numpy as np
    from random import sample
    # Library needed to read CSV file format
    xlrd_dir = '/home/disk/ekman/rtardif/nobackup/lib/pylibs/xlrd/xlrd/'
    sys.path.append(xlrd_dir)
    import xlrd
    # Parsing dictionary of proxy definitions
    proxy_list = {}; # dict list containing proxy types and associated proxy id's (sites)
    sites_assim = {}
    sites_eval = {}
    proxy_types = list(proxy_definition.keys())
    # check if dealing with with "order" digits or not in definition of proxies
    # (keys like "1:TreeRing_Width" vs plain "TreeRing_Width"); the bare
    # except falls back to the keys as-is when there is no ':' prefix.
    try:
        proxy_types_unordered = [i.split(':', 1)[1] for i in list(proxy_definition.keys())]
    except:
        proxy_types_unordered = proxy_types
    for t in proxy_types:
        proxy_list[t] = []
        sites_assim[t] = []
        sites_eval[t] = []
    # Archive category is the part of the type name before the underscore.
    proxy_category = [item.split('_')[0] for item in proxy_types_unordered]
    # Define name of file & open
    proxy_file = datadir_proxy + '/'+datafile_proxy;
    print('Reading metadata file: ', proxy_file)
    workbook = xlrd.open_workbook(proxy_file);
    # ====================
    # Read in the metadata
    # ====================
    metadata = workbook.sheet_by_name('Metadata');
    # Get columns headers
    meta_fields = [metadata.cell(0,col_index).value for col_index in range(metadata.ncols)];
    proxy_metadata = []; # dict list containing proxy metadata
    for row_index in range(1,metadata.nrows):
        d = {meta_fields[col_index]: metadata.cell(row_index, col_index).value
             for col_index in range(metadata.ncols)};
        proxy_metadata.append(d)
    # =================================================================
    # Restrict to proxy_region and proxy_assim items listed in NAMELIST
    # =================================================================
    for row_index in range(0,metadata.nrows-1):
        if proxy_metadata[row_index]['PAGES 2k Region'] in proxy_region:
            if proxy_metadata[row_index]['Archive type'] in proxy_category:
                if proxy_metadata[row_index]['Resolution (yr)'] in proxy_resolution:
                    # Match the row's archive type back to the configured
                    # proxy types, then check the measurement is listed.
                    indt = [i for i, s in enumerate(proxy_definition) if proxy_metadata[row_index]['Archive type'] in s]
                    proxy_measurement = [proxy_definition[proxy_types[indt[k]]] for k in range(len(indt))]
                    indm = [i for i, s in enumerate(proxy_measurement) if proxy_metadata[row_index]['Proxy measurement'] in s]
                    if indm:
                        indtype = indt[indm[0]]
                        # Add chronology ID to appropriate list in dictionary
                        proxy_list[proxy_types[indtype]].append(str(proxy_metadata[row_index]['PAGES ID']))
    return proxy_list
def create_proxy_lists_from_metadata_S1csv(datadir_proxy, datafile_proxy, proxy_region, proxy_resolution, \
                                           proxy_definition, proxy_frac, psm_data, psm_r_crit):
    #==========================================================================================
    #
    # ... reads metadata worksheet from PAGES2K_DatabaseS1 dataset ...
    #
    # For every proxy type declared in proxy_definition, builds the list of site ids
    # (chronologies) matching the region/category/resolution criteria, optionally filters
    # them on PSM calibration skill, and randomly splits the result into sites to
    # assimilate vs. sites withheld for reconstruction evaluation.
    #
    # Parameters:
    #  - datadir_proxy / datafile_proxy : directory & name of the Excel proxy database file
    #  - proxy_region     : acceptable 'PAGES 2k Region' values
    #  - proxy_resolution : acceptable 'Resolution (yr)' values
    #  - proxy_definition : dict mapping ordered proxy-type labels ('NN:Category_...') to
    #                       lists of acceptable 'Proxy measurement' labels
    #  - proxy_frac       : fraction (0-1] of selected sites to assimilate; the remainder
    #                       go to the evaluation set
    #  - psm_data         : PSM calibration results, or None to skip the PSM filter;
    #                       keys are assumed to be (proxy type, site id) tuples with a
    #                       'PSMcorrel' entry -- TODO confirm against producer of psm_data
    #  - psm_r_crit       : minimum |PSM calibration correlation| to retain a site
    #
    # Returns: (sites_assim, sites_eval) -- dicts of site-id lists keyed by proxy type
    #==========================================================================================
    import sys
    import numpy as np
    from random import sample
    # Library needed to read CSV file format
    xlrd_dir = '/home/disk/ekman/rtardif/nobackup/lib/pylibs/xlrd/xlrd/'
    sys.path.append(xlrd_dir)
    import xlrd
    # Parsing dictionary of proxy definitions
    proxy_list = {}; # dict list containing proxy types and associated proxy id's (sites)
    sites_assim = {}
    sites_eval = {}
    proxy_types = list(proxy_definition.keys())
    # Type labels carry an ordering prefix before ':'; strip it to get 'Category_measurement'
    proxy_types_unordered = [i.split(':', 1)[1] for i in list(proxy_definition.keys())]
    for t in proxy_types:
        proxy_list[t] = []
        sites_assim[t] = []
        sites_eval[t] = []
    # Archive category is the part of the unordered label before the first '_'
    proxy_category = [item.split('_')[0] for item in proxy_types_unordered]
    # Define name of file & open
    proxy_file = datadir_proxy + '/'+datafile_proxy;
    print('Reading metadata file: ', proxy_file)
    workbook = xlrd.open_workbook(proxy_file);
    # ====================
    # Read in the metadata
    # ====================
    metadata = workbook.sheet_by_name('Metadata');
    # Get columns headers
    meta_fields = [metadata.cell(0,col_index).value for col_index in range(metadata.ncols)];
    proxy_metadata = []; # dict list containing proxy metadata (one dict per sheet row)
    for row_index in range(1,metadata.nrows):
        d = {meta_fields[col_index]: metadata.cell(row_index, col_index).value
             for col_index in range(metadata.ncols)};
        proxy_metadata.append(d)
    # =================================================================
    # Restrict to proxy_region and proxy_assim items listed in NAMELIST
    # (proxy_metadata has nrows-1 entries, hence the range bound below)
    # =================================================================
    for row_index in range(0,metadata.nrows-1):
        if proxy_metadata[row_index]['PAGES 2k Region'] in proxy_region:
            if proxy_metadata[row_index]['Archive type'] in proxy_category:
                if proxy_metadata[row_index]['Resolution (yr)'] in proxy_resolution:
                    # Indices of proxy_definition keys whose label contains this archive type
                    indt = [i for i, s in enumerate(proxy_definition) if proxy_metadata[row_index]['Archive type'] in s]
                    proxy_measurement = [proxy_definition[proxy_types[indt[k]]] for k in range(len(indt))]
                    # Which of those candidate types accepts this record's measurement label
                    indm = [i for i, s in enumerate(proxy_measurement) if proxy_metadata[row_index]['Proxy measurement'] in s]
                    if indm:
                        indtype = indt[indm[0]]
                        # Add chronology ID to appropriate list in dictionary
                        proxy_list[proxy_types[indtype]].append(str(proxy_metadata[row_index]['PAGES ID']))
    # =========================================================================
    # Filter list to retain sites with PSM calibration correlation > PSM_r_crit
    # =========================================================================
    if psm_data is not None:
        proxy_TypesSites_psm = list(psm_data.keys())
        proxy_TypesSites_psm_ok = [t for t in proxy_TypesSites_psm if abs(psm_data[t]['PSMcorrel']) > psm_r_crit]
        proxy_list_ok = {}
        for t in list(proxy_list.keys()):
            proxy = t.split(':', 1)[1]
            # keys assumed to be (proxy type, site id) tuples -- TODO confirm
            list_ok = [proxy_TypesSites_psm_ok[k][1] for k in range(len(proxy_TypesSites_psm_ok)) if proxy_TypesSites_psm_ok[k][0] == proxy]
            proxy_list_ok[t] = list_ok
    else:
        # No PSM information provided: keep every selected site
        proxy_list_ok = proxy_list
    # ================================================================
    # Create lists of sites to assimilate / keep for recon. evaluation
    # ================================================================
    if proxy_frac < 1.0:
        # List all sites, regardless of proxy type
        mergedlist = []
        tmp = [proxy_list_ok[x] for x in proxy_list_ok]
        nbtype = len(tmp)
        for k in range(nbtype):
            mergedlist.extend(tmp[k])
        nbsites = len(mergedlist)
        nbsites_assim = int(nbsites*proxy_frac)
        # random selection over entire site list
        ind_assim = sample(list(range(0, nbsites)), nbsites_assim)
        ind_eval = set(range(0,nbsites)) - set(ind_assim) # list indices of sites not chosen
        p_assim = [mergedlist[p] for p in ind_assim]
        p_eval = [mergedlist[p] for p in ind_eval]
        #ind = [i for i, s in enumerate(proxy_definition) if proxy_metadata[row_index]['Archive type'] in s]
        # Re-populate lists by proxy type
        for t in proxy_types:
            inda = [i for i, s in enumerate(p_assim) if s in proxy_list_ok[t]]
            sites_assim[t] = [p_assim[k] for k in inda]
            inde = [i for i, s in enumerate(p_eval) if s in proxy_list_ok[t]]
            sites_eval[t] = [p_eval[k] for k in inde]
    else:
        sites_assim = proxy_list_ok
        # leave sites_eval list empty
    return sites_assim, sites_eval
def read_proxy_metadata_S1csv_old(datadir_proxy, datafile_proxy, proxy_region, proxy_resolution, \
                                  proxy_type, proxy_measurement):
    #==========================================================================================
    #
    # ... reads metadata worksheet from PAGES2K_DatabaseS1 dataset ...
    #
    # Returns the site ids and coordinates of every chronology in the 'Metadata'
    # worksheet matching all four selection criteria.
    #
    # Parameters:
    #  - datadir_proxy / datafile_proxy : directory & name of the Excel proxy database file
    #  - proxy_region      : acceptable 'PAGES 2k Region' values
    #  - proxy_resolution  : acceptable 'Resolution (yr)' values
    #  - proxy_type        : acceptable 'Archive type' values
    #  - proxy_measurement : acceptable 'Proxy measurement' values
    #
    # Returns: (site_list, site_lat, site_lon) -- parallel lists of id (str), lat, lon
    #
    # Changes vs previous revision: removed unused numpy import and the
    # proxy_type_to_assim accumulator that was never read; behavior is unchanged.
    #==========================================================================================
    import sys
    # Library needed to read CSV file format (local build, not on default sys.path)
    xlrd_dir = '/home/disk/ekman/rtardif/nobackup/lib/pylibs/xlrd/xlrd/'
    sys.path.append(xlrd_dir)
    import xlrd

    # Uploading proxy data
    proxy_file = datadir_proxy + '/' + datafile_proxy
    print('Reading metadata file: ', proxy_file)
    workbook = xlrd.open_workbook(proxy_file)

    # Read in the metadata: one dict per chronology, keyed by the column headers (row 0)
    metadata = workbook.sheet_by_name('Metadata')
    meta_fields = [metadata.cell(0, col_index).value for col_index in range(metadata.ncols)]
    proxy_metadata = [{meta_fields[col_index]: metadata.cell(row_index, col_index).value
                       for col_index in range(metadata.ncols)}
                      for row_index in range(1, metadata.nrows)]

    # Restrict to the region/type/measurement/resolution items listed in NAMELIST
    site_list = []
    site_lat = []
    site_lon = []
    for record in proxy_metadata:
        if record['PAGES 2k Region'] in proxy_region \
           and record['Archive type'] in proxy_type \
           and record['Proxy measurement'] in proxy_measurement \
           and record['Resolution (yr)'] in proxy_resolution:
            site_list.append(str(record['PAGES ID']))  # str(): getting rid of unicode
            site_lat.append(record['Lat (N)'])
            site_lon.append(record['Lon (E)'])
    return site_list, site_lat, site_lon
def read_proxy_data_S1csv_site(datadir_proxy, datafile_proxy, proxy_site):
    #==========================================================================================
    #
    # ... reads data from a selected site (chronology) in PAGES2K_DatabaseS1 ...
    # ... site is passed as argument ...
    #
    # Returns the site id, lat/lon/alt, and the proxy time series as parallel lists
    # (time, value), one entry per non-empty data cell found for the site.
    #
    # NOTE(review): the final proxy_data[0] lookups assume at least one observation was
    # found for proxy_site (KeyError otherwise) -- confirm callers guarantee that.
    #==========================================================================================
    import sys
    import numpy as np
    # Library needed to read CSV file format
    xlrd_dir = '/home/disk/ekman/rtardif/nobackup/lib/pylibs/xlrd/xlrd/'
    sys.path.append(xlrd_dir)
    import xlrd
    # Uploading proxy data
    proxy_file = datadir_proxy + '/'+datafile_proxy;
    #print 'Reading file: ', proxy_file
    workbook = xlrd.open_workbook(proxy_file);
    # Getting general (number & names of worksheets) info on file content
    nb_worksheets = workbook.nsheets;
    #worksheet_list = workbook.sheet_names();
    worksheet_list = [str(item) for item in workbook.sheet_names()]; # getting rid of unicode
    # Create list of worksheet names containing data
    # (plain assignment: both names alias the SAME list object, so the 'del'
    # statements below shrink both -- the code relies on that aliasing)
    worksheet_list_data = worksheet_list;
    del worksheet_list[worksheet_list_data.index('ReadMe')]
    del worksheet_list[worksheet_list_data.index('Metadata')]
    # Read in the metadata
    metadata = workbook.sheet_by_name('Metadata');
    # Get columns headers
    meta_fields = [metadata.cell(0,col_index).value for col_index in range(metadata.ncols)];
    proxy_metadata = []; # dict list containing proxy metadata (one dict per sheet row)
    for row_index in range(1,metadata.nrows):
        d = {meta_fields[col_index]: metadata.cell(row_index, col_index).value
             for col_index in range(metadata.ncols)};
        proxy_metadata.append(d)
    # Restrict to proxy_region and proxy_assim items listed in NAMELIST
    proxy_type_to_assim = [];
    proxy_id_to_assim = [];
    proxy_lat_to_assim = [];
    proxy_lon_to_assim = [];
    for row_index in range(0,metadata.nrows-1):
        if proxy_metadata[row_index]['PAGES ID'] in proxy_site:
            proxy_id_to_assim.append(proxy_metadata[row_index]['PAGES ID'])
            proxy_type_to_assim.append(proxy_metadata[row_index]['Archive type'])
            proxy_lat_to_assim.append(proxy_metadata[row_index]['Lat (N)'])
            proxy_lon_to_assim.append(proxy_metadata[row_index]['Lon (E)'])
    proxy_id_to_assim = [str(item) for item in proxy_id_to_assim]; # getting rid of unicode encoding
    proxy_type_to_assim = [str(item) for item in proxy_type_to_assim]; # getting rid of unicode encoding
    # ------------------------------------------
    # Loop over worksheets containing proxy data
    # ------------------------------------------
    # Dictionary containing proxy metadata & data
    proxy_data = {}
    nb_ob = -1 # running observation counter; keys of proxy_data
    for worksheet in worksheet_list_data:
        data = workbook.sheet_by_name(worksheet)
        num_cols = data.ncols - 1
        # Get columns headers
        tmp_headers = [data.cell(0,col_index).value for col_index in range(data.ncols)]
        data_headers = [str(item) for item in tmp_headers]; # getting rid of unicode encoding
        tmp_refs = [data.cell(1,col_index).value for col_index in range(data.ncols)]
        data_refs = [str(item) for item in tmp_refs] # getting rid of unicode encoding
        data_headers[0] = data_refs[0]; # correct tag for years
        # Column indices of proxy id's in proxy_id_to_assim list
        col_assim = [i for i, item in enumerate(data_headers) if item in proxy_id_to_assim]
        if col_assim: # if non-empty list
            for row_index in range(2,data.nrows):
                for col_index in col_assim:
                    found = False
                    # associate metadata to data record
                    # NOTE(review): range starts at 1, so proxy_metadata[0] (the first
                    # data row of the Metadata sheet) can never be matched here --
                    # suspected off-by-one; confirm intent.
                    for meta_row_index in range(1,len(proxy_metadata)):
                        if proxy_metadata[meta_row_index]['PAGES ID'] == data_headers[col_index]:
                            found = True
                            typedat = proxy_metadata[meta_row_index]['Archive type']
                            measure = proxy_metadata[meta_row_index]['Proxy measurement']
                            resolution = proxy_metadata[meta_row_index]['Resolution (yr)']
                            lat = proxy_metadata[meta_row_index]['Lat (N)']
                            lon = proxy_metadata[meta_row_index]['Lon (E)']
                            alt = 0.0 # no altitude info in data file
                            # map negative longitudes onto [0, 360)
                            if lon < 0:
                                lon = 360 + lon
                    if found:
                        if data.cell(row_index, col_index).value: # only keep those with non-empty values
                            nb_ob = nb_ob + 1
                            proxy_data[nb_ob] = {}
                            proxy_data[nb_ob]['id'] = data_headers[col_index]
                            proxy_data[nb_ob]['type'] = str(typedat)
                            proxy_data[nb_ob]['meas'] = str(measure)
                            proxy_data[nb_ob]['resol'] = resolution
                            proxy_data[nb_ob]['lat'] = lat
                            proxy_data[nb_ob]['lon'] = lon
                            proxy_data[nb_ob]['alt'] = alt
                            proxy_data[nb_ob]['time'] = data.cell(row_index, 0).value
                            proxy_data[nb_ob]['value'] = data.cell(row_index, col_index).value
    # Site-level info taken from the first retained observation
    # ('id' shadows the builtin of the same name)
    id = proxy_data[0]['id']
    lat = proxy_data[0]['lat']
    lon = proxy_data[0]['lon']
    alt = proxy_data[0]['alt']
    # proxy time series
    time = [proxy_data[k]['time'] for k in range(0,len(proxy_data))]
    value = [proxy_data[k]['value'] for k in range(0,len(proxy_data))]
    return id, lat, lon, alt, time, value # could add more output here as we develop further
    #return proxy_data
def read_proxy_data_S1csv(self, datadir_proxy, datafile_proxy, proxy_region, proxy_type, proxy_measurement):
    #==========================================================================================
    #
    # ... reads data from all sites (chronologies) in PAGES2K_DatabaseS1 dataset meeting
    #     selection criteria from NAMELIST ...
    #
    # Returns parallel lists (id, lat, lon, alt, time, value) with one entry per retained
    # individual observation (not per site).
    #
    # NOTE(review): signature takes 'self' although nothing in the body uses it --
    # presumably written as (or lifted from) a method; confirm how it is called.
    #==========================================================================================
    import sys
    import numpy as np
    # Library needed to read CSV file format
    xlrd_dir = '/home/disk/ekman/rtardif/nobackup/lib/pylibs/xlrd/xlrd/'
    sys.path.append(xlrd_dir)
    import xlrd
    # Uploading proxy data
    proxy_file = datadir_proxy + '/'+datafile_proxy;
    print('Reading file: ', proxy_file)
    workbook = xlrd.open_workbook(proxy_file);
    # Getting general (number & names of worksheets) info on file content
    nb_worksheets = workbook.nsheets;
    #worksheet_list = workbook.sheet_names();
    worksheet_list = [str(item) for item in workbook.sheet_names()]; # getting rid of unicode
    # Create list of worksheet names containing the data
    # (plain assignment: both names alias the SAME list object, so the 'del'
    # statements below shrink both -- the code relies on that aliasing)
    worksheet_list_data = worksheet_list;
    del worksheet_list[worksheet_list_data.index('ReadMe')]
    del worksheet_list[worksheet_list_data.index('Metadata')]
    # Read in the metadata
    metadata = workbook.sheet_by_name('Metadata');
    # Get columns headers
    meta_fields = [metadata.cell(0,col_index).value for col_index in range(metadata.ncols)];
    proxy_metadata = []; # dict list containing proxy metadata (one dict per sheet row)
    for row_index in range(1,metadata.nrows):
        d = {meta_fields[col_index]: metadata.cell(row_index, col_index).value
             for col_index in range(metadata.ncols)};
        proxy_metadata.append(d)
    # Restrict to proxy_region and proxy_assim items listed in NAMELIST
    proxy_type_to_assim = [];
    proxy_id_to_assim = [];
    proxy_lat_to_assim = [];
    proxy_lon_to_assim = [];
    for row_index in range(0,metadata.nrows-1):
        if proxy_metadata[row_index]['PAGES 2k Region'] in proxy_region:
            if proxy_metadata[row_index]['Archive type'] in proxy_type:
                if proxy_metadata[row_index]['Proxy measurement'] in proxy_measurement:
                    proxy_id_to_assim.append(proxy_metadata[row_index]['PAGES ID'])
                    proxy_type_to_assim.append(proxy_metadata[row_index]['Archive type'])
                    proxy_lat_to_assim.append(proxy_metadata[row_index]['Lat (N)'])
                    proxy_lon_to_assim.append(proxy_metadata[row_index]['Lon (E)'])
    # Loop over worksheets containing proxy data
    # dictionary containing proxy metadata & data
    proxy_data = {}
    nb_ob = -1 # running observation counter; keys of proxy_data
    for worksheet in worksheet_list_data:
        #print 'worksheet: ', worksheet
        data = workbook.sheet_by_name(worksheet)
        num_cols = data.ncols - 1
        # Get columns headers
        tmp_headers = [data.cell(0,col_index).value for col_index in range(data.ncols)]
        data_headers = [str(item) for item in tmp_headers]; # getting rid of unicode encoding
        tmp_refs = [data.cell(1,col_index).value for col_index in range(data.ncols)]
        data_refs = [str(item) for item in tmp_refs] # getting rid of unicode encoding
        data_headers[0] = data_refs[0]; # correct tag for years
        # Column indices of proxy id's in proxy_id_to_assim list
        col_assim = [i for i, item in enumerate(data_headers) if item in proxy_id_to_assim]
        if col_assim: # if non-empty list
            for row_index in range(2,data.nrows):
                for col_index in col_assim:
                    found = False
                    # associate metadata to data record
                    # NOTE(review): range starts at 1, so proxy_metadata[0] (the first
                    # data row of the Metadata sheet) can never be matched here --
                    # suspected off-by-one; confirm intent.
                    for meta_row_index in range(1,len(proxy_metadata)):
                        if proxy_metadata[meta_row_index]['PAGES ID'] == data_headers[col_index]:
                            found = True
                            typedat = proxy_metadata[meta_row_index]['Archive type']
                            measure = proxy_metadata[meta_row_index]['Proxy measurement']
                            resolution = proxy_metadata[meta_row_index]['Resolution (yr)']
                            lat = proxy_metadata[meta_row_index]['Lat (N)']
                            lon = proxy_metadata[meta_row_index]['Lon (E)']
                            alt = 0.0 # no altitude info in data file
                    if found:
                        if data.cell(row_index, col_index).value: # only keep those with non-empty values
                            nb_ob = nb_ob + 1
                            proxy_data[nb_ob] = {}
                            proxy_data[nb_ob]['id'] = data_headers[col_index]
                            proxy_data[nb_ob]['type'] = str(typedat)
                            proxy_data[nb_ob]['meas'] = str(measure)
                            proxy_data[nb_ob]['resol'] = resolution
                            proxy_data[nb_ob]['lat'] = lat
                            proxy_data[nb_ob]['lon'] = lon
                            proxy_data[nb_ob]['alt'] = alt
                            proxy_data[nb_ob]['time'] = data.cell(row_index, 0).value
                            proxy_data[nb_ob]['value'] = data.cell(row_index, col_index).value
    # Flatten the per-observation dicts into parallel lists
    # ('id' shadows the builtin of the same name)
    id = [proxy_data[k]['id'] for k in range(0,len(proxy_data))]
    lat = [proxy_data[k]['lat'] for k in range(0,len(proxy_data))]
    lon = [proxy_data[k]['lon'] for k in range(0,len(proxy_data))]
    alt = [proxy_data[k]['alt'] for k in range(0,len(proxy_data))]
    time = [proxy_data[k]['time'] for k in range(0,len(proxy_data))]
    value = [proxy_data[k]['value'] for k in range(0,len(proxy_data))]
    return id, lat, lon, alt, time, value # should add more output here as we develop further
    #return proxy_data
#==========================================================================================
#
#
#==========================================================================================
# =========================================================================================
def is_number(s):
    """Return True if *s* parses as a number via float() or as a Unicode
    numeric character (e.g. fraction glyphs); False otherwise."""
    # Plain float conversion covers '3.14', '-7', 'nan', '1e5', ...
    try:
        float(s)
    except ValueError:
        pass
    else:
        return True
    # Fall back to Unicode numeric-value lookup for single numeric characters.
    try:
        import unicodedata
        unicodedata.numeric(s)
    except (TypeError, ValueError):
        return False
    return True
# =========================================================================================
def create_proxy_lists_from_metadata_NCDC(datadir_proxy, datafile_proxy, proxy_resolution, \
                                          proxy_definition, proxy_frac):
    #==========================================================================================
    #
    # ... reads metadata worksheet for NCDC formatted proxy dataset ...
    #
    # For every proxy type declared in proxy_definition, collects the site ids whose
    # archive category & resolution match the criteria and whose 'Variable Short Names'
    # share at least one label with the type's measurement list, then randomly splits
    # the selected sites into assimilation / evaluation subsets (fraction proxy_frac).
    #
    # Returns: (sites_assim, sites_eval) -- dicts of site-id lists keyed by proxy type
    #==========================================================================================
    import sys
    import numpy as np
    from random import sample
    # NEED TO THINK OF SOMETHING ELSE HERE... ... ... ... ... ... ... ... ...
    # ... provide this library as part of LMR distribution?
    # Library needed to read CSV file format
    xlrd_dir = '/home/disk/ekman/rtardif/nobackup/lib/pylibs/xlrd/xlrd/'
    sys.path.append(xlrd_dir)
    import xlrd
    # Parsing dictionary of proxy definitions
    proxy_list = {}; # dict list containing proxy types and associated proxy id's (sites)
    sites_assim = {}
    sites_eval = {}
    proxy_types = list(proxy_definition.keys())
    # Type labels carry an ordering prefix before ':'; strip it to get 'Category_measurement'
    proxy_types_unordered = [i.split(':', 1)[1] for i in list(proxy_definition.keys())]
    for t in proxy_types:
        proxy_list[t] = []
        sites_assim[t] = []
        sites_eval[t] = []
    # Archive category is the part of the unordered label before the first '_'
    proxy_category = [item.split('_')[0] for item in proxy_types_unordered]
    # Define name of file & open
    proxy_file = datadir_proxy + '/'+datafile_proxy;
    print('Reading metadata file: ', proxy_file)
    workbook = xlrd.open_workbook(proxy_file);
    # ====================
    # Read in the metadata
    # ====================
    metadata = workbook.sheet_by_name('Master Metadata File');
    # Get columns headers
    meta_fields = [metadata.cell(0,col_index).value for col_index in range(metadata.ncols)];
    proxy_metadata = []; # dict list containing proxy metadata (one dict per sheet row)
    for row_index in range(1,metadata.nrows):
        d = {meta_fields[col_index]: metadata.cell(row_index, col_index).value
             for col_index in range(metadata.ncols)};
        proxy_metadata.append(d)
    # =================================================================
    # Restrict to proxy_assim items listed in NAMELIST
    # =================================================================
    for row_index in range(0,metadata.nrows-1):
        if proxy_metadata[row_index]['Archive'] in proxy_category:
            if proxy_metadata[row_index]['Resolution'] in proxy_resolution:
                # Candidate proxy types whose label contains this record's archive name
                indt = [i for i, s in enumerate(proxy_definition) if proxy_metadata[row_index]['Archive'] in s]
                proxy_measurement = [proxy_definition[proxy_types[indt[k]]] for k in range(len(indt))]
                # 'Variable Short Names' is a stringified list; split on ',' then strip
                # brackets/quotes/whitespace left over from the serialization
                l1 = proxy_metadata[row_index]['Variable Short Names'].split(",")
                l2 = [item.strip("[").strip("]").strip("'").strip().strip("'") for item in l1] # clean the crud...
                l3 = [str(l2[k]) for k in range(len(l2))]
                # Common elements in lists?
                for indm in range(len(proxy_measurement)):
                    common_set = set(l3)&set(proxy_measurement[indm])
                    if common_set: # if common element has been found
                        indtype = indt[indm]
                        # Add chronology ID to appropriate list in dictionary
                        # Do a check on consistency between 'Unique Identifier' & 'Filename.txt' ... sometimes doesn't match!
                        siteid_from_filename = proxy_metadata[row_index]['Filename.txt'][:-4] # strip the '.txt'
                        if str(proxy_metadata[row_index]['Unique Identifier']) != siteid_from_filename:
                            print('Filename & Unique Identifier DO NOT MATCH: using filename instead ...', siteid_from_filename, \
                                  'vs', str(proxy_metadata[row_index]['Unique Identifier']))
                            proxy_list[proxy_types[indtype]].append(str(siteid_from_filename))
                        else:
                            proxy_list[proxy_types[indtype]].append(str(proxy_metadata[row_index]['Unique Identifier']))
    # Create lists of sites to assimilate / keep for recon. evaluation
    if proxy_frac < 1.0:
        # List all sites, regardless of proxy type
        mergedlist = []
        tmp = [proxy_list[x] for x in proxy_list]
        nbtype = len(tmp)
        for k in range(nbtype):
            mergedlist.extend(tmp[k])
        nbsites = len(mergedlist)
        nbsites_assim = int(nbsites*proxy_frac)
        # random selection over merged site list
        ind_assim = sample(list(range(0, nbsites)), nbsites_assim)
        ind_eval = set(range(0,nbsites)) - set(ind_assim) # list indices of sites not chosen
        p_assim = [mergedlist[p] for p in ind_assim]
        p_eval = [mergedlist[p] for p in ind_eval]
        #ind = [i for i, s in enumerate(proxy_definition) if proxy_metadata[row_index]['Archive type'] in s]
        # Re-populate lists by proxy type
        for t in proxy_types:
            inda = [i for i, s in enumerate(p_assim) if s in proxy_list[t]]
            sites_assim[t] = [p_assim[k] for k in inda]
            inde = [i for i, s in enumerate(p_eval) if s in proxy_list[t]]
            sites_eval[t] = [p_eval[k] for k in inde]
    else:
        sites_assim = proxy_list
        # leave sites_eval list empty
    # print ' '
    # for t in proxy_types:
    #     print t, proxy_list[t]
    # print ' '
    print('Assim:', sites_assim)
    print(' ')
    print('Eval:', sites_eval)
    return sites_assim, sites_eval
# =========================================================================================
def colonReader(string, fCon, fCon_low, end):
    '''Seek "<key>: " in the lowercased transcript fCon_low and return the text
    from fCon between the key and the next occurrence of *end* (typically a
    newline), stripped of surrounding whitespace.

    *string* may be a single key or a list of alternative keys; with a list,
    every alternative is tried in order and the last one found wins.
    Returns "" when nothing is found.
    From Julien Emile-Geay (Univ. of Southern California)
    '''
    single = isinstance(string, str)
    keys = [string] if single else list(string)
    result = ""
    for key in keys:
        pattern = key + ': '  # append the annoying stuff
        start = fCon_low.find(pattern)
        if start == -1:
            if single:
                #print "Error: property " + string + " not found"
                return ""
            continue
        stop = fCon_low[start:].find(end)
        candidate = fCon[start + len(pattern):start + stop]
        # strip the '\r' character if it appears
        if candidate.endswith('\r'):
            candidate = candidate[:-1]
        result = candidate
        if single:
            return result.strip()
    if result == "":
        #print "Error: property " + string[0] + " not found"
        return ""
    return result.strip()
# =========================================================================================
def read_proxy_data_NCDCtxt_site(datadir, site, measurement):
#==========================================================================================
# Purpose: Reads data from a selected site (chronology) in NCDC proxy dataset
#
# Input :
# - datadir : Directory where proxy data files are located.
# - site : Site ID (ex. 00aust01a)
# - measurement : List of possible proxy measurement labels for specific proxy type
# (ex. ['d18O','d18o','d18o_stk','d18o_int','d18o_norm'] for delta 18 oxygen isotope
# measurements)
#
# Returns :
# - id : Site id read from the data file
# - lat/lon : latitude & longitude of the site
# - alt : Elevation of the site
# - time : Array containing the time of uploaded data
# - value : Array of uploaded proxy data
#
# Author(s): Robert Tardif, Univ. of Washington, Dept. of Atmospheric Sciences
# based on "ncdc_file_parser.py" code from Julien Emile-Geay
# (Univ. of Southern California)
#
# Date : March 2015
#
# Revision : None
#
#==========================================================================================
import os
import numpy as np
# Possible header definitions of time in data files ...
time_defs = ['age','Age_AD','age_AD','age_AD_ass','age_AD_int','Midpt_year',\
'age_yb1950','yb_1950','yrb_1950',\
'yb_1989','age_yb1989',\
'yr_b2k','yb_2k','ky_b2k','kyb_2k','kab2k','ka_b2k','ky_BP','kyr_BP','ka_BP','age_kaBP',\
'yr_BP','calyr_BP','Age(yrBP)','age_calBP']
filename = datadir+'/'+site+'.txt'
if os.path.isfile(filename):
print('File:', filename)
# Define root string for filename
file_s = filename.replace(" ", '_') # strip all whitespaces if present
fileroot = '_'.join(file_s.split('.')[:-1])
# Open the file and port content to a string object
filein = open(filename,'U') # use the "universal newline mode" (U) to handle DOS formatted files
fileContent = filein.read()
fileContent_low = fileContent.lower()
# Initialize empty dictionary
d = {}
# Assign default values to some metadata
d['ElevationUnit'] = 'm'
d['TimeUnit'] = 'y_ad'
# note: 8240/2030 ASCII code for "permil"
# ===========================================================================
# Extract metadata from file
# ===========================================================================
try:
# 'Archive' is the proxy type
d['Archive'] = colonReader('archive', fileContent, fileContent_low, '\n')
# Other info
d['Title'] = colonReader('study_name', fileContent, fileContent_low, '\n')
investigators = colonReader('investigators', fileContent, fileContent_low, '\n')
d['Investigators'] = investigators.replace(';',' and') # take out the ; so that turtle doesn't freak out.
d['PubDOI'] = colonReader('doi', fileContent, fileContent_low, '\n')
d['SiteName'] = colonReader('site_name', fileContent, fileContent_low, '\n')
str_lst = ['northernmost_latitude', 'northernmost latitude'] # documented instances of this field property
d['NorthernmostLatitude'] = float(colonReader(str_lst, fileContent, fileContent_low, '\n'))
str_lst = ['southernmost_latitude', 'southernmost latitude'] # documented instances of this field property
d['SouthernmostLatitude'] = float(colonReader(str_lst, fileContent, fileContent_low, '\n'))
str_lst = ['easternmost_longitude', 'easternmost longitude'] # documented instances of this field property
d['EasternmostLongitude'] = float(colonReader(str_lst, fileContent, fileContent_low, '\n'))
str_lst = ['westernmost_longitude', 'westernmost longitude'] # documented instances of this field property
d['WesternmostLongitude'] = float(colonReader(str_lst, fileContent, fileContent_low, '\n'))
elev = colonReader('elevation', fileContent, fileContent_low, '\n')
if elev != 'nan' and len(elev)>0:
elev_s = elev.split(' ')
d['Elevation'] = float(''.join(c for c in elev_s[0] if c.isdigit())) # to only keep digits ...
else:
d['Elevation'] = float('NaN')
d['CollectionName'] = colonReader('collection_name', fileContent, fileContent_low, '\n')
d['EarliestYear'] = float(colonReader('earliest_year', fileContent, fileContent_low, '\n'))
d['MostRecentYear'] = float(colonReader('most_recent_year', fileContent, fileContent_low, '\n'))
d['TimeUnit'] = colonReader('time_unit', fileContent, fileContent_low, '\n')
if not d['TimeUnit']:
d['TimeUnit'] = colonReader('time unit', fileContent, fileContent_low, '\n')
except EmptyError as e:
print(e)
# ===========================================================================
# Extract information from the "Variables" section of the file
# ===========================================================================
# Find beginning of block
sline_begin = fileContent.find('# Variables:')
if sline_begin == -1:
sline_begin = fileContent.find('# Variables')
# Find end of block
sline_end = fileContent.find('# Data:')
if sline_end == -1:
sline_end = fileContent.find('# Data\n')
VarDesc = fileContent[sline_begin:sline_end].splitlines()
nvar = 0 # counter for variable number
for line in VarDesc: # handle all the NCDC convention changes
# (TODO: more clever/general exception handling)
if line and line[0] != '' and line[0] != ' ' and line[0:2] != '#-' and line[0:2] != '# ' and line != '#':
#print line
nvar = nvar + 1
line2 = line.replace('\t',',') # clean up
sp_line = line2.split(',') # split line along commas
if len(sp_line) < 9:
continue
else:
d['DataColumn' + format(nvar, '02') + '_ShortName'] = sp_line[0].strip('#').strip(' ')
d['DataColumn' + format(nvar, '02') + '_LongName'] = sp_line[1]
d['DataColumn' + format(nvar, '02') + '_Material'] = sp_line[2]
d['DataColumn' + format(nvar, '02') + '_Uncertainty'] = sp_line[3]
d['DataColumn' + format(nvar, '02') + '_Units'] = sp_line[4]
d['DataColumn' + format(nvar, '02') + '_Seasonality'] = sp_line[5]
d['DataColumn' + format(nvar, '02') + '_Archive'] = sp_line[6]
d['DataColumn' + format(nvar, '02') + '_Detail'] = sp_line[7]
d['DataColumn' + format(nvar, '02') + '_Method'] = sp_line[8]
d['DataColumn' + format(nvar, '02') + '_CharOrNum'] = sp_line[9].strip(' ')
# ===========================================================================
# Extract the data from the "Data" section of the file
# ===========================================================================
# Find line number at beginning of data block
sline = fileContent.find('# Data:')
if sline == -1:
sline = fileContent.find('# Data\n')
fileContent_datalines = fileContent[sline:].splitlines()
start_line_index = 0
line_nb = 0
for line in fileContent_datalines: # skip lines without actual data
#print line
if not line or line[0]=='#' or line[0] == ' ':
start_line_index += 1
else:
start_line_index2 = line_nb
break
line_nb +=1
#print start_line_index, start_line_index2
# Extract column descriptions (headers) of the data matrix
DataColumn_headers = fileContent_datalines[start_line_index].splitlines()[0].split('\t')
# Strip possible blanks in column headers
DataColumn_headers = [item.strip() for item in DataColumn_headers]
nc = len(DataColumn_headers)
#print '-:' + str(nvar) + ' variables identified in metadata'
#print '-:' + str(nc) + ' columns in data matrix'
# Which column contains the important data (time & proxy values) to be extracted?
time_list = []
data_list = []
# Time
TimeColumn_ided = False
TimeColumn_tag = list(set(DataColumn_headers).intersection(time_defs))
if len(TimeColumn_tag) > 0:
if len(TimeColumn_tag) == 1: # single match -> ok
time_col_index = DataColumn_headers.index(', '.join(TimeColumn_tag))
TimeColumn_ided = True
else:
print('TimeColumn: More than one match ...do what then?')
# Proxy data
DataColumn_ided = False
DataColumn_tag = list(set(DataColumn_headers).intersection(measurement))
if len(DataColumn_tag) > 0:
if len(DataColumn_tag) == 1: # single match -> ok
data_col_index = DataColumn_headers.index(', '.join(DataColumn_tag))
DataColumn_ided = True
else:
print('DataColumn: More than one match ...do what then?')
print('Taking first one...')
DataColumn_tag.remove(DataColumn_tag[1])
data_col_index = DataColumn_headers.index(', '.join(DataColumn_tag))
DataColumn_ided = True
# If both columns identified, then load arrays with the data
if TimeColumn_ided and DataColumn_ided:
datalines = fileContent_datalines[start_line_index+1:] # +1 to skip 1st line (header line)
for line in datalines:
datalist = line.split()
# if line not empty
if datalist:
try:
# If data not empty, not NaN & only digits -> OK then fill lists
if datalist and datalist[time_col_index] and datalist[data_col_index] and \
is_number(datalist[data_col_index]) and datalist[data_col_index].lower() != 'nan':
time_list.append(datalist[time_col_index])
data_list.append(datalist[data_col_index])
except:
continue
# transform to numpy arrays => proxy time series
time = np.asarray(time_list,dtype=np.float64)
value = np.asarray(data_list,dtype=np.float64)
# proxy identifier and geo location
id = d['CollectionName']
alt = d['Elevation']
# Something crude in assignement of lat/lon:
if d['NorthernmostLatitude'] != d['SouthernmostLatitude']:
lat = (d['NorthernmostLatitude'] + d['SouthernmostLatitude'])/2.0
else:
lat = d['NorthernmostLatitude']
if d['EasternmostLongitude'] != d['WesternmostLongitude']:
lon = (d['EasternmostLongitude'] + d['WesternmostLongitude'])/2.0
else:
lon = d['EasternmostLongitude']
# Modify "time" array into "years AD" if not already
#print 'TimeUnit:', d['TimeUnit']
tdef = d['TimeUnit']
tdef_parsed = tdef.split('_')
if len(tdef_parsed) == 2 and tdef_parsed[0] and tdef_parsed[1]:
# tdef has expected structure ...
if tdef_parsed[0] == 'yb' and is_number(tdef_parsed[1]):
time = float(tdef_parsed[1]) - time
elif tdef_parsed[0] == 'kyb' and is_number(tdef_parsed[1]):
time = float(tdef_parsed[1]) - 1000.0*time
elif tdef_parsed[0] == 'y' and tdef_parsed[1] == 'ad':
pass # do nothing, time already in years_AD
else:
print('Unrecognized time definition. Returning empty arrays!')
time = np.asarray([],dtype=np.float64)
value = np.asarray([],dtype=np.float64)
else:
print('*** WARNING *** Unexpected time definition: string has more elements than expected. Returning empty arrays!')
time = np.asarray([],dtype=np.float64)
value = np.asarray([],dtype=np.float64)
# Old code ... to handle the mishmash of time definitions ...
# if tdef == 'kaB1950' or tdef == 'KYrBP' or tdef == 'kyr BP' or tdef == 'ky BP':
# time = 1950.0 - 1000.0*time
# elif tdef == 'yb 1950' or tdef == 'yb_1950' or tdef == 'yrb_1950' or tdef == 'age_yb1950' or tdef == 'years before 1950' \
# or tdef == 'years before 1950 AD' or tdef == 'cal yr BP' or tdef == 'cal year BP' or tdef == 'yrs BP':
# time = 1950.0 - time
# elif tdef == 'yb 1989' or tdef == 'age_yb1989':
# time = 1989.0 - time
# elif tdef == 'years before 2000 AD' or tdef == 'yrs b2k' or tdef == 'yb 2k' or tdef == 'yrs b2k' or tdef == 'yrs b2k AD':
# time = 2000.0 - time
# elif tdef == 'ka b2k' or tdef == 'kyb 2k' or tdef == 'kyb_2k' or tdef == 'ky b2k':
# time = 2000.0 - 1000.0*time
# else:
# # be careful: all else is considered years_AD
# time = time
# If subannual, average up to annual --------------------------------------------------------
years_all = [int(time[k]) for k in range(0,len(time))]
years = list(set(years_all)) # 'set' is used to get unique values in list
years.sort() # sort the list
time_annual = np.asarray(years,dtype=np.float64)
value_annual = np.empty(len(years), dtype=np.float64)
# Loop over years in dataset
for i in range(0,len(years)):
ind = [j for j, k in enumerate(years_all) if k == years[i]]
value_annual[i] = np.nanmean(value[ind],axis=0)
# Transform longitude in [0,360] domain
if lon < 0:
lon = 360 + lon
else:
print('File NOT FOUND:', filename)
# return empty arrays
id = site
lat = []
lon = []
alt = []
time = np.asarray([],dtype=np.float64)
value = np.asarray([],dtype=np.float64)
#return id, lat, lon, alt, time, value
return id, lat, lon, alt, time_annual, value_annual
| modons/LMR | load_proxy_data.py | load_proxy_data.py | py | 45,538 | python | en | code | 23 | github-code | 90 |
8219068868 | from django.db import models
from wagtail.admin.edit_handlers import StreamFieldPanel
from wagtail.core import blocks
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Page
from wagtail.documents.blocks import DocumentChooserBlock
from . import blocks as test_blocks
class PageWithRichText(Page):
    """Test page exposing a plain ``TextField`` and a Wagtail ``RichTextField``."""

    # Both fields are optional (blank=True) so the page validates when empty.
    test_textfield = models.TextField(blank=True)
    test_richtextfield = RichTextField(blank=True)
class PageWithStreamField(Page):
    """Test page whose body is a StreamField covering every block type under test."""

    test_streamfield = StreamField(
        [
            # Basic field blocks.
            ("char_block", blocks.CharBlock(max_length=255)),
            ("boolean_block", blocks.BooleanBlock()),
            ("text_block", blocks.TextBlock()),
            ("email_block", blocks.EmailBlock()),
            ("url_block", blocks.URLBlock()),
            ("rich_text_block", blocks.RichTextBlock()),
            ("raw_html_block", blocks.RawHTMLBlock()),
            ("blockquote_block", blocks.BlockQuoteBlock()),
            # Struct blocks from the local test blocks module, incl. nesting.
            ("struct_block", test_blocks.RegularStructBlock()),
            ("rich_text_struct_block", test_blocks.RichTextStructBlock()),
            ("nested_struct_block", test_blocks.NestedStructBlock()),
            # List blocks, incl. a list of struct blocks.
            ("list_block", blocks.ListBlock(blocks.TextBlock())),
            (
                "list_block_with_struct_block",
                blocks.ListBlock(test_blocks.RegularStructBlock()),
            ),
            # Stream blocks, incl. nested and struct-bearing variants.
            ("stream_block", test_blocks.RegularStreamBlock()),
            ("nested_streamblock", test_blocks.NestedStreamBlock()),
            ("stream_block_with_struct_block", test_blocks.StreamWithStructBlock()),
            # Chooser block.
            ("document_block", DocumentChooserBlock()),
        ],
        blank=True,
    )

    content_panels = Page.content_panels + [StreamFieldPanel("test_streamfield")]
| fourdigits/wagtail-xliff-translation | test_app/models.py | models.py | py | 1,770 | python | en | code | 11 | github-code | 90 |
34325876554 | import pygame as pg
# Draw two layered images once, then keep the window open until it is closed.
pg.init()
screen = pg.display.set_mode([800, 600])

# Load each image and stamp it onto the back buffer at a fixed offset from
# the (0, 0) origin (the second offset is negative, i.e. up-left of origin).
origin_x, origin_y = 0, 0
for image_file, (dx, dy) in (('Bard_Render.png', (10, 50)),
                             ('dcap.png', (-120, -170))):
    sprite = pg.image.load(image_file)
    screen.blit(sprite, (origin_x + dx, origin_y + dy))

# Event loop: present the frame repeatedly until the user closes the window.
running = True
while running:
    if any(event.type == pg.QUIT for event in pg.event.get()):
        running = False
    pg.display.flip()
pg.quit()
| MatthewRandell/The-Devils-Trap | layeredImages.py | layeredImages.py | py | 428 | python | en | code | 0 | github-code | 90 |
37378216789 | from typing import Iterable
def flatten(xs):
    """Lazily yield the leaf items of an arbitrarily nested iterable.

    Strings and bytes are treated as atomic leaves, not as iterables.
    (Recursive approach after https://stackoverflow.com/a/2158532)
    """
    for item in xs:
        is_leaf = isinstance(item, (str, bytes)) or not isinstance(item, Iterable)
        if is_leaf:
            yield item
        else:
            yield from flatten(item)
| janchaloupka/web-scraper-nabidek-pronajmu | src/utils.py | utils.py | py | 254 | python | en | code | 25 | github-code | 90 |
5908474459 | # Problem description:
# https://github.com/HackBulgaria/Programming0-1/tree/master/week2/3-Simple-Algorithms
n = int(input('Enter n: '))

# Collect every proper divisor of n (each i in 1..n-1 that divides it evenly).
divisors = [d for d in range(1, n) if n % d == 0]
sum_divisors = sum(divisors)

# A perfect number equals the sum of its proper divisors.
if sum_divisors == n:
    print(f'{n} is a perfect number!')
else:
    print(f'Sorry! {n} is not a perfect number.')
| keremidarski/python_playground | Programming 0/week 2/12_is_perfect.py | 12_is_perfect.py | py | 385 | python | en | code | 0 | github-code | 90 |
36845583340 | import numpy as np
import pandas as pd
import glob
import re
import os
from scipy import stats
import statsmodels.formula.api as smf
import statsmodels.api as sm
from sklearn import preprocessing,pipeline,linear_model,model_selection,metrics,multiclass,inspection
import pylab as plt
import seaborn as sns
from statannot import add_stat_annotation
import missingno as msn
from lifelines.plotting import plot_lifetimes
from sksurv.datasets import load_veterans_lung_cancer
from sksurv.nonparametric import kaplan_meier_estimator
from sksurv.preprocessing import OneHotEncoder
from sksurv.linear_model import CoxPHSurvivalAnalysis, CoxnetSurvivalAnalysis
from sksurv.metrics import concordance_index_censored,cumulative_dynamic_auc
from sksurv import ensemble
import pickle
import joblib
import sys
sys.path.insert(1,'/scratch/c.c21013066/software/biobankAccelerometerAnalysis/accelerometer')
sys.path.insert(1,'/scratch/c.c21013066/UKBIOBANK_DataPreparation/phenotypes')
sys.path.insert(1,'/scratch/c.c21013066/Paper/ProdromalUKBB/analyses/3_logistic_regression')
import utils
import plots
import load_modalities
import datetime
import yaml
# Input/output locations on the cluster filesystem.
img_path = '/scratch/c.c21013066/images/ukbiobank/accelerometer/models'
model_path = '/scratch/c.c21013066/data/ukbiobank/analyses/survival/prodromal/noOsteo'
data_path = '/scratch/c.c21013066/data/ukbiobank'
# Column-name groups per data modality (covariates, accelerometer features,
# blood markers, lifestyle, genetics, prodromal signs); *_scale lists name
# the columns that need standardisation before modelling.
covs,allfeatures,allfeatures_scale,blood,blood_scale,lifestyle,lifestyle_scale,genetics,genetics_scale,prod,prod_acc = load_modalities.load_features(f'{data_path}')
# Outcome column(s) to model.
models =['diag_ProdPopulationNoPD']
# Human-readable name of each candidate feature set; parallel to `cols` and
# `scale_cols` below (index i in all three lists describes the same model).
fnames = ['intercept','covariates',
    'genetics+family','lifestyle_nofam','blood','acc','all_acc_features','prodromalsigns_beforePD',
         'genetics+family+all_acc_features','lifestyle+all_acc_features','blood+all_acc_features','prodromalsigns_beforePD+all_acc_features',
         'all_acc_features+blood+lifestyle+genetics+prodromalsigns_beforePD','prodromalsigns_beforeacc','prodromalsigns_beforeacc+all_acc_features',
         'all_acc_features+blood+lifestyle+genetics+prodromalsigns_beforeacc']
# Predictor columns for each feature set (parallel to `fnames`).
cols = [['Intercept'],np.hstack(['Intercept',covs]),
        np.hstack([covs,genetics,'Intercept']),np.hstack([covs,lifestyle,'Intercept']),
        np.hstack([covs,blood,'Intercept']),np.hstack(['No_wear_time_bias_adjusted_average_acceleration',covs,'Intercept']),
        np.hstack([allfeatures,'Intercept']),np.hstack([covs,prod,'Intercept']),
       np.hstack([genetics,allfeatures,'Intercept']),np.hstack([lifestyle,allfeatures,'Intercept']),
       np.hstack([blood,allfeatures,'Intercept']),np.hstack([prod,allfeatures,'Intercept']),np.hstack([allfeatures,blood,lifestyle,genetics,prod,'Intercept']),
       np.hstack([covs,prod_acc,'Intercept']),np.hstack([prod_acc,allfeatures,'Intercept']),np.hstack([allfeatures,blood,lifestyle,genetics,prod_acc,'Intercept'])]
# Subset of each feature set that gets z-scored (parallel to `cols`).
scale_cols = [[],covs[:1],
             np.hstack([covs[:1],genetics_scale]),
             np.hstack([covs[:1],lifestyle_scale]),np.hstack([covs[:1],blood_scale]),
              np.hstack(['No_wear_time_bias_adjusted_average_acceleration',covs[:1]]),allfeatures_scale,
             covs[:1],
             np.hstack([genetics_scale,allfeatures_scale]),np.hstack([lifestyle_scale,allfeatures_scale]),
              np.hstack([blood_scale,allfeatures_scale]),allfeatures_scale,np.hstack([allfeatures_scale,blood_scale,lifestyle_scale,genetics_scale]),
             covs[:1],allfeatures_scale,np.hstack([allfeatures_scale,blood_scale,lifestyle_scale,genetics_scale])]
# Union of all predictor columns across modalities.
features_all = np.hstack([allfeatures,blood,lifestyle,genetics,prod,prod_acc])
# Fit 5-fold cross-validated Random Survival Forests predicting time from
# accelerometry to PD diagnosis, for each population definition / outcome /
# feature set, persisting models, predictions and AUCs per fold.
for pop_kind in ['_allHC']:
    if pop_kind == '_allHC':
        merged = pd.read_csv(f'{data_path}/merged_data/populationNoOsteoAllHC.csv').set_index('eid')
        prodage = pd.read_csv(f'{data_path}/merged_data/unaffectedNoOsteoMatchedHC.csv').set_index('eid')
        # Carry follow-up time for the prodromal cases over from the matched cohort.
        merged.loc[prodage[prodage['diag_ProdHC']==1].index,'acc_time_to_diagnosis'] = prodage.loc[prodage['diag_ProdHC']==1,'acc_time_to_diagnosis'].values
    else:
        print('undefined condition')
        break
    # Censor controls at 2021-03-01: years from accelerometry to censoring date.
    merged.loc[merged['diag_ProdPopulationNoPD']==0,'acc_time_to_diagnosis'] = (pd.Timestamp(datetime.datetime(2021,3,1)) - pd.to_datetime(merged.loc[merged['diag_ProdPopulationNoPD']==0,'date_accelerometry']) )/ np.timedelta64(1,'Y')
    merged['Intercept'] = 1
    print(merged.groupby('diag_ProdPopulationNoPD')['acc_time_to_diagnosis'].agg(['min','max']))
    # if pop_kind == '_allHC':
    #     # get population cohort which uses all available HC
    #     hc = pd.read_csv(f'{data_path}/phenotypes/accelerometer/allHCnoOsteo_prodromalsigns.csv',
    #                  index_col=0)
    #     hc['acc_time_to_diagnosis'] = hc['ParkinsonDisease_age'] - hc['accelerometry_age']
    #     for name in models:
    #         hc[name] = 0
    #     hc = pd.merge(hc,score_best,right_index=True,left_index=True,how='left').rename(columns={trait:'PRS'})
    #     hc = pd.merge(hc,PRSs,right_index=True,left_index=True,how='left',suffixes=['_drop',''])
    #     hc = hc.drop(columns=hc.filter(regex="_drop").columns)
    #     merged = pd.concat([merged[np.hstack([models,features_all,'acc_time_to_diagnosis','Status'])],hc[np.hstack([models,features_all,'acc_time_to_diagnosis','Status'])]])
    #     # Only using the subset of the columns present in the original data
    #     merged = merged.dropna(subset=features_all,how='any',axis='rows')
    #     merged = merged[~merged.index.duplicated(keep='first')]
    for name in models:
        df = merged.dropna(subset=[name])
        # NOTE: only feature set 15 (all modalities + prodromal signs before
        # accelerometry) is run here; the other indices are defined above.
        for features,scale,fname in zip([cols[15]],[scale_cols[15]],[fnames[15]]):
            # Create output directories; an existing directory just logs the OSError.
            try:
                os.mkdir(f'{model_path}/{fname}')
            except OSError as error:
                print(error)
            try:
                os.mkdir(f'{model_path}/{fname}/{name}{pop_kind}')
            except OSError as error:
                print(error)
            # pred[0] = outcome, pred[1] = survival time, pred[2:] = predictors.
            pred = np.hstack([name,'acc_time_to_diagnosis',features])
            df_r= df.loc[:,pred]
            #df_r = df_r.dropna(subset=allfeatures,how='any',axis='rows')
            outer_cv = model_selection.StratifiedKFold(n_splits=5, random_state=123,shuffle=True)
            # Time grid (years) at which time-dependent AUC is evaluated.
            time_points = np.arange(2.5, 7,0.1)
            cph_aucs = pd.DataFrame(index=np.arange(5),columns=['mean'])
            for cv,(train_id,test_id) in enumerate(outer_cv.split(df[pred],df[name])):
                X_train = df.iloc[train_id][pred]
                X_test = df.iloc[test_id][pred]
                y_train = df.iloc[train_id][name]
                y_test = df.iloc[test_id][name]
                # Standardise the continuous columns with train-fold statistics only.
                if len(scale)>0:
                    scaler = preprocessing.StandardScaler().fit(X_train[scale])
                    X_train[scale] = scaler.transform(X_train[scale])
                    X_test[scale] = scaler.transform(X_test[scale])
                df_dummy = X_train.copy()#pd.get_dummies(X_train ,drop_first=True)
                # Build the structured array sksurv expects: (event flag, time).
                df_dummy[name] = df_dummy[name].astype('?')
                df_dummy['acc_time_to_diagnosis'] = df_dummy['acc_time_to_diagnosis'].astype('<f8')
                df_dummy = df_dummy.rename(columns={name:'Status','acc_time_to_diagnosis':'Survival_in_years'})
                dt=dtype=[('Status', '?'), ('Survival_in_years', '<f8')]
                data_y = np.array([tuple(row) for row in df_dummy[['Status','Survival_in_years']].values], dtype=dt)
                df_dummy_test = pd.get_dummies(X_test, drop_first=True)
                df_dummy_test[name] = df_dummy_test[name].astype('?')
                df_dummy_test['acc_time_to_diagnosis'] = df_dummy_test['acc_time_to_diagnosis'].astype('<f8')
                df_dummy_test = df_dummy_test.rename(columns={name:'Status','acc_time_to_diagnosis':'Survival_in_years'})
                data_y_test = np.array([tuple(row) for row in df_dummy_test[['Status','Survival_in_years']].values], dtype=dt)
                # Fixed random_state keeps folds reproducible across runs.
                rsf = ensemble.RandomSurvivalForest(n_estimators=1000,
                           min_samples_split=10,
                           min_samples_leaf=15,
                           n_jobs=-1,
                           random_state=123)
                rsf.fit(df_dummy[pred[2:]], data_y)
                print(time_points)
                # Time-dependent AUC of the test-fold risk scores.
                cph_risk_scores = rsf.predict(df_dummy_test[pred[2:]])
                cph_auc, cph_mean_auc = cumulative_dynamic_auc(
                    data_y, data_y_test, cph_risk_scores,time_points
                )
                cph_aucs.loc[cv,time_points] = cph_auc
                cph_aucs.loc[cv,'mean'] = cph_mean_auc
                # Persist the fitted forest, survival curves and risk scores per fold.
                joblib.dump(rsf, f'{model_path}/{fname}/{name}{pop_kind}/modelrsf_CV{cv}.joblib')
                pred_surv = rsf.predict_survival_function(df_dummy_test[pred[2:]],return_array=True)
                np.save(f'{model_path}/{fname}/{name}{pop_kind}/rsf_testpred_CV{cv}.csv',pred_surv)
                df_dummy_test.to_csv(f'{model_path}/{fname}/{name}{pop_kind}/rsftest_cv{cv}.csv')
                df_dummy_test['y_risk'] = cph_risk_scores
                df_dummy_test[['Status','Survival_in_years','y_risk']].to_csv(f'{model_path}/{fname}/{name}{pop_kind}/rsf_testrisk_CV{cv}.csv')
                #plt.savefig('/scratch/c.c21013066/images/ukbiobank/accelerometer/models/coxphsurvival_prodhc_auc_time_test.png',bbox_inches="tight")
                #result = inspection.permutation_importance(
                #    rsf, df_dummy_test[pred[2:]], data_y_test, n_repeats=15, random_state=123
                #    )
                #features = pd.DataFrame(
                #    {k: result[k] for k in ("importances_mean", "importances_std",)},
                #    index=pred[2:]
                #    ).sort_values(by="importances_mean", ascending=False)
                #features.to_csv(f'/scratch/c.c21013066/data/ukbiobank/analyses/survival/prodromal/{fname}/{name}/rsffeatures_cv{cv}.csv')
            cph_aucs.to_csv(f'{model_path}/{fname}/{name}{pop_kind}/rsf_aucs_5cv.csv')
cph_aucs.to_csv(f'{model_path}/{fname}/{name}{pop_kind}/rsf_aucs_5cv.csv') | aschalkamp/UKBBprodromalPD | analyses/4_survival_model/rsf_POPmodels.py | rsf_POPmodels.py | py | 10,162 | python | en | code | 8 | github-code | 90 |
1919468004 | ## space:o(n)
## time:o(n)
class Solution:
    # O(n) time, O(1) extra space: two pointers converging from both ends.
    def isPalindrome(self, s: str) -> bool:
        """Return True if s is a palindrome when only ASCII alphanumeric
        characters are considered and case is ignored."""
        left, right = 0, len(s) - 1
        while left < right:
            # Skip characters that are not alphanumeric on either side.
            if not self.alphaNum(s[left]):
                left += 1
                continue
            if not self.alphaNum(s[right]):
                right -= 1
                continue
            # Compare the current pair case-insensitively.
            if s[left].lower() != s[right].lower():
                return False
            left, right = left + 1, right - 1
        return True

    def alphaNum(self, c):
        """True if c is a single ASCII letter or digit."""
        # Single-character string comparison follows code-point order,
        # so these range checks are equivalent to comparing ord() values.
        return ('A' <= c <= 'Z') or ('a' <= c <= 'z') or ('0' <= c <= '9')
#solution_instance = Solution()
#result = solution_instance.isPalindrome("A man, a plan, a canal: #Panama")
#print(result) # Should print True
#
| mohdabdulrahman297/Leetcode | 0125-valid-palindrome/0125-valid-palindrome.py | 0125-valid-palindrome.py | py | 1,599 | python | en | code | 0 | github-code | 90 |
18308718100 | from pyrogram import Client, filters
from plugins.menu import keyboards
import utils
import db
@Client.on_message(filters.regex(pattern='^.*меню.*$') & filters.private & utils.check_user)
async def menu(_, message):
    """
    Handler fired when the user wants to switch to menu mode
    (any private message containing "меню" from an allowed user).

    :param _: Telegram client instance; not needed here
    :param message: the incoming message
    :return:
    """
    # Keep the stored username in sync with the user's current Telegram username.
    utils.update_username_in_db_if_not_matches(message.from_user.id, message.from_user.username)
    text = f"Ты вышел в главное меню.\n" \
           f"Выбери, что будем делать дальше на клавиатуре внизу экрана"
    # Remember that this user is now in 'menu' mode.
    db.set_task_type(message.from_user.id, 'menu')
    await message.reply_text(text, reply_markup=keyboards.menu_keyboard)
@Client.on_message(filters.command('keyboard') & filters.private & utils.check_user &
                   utils.task_type_filter('menu'))
async def keyboard(_, message):
    """
    Re-send the menu keyboard to the user (/keyboard command, menu mode only).

    :param _: Telegram client instance; not needed here
    :param message: the incoming message
    :return:
    """
    await message.reply_text('Воть ^-^', reply_markup=keyboards.menu_keyboard)
| tardigrada-agency/telegram_bot | plugins/menu/menu.py | menu.py | py | 1,437 | python | ru | code | 0 | github-code | 90 |
18114013289 | from enum import Enum
from queue import Queue
import collections
import sys
import math
# Common contest-template constants (unused by this particular solution,
# kept for compatibility with the author's template).
BIG_NUM = 2000000000
MOD = 1000000007
EPS = 0.000000001
# Fix: the original `global n,k` / `global ws` statements were removed —
# `global` is a no-op at module scope (n, k and ws are already module
# globals, and iSok() reads them as such without any declaration).
def iSok(Pmax):
    """Return True if the first n weights in ws, loaded greedily in order,
    fit on at most k trucks of capacity Pmax (module globals n, k, ws)."""
    trucks_used = 0
    load = 0
    for idx in range(n):
        w = ws[idx]
        # A single package heavier than the capacity can never be loaded.
        if w > Pmax:
            return False
        if load + w > Pmax:
            # Current truck is full: seal it and start the next one.
            trucks_used += 1
            load = w
        else:
            load += w
    # Account for the final, partially filled truck.
    if load > 0:
        trucks_used += 1
    return trucks_used <= k
# Read package count n and truck count k, then the n package weights.
n, k = map(int, input().split())
now = 0  # kept from the original template; unused below
ws = [int(input()) for _ in range(n)]

# Binary search the minimal feasible truck capacity.
# Invariant: iSok(hi) is True (the total weight always fits) and lo is the
# largest capacity known to be infeasible.
lo, hi = 0, sum(ws)
while hi - lo > 1:
    mid = (lo + hi) // 2
    if iSok(mid):
        hi = mid
    else:
        lo = mid
print(hi)
| Aasthaengg/IBMdataset | Python_codes/p02270/s686057966.py | s686057966.py | py | 733 | python | en | code | 0 | github-code | 90 |
18445848999 | import sys
input = sys.stdin.readline
# A - Anti-Adjacency
N, K = map(int, input().split())
count = 0
for i in range(1, N + 1, 2):
count += 1
if count >= K:
print('YES')
else:
print('NO') | Aasthaengg/IBMdataset | Python_codes/p03129/s857292987.py | s857292987.py | py | 193 | python | en | code | 0 | github-code | 90 |
33121232686 | #this file stores standard roots for our website
from flask import Blueprint,render_template, request, flash, redirect, url_for
from flask_login import login_required, current_user
from website.models import Contract_employees, Non_contract_employees, User
from website import db
#define the file as blueprint of the application
views = Blueprint('views',__name__)
@views.route('/')
@login_required
def home():
    """Dashboard: render all contract and non-contract employees.

    The current user's privilege level (``priv``) is passed to the template.
    """
    contract_employee=Contract_employees.query.all()
    non_contract_employee=Non_contract_employees.query.all()
    return render_template("home.html", new_user=current_user,no_contract_info=non_contract_employee,employees_info=contract_employee, priv=current_user.priv),200
#Add a new contract employee
@views.route("/new_contract_employee",methods=['POST','GET'])
@login_required
def new_contract_employee():
if request.method=='POST':
firstname=request.form.get('firstname')
lastname=request.form.get('lastname')
email=request.form.get('email')
address=request.form.get('address')
joined=request.form.get('joined')
role=request.form.get('role')
employee=Contract_employees(firstname=firstname,
lastname=lastname,
email=email,
address=address,
joined=joined,
role=role)
db.session.add(employee)
db.session.commit()
flash('Employee has been added')
return redirect(url_for("views.home"))
return render_template("new_contract_employee.html",new_user=current_user)
@views.route("/new_no_contract_employee",methods=['POST','GET'])
@login_required
def new_no_contract_employee():
if request.method=='POST':
firstname=request.form['firstname']
lastname=request.form['lastname']
email=request.form['email']
contact=request.form['contact']
role=request.form['role']
employee=Non_contract_employees(firstname=firstname,
lastname=lastname,
email=email,
contact=contact,
role=role)
db.session.add(employee)
db.session.commit()
flash('No contract employee has been added')
return redirect(url_for("views.home"))
return render_template("new_no_contract_employee.html", new_user=current_user)
@views.route("/edit_employee/<string:id>",methods=['POST','GET'])
@login_required
def edit_employee(id):
employee=Contract_employees.query.get_or_404(id)
if request.method=='POST':
employee.firstname=request.form.get('Firstname')
employee.lastname=request.form.get('Lastname')
employee.email=request.form.get('Email')
employee.address=request.form.get('Address')
employee.joined=request.form.get('Joined')
employee.role=request.form.get('Role')
db.session.add(employee)
db.session.commit()
flash('Employee has been updated')
return redirect(url_for("views.home"))
return render_template("edit_employee.html",employees_info=employee, new_user=current_user)
#Edit non-contract employee
@views.route("/edit_no_contract_employee/<string:id>",methods=['POST','GET'])
@login_required
def edit_no_contract_employee(id):
employee=Non_contract_employees.query.get_or_404(id)
if request.method=='POST':
employee.firstname=request.form.get('Firstname')
employee.lastname=request.form.get('Lastname')
employee.email=request.form.get('Email')
employee.contact=request.form.get('Contact')
employee.role=request.form.get('Role')
db.session.commit()
flash('No contract employee has been updated')
return redirect(url_for("views.home"))
return render_template("edit_no_contract_employee.html",no_contract_info=employee, new_user=current_user)
#Delete a contract employee
@views.route("/delete_employee/<int:id>",methods=['GET','POST'])
@login_required
def delete_employee(id):
if current_user.priv=="1":
flash('You are not permitted to execute this action')
return redirect(url_for("views.home", new_user=current_user))
else:
employee=Contract_employees.query.get_or_404(id)
db.session.delete(employee)
db.session.commit()
flash('Employee deleted')
return redirect(url_for("views.home", new_user=current_user))
#Delete a non-contract employee
@views.route("/delete_no_contract_employee/<int:id>",methods=['GET','POST'])
@login_required
def delete_no_contract_employee(id):
if current_user.priv=="1":
flash('You are not permitted to execute this action')
return redirect(url_for("views.home", new_user=current_user))
else:
employee=Non_contract_employees.query.get(id)
db.session.delete(employee)
db.session.commit()
flash('Non-contract employee deleted')
return redirect(url_for("views.home", new_user=current_user))
| leannesal/flask-app | src/website/views.py | views.py | py | 5,120 | python | en | code | 0 | github-code | 90 |
14607662691 | import pickle
from pandas import DataFrame
import numpy as np
import pandas as pd
import streamlit as st
import xgboost as xgb
from xgboost import XGBClassifier,XGBRegressor
from streamlit import beta_columns
from PIL import Image
import streamlit as st
#from sklearn.externals import joblib
import sqlite3
import pyodbc
import os
from streamlit import caching
import shutil
import datetime
from pathlib import Path
import joblib
#############################################################
#############################################################
# SQL Server connection for blast-operation records (Windows authentication).
conn2 = pyodbc.connect('Driver={ODBC Driver 17 for SQL Server};'
                       'Server= VAIO;'
                       'Database=Blast;'
                       'Trusted_Connection=yes;')
c2 = conn2.cursor()
# Local sqlite database used only for user accounts.
conn = sqlite3.connect('data.db')
c = conn.cursor()
# Security
#passlib,hashlib,bcrypt,scrypt
import hashlib
def make_hashes(password):
    """Return the hex SHA-256 digest of the given password string."""
    digest = hashlib.sha256(password.encode())
    return digest.hexdigest()
def check_hashes(password,hashed_text):
    """Return hashed_text if password hashes to it, otherwise False."""
    return hashed_text if make_hashes(password) == hashed_text else False
#################################################
#def create_table():
#c2.execute('CREATE TABLE IF NOT EXISTS Blast(BBN TEXT,RT TEXT,HD INTEGER,LF INTEGER,LM INTEGER,WM INTEGER,WL INTEGER,RLW INTEGER,FE INTEGER)')
def add_data(BBN,sdt,edt,opl,opo,opd,opk,bot,bet,tyde,exdr,RT,Pos,HD,RLW,BO,B,SO,S,ST,SC,QT,QP,HN,ET,Q,PT,PQ,ISS,PSN,AL,TD,VB,WB,SP,SD,BU,FE,MU,OV,TF,Di,AEmulan,DRCT,DRCM,DRCB,FDT,FDM,FDB,OFT,OFM,OFB,RTT,RTM,RTB,LF,LM,LL,WF,WM,WL,QAnfo,QAzar,QEmulan,twbooster,thbooster,Abooster,deli,delh,delr,exwa,patt,tyci):
    """Insert one complete blast-block record into the bdata2 table.

    All 73 parameters map positionally onto the 73 columns / 73 placeholders
    below — keep the three lists in sync when adding a field.

    NOTE(review): the call site passes OFB,OFM,OFB for the OFT,OFM,OFB
    slots (OFT is never forwarded) — looks like a caller typo; verify.
    """
    c2.execute('INSERT INTO bdata2(BBN,sdt,edt,opl,opo,opd,opk,bot,bet,tyde,exdr,RT,Pos,HD,RLW,BO,B,SO,S,ST,SC,QT,QP,HN,ET,Q,PT,PQ,ISS,PSN,AL,TD,VB,WB,SP,SD,BU,FE,MU,OV,TF,Di,AEmulan,DRCT,DRCM,DRCB,FDT,FDM,FDB,OFT,OFM,OFB,RTT,RTM,RTB,LF,LM,LL,WF,WM,WL,QAnfo,QAzar,QEmulan,twbooster,thbooster,Abooster,deli,delh,delr,exwa,patt,tyci) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',(BBN,sdt,edt,opl,opo,opd,opk,bot,bet,tyde,exdr,RT,Pos,HD,RLW,BO,B,SO,S,ST,SC,QT,QP,HN,ET,Q,PT,PQ,ISS,PSN,AL,TD,VB,WB,SP,SD,BU,FE,MU,OV,TF,Di,AEmulan,DRCT,DRCM,DRCB,FDT,FDM,FDB,OFT,OFM,OFB,RTT,RTM,RTB,LF,LM,LL,WF,WM,WL,QAnfo,QAzar,QEmulan,twbooster,thbooster,Abooster,deli,delh,delr,exwa,patt,tyci))
    conn2.commit()
def add_path(p):
    """Insert a bdata2 row holding only the blast-block image file path.

    NOTE(review): this creates a *new* row rather than attaching the image
    to the row just written by add_data — confirm that is intended.
    """
    # Pass parameters as an explicit 1-tuple: the original `(p)` is just the
    # bare string (parentheses alone don't make a tuple). pyodbc happens to
    # accept a lone scalar, but the 1-tuple is the portable DB-API form and
    # binds identically here.
    c2.execute('INSERT INTO bdata2(image) VALUES (?)', (p,))
    conn2.commit()
#############################################
def create_usertable():
    """Create the sqlite users table (username, password) if it is missing."""
    c.execute('CREATE TABLE IF NOT EXISTS userstable(username TEXT,password TEXT)')
def add_userdata(username,password):
    """Insert a (username, password) row into the sqlite users table.

    The password is stored as given; callers are expected to pass the
    make_hashes() digest — confirm at the signup call site.
    """
    c.execute('INSERT INTO userstable(username,password) VALUES (?,?)',(username,password))
    conn.commit()
def login_user(username,password):
    """Return rows matching the given username/password pair ([] if none).

    The query is parameterized, so the credentials are safely escaped.
    """
    c.execute('SELECT * FROM userstable WHERE username =? AND password = ?',(username,password))
    data = c.fetchall()
    return data
def view_all_users():
    """Return every (username, password) row from the users table."""
    c.execute('SELECT * FROM userstable')
    data = c.fetchall()
    return data
#def main():
st.markdown('<style>body{background-color: rgb(159, 177, 188);}</style>',unsafe_allow_html=True)
#st.markdown('<html><style>div{background-image:linear-gradient(to right,red,blue);}</style><html>',unsafe_allow_html=True)
st.markdown("""<style>.css-1aumxhk {
background-color:rgb(110, 136, 151);
background-image: none;
color: #ffffff
}</style>""", unsafe_allow_html=True)
st.markdown("""
<style>
.big-font {
font-size:37px !important;font-family:"B mitra", serif; color:rgb(47, 82, 102);text-align: center;
}
</style><p class="big-font">سامانه پایش عملیات آتشکاری معدن سنگ آهن چغارت</p>
""", unsafe_allow_html=True)
#st.title("Record and Analyze Blast Operation Data")
menu = ["Home","Login","SignUp"]
choice = st.sidebar.selectbox("Menu",menu)
if choice == "Home":
st.markdown("""
<style>
.big-font {
font-size:37px !important;font-family:"B mitra", serif; color:rgb(47, 82, 102);text-align: center;
}
</style><p class="big-font">(امور ناریه حراست شرکت سنگ آهن مرکزی ایران)</p>
""", unsafe_allow_html=True)
#st.subheader("Explosion Monitoring Software")
pic152 = Image.open('mine.jpg')
st.image(pic152, use_column_width=True)
elif choice == "Login":
#st.subheader("Login Section")
username = st.sidebar.text_input("User Name")
password = st.sidebar.text_input("Password",type='password')
if st.sidebar.checkbox("Login"):
# if password == '12345':
create_usertable()
hashed_pswd = make_hashes(password)
result = login_user(username,check_hashes(password,hashed_pswd))
if result:
#st.success("Logged In as {}".format(username))
#st.header("Goharzamin Iron Ore Mine CIBB")
pages = ["Home", "Input Blast Data","Edit Blast Data","Prediction"]
page = st.sidebar.radio("Menu Bar", options=pages)
#st.title(page)
if page == "Input Blast Data":
#st.subheader("متغیرها را وارد کنید")
st.markdown("""
<style>
.big-font {
font-size:40px !important;font-family:"B mitra", serif; color: rgb(47, 82, 102);text-align: center;
}
</style><p class="big-font">فرم ورودی داده ها</p>
""", unsafe_allow_html=True)
col64, col65,col66=st.beta_columns(3)
with col64:
BBN = st.text_input('شماره بلوک انفجاری')
with col65:
today = datetime.date.today()
sdt = st.date_input("تاریخ شروع حفاری",value=today)
with col66:
edt = st.date_input("تاریخ اتمام حفاری")
col67, col68,col69,col70=st.beta_columns(4)
with col67:
opl = st.text_input('نماینده پیمانکار')
with col68:
opo = st.text_input("نماینده کارفرما")
with col69:
opd = st.text_input("نماینده دستگاه نظارت")
with col70:
opk = st.text_input("سرپرست استخراج")
col67, col68,col69,col70=st.beta_columns(4)
with col67:
bot = st.number_input('تراز فعلی بلوک')
with col68:
bet = st.number_input("تراز نهایی بلوک")
with col69:
tyde = st.text_input("کد و نوع دستگاه حفاری")
with col70:
exdr = st.number_input("اضافه حفاری")
col1, col2,col3,col4 =st.beta_columns(4)
with col1:
STONE=['Magnetite', 'Hematite', 'Soil', 'Waste Rock', 'Cong-Waste Rock',
'Conglomerate', 'magn-Waste Rock', 'Soil-Cong', 'Soil-Waste Rock',
'So-Co-Waste Rock']
RT=st.selectbox('نوع سنگ',STONE)
with col2:
Posi=['شمال', 'جنوب', 'غرب', 'شرق', 'شمال غرب',
'شمال شرق', 'جنوب غرب', 'جنوب شرق']
Pos = st.selectbox('موقعیت:',Posi)
with col3:
HD=st.selectbox('قطر چال',(8.5,10.5,6.5,7.5))
with col4:
RLW = st.number_input("نسبت طول به عرض بلوک")
col5, col6,col7,col8 =st.beta_columns(4)
with col5:
BO = st.number_input("بردن (بار سنگ) طراحی")
with col6:
B = st.number_input("بردن عملیاتی")
with col7:
SO = st.number_input("اسپیسینگ طراحی")
with col8:
S = st.number_input("اسپیسینگ عملیاتی")
col9, col10,col11,col12 =st.beta_columns(4)
with col9:
ST = st.number_input("طول انسداد ", step= 0.1)
with col10:
SC = st.number_input("خرج ویژه")
with col11:
QT = st.number_input("مواد منفجره مصرفی در بلوک ",step=1.0)
with col12:
QP = st.number_input("پرایمر مصرفی در بلوک ")
col13, col14,col15,col16 =st.beta_columns(4)
with col13:
HN = st.number_input("تعداد چال در بلوک",step=1.0)
with col14:
EETT=['Heavy ANFO', 'ANFO/AZAR', 'ANFO/EMULLAN', 'EMULAN', 'ANFO']
ET = st.selectbox("نوع ماده منفجره اصلی",EETT)
with col15:
Q = st.number_input("مقدار ماده منفجره در یک چال")
with col16:
PPTT =['EMULAN 30mm', 'EMULAN 35mm', 'EMULAN 40mm', 'EMULAN 65mm',
'EMULAN 90mm','booster Ib','booster(1/2)b','booster 2P','booster 3P']
PT = st.selectbox("نوع تقویت کننده انفجاری",PPTT)
col17, col18,col19,col20 =st.beta_columns(4)
with col17:
PQ = st.number_input("مقدار تقویت کننده در یک چال")
with col18:
IISS = ['Nonel PHC','Nonel PMS','Cortex','Nonel\Cortex']
ISS = st.selectbox("سیستم انفجاری",IISS)
with col19:
PSNN = ['Bottom','bottom\Middle']
PSN = st.selectbox("محل استقرار پرایمر",PSNN)
with col20:
AL = st.number_input("عمق میانگین چالها",step=0.1)
col21, col22,col23,col24 =st.beta_columns(4)
with col21:
TD = st.number_input("حفاری کل بلوک",step=0.1)
with col22:
VB = st.number_input("حجم سنگ بلوک")
with col23:
WB = st.number_input("تناژ سنگ بلوک")
with col24:
SP = st.number_input("پرایمر ویژه")
col25, col26,col27,col28 =st.beta_columns(4)
with col25:
SD = st.number_input("حفاری ویژه")
with col26:
BU = st.number_input("بولدر و ناکنی")
with col27:
FE = st.number_input("کارایی خردشدگی")
with col28:
MMUU = ['TYPE 1','TYPE 2','TYPE 3','TYPE 4','TYPE 5','TYPE 6','TYPE 7']
MU = st.selectbox("وضعیت کپه سنگ خرد شده",MMUU)
col29, col30,col31,col32 =st.beta_columns(4)
with col29:
OOVV = ['TYPE 1','TYPE 2','TYPE 3','TYPE 4','TYPE 5','TYPE 6','TYPE 7','TYPE 8']
OV = st.selectbox("شرایط شکستگی ناخواسته",OOVV)
with col30:
TTFF= ['TYPE 1','TYPE 2','TYPE 3','TYPE 4','TYPE 5']
TF = st.selectbox("وضعیت کف پله و پاشنه",TTFF)
with col31:
DDii = ['N','Y','M']
Di = st.selectbox("شرایط ترقیق",DDii)
with col32:
AEmulan = st.number_input("امولایت کارتریجی کل پترن(کیلوگرم)",step=1.0)
col33, col34,col35 =st.beta_columns(3)
with col33:
D1 = ['Integrated','Block','Crushed']
DRCT = st.selectbox("شرایط توده سنگی(1/3 بالایی)",D1)
with col34:
D2 = ['Integrated','Block','Crushed']
DRCM = st.selectbox("شرایط توده سنگی(1/3 میانی)",D2)
with col35:
D3 = ['Integrated','Block','Crushed']
DRCB = st.selectbox("شرایط توده سنگی(1/3 پایینی)",D3)
col36,col37,col38=st.beta_columns(3)
with col36:
D4 = ['M1','L1']
FDT = st.selectbox("فاصله داری شکستگی ها(1/3 بالایی)",D4)
with col37:
D5 = ['M1','L1']
FDM = st.selectbox("فاصله داری شکستگی ها(1/3 میانی)",D5)
with col38:
D6 = ['M1','L1']
FDB = st.selectbox("فاصله داری شکستگی ها(1/3 پایینی)",D6)
col39, col40,col41 =st.beta_columns(3)
with col39:
V1 = ['No layer','Horizontal','Sloping inwards','Sloping outwards']
OFT = st.selectbox("جهت داری شکستگی ها(1/3 بالایی)",V1)
with col40:
V2 = ['No layer','Horizontal','Sloping inwards','Sloping outwards']
OFM = st.selectbox("جهت داری شکستگی ها(1/3 میانی)",V2)
with col41:
V3 = ['No layer','Horizontal','Sloping inwards','Sloping outwards']
OFB = st.selectbox("جهت داری شکستگی ها(1/3 پایینی)",V3)
col45, col46,col47 =st.beta_columns(3)
with col45:
W1 = ['Soft Ore','Hard Ore','Soil','Soft Waste','Conglomerate']
RTT = st.selectbox("جنس بلوک انفجاری(1/3 بالایی)",W1)
with col46:
W2 = ['Soft Ore','Hard Ore','Soil','Soft Waste','Conglomerate']
RTM = st.selectbox("جنس بلوک انفجاری(1/3 میانی)",W2)
with col47:
W3 = ['Soft Ore','Hard Ore','Soil','Soft Waste','Conglomerate']
RTB = st.selectbox("جنس بلوک انفجاری(1/3 پایینی)",W3)
col48, col49,col50 =st.beta_columns(3)
with col48:
LF = st.number_input("میانگین عمق چالها-ردیف اول")
with col49:
LM = st.number_input("میانگین عمق چالها-ردیف میانی")
with col50:
LL = st.number_input("میانگین عمق چالها-ردیف آخر")
col51, col52,col53 =st.beta_columns(3)
with col51:
WF = st.number_input(" عمق آب به عمق چال-ردیف اول")
with col52:
WM = st.number_input("عمق آب به عمق چال-ردیف میانی")
with col53:
WL = st.number_input("عمق آب به عمق چال-ردیف آخر")
col54, col55,col56 =st.beta_columns(3)
with col54:
QAnfo = st.number_input("میزان آنفو مصرفی بلوک (کیلو گرم)")
with col55:
QAzar = st.number_input("میزان پودر آذر مصرفی بلوک (کیلو گرم)")
with col56:
QEmulan = st.number_input("میزان امولایت مصرفی بلوک (کیلو گرم)")
col57, col58,col59 =st.beta_columns(3)
with col57:
twbooster = st.number_input("تعداد بوستر مصرفی 2 پوندی")
with col58:
thbooster = st.number_input("تعداد بوستر مصرفی 3 پوندی")
with col59:
Abooster = st.number_input("بوستر مصرفی کل پترن (کیلو گرم)")
col71, col72,col73,=st.beta_columns(3)
with col71:
deli = st.number_input("تاخیر چاشنی درون چال")
with col72:
delh = st.number_input("تاخیر سطحی بین چالها")
with col73:
delr = st.number_input("تاخیر سطحی بین ردیفی")
col74,col75,col76=st.beta_columns(3)
with col74:
exwa = st.text_input('روش انفجار')
with col75:
patt = st.text_input("الگوی انفجار")
with col76:
tyci = st.text_input('نوع مداربندی')
pic = st.file_uploader("بارگذاری عکس بلوک انفجاری", type="jpg")
if st.button("ثبت بلوک انفجاری"):
add_data(BBN,sdt,edt,opl,opo,opd,opk,bot,bet,tyde,exdr,RT,Pos,HD,RLW,BO,B,SO,S,ST,SC,QT,QP,HN,ET,Q,PT,PQ,ISS,PSN,AL,TD,VB,WB,SP,SD,BU,FE,MU,OV,TF,Di,AEmulan,DRCT,DRCM,DRCB,FDT,FDM,FDB,OFB,OFM,OFB,RTT,RTM,RTB,LF,LM,LL,WF,WM,WL,QAnfo,QAzar,QEmulan,twbooster,thbooster,Abooster,deli,delh,delr,exwa,patt,tyci)
t = open(pic.name, "wb")
t.write(pic.read())
t.close()
b = '.jpg'
os.rename(pic.name,BBN+b)
source = BBN+b
destination = "demo"
new_path = shutil.move(source, destination)
p = os.path.abspath(source)
add_path(p)
st.success("You have successfully added Blast")
sql_query = pd.read_sql_query('SELECT * FROM Blast.dbo.bdata2',conn2)
st.write("DATABASE")
st.write(sql_query)
#st.sidebar.header("Goharzamin Iron Ore Mine CIBB")
elif page == "Home":
#st.write("شرکت بهینه راهبرد انفجار")
pic1 = Image.open('blasting-pic.jpg')
st.image(pic1, use_column_width=True)
elif page == "Prediction":
st.sidebar.header("متغیرها را وارد کنید")
        def user_input_features():
            """Collect the five model input features from Streamlit sidebar
            sliders and return them as a single-row DataFrame (the input
            shape expected by the pickled XGBoost models loaded below).

            Slider arguments are (label, min, max, default); labels are in
            Persian.
            """
            LF = st.sidebar.slider("میانگین عمق چالها در ردیف اول", 0.00, 20.00, 0.00)
            LM = st.sidebar.slider("میانگین عمق چالها در ردیفهای میانی", 0.00, 20.00, 0.0)
            WM = st.sidebar.slider("نسبت عمق آب به عمق چال ردیفهای میانی", 0.00, 1.00, 0.00)
            WL = st.sidebar.slider("نسبت عمق آب به عمق چال ردیف آخر", 0.00, 1.00, 0.00)
            RLW = st.sidebar.slider("نسبت طول به عرض بلوک انفجاری", 0.00, 25.00, 0.00)
            # NOTE(review): column order presumably must match the feature
            # order the models were trained with -- confirm before reordering.
            data = {
            'LF':LF,
            'LM':LM,
            'WM':WM,
            'WL':WL,
            'RLW':RLW,
            }
            return pd.DataFrame(data, index=[0])
df = user_input_features()
# Main
st.header("***Mine to Mill Optimization Projecte***")
#st.subheader(" پیش بینی کارایی خردایش ")
st.write("\n")
st.write("\n")
st.write("متغیرهای ورودی کاربر:")
st.write(df)
with open("xgboost.pkl", "rb") as f:
mdl = joblib.load(f)
predictions = mdl.predict(df)[0]
st.write("\n")
st.write("\n")
st.subheader("پیش بینی بر اساس مدل : XgBoost")
st.write(f"The predicted Fragmentation is: {(predictions)}")
with open("xgbBU.pkl", "rb") as f:
mdl = joblib.load(f)
predbu = mdl.predict(df)[0]
st.write(f"The predicted Bulder is: {(predbu)}")
st.write("\n")
with open("xgbTF.pkl", "rb") as f:
mdl = joblib.load(f)
predTF = mdl.predict(df)[0]
st.write(f"The predicted TF is: {(predTF)}")
if predTF == "type 1":
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">عدم نیاز به تسطیح؛ نبود پاشنه؛ نبود قوزک و ناهمواری های کف</p>
""", unsafe_allow_html=True)
#st.write("عدم نیاز به تسطیح؛ نبود پاشنه؛ نبود قوزک و ناهمواری های کف")
elif predTF == "type 2":
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">نیاز به بلدوزرکاری کم؛نیاز به چکش کاری کم؛نبود پاشنه؛قوزک و ناهمواری های کم در کف پله؛کارایی خوب بارکننده جهت بارگیری از کف پله</p>
""", unsafe_allow_html=True)
#st.write('نیاز به بلدوزرکاری کم؛ - نیاز به چکش¬کاری کم؛- نبود پاشنه؛ قوزک و ناهمواری های کم در کف پله؛ - کارایی خوب بارکننده جهت بارگیری از کف پله')
elif predTF == "type 3":
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">نیاز به بلدوزرکاری کم؛ نیاز به چکش کاری متوسط؛ وجود پاشنه کوچک در برخی نقاط بلوک؛قوزک و ناهمواری های کم در کف؛ کارایی خوب بارکننده جهت بارگیری از کف</p>
""", unsafe_allow_html=True)
#st.write('نیاز به بلدوزرکاری کم؛ نیاز به چکش کاری متوسط؛ وجود پاشنه کوچک در برخی نقاط بلوک؛قوزک و ناهمواری¬های کم در کف؛ کارایی خوب بارکننده جهت بارگیری از کف')
elif predTF == 'type 4':
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">بلدوزرکاری متوسط؛نیاز به چکش کاری زیاد؛وجود پاشنه در برخی از نقاط؛وجود قوزک کم و ناهمواری کم زمین؛کارایی متوسط بارکننده جهت بارگیری از کف</p>
""", unsafe_allow_html=True)
#st.write('بلدوزرکاری متوسط؛نیاز به چکش کاری زیاد؛وجود پاشنه در برخی از نقاط؛وجود قوزک کم و ناهمواری کم زمین؛کارایی متوسط بارکننده جهت بارگیری از کف')
else:
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">نیاز به بلدوزرکاری متوسط؛نیاز به چکش کاری زیاد؛وجود قوزک زیاد و شرایط ناهموار زمین؛وجود پاشنه نیازمند حفاری مجدد و کارایی بد بارکننده جهت بارگیری از کف</p>
""", unsafe_allow_html=True)
#t.write("نیاز به بلدوزرکاری متوسط؛نیاز به چکش کاری زیاد؛وجود قوزک زیاد و شرایط ناهموار زمین؛وجود پاشنه نیازمند حفاری مجدد و کارایی بد بارکننده جهت بارگیری از کف")
with open("xgbOV.pkl", "rb") as f:
mdl = joblib.load(f)
predOV = mdl.predict(df)[0]
st.write(f"The predicted OV is: {(predOV)}")
if predOV == "type 1":
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">وجود دیواره های صاف وبا شیب مناسب (تقریبا80)-عدم نیاز به لق گیری-محل استقرار دستگاه حفاری مناسب با ایمنی بالا -نبود ترک و شکستگی سطحی بلوک -عدم نیاز به بلدوزرکاری"</p>
""", unsafe_allow_html=True)
#st.write("وجود دیواره های صاف وبا شیب مناسب (تقریبا80)-عدم نیاز به لق گیری-محل استقرار دستگاه حفاری مناسب با ایمنی بالا -نبود ترک و شکستگی سطحی بلوک -عدم نیاز به بلدوزرکاری")
elif predOV == "type 2":
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">وجود دیواره های صاف وبا شیب مناسب (تقریبا80)- نیاز به لق گیری کم دیواره-محل استقرار دستگاه حفاری مناسب با ایمنی بالا -نبود ترک و شکستگی در سطح بلوک - نیاز به بلدوزرکاری کم</p>
""", unsafe_allow_html=True)
#st.write('وجود دیواره های صاف وبا شیب مناسب (تقریبا80)- نیاز به لق گیری کم دیواره-محل استقرار دستگاه حفاری مناسب با ایمنی بالا -نبود ترک و شکستگی در سطح بلوک - نیاز به بلدوزرکاری کم')
elif predOV == "type 3":
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">وجود دیواره های صاف وبا شیب مناسب (تقریبا80)-نیاز به لق گیری متوسط دیواره-محل استقرار دستگاه حفاری نسبتا مناسب با ایمنی متوسط -نبود ترک و شکستگی در سطح بلوک- عدم نیاز به بلدوزرکاری</p>
""", unsafe_allow_html=True)
#st.write('وجود دیواره های صاف وبا شیب مناسب (تقریبا80)-نیاز به لق گیری متوسط دیواره-محل استقرار دستگاه حفاری نسبتا مناسب با ایمنی متوسط -نبود ترک و شکستگی در سطح بلوک- عدم نیاز به بلدوزرکاری')
elif predOV == 'type 4':
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">وجود دیواره های صاف وبا شیب مناسب (تقریبا80)-نیاز به لق گیری کم دیواره-محل استقرار دستگاه حفاری نسبتا مناسب با ایمنی متوسط-وجود ترک و شکستگی کم در سطح بلوک -نیاز به بلدوزرکاری کم</p>
""", unsafe_allow_html=True)
#st.write('وجود دیواره های صاف وبا شیب مناسب (تقریبا80)-نیاز به لق گیری کم دیواره-محل استقرار دستگاه حفاری نسبتا مناسب با ایمنی متوسط-وجود ترک و شکستگی کم در سطح بلوک -نیاز به بلدوزرکاری کم')
elif predOV == 'type 5':
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">وجود دیواره های صاف وبا شیب مناسب (تقریبا80)-نیاز به لق گیری متوسط دیواره-محل استقرار دستگاه حفاری نسبتا مناسب با ایمنی متوسط -وجود ترک و شکستگی کم در سطح بلوک -نیاز به بلدوزرکاری کم</p>
""", unsafe_allow_html=True)
#st.write('وجود دیواره های صاف وبا شیب مناسب (تقریبا80)-نیاز به لق گیری متوسط دیواره-محل استقرار دستگاه حفاری نسبتا مناسب با ایمنی متوسط -وجود ترک و شکستگی کم در سطح بلوک -نیاز به بلدوزرکاری کم')
elif predOV == 'type 6':
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">وجود دیواره های صاف وبا شیب مناسب (تقریبا80)-نیاز به لق گیری زیاد دیواره-محل استقرار دستگاه حفاری نا مناسب با ایمنی کم-وجود ترک و شکستگی زیاد در سطح بلوک-نیاز به بلدوزرکاری زیاد</p>
""", unsafe_allow_html=True)
#st.write('وجود دیواره های صاف وبا شیب مناسب (تقریبا80)-نیاز به لق گیری زیاد دیواره-محل استقرار دستگاه حفاری نا مناسب با ایمنی کم -وجود ترک و شکستگی زیاد در سطح بلوک-نیاز به بلدوزرکاری زیاد')
elif predOV == 'type 7':
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">محل استقرار دستگاه حفاری نسبتا مناسب با ایمنی متوسط-نبود ترک و شکستگی در سطح بلوک-عدم نیاز به بلدوزرکاری-وجود دیواره با شکم دادگی بدلیل شرایط بد، جنس زمین- نیاز به لق گیری متوسط</p>
""", unsafe_allow_html=True)
#st.write("*-محل استقرار دستگاه حفاری نسبتا مناسب با ایمنی متوسط -نبود ترک و شکستگی در سطح بلوک - عدم نیاز به بلدوزرکاری")
#st.write("-وجود دیواره با شکم دادگی بدلیل شرایط بد جنس زمین - نیاز به لق گیری متوسط")
else:
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">وجود دیواره با شیب ملایم (55 تا 70) بدلیل وجود گسل -نیاز به لق گیری زیاد-محل استقرار دستگاه حفاری نا مناسب ایمنی کم-وجود ترک و شکستگی زیاد در سطح بلوک -نیاز به بلدوزرکاری زیاد</p>
""", unsafe_allow_html=True)
#st.write("محل استقرار دستگاه حفاری نا مناسب ایمنی کم -وجود ترک و شکستگی زیاد در سطح بلوک -نیاز به بلدوزرکاری زیاد")
#st.write("وجود دیواره با شیب ملایم (55 تا 70) بدلیل وجود گسل -نیاز به لق گیری زیاد ")
with open("xgbMU.pkl", "rb") as f:
mdl = joblib.load(f)
predMU= mdl.predict(df)[0]
st.write(f"The predicted MU is: {(predMU)}")
if predMU == "type 1":
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">مساحت خیلی زیاد پاک سازی؛ایمنی مناسب بارگیری؛ استقرار آسان دستگاههای بارکننده؛ قفل شدگی ندارد؛ کارایی خوب بارکننده کوچک در باطله و خاک؛ کارایی خوب بارکننده متوسط در سنگ آهن</p>
""", unsafe_allow_html=True)
pic1 = Image.open('1.jpg')
st.image(pic1, use_column_width=True)
#st.write('مساحت خیلی زیاد پاک سازی؛ایمنی مناسب بارگیری؛ استقرار آسان دستگاههای بارکننده؛ قفل شدگی ندارد؛ کارایی خوب بارکننده کوچک در باطله و خاک؛ کارایی خوب بارکننده متوسط در سنگ آهن')
elif predMU =="type 2":
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">مساحت کم پاک سازی؛ ایمنی متوسط بارگیری؛ استقرار سخت دستگاه بارکننده؛ احتمال قفل شدگی زیاد بار؛ نیاز به بلدوزر برای کم کردن ارتفاع بار (پایین نشاندن ارتفاع بار)؛ کارایی بد بارکننده ها؛</p>
""", unsafe_allow_html=True)
pic2 == Image.open('2.jpg')
st.image(pic2, use_column_width=True)
#st.write('مساحت کم پاک سازی؛ ایمنی متوسط بارگیری؛ استقرار سخت دستگاه بارکننده؛ احتمال قفل شدگی زیاد بار؛ نیاز به بلدوزر برای کم کردن ارتفاع بار (پایین نشاندن ارتفاع بار)؛ کارایی بد بارکننده ها؛ ')
elif predMU == "type 3":
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">استقرار سخت دستگاه بارکننده؛ احتمال قفل¬شدگی زیاد بار؛ نیاز به بلدوزر برای کم کردن ارتفاع بار (پایین نشاندن ارتفاع بار)؛ کارایی بد بارکننده ها؛- مساحت کم پاک سازی؛ ایمنی بد بارگیری؛</p>
""", unsafe_allow_html=True)
pic3 = Image.open('3.jpg')
st.image(pic3, use_column_width=True)
#st.write('استقرار سخت دستگاه بارکننده؛ احتمال قفل¬شدگی زیاد بار؛ نیاز به بلدوزر برای کم کردن ارتفاع بار (پایین نشاندن ارتفاع بار)؛ کارایی بد بارکننده ها؛- مساحت کم پاک سازی؛ ایمنی بد بارگیری؛')
elif predMU == 'type 4':
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">مساحت زیاد پاک سازی؛ ایمنی خوب بارگیری؛استقرار آسان دستگاه بارکننده؛ قفل شدگی ندارد؛ کارایی خوب بارکننده؛</p>
""", unsafe_allow_html=True)
pic4 = Image.open('4.jpg')
st.image(pic4, use_column_width=True)
#st.write('مساحت زیاد پاک سازی؛ ایمنی خوب بارگیری؛استقرار آسان دستگاه بارکننده؛ قفل شدگی ندارد؛ کارایی خوب بارکننده متوسط؛ ')
elif predMU == 'type 5':
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">مساحت متوسط پاک سازی؛ایمنی خوب بارگیری؛استقرار نسبتا آسان دستگاه بارکننده؛احتمال قفل شدگی متوسط بار؛کارایی خوب بارکننده ؛ </p>
""", unsafe_allow_html=True)
pic4 = Image.open('5.jpg')
st.image(pic4, use_column_width=True)
#st.write('مساحت متوسط پاک سازی؛ - ایمنی خوب بارگیری؛ - استقرار نسبتا آسان دستگاه بارکننده؛ - احتمال قفل شدگی متوسط بار؛ - کارایی خوب بارکننده متوسط؛ ')
elif predMU == 'type 6':
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">مساحت متوسط پاک سازی؛ایمنی متوسط بارگیری؛ استقرار نسبتا آسان دستگاه بارکننده؛احتمال قفل¬شدگی متوسط بار؛ نیاز به بلدوزر برای کم کردن ارتفاع بار(پایین نشاندن ارتفاع بار) ؛کارایی متوسط بارکننده؛</p>
""", unsafe_allow_html=True)
pic4 = Image.open('6.jpg')
st.image(pic4, use_column_width=True)
#st.write('مساحت متوسط پاک سازی؛ ایمنی متوسط بارگیری؛ استقرار نسبتا آسان دستگاه بارکننده؛ احتمال قفل¬شدگی متوسط بار؛ نیاز به بلدوزر برای کم کردن ارتفاع بار(پایین نشاندن ارتفاع بار) ؛کارایی متوسط بارکننده متوسط؛')
else:
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">مساحت زیاد پاک سازی؛ایمنی خوب بارگیری؛استقرار آسان دستگاه بارکننده؛احتمال قفل شدگی کم بار؛کارایی خوب بارکننده؛</p>
""", unsafe_allow_html=True)
pic5 = Image.open('7.jpg')
st.image(pic5, use_column_width=True)
#st.write('مساحت زیاد پاک سازی؛ایمنی خوب بارگیری؛استقرار آسان دستگاه بارکننده؛ احتمال قفل شدگی کم بار؛کارایی خوب بارکننده متوسط؛')
elif page == "Edit Blast Data":
st.markdown("""
<style>
.big-font {
font-size:25px !important;font-family:"B mitra", serif;
}
</style> <p class="big-font">اصلاح و ویرایش داده ها</p>
""", unsafe_allow_html=True)
#St.write("Edit Blast Data")
#BBNe = st.text_input('شماره بلوک انفجاری')
var=['RT','BBN','HD','LF','LM','WM','WL','RLW','FE']
kk=st.selectbox('متغیر را برای تغییر مقدار انتخاب کنید:',var)
if kk == 'RT':
def UP_DATE(i,j):
c2.execute('UPDATE bdata2 SET RT =? WHERE BBN =? ',(i,j))
conn2.commit()
m = st.text_input(':شماره بلوک انفجاری')
STONE=['Magnetite', 'Hematite', 'Soil', 'Waste Rock', 'Cong-Waste Rock',
'Conglomerate', 'magn-Waste Rock', 'Soil-Cong', 'Soil-Waste Rock',
'So-Co-Waste Rock']
k = st.selectbox('New Rock Type:',STONE)
if st.button("UPDATE row"):
UP_DATE(k,m)
st.success("You have successfully Update Blast")
script =("""SELECT * FROM Blast.dbo.bdata2""")
c2.execute(script)
ql_query = pd.read_sql_query(script,conn2)
st.write(ql_query)
elif kk == 'HD':
def UP_DATE(i,j):
c2.execute('UPDATE bdata2 SET HD =? WHERE BBN =? ',(i,j))
conn2.commit()
m = st.text_input(':شماره بلوک انفجاری')
k = st.radio('New Hole Diameter:',( 8.5, 10.5, 6.5, 7.5))
UP_DATE(k,m)
elif kk == 'LF':
def UP_DATE(i,j):
c2.execute('UPDATE bdata2 SET LF =? WHERE BBN =? ',(i,j))
conn2.commit()
m = st.text_input(':شماره بلوک انفجاری')
k = st.slider("New LF", 0, 20, 0)
UP_DATE(k,m)
elif kk == 'LM':
def UP_DATE(i,j):
c2.execute('UPDATE bdata2 SET LM =? WHERE BBN =? ',(i,j))
conn2.commit()
m = st.text_input(':شماره بلوک انفجاری')
k = st.slider("New LM", 0, 20, 0)
UP_DATE(k,m)
elif kk == 'WM':
def UP_DATE(i,j):
c2.execute('UPDATE bdata2 SET WM =? WHERE BBN =? ',(i,j))
conn2.commit()
m = st.text_input(':شماره بلوک انفجاری')
k = st.slider("New WM", 0.0, 1.0, 0.0)
UP_DATE(k,m)
elif kk == 'WL':
def UP_DATE(i,j):
c2.execute('UPDATE bdata2 SET WL =? WHERE BBN =? ',(i,j))
conn2.commit()
m = st.text_input(':شماره بلوک انفجاری')
k = st.slider("New WL", 0.0, 1.0, 0.0)
UP_DATE(k,m)
elif kk == 'RLW':
def UP_DATE(i,j):
c2.execute('UPDATE bdata2 SET RLW =? WHERE BBN =? ',(i,j))
conn2.commit()
m = st.text_input(':شماره بلوک انفجاری')
k = st.slider("New RLW", 0, 25, 0)
UP_DATE(k,m)
elif kk == 'FE':
def UP_DATE(i,j):
c2.execute('UPDATE bdata2 SET FE =? WHERE BBN =? ',(i,j))
conn2.commit()
m = st.text_input(':شماره بلوک انفجاری')
k = st.slider("New FE", 0.0, 1.0, 0.0)
UP_DATE(k,m)
elif kk == 'BBN':
def UP_DATE(i,j):
c2.execute('UPDATE bdata2 SET BBN =? WHERE id =? ',(i,j))
conn2.commit()
m = st.text_input(':Block ID')
k = st.text_input(':شماره جدید بلوک انفجاری')
UP_DATE(k,m)
else:
st.warning("Incorrect Username/Password")
elif choice == "SignUp":
st.subheader("Create New Account")
new_user = st.text_input("Username")
new_password = st.text_input("Password",type='password')
if st.button("Signup"):
create_usertable()
add_userdata(new_user,make_hashes(new_password))
st.success("You have successfully created a valid Account")
st.info("Go to Login Menu to login")
#if __name__ == '__main__':
#main()
#############################################################
#############################################################
| Mohammadseif/st | stcopy5.py | stcopy5.py | py | 47,797 | python | fa | code | 0 | github-code | 90 |
36423552004 | """
ПРИМЕЧАНИЕ: Мне пришлось удалить строчки database, т.к при подключении
к локальному серверу MySQL, программа не могла найти базу данных под именем.
Необходимо было в UI MySQL Workbench создавать вручную базу данных (SCHEMAS).
Поэтому, код ниже подключается к серверу, создавая базу данных на нем,
и после коннектится к созданной БД. Так же добавил вывод запроса в GUI Dearpygui.
При запуске необходимо проверить пароль в конфиге!
"""
# Модуль tabulate преображает возвращаемый запрос в соответствующую табличку(консоль)
import pymysql
import dearpygui.dearpygui as dpg
from tabulate import tabulate
from config import host, password, user
def tabulateQuery(rows, transform, width, height):
    """Render a query result both in the console and as a Dear PyGui table.

    Parameters
    ----------
    rows : list[dict]
        Result set as returned by a pymysql DictCursor ``fetchall()``.
    transform : str
        Tag and label for the created Dear PyGui window.
    width, height : int
        Screen position (x, y) at which the window is placed.
    """
    # Column names come from the first row; an empty result yields no columns.
    column_names = rows[0].keys() if rows else []
    # Flatten each dict row into a plain list of cell values.
    cell_rows = [list(record.values()) for record in rows]
    # Console rendering via tabulate.
    rendered = tabulate(cell_rows, column_names, tablefmt="grid")
    with dpg.window(tag=transform, label=transform, width=400, height=200):
        with dpg.table(header_row=True):
            for name in column_names:
                dpg.add_table_column(label=name)
            for record in cell_rows:
                with dpg.table_row():
                    for cell in record:
                        dpg.add_text(str(cell))
    dpg.set_item_pos(transform, (width, height))
    print(rendered)
def main():
    """Connect to a local MySQL server, build two demo tables (sales, orders),
    and display both the raw data and CASE-expression queries in the console
    (tabulate) and in Dear PyGui windows.

    Connection parameters (host, user, password) come from config.py; any
    connection failure is reported and swallowed at the bottom of the function.
    """
    try:
        connection = pymysql.connect(
            host=host,
            port=3306,
            user=user,
            password=password,
            cursorclass=pymysql.cursors.DictCursor
        )
        print("Connection to server ...OK")
        try:
            dpg.create_context()
            cursor = connection.cursor()
            # Create the database (if missing) and switch to it.
            cursor.execute("CREATE DATABASE IF NOT EXISTS task_1;")
            connection.commit()
            cursor.execute("USE task_1;")
            print("DB task_1 created ...OK")
            # Drop the table if it was created on an earlier run.
            drom_query = "DROP TABLE IF EXISTS sales;"
            cursor.execute(drom_query)
            # Create the sales table.
            creat_query = "CREATE TABLE IF NOT EXISTS sales" \
                          "(id INT PRIMARY KEY AUTO_INCREMENT," \
                          "order_date DATE NOT NULL," \
                          "count_product INT);"
            cursor.execute(creat_query)
            print("Added table 'SALES' ...OK")
            # Insert demo rows into the sales table.
            insert_query = "INSERT sales(order_date, count_product) VALUES" \
                           "('2022-01-01', 156)," \
                           "('2022-01-02', 180)," \
                           "('2022-01-03', 21)," \
                           "('2022-01-04', 124)," \
                           "('2022-01-05', 341);"
            cursor.execute(insert_query)
            connection.commit()
            print("Information added to table 'SALES'...OK")
            # Show the full contents of the sales table.
            print("\nТаблица 'SALES':")
            cursor.execute("SELECT * FROM sales")
            rows = cursor.fetchall()
            tabulateQuery(rows, "SALES", 0, 0)
            # Classify each sale into small/avarage/big via a CASE expression.
            print("\nВыборка по типу заказа:")
            cursor.execute("SELECT id AS 'id order'," \
                           "CASE WHEN count_product < 100 THEN 'small order'" \
                           "WHEN count_product > 100 AND count_product < 300 THEN 'avarage order'" \
                           "WHEN count_product > 300 THEN 'big order'" \
                           "ELSE 'error'" \
                           "END AS 'order type'" \
                           "FROM sales;")
            rows = cursor.fetchall()
            tabulateQuery(rows, "Query from SALES", 400, 0)
            # Same sequence for the orders table: drop, create, insert.
            drom_query = "DROP TABLE IF EXISTS orders;"
            cursor.execute(drom_query)
            creat_query = "CREATE TABLE IF NOT EXISTS orders" \
                          "(id INT PRIMARY KEY AUTO_INCREMENT," \
                          "employee_id VARCHAR(45) NOT NULL," \
                          "amount FLOAT NOT NULL," \
                          "order_status VARCHAR(45) NOT NULL);"
            cursor.execute(creat_query)
            print("Added table 'ORDERS' ...OK")
            insert_query = "INSERT orders(employee_id, amount, order_status) VALUES" \
                           "('e03', 15.00, 'OPEN')," \
                           "('e01', 25.50, 'OPEN')," \
                           "('e05', 100.70, 'CLOSED')," \
                           "('e02', 22.18, 'OPEN')," \
                           "('e04', 9.50, 'CANCELLED');"
            cursor.execute(insert_query)
            connection.commit()
            print("Information added to table 'ORDERS'...OK")
            print("\nТаблица 'ORDERS':")
            cursor.execute("SELECT * FROM orders")
            rows = cursor.fetchall()
            tabulateQuery(rows, "ORDERS", 0, 200)
            # Expand each status code into a human-readable description.
            print("\nВыборка по полному статусу заказа:")
            cursor.execute("SELECT id," \
                           "CASE WHEN order_status = 'OPEN' THEN 'Order is in open state'" \
                           "WHEN order_status = 'CLOSED' THEN 'Order is closed'" \
                           "WHEN order_status = 'CANCELLED' THEN 'Order is cancelled'" \
                           "ELSE 'error'" \
                           "END AS 'full_order_status'" \
                           "FROM orders;")
            rows = cursor.fetchall()
            tabulateQuery(rows, "Query from ORDERS", 400, 200)
            # Start the Dear PyGui render loop (blocks until the UI is closed).
            dpg.create_viewport(title="Custom title", width=850, height=450)
            dpg.setup_dearpygui()
            dpg.show_viewport()
            dpg.start_dearpygui()
            dpg.destroy_context()
        finally:
            # Always release the connection, even if a query above raised.
            connection.close()
    except Exception as ex:
        print("Connection refused")
        print(ex)

if __name__ == "__main__":
    main()
2818864125 | #Version1 - Basic program with one attempt
# Version 1 - basic program with one attempt.
# The original wrapped a single attempt in `while True` with a `break` on both
# branches; a plain if/else does exactly the same with less noise.
correct_num = 6
user_guess = int(input("What is your guess? "))
if user_guess == correct_num:
    print("Your Answer is correct")
else:
    print("Sorry your guess is incorrect")

# Version 2 - bounded guessing game with 5 attempts.
import random

# Game configuration.
lower_bound = 1
upper_bound = 100
guess_counter = 0    # guesses made so far
guess_remaining = 5  # attempts left

# Store the randomly generated target number.
# (A stray `random.randint(1, 100)` call whose result was discarded has been
# removed; the bounds now come from the variables above for consistency.)
correct_number = random.randint(lower_bound, upper_bound)

print(f"Guess the num between {lower_bound} and {upper_bound}. You have {guess_remaining} chances")

while True:
    user_guess = int(input("Enter your guess: "))  # ask the user for a guess
    guess_counter += 1
    guess_remaining -= 1
    # Report whether the guess is low, high, correct, or out of range.
    if lower_bound <= user_guess <= upper_bound:
        if user_guess == correct_number:
            print(f"Congrats! You got it in {guess_counter} guess")
            break
        elif user_guess < correct_number:
            print(f"Your guess is too low, try again! Guess remaining: {guess_remaining}")
        else:
            print(f"Your guess is too high, try again! Guess remaining: {guess_remaining}")
    else:
        print(f"Your guess is out of range. Please select a number between {lower_bound} and {upper_bound}. Guess remaining: {guess_remaining}")
    if guess_remaining == 0:
        print(f"Sorry you are out of guesses. Correct number is {correct_number}")
        break
39328851620 | #####################################################
# Property by Your Engineering Solutions (Y.E.S.) #
# Engineers: Lorans Hirmez, Brandon Fong #
#####################################################
# How to test if a file/directory exists https://www.guru99.com/python-check-if-file-exists.html & https://stackabuse.com/creating-and-deleting-directories-with-python/
# How to create a file https://www.guru99.com/reading-and-writing-files-in-python.html
# Current Workflow:
# 1. Creates file
# 2. Calls a function to write into a file
# 3. Calles a function to close the file
# I.E. Init_File() -> Inject_Data() -> Close_File()
### LIBRARIES ###
from System import Client
from zipfile import ZipFile
from XML import xmlreader
import datetime
import os
import System
import shutil
# Read all directory paths and file-extension settings from the XML config
# (keys below are the element names looked up by xmlreader.string()).
FilesXML = xmlreader();
FTPDir = FilesXML.string('DirectoryForOutboundFTPFiles');
LogForMaxPowerDir = FilesXML.string('DirectoryForMaxPowerLogFiles');
FTPArchiveDir = FilesXML.string('ArchiveForOutboundFTPFiles');
LOGArchiveDir = FilesXML.string('ArchiveForMaxPowerLogFiles');
FTPFileType = FilesXML.string('FileTypeForFTP');
LOGFileType = FilesXML.string('FileTypeForLogs');
ZipExtension = FilesXML.string('FileTypeForZippedFolder');
ZippedFTP = FilesXML.string('ZippedFTP');
ZippedLog = FilesXML.string('ZippedLog');
# Makes directory if it does not exist
# Makes directory if it does not exist
def MakeDir(makepath):
    """Create the directory *makepath* if it does not already exist.

    Failures are reported on stdout and never raised -- callers in this
    module rely on best-effort behaviour.

    Improvements over the original: os.makedirs also creates missing parent
    directories (os.mkdir raised OSError for nested paths), exist_ok=True
    removes the check-then-create race, and the "already exits" typo in the
    message is fixed.
    """
    if os.path.isdir(makepath):
        print("\nDirectory %s already exists.\n" % makepath)
        return
    try:
        os.makedirs(makepath, exist_ok=True)
    except OSError:
        print("\nCreation of the directory %s failed" % makepath)
    else:
        print("\nSuccessfully created the directory %s " % makepath)
# .csv files
class File_Handler:
    """Creates the outbound FTP .csv data file and writes power samples to it.

    Methods are written without ``self``/``cls`` and are invoked directly on
    the class (e.g. ``File_Handler.Init_File()``); the open file handle is
    shared through the module-level ``System.File`` variable.
    """
    # Creates .csv file
    def Init_File():
        # File name pattern: /maxpower_MMDDYYYY_HHMMSS.<FTPFileType>
        Date_and_Time = datetime.datetime.now();
        MakeDir(FTPDir);
        global filename;
        global fullpath;
        filename = "/maxpower_" + Date_and_Time.strftime("%m%d%Y_%H%M%S") + FTPFileType;
        fullpath = FTPDir + filename;
        try:
            # Puts file pointer to global var in System.py
            System.File = open(fullpath,"w+");
        except OSError:
            print("Creation of file %s failed." % fullpath);
            return 1;  # non-zero signals failure to the caller
        else:
            print("Successfully created file: %s" % fullpath);
            return 0;
    # Writes into file
    def Inject_Data(wind_data, solar_data):
        # Record format: client id, timestamp, wind reading, solar reading.
        Date_and_Time = datetime.datetime.now();
        try:
            System.File.write("{}, {}, {}, {}\n" .format(Client.ID,
                Date_and_Time.strftime("%Y-%m-%d %H:%M:%S"), wind_data, solar_data));
        except OSError:
            print("\nWriting of file failed\n");
            return 1;
        except AttributeError as ex:
            # NOTE(review): reached when System.File was never opened (still 0);
            # this path returns None, not 0/1 -- confirm callers tolerate that.
            print("in file");
            print(ex);
        else:
            print("\nWriting successful\n");
            return 0;
    def Close_File():
        # Close the shared handle and reset the sentinel.
        System.File.close();
        System.File = 0; # clear variable
# .log files
class Log_Handler:
    """Creates and writes the MaxPower .log file.

    Mirrors File_Handler: methods take no ``self`` and are called directly on
    the class; the open log handle is shared through ``System.Log``.
    """
    # Creates .log file
    def Init_File():
        # File name pattern: MaxPowerLog_MMDDYYYY_HHMMSS.<LOGFileType>
        Date_and_Time = datetime.datetime.now();
        MakeDir(LogForMaxPowerDir);
        Logfilename = LogForMaxPowerDir + "/MaxPowerLog_" + Date_and_Time.strftime("%m%d%Y_%H%M%S") + LOGFileType;
        try:
            # Puts file pointer to global var in System.py
            System.Log = open(Logfilename,"w+");
        except OSError:
            print("\nCreation of file %s failed." % Logfilename);
            return 1;  # non-zero signals failure to the caller
        else:
            print("\nSuccessfully created file: %s" % Logfilename);
            return 0;
    def Write_Log(string):
        # Append an already-formatted string to the open log.
        try:
            System.Log.write(string);
        except OSError:
            print("\nWriting log failed\n");
            return 1;
        except AttributeError as ex:
            # NOTE(review): reached when System.Log was never opened (still 0);
            # this path returns None, not 0/1 -- confirm callers tolerate that.
            print("in log");
            print(ex);
        else:
            return 0;
    def Close_File():
        # Close the shared handle and reset the sentinel.
        System.Log.close();
        System.Log = 0; # clear variable
        print("\nMaintenance check in \\logs\\MaxPower. Delete files if space is needed");
# Archive files
class Archive_Handler:
    """Moves finished FTP/log files into archive folders and, once more than
    10 files accumulate in an archive, batches them into a timestamped zip
    (deleting the loose originals)."""
    def ArchiveFiles():
        # Make sure all archive and zip target directories exist.
        MakeDir(FTPArchiveDir);
        MakeDir(LOGArchiveDir);
        MakeDir(ZippedFTP);
        MakeDir(ZippedLog);
        ## MOVES ##
        # In FTP Folder: move every file with the FTP extension to its archive.
        FTPFiles = os.listdir(FTPDir);
        for f in FTPFiles:
            if f.endswith(FTPFileType):
                FilePath = FTPDir + "/" + f;
                shutil.move(FilePath, FTPArchiveDir);
                print("\nMoved %s to archive folder\n" % f);
        # In logs\MaxPower Folder: same for log files.
        LOGFiles = os.listdir(LogForMaxPowerDir);
        for f in LOGFiles:
            if f.endswith(LOGFileType):
                FilePath = LogForMaxPowerDir + "/" + f;
                shutil.move(FilePath, LOGArchiveDir);
                print("\nMoved %s to archive folder\n" % f);
        ## ZIPS ##
        # In FTP archive Folder: zip + delete once more than 10 files pile up.
        FTPArchivedFiles = os.listdir(FTPArchiveDir);
        if (FTPArchivedFiles.__len__()) > 10:
            print("\nBeginning to zip files in %s\n" % FTPArchiveDir)
            filename = ZippedFTP + "/Archive_" + datetime.datetime.now().strftime("%m%d%Y_%H%M%S") + ZipExtension;
            zipper = ZipFile(filename, 'w');
            for f in FTPArchivedFiles:
                if f.endswith(FTPFileType):
                    FilePath = FTPArchiveDir + "/" + f;
                    zipper.write(FilePath);
                    os.remove(FilePath);  # original is removed once zipped
            zipper.close();
            print("\nZipped files in %s\n" % FTPArchiveDir);
        # In logs\MaxPower archive Folder: identical batching for logs.
        LOGArchivedFiles = os.listdir(LOGArchiveDir);
        if (LOGArchivedFiles.__len__()) > 10:
            print("\nBeginning to zip files in %s\n" % LOGArchiveDir)
            filename = ZippedLog + "/Archive_" + datetime.datetime.now().strftime("%m%d%Y_%H%M%S") + ZipExtension;
            zipper = ZipFile(filename, 'w');
            for f in LOGArchivedFiles:
                if f.endswith(LOGFileType):
                    FilePath = LOGArchiveDir + "/" + f;
                    zipper.write(FilePath);
                    os.remove(FilePath);
            zipper.close();
            print("\nZipped files in %s\n" % LOGArchiveDir);
| Dual-Power-Generation/DualPowerGeneration | MaxPowerTracker/MaxPower/Files.py | Files.py | py | 6,391 | python | en | code | 0 | github-code | 90 |
# Competitive-programming snippet. Reads N rows of "value divisor" pairs,
# processes the rows from last to first, and accumulates the minimum total
# increment so that each (value + increments applied so far) is a multiple
# of its divisor. Prints that total.
a = int(input())        # number of rows
ar = []
for i in range(a):
    l = list(map(int,input().split(" ")))   # [value, divisor]
    ar.append(l)
ar.reverse()            # process bottom-up
count = 0               # running total of increments applied
for r in ar:
    x = r[0] + count    # value after all increments so far
    y = r[1]
    m = x % y
    if m != 0:
        count += y - m  # round x up to the next multiple of y
print(count)
10237087886 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
import codecs
import os
import random
import tempfile
import numpy as np
from prepare_dict import load_lexicon
from compare_lexicons import compare_lexicons
def create_tmp_file_name():
    """Create an empty temporary file under <module dir>/data/tmp and return its path.

    The file is created with ``delete=False`` and immediately closed so an
    external tool (phonetisaurus) can write to the path later; the caller is
    responsible for removing it.

    Fix: the original assumed data/tmp already existed and crashed with
    FileNotFoundError otherwise; the directory is now created on demand.
    """
    basedir = os.path.join(os.path.dirname(__file__), 'data', 'tmp')
    os.makedirs(basedir, exist_ok=True)
    # Context manager guarantees the handle is closed even if .name raised.
    with tempfile.NamedTemporaryFile(mode='w', dir=basedir, delete=False) as fp:
        filename = fp.name
    return filename
def split_words_and_transcriptions_for_cv(words_and_transcriptions, cv):
    """Split a pronunciation dictionary into *cv* cross-validation folds.

    Each fold is a ``(training_lines, testing_lines)`` pair of tuples, where
    every line has the form ``"word\\ttranscription\\n"``. Words are shuffled
    once; each fold takes a contiguous slice of the shuffle as its test set
    and everything else as training. Lines within a fold are emitted in
    sorted word order, one line per transcription.
    """
    assert len(words_and_transcriptions) > 0, 'List of texts is empty!'
    assert cv > 0, 'Number of folds for crossvalidation must be a positive integer value!'
    assert len(words_and_transcriptions) >= cv, '{0} > {1}. Number of folds for crossvalidation is too large!'.format(
        cv, len(words_and_transcriptions)
    )

    def lines_for(word_subset):
        # Expand a word subset into "word\ttranscription\n" lines,
        # words sorted, transcriptions in their stored order.
        return tuple(
            u'{0}\t{1}\n'.format(w, t)
            for w in sorted(word_subset)
            for t in words_and_transcriptions[w]
        )

    fold_size = len(words_and_transcriptions) // cv
    shuffled = sorted(list(words_and_transcriptions.keys()))
    random.shuffle(shuffled)
    folds = []
    for fold_idx in range(cv):
        lo = fold_idx * fold_size
        hi = (fold_idx + 1) * fold_size
        folds.append((lines_for(shuffled[:lo] + shuffled[hi:]),
                      lines_for(shuffled[lo:hi])))
    return folds
def main():
    """Command-line entry point: read a word list, run phonetisaurus-apply on
    it, and write the resulting pronunciation lexicon.

    Arguments: -s/--src word list, -d/--dst output lexicon, -m/--model model
    directory (defaults to ./model), -p/--pmass probability-mass constraint
    in (0, 1], --seed random seed.

    Fix: removed the stray ``del words_`` statement after the word-list
    loading loop -- ``words_`` was never defined, so it raised NameError at
    runtime on every invocation.
    """
    parser = ArgumentParser()
    parser.add_argument('-s', '--src', dest='word_list', required=True, type=str,
                        help='Source file with words without their phonetical transcriptions.')
    parser.add_argument('-d', '--dst', dest='destination_lexicon', type=str, required=True,
                        help='Destination file into which the creating phonetic transcriptions shall be written.')
    parser.add_argument('-m', '--model', dest='model_dir', type=str, default=None,
                        required=False, help='A directory with trained model.')
    parser.add_argument('-p', '--pmass', dest='pmass', type=float, required=False, default=0.85,
                        help='% of total probability mass constraint for transcriptions generating.')
    parser.add_argument('--seed', dest='seed', type=int, required=False, default=0, help='Random seed.')
    args = parser.parse_args()

    # Validate all paths and parameters before doing any work.
    src_wordlist_name = os.path.normpath(args.word_list)
    assert os.path.isfile(src_wordlist_name), u'File "{0}" does not exist!'.format(src_wordlist_name)
    dst_vocabulary_name = os.path.normpath(args.destination_lexicon)
    dst_vocabulary_dir = os.path.dirname(dst_vocabulary_name)
    assert os.path.isdir(dst_vocabulary_dir), u'Directory "{0}" does not exist!'.format(dst_vocabulary_dir)
    pmass = args.pmass
    assert (pmass > 0.0) and (pmass <= 1.0), u'% of total probability mass constraint is wrong!'
    if args.model_dir is None:
        model_dir = os.path.join(os.path.dirname(__file__), 'model')
    else:
        model_dir = os.path.normpath(args.model_dir)
    assert os.path.isdir(model_dir), 'A directory "{0}" does not exist!'.format(model_dir)
    random.seed(args.seed)

    # Load the word list: one lowercased word per non-empty line, no duplicates.
    words_and_transcriptions = dict()
    with codecs.open(src_wordlist_name, mode='r', encoding='utf-8') as fp:
        curline = fp.readline()
        while len(curline) > 0:
            prepline = curline.strip().lower()
            if len(prepline) > 0:
                assert prepline not in words_and_transcriptions, u'{0} is duplicated!'.format(prepline)
                words_and_transcriptions[prepline] = []
            curline = fp.readline()
    # Run the external G2P tool, capturing its output into a temporary file
    # that is removed even if loading the result fails.
    tmp_file_for_result = create_tmp_file_name()
    try:
        print(u'Final recognition of transcriptions for words is started...')
        cmd = u'phonetisaurus-apply --model "{0}" --word_list "{1}" -p {2} -a > "{3}"'.format(
            os.path.join(model_dir, 'russian_g2p.fst'), src_wordlist_name, pmass, tmp_file_for_result
        )
        os.system(cmd)
        print(u'')
        print(u'Final recognition of transcriptions for words is finished...')
        predicted_phonetic_dictionary = load_lexicon(tmp_file_for_result)
    finally:
        if os.path.isfile(tmp_file_for_result):
            os.remove(tmp_file_for_result)
    # Merge predictions into the word list, keeping transcription order and
    # avoiding duplicates per word.
    for cur_word in predicted_phonetic_dictionary:
        if cur_word in words_and_transcriptions:
            for cur_transcription in predicted_phonetic_dictionary[cur_word]:
                if cur_transcription not in words_and_transcriptions[cur_word]:
                    words_and_transcriptions[cur_word].append(cur_transcription)
        else:
            words_and_transcriptions[cur_word] = predicted_phonetic_dictionary[cur_word]
    # Write the lexicon: first variant as "word", later ones as "word(2)", etc.
    with codecs.open(dst_vocabulary_name, mode='w', encoding='utf-8', errors='ignore') as fp:
        for cur_word in sorted(list(words_and_transcriptions.keys())):
            fp.write(u'{0} {1}\n'.format(cur_word, words_and_transcriptions[cur_word][0]))
            for ind in range(1, len(words_and_transcriptions[cur_word])):
                fp.write(u'{0}({1}) {2}\n'.format(cur_word, ind + 1, words_and_transcriptions[cur_word][ind]))

if __name__ == '__main__':
    main()
| nsu-ai-team/russian_g2p_neuro | apply.py | apply.py | py | 5,569 | python | en | code | 19 | github-code | 90 |
30759856537 | """
Implements all functionality of spline initialization using non-uniform rational basis splines (NURBS),
including Particle Swarm Optimization, interpolation functions and calculation of RMS-VIF from a given IC.
Pseudo-code is given in the Paper.
"""
from geomdl import NURBS
from geomdl import utilities
import numpy as np
import pyswarms as ps
import pickle
import scipy
import datetime
import Solver
import PDE_FIND_lib as Find
class sim_options:
    """
    Container bundling every discretization parameter of a simulation so it
    can be passed around as a single object; see the main file for the
    meaning of each parameter.
    """

    def __init__(self, x_min, x_max, x_nodes, t_steps, cfl, k, dx, x_values, D, P, combinations, a, equation, acc_space, acc_time, pairwise, hotime):
        # Domain bounds and resolution.
        self.x_min, self.x_max = x_min, x_max
        self.x_nodes, self.t_steps = x_nodes, t_steps
        self.cfl = cfl
        self.k = k  # not used currently; only here for legacy reasons
        self.dx = dx
        self.x_values = x_values
        # PDE-FIND library configuration.
        self.D, self.P = D, P
        self.combinations = combinations
        self.a = a
        self.equation = equation
        # Finite-difference accuracy orders.
        self.acc_space, self.acc_time = acc_space, acc_time
        self.pairwise = pairwise  # not used currently; only here for legacy reasons
        self.hotime = hotime
class curve_options:
    """Bundles all parameters describing the NURBS (control points, degree, sampling)."""

    def __init__(self, n_ctr, curve_degree, evaluation_size, x_control, delta_x_control):
        # Store each constructor argument verbatim under the same name.
        for attr_name, attr_value in (('n_ctr', n_ctr),
                                      ('curve_degree', curve_degree),
                                      ('evaluation_size', evaluation_size),
                                      ('x_control', x_control),
                                      ('delta_x_control', delta_x_control)):
            setattr(self, attr_name, attr_value)
def create_initialization(x_min, x_max, x_nodes, t_steps, cfl, equation, a, D, P, acc_space, acc_time, n_ctr=10,
                          curve_degree=8, evaluation_size=1000, c1=0.5, c2=0.3, w=0.9, bound_amplitude=1.,
                          particles=50, iterations=100, combinations=None, hotime=0):
    """
    Main function to optimize a NURBS with respect to RMS-VIF to be used as IC in a simulation.
    Needs all simulation parameters, because the optimization
    needs to build the PDE-FIND system for each function value evaluation.

    :param x_min: Left bound of Omega
    :param x_max: Right bound of Omega
    :param x_nodes: Number of points in domain Omega
    :param t_steps: Number of time steps of simulation
    :param cfl: cfl Number of IC
    :param equation: String of equation that the Spline is to be optimized for
    :param a: Advection speed; Only used if 'equation' == 'Advection'
    :param D: Highest derivative to be included in library
    :param P: Highest polynomial order to multiply u with derivative basis functions
    :param acc_space: Accuracy order of finite difference stencils to be used in space
    :param acc_time: Accuracy order of finite difference stencils to be used in time
    :param n_ctr: Number of NURBS control points within Omega
    :param curve_degree: Degree of NURBS
    :param evaluation_size: Number of evaluation points of NURBS used for interpolation
    :param c1: Particle swarm parameter
    :param c2: Particle swarm parameter
    :param w: Particle swarm parameter
    :param bound_amplitude: Maximum allowed y-value of each control point
    :param particles: Number of particles for particle swarm optimization
    :param iterations: Number of iterations for particle swarm optimization
    :param combinations: List of cumulative orders to include in the library
    :param hotime: Highest order time derivative to be appended to the library
    :return: The optimized NURBS curve and the corresponding sim_options and curve_options
    """
    # initializations:
    dx = (x_max - x_min) / x_nodes
    x_values = np.arange(x_min, x_max, dx)  # simulation grid; x_max excluded (periodic domain)
    pairwise = 0  # not used currently; only here for legacy reasons
    k = 0  # not used currently; only here for legacy reasons
    # set positions of NURBS control points:
    # make sure last control point is at x_max --> otherwise curve not defined between last ctr and x_max
    delta_x_control = (x_max - x_min) / n_ctr
    # f(x_max) = f(x_min) due to periodicity --> ctr-point at x_max is defined by ctr-point at x_min
    x_control = np.arange(x_min, x_max, delta_x_control)
    # save NURBS and Simulation Parameters for consistency:
    sim_param = sim_options(x_min, x_max, x_nodes, t_steps, cfl, k, dx, x_values, D, P, combinations, a,
                            equation, acc_space, acc_time, pairwise, hotime)
    curve_param = curve_options(n_ctr, curve_degree, evaluation_size, x_control, delta_x_control)
    # set attributes for pyswarms
    max_bound = bound_amplitude * np.ones(n_ctr)  # maximum value that control points may exhibit
    min_bound = - max_bound  # minimum value that control points may exhibit
    bounds = (min_bound, max_bound)
    # Set-up pyswarms hyperparameters
    options = {'c1': c1, 'c2': c2, 'w': w}
    kwargs = {"sim_param": sim_param, "curve_param": curve_param}
    # Initialize instance of Particle Swarm Optimization
    optimizer = ps.single.GlobalBestPSO(n_particles=particles, dimensions=n_ctr, options=options, bounds=bounds)
    # Perform optimization; returns best cost and best control point y-values (x-values are set above)
    best_cost, best_pos = optimizer.optimize(whole_swarm, iters=iterations, verbose=3, print_step=5, **kwargs)
    # save cost history for plotting
    # NOTE(review): assumes a 'Saved_Data/' directory exists in the working directory -- confirm
    pickle.dump(optimizer.cost_history, open('Saved_Data/Cost_history_' + str(equation) +
                                             '{date:%Y_%m_%d%H_%M_%S}.bin'.format(date=datetime.datetime.now()), "wb"))
    # initialize curve to be used in interpolation later
    curve = initialize_NURBS(curve_param, sim_param, best_pos)  # initialize optimized curve
    return curve, curve_param, sim_param
def whole_swarm(x, sim_param, curve_param):
    """
    Wrapper evaluating the objective for the whole swarm using pyswarms.
    It is passed to the pyswarms optimizer 'optimizer.optimize' and calls the
    function to be optimized, 'objective' (RMS-VIF in our application), once
    per particle.

    :param x: The swarm for the optimization; numpy.ndarray of shape (n_particles, dimensions)
    :param sim_param: Instance of sim_param (needed to set up simulation)
    :param curve_param: Instance of curve_param (needed to build NURBS curve)
    :return: The computed loss for each particle; numpy.ndarray of shape (n_particles, )
    """
    losses = []
    for particle in x:  # iterate rows, i.e. one control-point vector per particle
        losses.append(objective(particle, sim_param, curve_param))
    return np.array(losses)
def objective(y_objective, sim_param, curve_param):
    """
    Calculates the objective function (RMS-VIF) given the control point y-values of a given particle.

    :param y_objective: control point y-values of the particle
    :param sim_param: Instance of sim_param
    :param curve_param: Instance of curve_param
    :return: RMS-VIF
    """
    # Build the candidate IC, run the data-generating simulation to obtain the
    # PDE-FIND system matrix, and score it by its RMS variance inflation factor.
    candidate_curve = initialize_NURBS(curve_param, sim_param, y_objective)
    system_matrix = calculate_R_from_curve(candidate_curve, sim_param)
    return calculate_rms_vif(system_matrix)
def initialize_NURBS(curve_param, sim_param, y_objective):
    """Builds a periodic NURBS curve from the given control point y-values."""
    curve = NURBS.Curve()
    curve.degree = curve_param.curve_degree
    # Number of ghost control points on each side; defined by the order of the
    # spline (width of the local support). It must also enforce symmetry for
    # the area left of x = 0, because that region is used in the cubic
    # interpolation later on.
    n_extra = curve.degree + 3
    dxc = curve_param.delta_x_control
    # Ghost points left of Omega mirror the right end of y_objective so that
    # the spline is periodic; they must be listed from leftmost to rightmost
    # to create a proper spline.
    ctr_points = [[sim_param.x_min - (n_extra - j) * dxc, y_objective[-(n_extra - j)]]
                  for j in range(n_extra)]
    # Control points within Omega.
    ctr_points += [[curve_param.x_control[j], y_objective[j]] for j in range(curve_param.n_ctr)]
    # Ghost points right of Omega repeat the left end, enforcing f(x_max) = f(x_min).
    ctr_points += [[sim_param.x_max + j * dxc, y_objective[j]] for j in range(n_extra)]
    # Finalize the curve.
    curve.ctrlpts = ctr_points
    curve.knotvector = utilities.generate_knot_vector(curve.degree, len(curve.ctrlpts))  # Auto-generate knot vector
    curve.sample_size = curve_param.evaluation_size  # Set number of evaluation points used in interpolation later
    return curve
def calculate_R_from_curve(curve, sim_param):
    """Returns the linear system from PDE-FIND. It takes the curve and the simulation parameters as inputs, employs the
    curve as IC to the data generating simulation and builds the system matrix from the obtained data."""
    init = calculate_y_values(curve, sim_param.x_values)  # interpolate spline onto the simulation grid to be used as IC
    # Run the simulation
    domain = Solver.solve(sim_param.x_nodes, sim_param.t_steps, init, cfl=sim_param.cfl,
                          x_min=sim_param.x_min, x_max=sim_param.x_max, equation=sim_param.equation, a=sim_param.a)
    # builds the linear system using PDE-FIND; the slice strips the solver's
    # ghost cells so only physical grid points enter the library
    u_t_new, R_new, rhs_description = Find.build_linear_system_FD(domain.grid[domain.i_ghost_l + 1:domain.i_ghost_r, :],
                                                                  domain.dt, domain.dx, D=sim_param.D, P=sim_param.P,
                                                                  order_combinations=sim_param.combinations,
                                                                  acc_space=sim_param.acc_space,
                                                                  high_order_time_derivs=sim_param.hotime,
                                                                  acc_time=sim_param.acc_time)
    # Only the system matrix is needed for the VIF score; u_t and the
    # description strings are discarded here.
    return R_new
def calculate_y_values(curve, x_values):
    """
    Interpolates y_values of the spline onto the x-values of the grid using cubic interpolation.

    :param curve: NURBS curve as data structure
    :param x_values: Grid values to interpolate the spline on
    :return: interpolated y-values corresponding to the grid x-values
    """
    # Fix: `import scipy` alone does not guarantee that the `scipy.interpolate`
    # submodule is loaded; import the function explicitly so the attribute
    # lookup cannot fail with AttributeError.
    from scipy.interpolate import interp1d
    curve.evaluate()  # calculates the NURBS value for each evaluation point (number defined by 'curve.sample_size')
    nurbs_pointlist = curve.evalpts  # list containing all (x, y) points from evaluation
    # build interpolation table from the NURBS evaluation points
    points = np.asarray(nurbs_pointlist, dtype=float)
    x_p = points[:, 0]
    f_p = points[:, 1]
    # cubic interpolation:
    # more efficient than linear interpolation, because the number of evaluation points for linear interpolation is
    # much larger to obtain the same interpolation accuracy. This accuracy is necessary for high order differentiability
    # at very high accuracy requirements
    f = interp1d(x_p, f_p, kind='cubic', copy=False, assume_sorted=True)
    return f(x_values)
def calculate_rms_vif(X):
    """
    Calculate the root-mean-square variance inflation factor (RMS-VIF) for a matrix.
    The first column of X is excluded from the scan (it is kept as a regressor only).

    :param X: System matrix
    :return: RMS-VIF
    """
    # Fix: `import scipy` alone does not guarantee that `scipy.stats` is
    # loaded; import the function explicitly so the lookup cannot fail.
    from scipy.stats import linregress
    n_features = X.shape[1]
    # R^2: Coefficient of determination for each parameter as being described by the other parameters in X
    r_2 = np.zeros(n_features - 1)
    for i in range(1, n_features):
        test_column = X[:, i]  # current feature for which R^2 is to be calculated
        X_rest = X[:, np.arange(n_features) != i]  # all columns except test column
        fit, _, _, _ = np.linalg.lstsq(X_rest, test_column, rcond=None)  # fit linear regression
        y_fit = np.dot(X_rest, fit)  # approximation to the test column by the regression of the other vectors
        _, _, r, _, _ = linregress(test_column, y_fit)  # correlation between column and its best approximation
        r_2[i - 1] = r ** 2  # save the coefficient of determination for the test column
    # NOTE: a perfectly collinear column (r^2 == 1) yields an infinite VIF here.
    vif = 1. / (1 - r_2)  # VIF formula
    return np.sqrt(np.mean(np.multiply(vif, vif)))
| tumaer/SITE | SITE/Initialization_Creator.py | Initialization_Creator.py | py | 12,643 | python | en | code | 7 | github-code | 90 |
# Ultralytics YOLO 🚀, AGPL-3.0 license
import ast
import contextlib
import json
import platform
import zipfile
from collections import OrderedDict, namedtuple
from pathlib import Path
import cv2
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
from ultralytics.utils import ARM64, LINUX, LOGGER, ROOT, yaml_load
from ultralytics.utils.checks import check_requirements, check_suffix, check_version, check_yaml
from ultralytics.utils.downloads import attempt_download_asset, is_url
def check_class_names(names):
    """
    Check class names.

    Map imagenet class codes to human-readable names if required. Convert lists to dicts.
    """
    if isinstance(names, list):  # a plain list means implicit indices 0..n-1
        names = {index: name for index, name in enumerate(names)}
    if not isinstance(names, dict):
        return names
    # Normalize: string keys to int ('0' -> 0) and non-string values to str (True -> 'True')
    names = {int(key): str(value) for key, value in names.items()}
    n = len(names)
    if max(names.keys()) >= n:  # indices must form the contiguous range 0..n-1
        raise KeyError(f'{n}-class dataset requires class indices 0-{n - 1}, but you have invalid class indices '
                       f'{min(names.keys())}-{max(names.keys())} defined in your dataset YAML.')
    if isinstance(names[0], str) and names[0].startswith('n0'):  # imagenet class codes, i.e. 'n01440764'
        lookup = yaml_load(ROOT / 'cfg/datasets/ImageNet.yaml')['map']  # human-readable names
        names = {key: lookup[value] for key, value in names.items()}
    return names
class AutoBackend(nn.Module):
    """
    Handles dynamic backend selection for running inference using Ultralytics YOLO models.

    The AutoBackend class is designed to provide an abstraction layer for various inference engines. It supports a wide
    range of formats, each with specific naming conventions as outlined below:

        Supported Formats and Naming Conventions:
            | Format                | File Suffix      |
            |-----------------------|------------------|
            | PyTorch               | *.pt             |
            | TorchScript           | *.torchscript    |
            | ONNX Runtime          | *.onnx           |
            | ONNX OpenCV DNN       | *.onnx (dnn=True)|
            | OpenVINO              | *openvino_model/ |
            | CoreML                | *.mlpackage      |
            | TensorRT              | *.engine         |
            | TensorFlow SavedModel | *_saved_model    |
            | TensorFlow GraphDef   | *.pb             |
            | TensorFlow Lite       | *.tflite         |
            | TensorFlow Edge TPU   | *_edgetpu.tflite |
            | PaddlePaddle          | *_paddle_model   |
            | ncnn                  | *_ncnn_model     |

    This class offers dynamic backend switching capabilities based on the input model format, making it easier to deploy
    models across various platforms.
    """

    @torch.no_grad()
    def __init__(self,
                 weights='yolov8n.pt',
                 device=torch.device('cpu'),
                 dnn=False,
                 data=None,
                 fp16=False,
                 fuse=True,
                 verbose=True):
        """
        Initialize the AutoBackend for inference.

        Args:
            weights (str): Path to the model weights file. Defaults to 'yolov8n.pt'.
            device (torch.device): Device to run the model on. Defaults to CPU.
            dnn (bool): Use OpenCV DNN module for ONNX inference. Defaults to False.
            data (str | Path | optional): Path to the additional data.yaml file containing class names. Optional.
            fp16 (bool): Enable half-precision inference. Supported only on specific backends. Defaults to False.
            fuse (bool): Fuse Conv2D + BatchNorm layers for optimization. Defaults to True.
            verbose (bool): Enable verbose logging. Defaults to True.
        """
        super().__init__()
        w = str(weights[0] if isinstance(weights, list) else weights)
        nn_module = isinstance(weights, torch.nn.Module)
        pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, ncnn, triton = \
            self._model_type(w)
        fp16 &= pt or jit or onnx or xml or engine or nn_module or triton  # FP16
        nhwc = coreml or saved_model or pb or tflite or edgetpu  # BHWC formats (vs torch BCWH)
        stride = 32  # default stride
        model, metadata = None, None

        # Set device
        cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA
        if cuda and not any([nn_module, pt, jit, engine]):  # GPU dataloader formats
            device = torch.device('cpu')
            cuda = False

        # Download if not local
        if not (pt or triton or nn_module):
            w = attempt_download_asset(w)

        # Load model
        if nn_module:  # in-memory PyTorch model
            model = weights.to(device)
            model = model.fuse(verbose=verbose) if fuse else model
            if hasattr(model, 'kpt_shape'):
                kpt_shape = model.kpt_shape  # pose-only
            stride = max(int(model.stride.max()), 32)  # model stride
            names = model.module.names if hasattr(model, 'module') else model.names  # get class names
            model.half() if fp16 else model.float()
            self.model = model  # explicitly assign for to(), cpu(), cuda(), half()
            pt = True
        elif pt:  # PyTorch
            from ultralytics.nn.tasks import attempt_load_weights
            model = attempt_load_weights(weights if isinstance(weights, list) else w,
                                         device=device,
                                         inplace=True,
                                         fuse=fuse)
            if hasattr(model, 'kpt_shape'):
                kpt_shape = model.kpt_shape  # pose-only
            stride = max(int(model.stride.max()), 32)  # model stride
            names = model.module.names if hasattr(model, 'module') else model.names  # get class names
            model.half() if fp16 else model.float()
            self.model = model  # explicitly assign for to(), cpu(), cuda(), half()
        elif jit:  # TorchScript
            LOGGER.info(f'Loading {w} for TorchScript inference...')
            extra_files = {'config.txt': ''}  # model metadata
            model = torch.jit.load(w, _extra_files=extra_files, map_location=device)
            model.half() if fp16 else model.float()
            if extra_files['config.txt']:  # load metadata dict
                metadata = json.loads(extra_files['config.txt'], object_hook=lambda x: dict(x.items()))
        elif dnn:  # ONNX OpenCV DNN
            LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
            check_requirements('opencv-python>=4.5.4')
            net = cv2.dnn.readNetFromONNX(w)
        elif onnx:  # ONNX Runtime
            LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
            check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
            import onnxruntime
            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
            session = onnxruntime.InferenceSession(w, providers=providers)
            output_names = [x.name for x in session.get_outputs()]
            metadata = session.get_modelmeta().custom_metadata_map  # metadata
        elif xml:  # OpenVINO
            LOGGER.info(f'Loading {w} for OpenVINO inference...')
            check_requirements('openvino>=2023.0')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
            from openvino.runtime import Core, Layout, get_batch  # noqa
            core = Core()
            w = Path(w)
            if not w.is_file():  # if not *.xml
                w = next(w.glob('*.xml'))  # get *.xml file from *_openvino_model dir
            ov_model = core.read_model(model=str(w), weights=w.with_suffix('.bin'))
            if ov_model.get_parameters()[0].get_layout().empty:
                ov_model.get_parameters()[0].set_layout(Layout('NCHW'))
            batch_dim = get_batch(ov_model)
            if batch_dim.is_static:
                batch_size = batch_dim.get_length()
            ov_compiled_model = core.compile_model(ov_model, device_name='AUTO')  # AUTO selects best available device
            metadata = w.parent / 'metadata.yaml'
        elif engine:  # TensorRT
            LOGGER.info(f'Loading {w} for TensorRT inference...')
            try:
                import tensorrt as trt  # noqa https://developer.nvidia.com/nvidia-tensorrt-download
            except ImportError:
                if LINUX:
                    check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com')
                import tensorrt as trt  # noqa
            check_version(trt.__version__, '7.0.0', hard=True)  # require tensorrt>=7.0.0
            if device.type == 'cpu':
                device = torch.device('cuda:0')  # TensorRT requires a CUDA device
            Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
            logger = trt.Logger(trt.Logger.INFO)
            # Read file
            with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
                meta_len = int.from_bytes(f.read(4), byteorder='little')  # read metadata length
                metadata = json.loads(f.read(meta_len).decode('utf-8'))  # read metadata
                model = runtime.deserialize_cuda_engine(f.read())  # read engine
            context = model.create_execution_context()
            bindings = OrderedDict()
            output_names = []
            fp16 = False  # default updated below
            dynamic = False
            for i in range(model.num_bindings):
                name = model.get_binding_name(i)
                dtype = trt.nptype(model.get_binding_dtype(i))
                if model.binding_is_input(i):
                    if -1 in tuple(model.get_binding_shape(i)):  # dynamic
                        dynamic = True
                        context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))
                    if dtype == np.float16:
                        fp16 = True
                else:  # output
                    output_names.append(name)
                shape = tuple(context.get_binding_shape(i))
                im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
                bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
            binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
            batch_size = bindings['images'].shape[0]  # if dynamic, this is instead max batch size
        elif coreml:  # CoreML
            LOGGER.info(f'Loading {w} for CoreML inference...')
            import coremltools as ct
            model = ct.models.MLModel(w)
            metadata = dict(model.user_defined_metadata)
        elif saved_model:  # TF SavedModel
            LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')
            import tensorflow as tf
            keras = False  # assume TF1 saved_model
            model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)
            metadata = Path(w) / 'metadata.yaml'
        elif pb:  # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
            LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
            import tensorflow as tf

            from ultralytics.engine.exporter import gd_outputs

            def wrap_frozen_graph(gd, inputs, outputs):
                """Wrap frozen graphs for deployment."""
                x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), [])  # wrapped
                ge = x.graph.as_graph_element
                return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))

            gd = tf.Graph().as_graph_def()  # TF GraphDef
            with open(w, 'rb') as f:
                gd.ParseFromString(f.read())
            frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd))
        elif tflite or edgetpu:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
            try:  # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
                from tflite_runtime.interpreter import Interpreter, load_delegate
            except ImportError:
                import tensorflow as tf
                Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate
            if edgetpu:  # TF Edge TPU https://coral.ai/software/#edgetpu-runtime
                LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
                delegate = {
                    'Linux': 'libedgetpu.so.1',
                    'Darwin': 'libedgetpu.1.dylib',
                    'Windows': 'edgetpu.dll'}[platform.system()]
                interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
            else:  # TFLite
                LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
                interpreter = Interpreter(model_path=w)  # load TFLite model
            interpreter.allocate_tensors()  # allocate
            input_details = interpreter.get_input_details()  # inputs
            output_details = interpreter.get_output_details()  # outputs
            # Load metadata
            with contextlib.suppress(zipfile.BadZipFile):
                # Fix: bind the archive to its own name. The original `as model`
                # clobbered the `model` variable with a (closed) ZipFile object,
                # which then became self.model via the locals() update below.
                with zipfile.ZipFile(w, 'r') as zf:
                    meta_file = zf.namelist()[0]
                    metadata = ast.literal_eval(zf.read(meta_file).decode('utf-8'))
        elif tfjs:  # TF.js
            raise NotImplementedError('YOLOv8 TF.js inference is not currently supported.')
        elif paddle:  # PaddlePaddle
            LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
            check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
            import paddle.inference as pdi  # noqa
            w = Path(w)
            if not w.is_file():  # if not *.pdmodel
                w = next(w.rglob('*.pdmodel'))  # get *.pdmodel file from *_paddle_model dir
            config = pdi.Config(str(w), str(w.with_suffix('.pdiparams')))
            if cuda:
                config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)
            predictor = pdi.create_predictor(config)
            input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
            output_names = predictor.get_output_names()
            metadata = w.parents[1] / 'metadata.yaml'
        elif ncnn:  # ncnn
            LOGGER.info(f'Loading {w} for ncnn inference...')
            check_requirements('git+https://github.com/Tencent/ncnn.git' if ARM64 else 'ncnn')  # requires ncnn
            import ncnn as pyncnn
            net = pyncnn.Net()
            net.opt.use_vulkan_compute = cuda
            w = Path(w)
            if not w.is_file():  # if not *.param
                w = next(w.glob('*.param'))  # get *.param file from *_ncnn_model dir
            net.load_param(str(w))
            net.load_model(str(w.with_suffix('.bin')))
            metadata = w.parent / 'metadata.yaml'
        elif triton:  # NVIDIA Triton Inference Server
            check_requirements('tritonclient[all]')
            from ultralytics.utils.triton import TritonRemoteModel
            model = TritonRemoteModel(w)
        else:
            from ultralytics.engine.exporter import export_formats
            raise TypeError(f"model='{w}' is not a supported model format. "
                            'See https://docs.ultralytics.com/modes/predict for help.'
                            f'\n\n{export_formats()}')

        # Load external metadata YAML
        if isinstance(metadata, (str, Path)) and Path(metadata).exists():
            metadata = yaml_load(metadata)
        if metadata:
            for k, v in metadata.items():
                if k in ('stride', 'batch'):
                    metadata[k] = int(v)
                elif k in ('imgsz', 'names', 'kpt_shape') and isinstance(v, str):
                    # Fix: use ast.literal_eval instead of eval() -- metadata strings
                    # come from model files and must not be executed as code.
                    metadata[k] = ast.literal_eval(v)
            stride = metadata['stride']
            task = metadata['task']
            batch = metadata['batch']
            imgsz = metadata['imgsz']
            names = metadata['names']
            kpt_shape = metadata.get('kpt_shape')
        elif not (pt or triton or nn_module):
            LOGGER.warning(f"WARNING ⚠️ Metadata not found for 'model={weights}'")

        # Check names
        if 'names' not in locals():  # names missing
            names = self._apply_default_class_names(data)
        names = check_class_names(names)

        # Disable gradients
        if pt:
            for p in model.parameters():
                p.requires_grad = False

        self.__dict__.update(locals())  # assign all variables to self

    def forward(self, im, augment=False, visualize=False):
        """
        Runs inference on the YOLOv8 MultiBackend model.

        Args:
            im (torch.Tensor): The image tensor to perform inference on.
            augment (bool): whether to perform data augmentation during inference, defaults to False
            visualize (bool): whether to visualize the output predictions, defaults to False

        Returns:
            (tuple): Tuple containing the raw output tensor, and processed output for visualization (if visualize=True)
        """
        b, ch, h, w = im.shape  # batch, channel, height, width
        if self.fp16 and im.dtype != torch.float16:
            im = im.half()  # to FP16
        if self.nhwc:
            im = im.permute(0, 2, 3, 1)  # torch BCHW to numpy BHWC shape(1,320,192,3)

        if self.pt or self.nn_module:  # PyTorch
            y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)
        elif self.jit:  # TorchScript
            y = self.model(im)
        elif self.dnn:  # ONNX OpenCV DNN
            im = im.cpu().numpy()  # torch to numpy
            self.net.setInput(im)
            y = self.net.forward()
        elif self.onnx:  # ONNX Runtime
            im = im.cpu().numpy()  # torch to numpy
            y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
        elif self.xml:  # OpenVINO
            im = im.cpu().numpy()  # FP32
            y = list(self.ov_compiled_model(im).values())
        elif self.engine:  # TensorRT
            if self.dynamic and im.shape != self.bindings['images'].shape:
                i = self.model.get_binding_index('images')
                self.context.set_binding_shape(i, im.shape)  # reshape if dynamic
                self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)
                for name in self.output_names:
                    i = self.model.get_binding_index(name)
                    self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))
            s = self.bindings['images'].shape
            assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
            self.binding_addrs['images'] = int(im.data_ptr())
            self.context.execute_v2(list(self.binding_addrs.values()))
            y = [self.bindings[x].data for x in sorted(self.output_names)]
        elif self.coreml:  # CoreML
            im = im[0].cpu().numpy()
            im_pil = Image.fromarray((im * 255).astype('uint8'))
            # im = im.resize((192, 320), Image.BILINEAR)
            y = self.model.predict({'image': im_pil})  # coordinates are xywh normalized
            if 'confidence' in y:
                raise TypeError('Ultralytics only supports inference of non-pipelined CoreML models exported with '
                                f"'nms=False', but 'model={w}' has an NMS pipeline created by an 'nms=True' export.")
                # TODO: CoreML NMS inference handling
                # from ultralytics.utils.ops import xywh2xyxy
                # box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]])  # xyxy pixels
                # conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float32)
                # y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
            elif len(y) == 1:  # classification model
                y = list(y.values())
            elif len(y) == 2:  # segmentation model
                y = list(reversed(y.values()))  # reversed for segmentation models (pred, proto)
        elif self.paddle:  # PaddlePaddle
            im = im.cpu().numpy().astype(np.float32)
            self.input_handle.copy_from_cpu(im)
            self.predictor.run()
            y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
        elif self.ncnn:  # ncnn
            mat_in = self.pyncnn.Mat(im[0].cpu().numpy())
            ex = self.net.create_extractor()
            input_names, output_names = self.net.input_names(), self.net.output_names()
            ex.input(input_names[0], mat_in)
            y = []
            for output_name in output_names:
                mat_out = self.pyncnn.Mat()
                ex.extract(output_name, mat_out)
                y.append(np.array(mat_out)[None])
        elif self.triton:  # NVIDIA Triton Inference Server
            im = im.cpu().numpy()  # torch to numpy
            y = self.model(im)
        else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
            im = im.cpu().numpy()
            if self.saved_model:  # SavedModel
                y = self.model(im, training=False) if self.keras else self.model(im)
                if not isinstance(y, list):
                    y = [y]
            elif self.pb:  # GraphDef
                y = self.frozen_func(x=self.tf.constant(im))
                if len(y) == 2 and len(self.names) == 999:  # segments and names not defined
                    ip, ib = (0, 1) if len(y[0].shape) == 4 else (1, 0)  # index of protos, boxes
                    nc = y[ib].shape[1] - y[ip].shape[3] - 4  # y = (1, 160, 160, 32), (1, 116, 8400)
                    self.names = {i: f'class{i}' for i in range(nc)}
            else:  # Lite or Edge TPU
                details = self.input_details[0]
                integer = details['dtype'] in (np.int8, np.int16)  # is TFLite quantized int8 or int16 model
                if integer:
                    scale, zero_point = details['quantization']
                    im = (im / scale + zero_point).astype(details['dtype'])  # de-scale
                self.interpreter.set_tensor(details['index'], im)
                self.interpreter.invoke()
                y = []
                for output in self.output_details:
                    x = self.interpreter.get_tensor(output['index'])
                    if integer:
                        scale, zero_point = output['quantization']
                        x = (x.astype(np.float32) - zero_point) * scale  # re-scale
                    if x.ndim > 2:  # if task is not classification
                        # Denormalize xywh by image size. See https://github.com/ultralytics/ultralytics/pull/1695
                        # xywh are normalized in TFLite/EdgeTPU to mitigate quantization error of integer models
                        x[:, [0, 2]] *= w
                        x[:, [1, 3]] *= h
                    y.append(x)
            # TF segment fixes: export is reversed vs ONNX export and protos are transposed
            if len(y) == 2:  # segment with (det, proto) output order reversed
                if len(y[1].shape) != 4:
                    y = list(reversed(y))  # should be y = (1, 116, 8400), (1, 160, 160, 32)
                y[1] = np.transpose(y[1], (0, 3, 1, 2))  # should be y = (1, 116, 8400), (1, 32, 160, 160)
            y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]

        # for x in y:
        #     print(type(x), len(x)) if isinstance(x, (list, tuple)) else print(type(x), x.shape)  # debug shapes
        if isinstance(y, (list, tuple)):
            return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
        else:
            return self.from_numpy(y)

    def from_numpy(self, x):
        """
        Convert a numpy array to a tensor.

        Args:
            x (np.ndarray): The array to be converted.

        Returns:
            (torch.Tensor): The converted tensor
        """
        return torch.tensor(x).to(self.device) if isinstance(x, np.ndarray) else x

    def warmup(self, imgsz=(1, 3, 640, 640)):
        """
        Warm up the model by running one forward pass with a dummy input.

        Args:
            imgsz (tuple): The shape of the dummy input tensor in the format (batch_size, channels, height, width)

        Returns:
            (None): This method runs the forward pass and does not return any value
        """
        warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton, self.nn_module
        if any(warmup_types) and (self.device.type != 'cpu' or self.triton):
            im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device)  # input
            for _ in range(2 if self.jit else 1):
                self.forward(im)  # warmup

    @staticmethod
    def _apply_default_class_names(data):
        """Applies default class names to an input YAML file or returns numerical class names."""
        with contextlib.suppress(Exception):
            return yaml_load(check_yaml(data))['names']
        return {i: f'class{i}' for i in range(999)}  # return default if above errors

    @staticmethod
    def _model_type(p='path/to/model.pt'):
        """
        This function takes a path to a model file and returns the model type.

        Args:
            p: path to the model file. Defaults to path/to/model.pt
        """
        # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
        # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]
        from ultralytics.engine.exporter import export_formats
        sf = list(export_formats().Suffix)  # export suffixes
        if not is_url(p, check=False) and not isinstance(p, str):
            check_suffix(p, sf)  # checks
        name = Path(p).name
        types = [s in name for s in sf]
        types[5] |= name.endswith('.mlmodel')  # retain support for older Apple CoreML *.mlmodel formats
        types[8] &= not types[9]  # tflite &= not edgetpu
        if any(types):
            triton = False
        else:
            from urllib.parse import urlsplit
            url = urlsplit(p)
            # Fix: 'grfc' was a typo -- Triton serves over HTTP or gRPC ('grpc')
            triton = url.netloc and url.path and url.scheme in {'http', 'grpc'}
        return types + [triton]
| ultralytics/ultralytics | ultralytics/nn/autobackend.py | autobackend.py | py | 26,984 | python | en | code | 15,778 | github-code | 90 |
# Round-robin (time-slice) scheduling: read n processes as "name time" lines,
# run each for at most q time units per turn, and print each process's name
# together with its completion time. (Python 2 script: uses raw_input.)
from collections import deque

a = raw_input()
n, q = a.split(" ")
n = int(n)
q = int(q)
total = 0  # elapsed simulation time
queue = deque()  # deque: O(1) popleft vs. list.pop(0), which is O(n) per dequeue
for i in range(n):
    queue.append(raw_input())
while queue:
    b = queue.popleft()
    p, t = b.split(" ")
    sa = int(t) - q  # remaining time after this quantum
    if sa > 0:
        # Not finished: consume a full quantum and requeue with remaining time.
        queue.append(p + " " + str(sa))
        total += q
    else:
        # Finished within this quantum: report completion time.
        total += int(t)
        print (p + " " + str(total))
27041776278 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/5/17 13:36
# @Author : yh
# @File : doulaoban.py
# @Software: PyCharm
# @Desc :
"""
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/4/21 13:52
# @Author : yh
# @File : demo1.py
# @Software: PyCharm
# @Desc :
"""
https://api.doulaoban.com/v1/task/plas/task_list?is_open_comment=&goods_id=&query_user=&start_time=2021-04-22%2000:00&end_time=2021-04-22%2023:59&order=&by=&aweme_ids=&order_site_id=&site_id=&status=&page=30&paginate=15&time_type=creat_time&index_ids=4,6,12,13,15,30,36,41,47,67
"""
import requests, json, re, datetime,time
import schedule
# from base import mysql_help
from base.mysql_db import MySQLPipeline
from loguru import logger
from apscheduler.schedulers.blocking import BlockingScheduler
from datetime import timedelta,date
import math
# new_time=time.strftime("%H:%M", time.localtime())
# new_time = (datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
def doulaoban():
    """Scrape yesterday's Dou+ task list and per-task detail rows from api.doulaoban.com.

    Pages through the task-list endpoint for yesterday's date, prints each task's
    metrics, then fetches the per-time-slot detail for every task. The MySQL
    persistence calls are currently commented out.
    NOTE(review): authentication relies on a hard-coded session cookie, so requests
    will fail once that token expires.
    """
    mysql = MySQLPipeline()
    # new_time = (datetime.datetime.now()+datetime.timedelta(days=-1))
    yesterday_obj = date.today() + timedelta(days=-1)  # scrape window: yesterday
    dt = time.strftime("%Y-%m-%d")
    headers = {
        "cookie": "Hm_lvt_95f29f66fe1210be4b5158abc33863ee=1618982380,1619059458,1619061523,1620292354; access_token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJodHRwOi8vYXBpLmRvdWxhb2Jhbi5jb20vbG9naW4iLCJpYXQiOjE2MjAyOTIzNjgsImV4cCI6MTYyMTUwMTk2OCwibmJmIjoxNjIwMjkyMzY4LCJqdGkiOiJxOUJXdXBTaldnaVJhbVpVIiwic3ViIjoyNDEzMjYsInBydiI6Ijg3ZTBhZjFlZjlmZDE1ODEyZmRlYzk3MTUzYTE0ZTBiMDQ3NTQ2YWEiLCJjb21wYW55IjoyMDYyNjMsImNvbXBhbnlfdXNlciI6MjQxMzI2LCJmcm9tIjoid2ViIn0.9L7hbBBeRHXPk0wW7T5kWbnrgUK2qikUAHLcZ3jCkxI; Hm_lpvt_95f29f66fe1210be4b5158abc33863ee=1620370248",
        "referer": "https://zs.doulaoban.com/Monitor_plus",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36"
    }
    num = 0
    # First request is only used to read data.total for the page count.
    # NOTE(review): this URL hard-codes the date 2021-05-17 while the page loop
    # below uses yesterday's date — the page count may not match the scraped window.
    url = " https://api.doulaoban.com/v1/task/plas/task_list?is_open_comment=&goods_id=&query_user=&start_time=2021-05-17%2000:00&end_time=2021-05-17%2023:59&order=&by=&aweme_ids=&order_site_id=&site_id=&status=&page=1&paginate=15&time_type=creat_time&index_ids=4,6,12,13,15,30,36,41,47,67"
    # session = HTMLSession()
    response = requests.get(url, headers=headers).json()
    total_page=math.ceil(response['data']['total']/15)  # 15 rows per page
    for page in range(1,total_page+1):
    # for page in range(1,10):
        logger.warning(f"当前是第 {page} 页")
        url = f"https://api.doulaoban.com/v1/task/plas/task_list?is_open_comment=&goods_id=&query_user=&start_time={yesterday_obj}%2000:00&end_time={yesterday_obj}%2023:59&order=&by=&aweme_ids=&order_site_id=&site_id=&status=&page={page}&paginate=15&time_type=creat_time&index_ids=4,6,12,13,15,30,36,41,47,67"
        # session = HTMLSession()
        response = requests.get(url, headers=headers)
        # print(response.status_code)
        res = response.content.decode()
        # Strip all whitespace before parsing — presumably to work around
        # whitespace inside the payload; TODO confirm this is still needed.
        content = re.sub("\s", "", res)
        content= json.loads(content)
        start_time = 0
        end_time = 0
        for crawl_list in content["data"]["data"]:
            num+=1
            dou_id = crawl_list["id"]
            desc = str(crawl_list["desc"])  # title
            video_play_url = crawl_list["video_play_url"]  # video URL
            play_increment = crawl_list["play_increment"]  # Dou+ play increment
            cost = crawl_list["cost"]  # spend in the period
            shopping_click = crawl_list["shopping_click"]  # shopping-cart clicks
            shopping_price = crawl_list["shopping_price"]  # cart unit price
            shop_price = crawl_list["shop_price"]  # store-entry unit price
            used_price = crawl_list["used_price"]  # effective amount
            used_order = crawl_list["used_order"]  # effective order count
            used_price_roi = crawl_list["used_price_roi"]  # ROI
            used_one_price = crawl_list["used_one_price"]  # effective per-order price
            used_order_price = crawl_list["used_order_price"]  # effective cost per order
            create_time = crawl_list["create_time"]  # creation time
            created_at = crawl_list["created_at"]  # end time
            start_time = crawl_list["start_time"]
            end_time = crawl_list["end_time"]
            print(
                f"开始时间{start_time},结束时间{end_time},id{dou_id},标题{desc},视频链接{video_play_url},Dou+播放增量{play_increment},时段消耗{cost},购物车点击量{shopping_click},购物车单价{shopping_price},进店单价{shop_price},有效的{used_price},有效单数{used_order},ROI{used_price_roi},有效单笔价{used_one_price},有效出单成本{used_order_price}")
            logger.info("获取成功")
            logger.info("-----------------")
            # mysql.list_sql(dou_id, desc, video_play_url, play_increment, cost, shopping_click, shopping_price, shop_price, used_price, used_order, used_price_roi, used_one_price, used_order_price)
            # Second request: per-task detail rows for the same day.
            url2 = f"https://api.doulaoban.com/v1/task/plas/task_detail?id={dou_id}&status=0&time_type=pay_time&start_time={yesterday_obj}%2000:00&end_time={yesterday_obj}%2023:59&index_ids=4,6,13,15,12,14,30,36,41,47,67,42,35&grading=2&goods_id="
            # url2 = f"https://api.doulaoban.com/v1/task/plas/task_detail?id={dou_id}&status=1&time_type=pay_time&start_time={dt}00:00&end_time={dt}23:59&index_ids=4,6,13,15,30,36,41,47,67,42,35&grading=2&goods_id="
            # print(url2)
            response2= requests.get(url=url2,headers=headers)
            res2=response2.content.decode()
            content2 = re.sub("\s", "", res2)
            content2 = json.loads(content2)
            if content2['code'] == 1000:  # API success code — TODO confirm
                for detail in content2['data']['list']:
                    show_time = detail["show_time"]
                    start_time = detail["start_time"]  # start time, e.g. "2021-04-28 09:40:00"
                    end_time = detail["end_time"]  # end time, e.g. "2021-04-28 09:50:00"
                    play_count = detail["play_count"]
                    play_all_count = detail["play_all_count"]  # total plays
                    play_increment = detail["play_increment"]  # delivery increment
                    cost = detail["cost"]  # delivery spend
                    shopping_price = detail["shopping_price"]  # cart unit price
                    shop_price = detail["shop_price"]  # store-entry unit price
                    used_price = detail["used_price"]  # effective amount
                    used_order = detail["used_order"]  # effective order count
                    used_price_roi = detail["used_price_roi"]  # ROI
                    used_one_price = detail["used_one_price"]  # effective per-order price
                    used_order_price = detail["used_order_price"]  # effective cost per order
                    used_income_roi = detail["used_income_roi"]  # effective commission ROI
                    all_order = detail["all_order"]  # all orders
                    shopping_click = detail["shopping_click"]  # shopping-cart clicks
                    shop_visit_count = detail["shop_visit_count"]  # store traffic
                    print(f"id{dou_id}日期{show_time},开始时间{start_time},结束时间{end_time},总数{play_count},播放量{play_all_count},投放增量{play_increment},投放消耗{cost},购物车单价{shopping_price},进店单价{shop_price},有效的{used_price},有效单数{used_order},ROI{used_price_roi},有效单笔价{used_one_price},有效出单成本{used_order_price},有效佣金ROI{used_income_roi},有效单数{all_order},购物车点击量{shopping_click},店铺{shop_visit_count}")
                    # mysql.detail_sql(dou_id,show_time, start_time, end_time, play_count, play_all_count, play_increment,cost, shopping_price,shop_price, used_price, used_order, used_price_roi, used_one_price, used_order_price,used_income_roi, all_order, shopping_click,shop_visit_count)
            # Throttle between tasks (message says 5s but the actual sleep is 2s).
            logger.info("sleep 5 秒")
            time.sleep(2)
    mysql.close_spider()
if __name__ == '__main__':
    # Schedule the scraper to run once a day at 16:00 and block forever.
    scheduler = BlockingScheduler()
    scheduler.add_job(doulaoban, 'cron', hour=16, minute=00)
    scheduler.start()
# """
# # 去重
# insert into 空表名字 select * from dou_crawl_detail group_by d_desc
# 根据时间查询最新更新数据
# select * from 表名 order by 列名 desc (降序) limit 显示的条数
# """ | zhangxin302/pythonProject | yuanhang/qianduan.py | qianduan.py | py | 8,419 | python | en | code | 3 | github-code | 90 |
18531552399 | import sys
# Read A, B, C, K from one stdin line; answer alternates sign with the parity of K.
def sr():
    return sys.stdin.readline().rstrip()

def ir():
    return int(sr())

def lr():
    return list(map(int, sr().split()))

A, B, C, K = lr()
answer = (A - B) if K % 2 == 0 else (B - A)
if answer > 10 ** 18:
    answer = 'Unfair'
print(answer)
| Aasthaengg/IBMdataset | Python_codes/p03345/s060612963.py | s060612963.py | py | 256 | python | en | code | 0 | github-code | 90 |
1929606771 | #!/usr/bin/env python
# coding: utf-8
import requests
from lxml import etree
import pandas as pd
import time
from multiprocessing import Pool
import glob
# Worker-pool size for the parallel scraper below.
NUM_PROCS = 20
# School years 2010/11 .. 2019/20, e.g. ("2010", "11").
years_range = range(10, 20)
start = ["20" + str(y).zfill(2) for y in years_range]
end = [str(y + 1).zfill(2) for y in years_range]
start_end_list = list(zip(start, end))
start_end_list[0]
# Full list of schools to scrape (read at import time).
df_schools_url_all = pd.read_csv("schools_list.csv")
path = r"data/"
all_files = glob.glob(path + "/*.csv")
li = []
for filename in all_files:
    df = pd.read_csv(filename, index_col=0)
    li.append(df)
# Resume support: skip schools already present in data/*.csv.
# NOTE(review): the bare except silently restarts from scratch on ANY error.
try:
    df_current_data = pd.concat(li, axis=0, ignore_index=True)
    last_current_schoolNr = df_current_data.iloc[-1].School_Nr
    index_of_last_current_schoolNr = df_schools_url_all.loc[
        df_schools_url_all.SchulNr == last_current_schoolNr
    ].index[0]
    df_schools_url = df_schools_url_all.iloc[index_of_last_current_schoolNr:]
except:
    df_schools_url = df_schools_url_all
    df_current_data = pd.DataFrame()
df_schools_url = df_schools_url[["SchulNr", "Name", "url"]]
schools = df_schools_url.to_dict(orient="records")
print("starting with : ", schools[0])
# One (label, url) pair per school year for the NDH statistics page.
BASE_URL = "https://www.berlin.de/sen/bildung/schule/berliner-schulen/schulverzeichnis/schuelerschaft.aspx?view="
ndh_urls = [
    BASE_URL + "ndh&jahr=" + start_end[0] + "/" + start_end[1]
    for start_end in start_end_list
]
urls_title = [y[0] + "/" + y[1] for y in start_end_list]
ndh_urls = list(zip(urls_title, ndh_urls))
def get_ndh(session, url):
    """Fetch one school-year NDH table.

    url is a (year_label, url) pair; returns a DataFrame with a 'year' column
    appended, or None when the request/parse fails (error is printed).
    """
    try:
        r = session.get(url[1])
        tree = etree.HTML(r.text)
        school_name = tree.xpath(
            '//span[@id="ContentPlaceHolderMenuListe_lblUebSchule"]/text()'
        )
        school_name = school_name[0]
        # First HTML table on the page; German number formats (',' decimal, '.' thousands).
        df_year = pd.read_html(r.text, decimal=",", thousands=".")[0]
        # Some years come back with a MultiIndex header — flatten and rename it.
        if 0 not in df_year.columns.values:
            df_year.columns = df_year.columns.droplevel(0)
            # pandas 0.18.0 and higher
            df_year = df_year.rename_axis(None, axis=1)
            df_year.columns = [
                "Insgesamt",
                "Schülerinnen",
                "Schüler",
                "Insgesamt.1",
                "Insg. in %",
            ]
        df_year["year"] = str(url[0])
        return df_year
    except Exception as e:
        print(e)
        return None
def get_ndh_all_years(session):
    """Fetch and stack the NDH table for every school year in ndh_urls."""
    frames = []
    for year_url in ndh_urls:
        time.sleep(2)  # be polite to the server
        yearly = get_ndh(session, year_url)
        if yearly is not None:
            frames.append(yearly)
    return pd.concat([pd.DataFrame()] + frames, axis=0)
def get_school_data(school):
    """Scrape all NDH years for one school dict ({'SchulNr', 'Name', 'url'}).

    Returns the combined DataFrame annotated with school metadata, or an
    empty DataFrame when nothing was retrieved.
    """
    print(school)
    time.sleep(2)  # throttle between schools
    session = requests.Session()
    r = session.get(school["url"])
    tree = etree.HTML(r.text)
    school_type=""
    # NOTE(review): bare except silently leaves school_type empty on any failure.
    try:
        school_type = tree.xpath(
            '//span[@id="ContentPlaceHolderMenuListe_lblSchulart"]/text()'
        )
        school_type = school_type[0]
    except :
        pass
    df_school_data = get_ndh_all_years(session)
    df_school_data["School_Nr"] = school["SchulNr"]
    df_school_data["Name"] = school["Name"]
    df_school_data["type"] = school_type
    df_school_data["url"] = school["url"]
    if df_school_data.shape[0] > 0:
        return df_school_data
    else:
        return pd.DataFrame()
def get_all_data(schools):
    """Scrape every school in parallel and stack the non-empty per-school frames."""
    # Pool is closed automatically when the with-block exits.
    with Pool(NUM_PROCS) as pool:
        results = pool.map(get_school_data, schools)
    combined = pd.DataFrame()
    for frame in results:
        if frame.shape[0] > 0:
            combined = pd.concat([combined, frame], axis=0)
    return combined
def chunks(lst, n):
    """Yield successive n-sized chunks from lst (last chunk may be shorter)."""
    start = 0
    while start < len(lst):
        yield lst[start:start + n]
        start += n
if __name__ == "__main__":
for lista in chunks(schools, 50):
df = get_all_data(lista)
df.to_csv(
f'data/school_data_{lista[0]["SchulNr"]}_{lista[-1]["SchulNr"]}.csv',
encoding="utf-8",
)
print(f'{lista[0]["SchulNr"]}_{lista[-1]["SchulNr"]} is done')
| clementlefevre/berlin_grundschulen_2019_2021 | python/schools_data.py | schools_data.py | py | 4,109 | python | en | code | 0 | github-code | 90 |
18292946709 | import math
# Read n and two permutations p, q of 1..n, then enumerate all n! permutations
# (itertools.permutations yields them in lexicographic order for sorted input).
n = int(input())
p = list(map(int, input().split()))
q = list(map(int, input().split()))
import itertools
t=[i for i in range(1,n+1)]
a = list(itertools.permutations(t))
def num(b, t, n):
    """Return the index of permutation b (a list) within the tuple sequence t.

    Returns len(t) when b does not occur; n is unused but kept for interface
    compatibility.
    """
    for idx, perm in enumerate(t):
        if list(perm) == b:
            return idx
    return len(t)
# Print the lexicographic distance between the two permutations.
pos_p = num(p, a, n)
pos_q = num(q, a, n)
print(abs(pos_p - pos_q))
1188882987 | #!/user/bin/env python
# -*- coding:utf-8 -*-
# Code created by gongfuture
# Create Time: 2023/3/10
# Create User: gongf
# This file is a part of Homework_test_environment
# Print every four-digit palindrome, five per line, then the total count.
count = 0
five = 0
for i in range(1000, 10000):
    s = str(i)
    if s == s[::-1]:  # four-digit palindrome: reads the same backwards
        count += 1
        print(i, end=' ')
        five += 1
        if five == 5:  # start a new line after every fifth palindrome
            five = 0
            print("", end='\n')
print(count)
40631355000 | """
6. Dados dois números inteiros positivos i e j, imprimir em ordem crescente os
N (lido) primeiros múltiplos de i ou de j ou de ambos.
"""
# Print, in increasing order, the first n positive multiples of i, of j, or of both.
i = int(input("Digite o valor de i: "))
j = int(input("Digite o valor de j: "))
n = int(input("Digite o valor de n: "))

# Classic two-pointer merge: keep the next unprinted multiple of each number and
# always emit the smaller one, advancing both pointers when they coincide so a
# common multiple is printed only once.
# Fix: the original nested-while version started both products at 0, so neither
# inner loop's "0 < 0" condition was ever true and the outer loop spun forever.
mult_i = i
mult_j = j
cont = 0
while cont < n:
    menor = min(mult_i, mult_j)
    print(menor)
    if mult_i == menor:
        mult_i += i
    if mult_j == menor:
        mult_j += j
    cont += 1
25294933318 | # https://dodona.ugent.be/nl/courses/807/series/9108/activities/360836693
# Read a line of integers and print the first adjacent pair with the same sign
# ("good neighbours"); print 0 when no such pair exists.
# Fixes: the original's "... or (...) == True" applied "== True" to only the
# second operand (harmless here but misleading), incremented the for-loop index
# by hand to no effect, and kept scanning after the first match instead of
# breaking out.
getallen = input('')
lijst = list(map(int, getallen.split()))

found = False
for idx in range(len(lijst) - 1):
    same_sign = (lijst[idx] > 0 and lijst[idx + 1] > 0) or (lijst[idx] < 0 and lijst[idx + 1] < 0)
    if same_sign:
        print(lijst[idx], lijst[idx + 1])
        found = True
        break
if not found:
    print(0)
| ronnyremork/dodona | goede_buren.py | goede_buren.py | py | 546 | python | nl | code | 0 | github-code | 90 |
22707381969 | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import astropy
from astropy.io import ascii
from scipy.interpolate import interp1d
# Convert an atmospheric transmission curve to an extinction coefficient k(lambda)
# in magnitudes (-2.5*log10 T), write it out, and compare against other observatories.
data0=ascii.read('transmission_0.dat')
lam=10.*data0['col1']  # wavelengths scaled by 10 — presumably nm -> Angstrom; TODO confirm against the data file
klam=-2.5*np.log10(data0['col2'])  # transmission -> extinction in magnitudes
# Drop zero-transmission points (log10(0) would be -inf).
lam=lam[data0['col2'] != 0]
klam=klam[data0['col2'] != 0]
ascii.write([lam, klam], 'klam.dat', format='no_header', formats={'col1':'%.3f', 'col2':'%.5f'})
# Reference extinction curves for other sites.
dataMcd=ascii.read('extinc_McD.dat')
dataCTIO=ascii.read('ctio_atmos.dat')
dataMK=ascii.read('maunakea_atmos.dat')
plt.plot(lam, klam, 'b', linewidth=2, label='Paranal')
plt.plot(dataMcd['col1']*10., dataMcd['col2'], 'r', linewidth=2, label='McDonald')
plt.plot(dataCTIO['col1'], dataCTIO['col2'], 'g', linewidth=2, label='CTIO')
plt.plot(dataMK['col1']*10., dataMK['col2'], 'm', linewidth=2, label='Mauna Kea')
plt.legend()
plt.show()
| sdss/lvmetc_lco_script | database/sky/trans2klam.py | trans2klam.py | py | 854 | python | en | code | 2 | github-code | 90 |
def main():
    """Prompt until the user enters a valid integer, then run fizzbuzz up to it."""
    # Fixes: removed the no-op "n += 1; n -= 1" pair (it only widened the
    # exception scope of the try block) and renamed the flag, which shadowed
    # the builtin quit().
    running = True
    while running:
        try:
            n = int(input("Please enter a number: "))
        except ValueError:
            print("You did not enter a number. Please try again")
        else:
            fizzbuzz(n)
            running = False
# Functions that test for multiples of 3, 5 and 7
def fizzTest(n):
    """Print "Fizz " (no newline) and return 1 when n is a multiple of 3, else 0."""
    if n % 3 != 0:
        return 0
    print("Fizz ", end='')
    return 1
def buzzTest(n):
    """Print "Buzz " (no newline) and return 1 when n is a multiple of 5, else 0."""
    if n % 5 != 0:
        return 0
    print("Buzz ", end='')
    return 1
def bazzTest(n):
    """Print "Bazz " (no newline) and return 1 when n is a multiple of 7, else 0."""
    if n % 7 != 0:
        return 0
    print("Bazz ", end='')
    return 1
def fizzbuzz(n):
    """Print the FizzBuzzBazz sequence for 1..n (Fizz=3, Buzz=5, Bazz=7).

    The word fragments are printed by the three test helpers; this function
    prints the number itself when no word matched, or the closing newline
    otherwise.
    """
    for value in range(1, n + 1):
        hits = fizzTest(value) + buzzTest(value) + bazzTest(value)
        if hits == 0:
            print(value, end='\n')
        else:
            print()
def inpTest(n):
    """Return 1 if n is divisible by 3, 5 or 7; otherwise 0."""
    if n % 3 == 0 or n % 5 == 0 or n % 7 == 0:
        return 1
    return 0
main() | sleddog/methods | projects/fizzbuzz/python/LoganShy/fizzbuzz.py | fizzbuzz.py | py | 1,153 | python | en | code | 7 | github-code | 90 |
10259595116 | import os, shutil
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib as mpl
import flopy
import pyemu
# completed PEST run dir - calibrated parameter set
cal_dir = 'master_glm'
cal_pst_name ='cal_reg1.pst'
par_file = cal_pst_name.replace('pst','14.par')
# flag to copy calibrated model to cal dir
store_cal = True
store_dir = 'store' # content will be cleared !
# evaluation dir (calibrated parameters - noptmax=0)
eval_dir = 'pst'
eval_pst_name = 'eval.pst'
# calibration pst file
cal_pst = pyemu.Pst(os.path.join(cal_dir, cal_pst_name))
# plot phi progress
phiprog = cal_pst.plot(kind='phi_progress')
phiprog.get_figure().savefig(os.path.join('fig','phiprog.png'))
# evaluation pst file (with best parameters)
cal_pst.parrep(os.path.join(cal_dir,par_file))
cal_pst.control_data.noptmax=0
cal_pst.write(os.path.join(eval_dir,eval_pst_name))
# run calibrated model
pyemu.helpers.run(f'pestpp-glm {eval_pst_name}', cwd=eval_dir)
# copy calibrated ml files to cal dir
if store_cal:
if os.path.exists(store_dir):
shutil.rmtree(store_dir)
cases_dirs = [d for d in os.listdir(eval_dir)\
if (os.path.isdir(os.path.join(eval_dir,d)) and d.startswith('ml_'))]
for d in (cases_dirs + ['com_ext']):
shutil.copytree(os.path.join(eval_dir,d),os.path.join(store_dir,d))
# phie pie
eval_pst = pyemu.Pst(os.path.join(eval_dir, eval_pst_name))
phipie = eval_pst.plot(kind="phi_pie")
phipie.get_figure().savefig(os.path.join('fig','phipie.png'))
# scatter plot
one2one = eval_pst.plot(kind="1to1")
one2one[1].savefig(os.path.join('fig','one2one.png'))
# fetch residuals and split the observation name ("oname:..._otype:..._usecol:..._time:...")
# into separate columns for grouping and labelling.
res = eval_pst.res
res[['type', 'fmt', 'locname', 'time']] = res.name.apply(
    lambda x: pd.Series(
        dict([s.split(':') for s in x.split('_') if ':' in s]
        )))[['oname', 'otype', 'usecol', 'time']]
# --- plot one2one plot per obs. group (non-zero-weight observations only)
for g in ['heads', 'qdrn', 'mr']:
    res_g = res.loc[(res.group == g) & (res.weight > 0), :]
    mx = max(res_g.measured.max(), res_g.modelled.max())
    mn = min(res_g.measured.min(), res_g.modelled.min())
    # Pad axis limits by 10%. NOTE(review): multiplying shrinks rather than pads
    # when mx < 0 or mn < 0 — confirm all groups are positive-valued.
    mx *= 1.1
    mn *= 0.9
    fig, ax = plt.subplots()
    ax.axis('square')
    locnames = res_g.locname.unique()
    cmap = plt.get_cmap('Dark2')
    colors = [cmap(i) for i in np.linspace(0, 1, len(locnames))]
    # one scatter series per observation location
    for locname in locnames:
        res_gg = res_g.loc[res_g.locname == locname, :]
        scat = ax.scatter(res_gg.measured, res_gg.modelled,
                label=locname)
    # 1:1 reference line
    ax.plot([mn, mx], [mn, mx], 'k--', lw=1.0)
    xlim = (mn, mx)
    ax.set_xlim(mn, mx)
    ax.set_ylim(mn, mx)
    ax.grid()
    ax.set_xlabel('observed', labelpad=0.1)
    ax.set_ylabel('simulated', labelpad=0.1)
    ax.set_title(
        'group:{0}, {1} observations'.format(
            g, res_g.shape[0]
        ),
        loc='left'
    )
    ax.legend()
    # annotate each point with its time label
    txt = res_g.apply(lambda x: ax.annotate(
        x['time'],
        (x['measured'],x['modelled']),
        textcoords="offset points",
        xytext=(6,6),
        ha='center',
        size = 8),
        axis=1)
    fig.savefig(os.path.join('fig',f'one2one_{g}.png'))
# --- plot heads and particle tracks for all cases but simulation
# ([:-1] drops the last (simulation) case from the sorted list)
case_dirs = sorted([os.path.join(eval_dir,d) for d in os.listdir(eval_dir) if d.startswith('ml_')])[:-1]
for case_dir in case_dirs:
    # load case
    sim = flopy.mf6.MFSimulation.load(sim_ws=case_dir)
    ml = sim.get_model('ml')
    # plot case
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, aspect='equal')
    ax.set_title('Modpath7 pathlines')
    pmv = flopy.plot.PlotMapView(model=ml, ax=ax)
    # heads in background (last stress period/time step/layer; 1e30 is the no-data value)
    hds = sim.simulation_data.mfdata[ml.name,'HDS','HEAD'][-1,-1,-1,:]
    heads = pmv.plot_array(hds, masked_values=[1.e+30], alpha=0.5)
    cb = plt.colorbar(heads, shrink = 0.5)
    # pathlines for each group
    pth = flopy.utils.PathlineFile(os.path.join(case_dir,'mp.mppth'))
    rec = pth.get_alldata()
    pmv.plot_pathline(rec, layer = 'all', lw = 0.1, alpha = 0.8)
    # plot boundaries (river, drain, well packages)
    bc_colors_dic = { 'RIV': 'cyan', 'DRN': 'red', 'WEL': 'coral'}
    for bc in bc_colors_dic.keys():
        bounds = pmv.plot_bc(bc, color = bc_colors_dic[bc])
    # legend
    leg = ax.legend(loc = 'lower right',fontsize = 6)
    for line in leg.get_lines():
        line.set_linewidth(2)
    # save fig
    case_id = case_dir.split('_')[-1]
    fig.savefig(os.path.join('fig',f'map_{case_id}.png'))
    # zoom in and save a second, zoomed figure with a fixed color scale
    vmin, vmax = 5,12
    ax.set_xlim(408000,410000)
    ax.set_ylim(6427500,6429500)
    heads.set_clim(vmin,vmax)
    fig.savefig(os.path.join('fig',f'zmap_{case_id}.png'))
# --- plot hk map (hydraulic conductivity, log10 scale)
# NOTE(review): uses `ml` left over from the last iteration of the loop above.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, aspect='equal')
ax.set_title('log10(hk)')
pmv = flopy.plot.PlotMapView(model=ml, ax=ax)
pmv.plot_grid(lw = 0)
hkmap = pmv.plot_array(np.log10(ml.npf.k.data[0]))  # layer 0 conductivity
cb = plt.colorbar(hkmap, shrink=0.5)
cb.ax.set_ylabel('log10(hk [m/s])', labelpad=5,rotation=270)
pmv.plot_bc('WEL',color='red')
pmv.plot_bc('DRN',color='black')
pmv.plot_bc('RIV',color='grey')
ax.set_xlim(407500,409500)
ax.set_ylim(6427500,6429500)
fig.autofmt_xdate()
fig.savefig(os.path.join('fig','hk.png'))
# Additional plot function
def plot_phi_progress(pst, filename=None, pest = '++', log = True, **kwargs):
    """
    Plot of measurement_phi & regularization_phi vs number of model runs
    Parameters
    -----------
    - pst (pyemu) : pst handler load from pyemu.Pst()
    - filename (str) : name of the output file containing the plot
    - pest (str) : type of pest algorithm used ('++' or 'hp'); currently unused
    - log (bool) : plot phi in logarithmic scale
    Examples
    -----------
    >>> phiplot = plot_phi_progress(pst, filename='cal_phi_progress.pdf')
    """
    # ----- Get iobj file (per-iteration objective-function record written by PEST++)
    iobj_file = pst.filename.replace(".pst",".iobj")
    # ---- Load iobj data as dataframe
    df = pd.read_csv(iobj_file)
    # ---- Extract useful data for plot
    it, phi, reg_phi = df.iteration, df.measurement_phi, df.regularization_phi
    # Prepare Plot figure
    plt.figure(figsize=(9,6))
    plt.rc('font', family='serif', size=10)
    ax = plt.subplot(1,1,1)
    ax1=ax.twinx()
    # ---- Plot processing: measurement phi plus its target/acceptance lines
    lphi, = ax.plot(it, phi,color='tab:blue',marker='.', label='$\Phi_{measured}$')
    lphilim = ax.hlines(pst.reg_data.phimlim, 0, len(it)-1, lw = 1,
              colors='navy', linestyles='solid', label='$\Phi_{limit}$')
    lphiacc = ax.hlines(pst.reg_data.phimaccept, 0, len(it)-1, lw=1,
              colors='darkblue', linestyles='dotted', label='$\Phi_{accept}$')
    # Plot phi regularization on the secondary y-axis
    lphireg, = ax1.plot(it,reg_phi,color='tab:orange',marker='+', label='$\Phi_{regul}$')
    # Add log scale if required
    if log == True:
        # Fix: the 'basey' keyword was deprecated in Matplotlib 3.3 and removed
        # in 3.5; 'base' is the supported spelling.
        ax.set_yscale('log', base=10)
    # ---- Set labels
    ax.set_xlabel('Iterations')
    ax.set_ylabel('Measurement objective function ($\Phi_m$)',color='tab:blue')
    ax1.set_ylabel('Regularization objective function ($\Phi_r$)',color='tab:orange')
    # ---- Set grid & legend
    ax.grid()
    lines = [lphi, lphilim, lphiacc, lphireg]
    plt.legend(lines, [l.get_label() for l in lines],loc= 'upper center')
    plt.tight_layout()
    # ---- Export plot if required
    if filename is not None:
        plt.savefig(filename)
    return(ax)
| tracktools/case_study | pproc_pst.py | pproc_pst.py | py | 7,336 | python | en | code | 0 | github-code | 90 |
21146721978 | """initial
Revision ID: 45079e4d0040
Revises:
Create Date: 2014-12-07 22:53:45.581098
"""
# revision identifiers, used by Alembic.
revision = '45079e4d0040'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Apply the migration: create the ``service`` and ``service_provider`` tables."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('service',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=256), nullable=False),
    sa.Column('trash', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('service_provider',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=512), nullable=False),
    sa.Column('availability', sa.Boolean(), nullable=True),
    sa.Column('phone_number', sa.String(length=20), nullable=False),
    sa.Column('address', sa.String(length=2048), nullable=False),
    sa.Column('home_location', postgresql.ARRAY(sa.Float(), dimensions=1), nullable=False),
    sa.Column('office_location', postgresql.ARRAY(sa.Float(), dimensions=1), nullable=False),
    sa.Column('cost', sa.Float(), nullable=True),
    sa.Column('service', sa.Integer(), nullable=True),
    sa.Column('experience', sa.Float(), nullable=True),
    sa.Column('skills', postgresql.ARRAY(postgresql.JSON(), dimensions=1), nullable=True),
    sa.Column('trash', sa.Boolean(), nullable=True),
    sa.ForeignKeyConstraint(['service'], ['service.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the tables (child table first to satisfy the FK)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('service_provider')
    op.drop_table('service')
    ### end Alembic commands ###
| krishnatejak/renaissance_men | alembic/versions/45079e4d0040_initial.py | 45079e4d0040_initial.py | py | 1,784 | python | en | code | 0 | github-code | 90 |
23195337211 | import cat
import toys
class CatPerson(cat.Cat, toys.Toy):
    """A cat "person": a Cat that can own toys and have child cats."""

    def __init__(self, name, sex, color, breed, nationality_voice):
        super().__init__()
        # Run the basic attributes through the Cat setters for validation.
        self.sex = cat.Cat.set_sex(self, sex)
        self.color = cat.Cat.set_color(self, color)
        self.breed = cat.Cat.set_breed(self, breed)
        self.name = name
        self.nationality_voice = nationality_voice
        self.addchild = []  # direct descendants (CatPerson instances)
        self.addtoy = []    # owned toys (whatever Toy.ret_toy returns)

    def __str__(self):
        return f"{self.name} -- ( {cat.Cat.param(self)} )"

    def add_toy(self, name, material, color):
        """Create a Toy with the given properties and attach it to this cat."""
        toy = toys.Toy()
        toy.set_param_t(name, material, color)
        self.addtoy.append(toy.ret_toy())

    def add_child(self, child):
        """Register *child* as a direct descendant of this cat."""
        self.addchild.append(child)

    def voice(self):
        """Return this cat's nationality-specific sound."""
        return self.nationality_voice
def tree_view(root, n, x):
    """Recursively print the family tree rooted at *root*, listing each cat's toys."""
    print(f' {n} {root} ')
    for toy in root.addtoy:
        print(f' игрушка кота {root.name}: {toy}')
    n = ' |-'
    if x != 0:
        n = ' |-'
    for idx, child in enumerate(root.addchild):
        tree_view(child, n, idx)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Demo: build a three-generation cat family, give some of them toys,
    # and print the family tree.
    cat1 = CatPerson('Васька', 'кот', 'черный', 'русская дворовая', 'мяу')
    cat11 = CatPerson('Леопольд', 'кот', 'бежевый', 'русская дворовая', 'мяу')
    cat111 = CatPerson('Симба', 'кошка', 'коричневая', 'швецкая дворовая', 'мьян-мьян')
    cat112 = CatPerson('Джокер', 'кот', 'бежевый', 'латвийская дворовая', 'нау-нау')
    cat12 = CatPerson('Майло', 'кошка', 'белая', 'данская дворовая', 'миав')
    cat13 = CatPerson('Нафаня', 'кот', 'рыжий', 'французская дворовая', 'миаоу')
    cat1.add_child(cat11)
    cat1.add_child(cat12)
    cat1.add_child(cat13)
    cat11.add_child(cat111)
    cat11.add_child(cat112)
    cat1.add_toy('мяч', 'резина', 'оранжевый')
    cat11.add_toy('не мяч', 'пластик', 'синий')
    cat112.add_toy('мышь', 'пластик', 'серый')
    cat13.add_toy('пакет', 'пластик', 'белый')
    tree_view(cat1, '', 1)
| Evgeny-iAD/oop2 | main.py | main.py | py | 2,364 | python | ru | code | 0 | github-code | 90 |
971210723 | import logging
import uuid
from app.utility.base_service import BaseService
from app.objects.c_operation import Operation
from app.objects.c_ability import AbilitySchema
from app.objects.c_adversary import Adversary, AdversarySchema
from app.objects.secondclass.c_executor import Executor, ExecutorSchema
from app.api.v2.responses import JsonHttpBadRequest, JsonHttpForbidden, JsonHttpNotFound
from app.objects.secondclass.c_link import Link
from app.utility.base_world import BaseWorld
from base64 import b64encode
from app.api.v2.managers.operation_api_manager import OperationApiManager
class AttackService(OperationApiManager):
    """Caldera plugin service: tasks agents with abilities and builds ad-hoc operations/links."""
    def __init__(self, services):
        #self.file_svc = services.get('file_svc')
        #self.data_svc = services.get('data_svc')
        #self.log = logging.getLogger('attack_svc')
        super().__init__(services)
        self.services = services
    async def foo(self):
        """Trivial smoke-test coroutine."""
        return 'bar'
    # Add functions here that call core service
    async def send_ability(self, paw, ability_id, obfuscator, facts=()):
        """Task the agent identified by *paw* with *ability_id* and return the created links."""
        new_links = []
        #converted_facts = [Fact(trait=f['trait'], value=f['value']) for f in data.get('facts', [])]
        for agent in await self._data_svc.locate('agents', dict(paw=paw)):
            self.log.info('Tasking %s with %s' % (paw, ability_id))
            links = await agent.task(
                abilities=await self._data_svc.locate('abilities', match=dict(ability_id=ability_id)),
                obfuscator=obfuscator,
                facts=facts # facts = converted_facts
            )
            # NOTE(review): "links" has no %s placeholder, so the second argument
            # is dropped by the logging %-formatting (and raises a handled error).
            self.log.info("links", links)
            new_links.extend(links)
        return new_links
    #To make possible the spanning of abilities on the agents, we need to create a new operation with no adversary profile specified
    #Then, to this operation, we'll add all the potential links
    async def new_operation(self, name):
        """Create, store and return a new Operation using an empty ad-hoc adversary profile."""
        #find the default source
        defaultSourceId = 'ed32b9c3-9593-4c33-b0db-e2007315096b' # basic fact source
        sources = await self._data_svc.locate("sources")
        for source in sources:
            if source.id == defaultSourceId:
                src = source
                self.log.info("source trovata: %s " %src.name)
        #find the atomic planner
        planners = await self._data_svc.locate('planners')
        for p in planners:
            if p.name == "atomic":
                planner = p
                self.log.info("planner trovato: %s" %planner.name)
        #Create a new empty adversary profile
        adv = Adversary.load(dict(adversary_id='ad-hoc', name='ad-hoc', description='an empty adversary profile',
                                  atomic_ordering=[]))
        AdversarySchema().dump(adv) #?????NECESSARIO???
        #create and save the new operation
        operation = Operation(adversary = adv, name = name, source = src, planner = planner)
        await operation.update_operation_agents(self.services)
        await self._data_svc.store(operation)
        return operation
    #Datas that has to be send to create_potential_link includes an executor, that has to have 2 parameters:
    #1) 'name' such as 'sh', 'bash' etc...
    #2) 'command', so we have to retrieve the command associated with the needed ability and selected exectuor name
    async def new_potential_link(self, operation_id, paw , ability_id, access):
        """Build and register a potential link for agent *paw* running *ability_id* in *operation_id*."""
        agents_list = await self._data_svc.locate('agents')
        for ag in agents_list:
            if ag.paw == paw:
                agent = ag
                self.log.info("paw %s" %agent.paw)
        #Retrieve ability by given id
        abilities = await self._data_svc.locate('abilities')
        for ab in abilities:
            if ab.ability_id == ability_id:
                ability = ab
                self.log.info("ability %s" %ability.ability_id)
        executor = ''
        self.log.info("name: %s" %agent.executors[0])
        #retrieve ability exectuor matching the agent's first executor name
        for ex in ability.executors:
            if ex.name == agent.executors[0]:
                executor = ex
                self.log.info('exectuor found, command: %s' %executor.command)
        #vars needed to convert executor class in a dictionary. vars adds some useless parameters that we get rid of with that for loop.
        #We MUST remove these parameters, otherwise the ExecutorSchema().load in build_executor() is going to fail.
        dictEx = {k: v for k, v in vars(executor).items() if not k.startswith('_')}
        #same thing for ability
        dictAb = {k: v for k, v in vars(ability).items() if not k.startswith('_')}
        #construct data dictionary
        data = dict(paw= paw,executor = dictEx, ability = dictAb, platform = None, executors = None)
        #method override: use our build_ability (below) instead of the core one
        OperationApiManager.build_ability = self.build_ability
        #create a new potential link
        link = await self.create_potential_link(operation_id, data, access)
        #This one returning is actually link.display
        return link
    #Need to override this function to add that data.pop('tags') line that causes problems. Tags, in fact, is not present in AbilitySchema.
    def build_ability(self, data: dict, executor: Executor):
        """Override of the core builder: fills missing ability fields and strips the unsupported 'tags' key."""
        if not data.get('ability_id'):
            data['ability_id'] = str(uuid.uuid4())
        if not data.get('tactic'):
            data['tactic'] = 'auto-generated'
        if not data.get('technique_id'):
            data['technique_id'] = 'auto-generated'
        if not data.get('technique_name'):
            data['technique_name'] = 'auto-generated'
        if not data.get('name'):
            data['name'] = 'Manual Command'
        if not data.get('description'):
            data['description'] = 'Manual command ability'
        data['executors'] = [ExecutorSchema().dump(executor)]
        #Added line: 'tags' is not part of AbilitySchema and would break the load below
        data.pop('tags')
        ability = AbilitySchema().load(data)
        return ability
| ingbuono99/attack-on-agent | app/attack_svc.py | attack_svc.py | py | 5,990 | python | en | code | 0 | github-code | 90 |
39891082987 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 20 22:49:03 2018
@author: Ricardo Nunes
"""
def pascal(n):
    """Return the first *n* rows of Pascal's triangle as a string.

    Entries on a row are separated by single spaces and every row (including
    the last) is terminated by a newline. Uses exact integer binomial
    coefficients instead of the original float division of factorials, which
    loses precision for large rows.
    """
    from math import comb  # local import keeps the module self-contained
    result = ""
    for row in range(n):
        result += " ".join(str(comb(row, col)) for col in range(row + 1))
        result += "\n"
    return result
def factorial(n):
    """Iteratively compute n! (returns 1 for n == 0)."""
    if n == 0:
        return 1
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product
| TitanicThompson1/FPRO | Play/pascal.py | pascal.py | py | 659 | python | en | code | 0 | github-code | 90 |
29341589005 | import sys
from dataclasses import dataclass, field
from math import fabs, isnan, log
from typing import Callable, Dict, List, Union
import numpy as np
from scipy.integrate import simpson
@dataclass
class SignalingMetric(object):
    """
    Signaling metric used for sensitivity analysis.

    Attributes
    ----------
    quantification : Dict[str, Callable[[np.ndarray], Union[int, float]]]
        Functions to quantify signaling metrics; not settable via __init__.
    """

    quantification: Dict[str, Callable[[np.ndarray], Union[int, float]]] = field(
        default_factory=lambda: {
            "maximum": np.max,
            "minimum": np.min,
            "integral": simpson,
        },
        init=False,
    )
def dlnyi_dlnxj(
    signaling_metric: np.ndarray,
    n_file: List[int],
    perturbed_idx: List[int],
    observables: List[str],
    conditions: List[str],
    rate: float,
) -> np.ndarray:
    """
    Numerical computation of sensitivities using finite difference approximations
    with small (e.g. 1%) changes in reaction rates, parameter values or non-zero
    initial values.

    Parameters
    ----------
    signaling_metric : numpy array
        Signaling metric; index -1 along axis 1 holds the unperturbed baseline.
    n_file : list of integers
        Optimized parameter sets in out/
    perturbed_idx : list of integers
        Indices of rate equations or non-zero initial values.
    observables : list of strings
        observables in observable.py
    conditions : list of strings
        Experimental conditions.
    rate : float ~ 1
        1.01 for 1% change.

    Returns
    -------
    sensitivity_coefficients: numpy array
    """
    shape = (len(n_file), len(perturbed_idx), len(observables), len(conditions))
    sensitivity_coefficients = np.empty(shape)
    for i in range(shape[0]):
        for j in range(shape[1]):
            for k in range(shape[2]):
                for l in range(shape[3]):
                    perturbed = signaling_metric[i, j, k, l]
                    baseline = signaling_metric[i, -1, k, l]
                    if isnan(perturbed):
                        coefficient = np.nan
                    elif (
                        fabs(baseline) < sys.float_info.epsilon
                        or fabs(perturbed - baseline) < sys.float_info.epsilon
                        or (perturbed / baseline) <= 0
                    ):
                        # Coefficient is defined as zero when:
                        # 1. the unperturbed metric is (numerically) zero;
                        # 2. the perturbation produced no measurable change;
                        # 3. the ratio is non-positive (log undefined).
                        coefficient = 0.0
                    else:
                        coefficient = log(perturbed / baseline) / log(rate)
                    sensitivity_coefficients[i, j, k, l] = coefficient
    return sensitivity_coefficients
| okadalabipr/periodontitis | biomass/analysis/util.py | util.py | py | 2,942 | python | en | code | 0 | github-code | 90 |
27102613316 | import tkinter as tk
# Create the master object
def build_ui(master):
    """Lay out two path-entry rows and four (not yet wired) buttons."""
    master.minsize(400, 150)  # width, height
    entry_one = tk.Entry(master)
    entry_two = tk.Entry(master)
    entry_one.grid(row=0, column=1, columnspan=1, ipadx=55)
    entry_two.grid(row=1, column=1, columnspan=1, ipadx=55)
    # Browse buttons for the two entries, plus the action/close buttons.
    tk.Button(master, text="Browse...").grid(row=0, column=0, columnspan=1, ipadx=19, pady=10, padx=20)
    tk.Button(master, text="Browse...").grid(row=1, column=0, columnspan=1, ipadx=19, pady=10, padx=20)
    tk.Button(master, text="Check for files...").grid(row=2, column=0, columnspan=1, ipadx=2, pady=10, padx=5)
    tk.Button(master, text="Close Program").grid(row=2, column=1, columnspan=1, pady=10, ipadx=2, sticky='SE')


def main():
    """Create the master window, build the form and run the Tk event loop."""
    master = tk.Tk()
    build_ui(master)
    tk.mainloop()


if __name__ == "__main__":
    # BUG FIX: the original guard called an undefined function name(), which
    # raised NameError after the window was closed; the GUI is now built and
    # run from main() only when the script is executed directly.
    main()
| meloniemorrell/Python | python_225.py | python_225.py | py | 1,017 | python | en | code | 0 | github-code | 90 |
14274915076 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 15 09:28:55 2021
@author: workstation
"""
import pandas as pd

# --- Iris: pie chart of the sepal.length value counts ---
dataset = pd.read_csv("iris.csv")
dataset.describe()
x = dataset['sepal.length'].value_counts()

import matplotlib.pyplot as plt

plt.pie(x)
plt.show()

# --- Simple labeled pie chart demo ---
import numpy as np

y = np.array([35, 25, 25, 15])
mylabels = ["Apples", "Bananas", "Cherries", "Dates"]
plt.pie(y, labels=mylabels)
plt.show()

# --- Bar chart: students enrolled per course ---
data = {'C': 20, 'C++': 15, 'Java': 30, 'Python': 35}
courses = list(data.keys())
values = list(data.values())
plt.bar(courses, values, color='maroon', width=0.4)
plt.xlabel("Courses offered")
plt.ylabel("No. of students enrolled")
plt.title("Students enrolled in different courses")
plt.show()

# --- Drop rows sharing a duplicate first name, keeping the first ---
data = pd.read_csv("employees.csv")
data.sort_values("First Name", inplace=True)
data.drop_duplicates(subset="First Name", keep="first", inplace=True)
data

# --- Data preprocessing: impute missing numeric values with the mean ---
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values

from sklearn.impute import SimpleImputer

# BUG FIX: missing_values must be np.nan for strategy='mean'; the original
# used the string ' ', which SimpleImputer cannot average over.
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
# BUG FIX: the original rebound `imputer` to the result of fit_transform
# (an ndarray) and then called .transform on that array, which raised
# AttributeError. Fit and transform in one step instead.
X[:, 1:3] = imputer.fit_transform(X[:, 1:3])
| elhossiny/AI-search-techniques-and-first-order-logic | untitled1.py | untitled1.py | py | 1,806 | python | en | code | 0 | github-code | 90 |
32147646370 | import json as js
from Elevator import Elevator
class Building:
    """A building described by a JSON file: floor range plus its elevators."""

    def __init__(self, file):
        """Load floor bounds and the elevator list from the JSON file *file*."""
        with open(file, 'r') as f:
            data = js.load(f)
        self._minFloor = data['_minFloor']
        self._maxFloor = data['_maxFloor']
        self.elev = self._parse_elevators(data)

    @staticmethod
    def _parse_elevators(data):
        """Build Elevator objects from the parsed JSON '_elevators' entries."""
        return [
            Elevator(line['_id'], line['_speed'], line['_minFloor'], line['_maxFloor'],
                     line['_closeTime'], line['_openTime'], line['_startTime'], line['_stopTime'])
            for line in data['_elevators']
        ]

    def number_of_elevators(self):
        """Return how many elevators the building has."""
        # len() replaces the original O(n) manual counting loop.
        return len(self.elev)

    def json_reader(self, file):
        """Reload only the elevator list from *file* (floor bounds unchanged,
        matching the original behavior)."""
        with open(file, 'r') as f:
            data = js.load(f)
        self.elev = self._parse_elevators(data)
| ShauliTaragin/Smart-Elevator | Building.py | Building.py | py | 1,132 | python | en | code | 0 | github-code | 90 |
9196332340 | import re
import sys
import numpy as np
import matplotlib.pyplot as plt
def parse_loss(fpath, epoch_num=None):
    """Parse running-loss values from a training log file.

    Lines containing "Validation" mark epoch boundaries (counted only when
    *epoch_num* is not given). From every other line matching
    "Loss: <avg> (<running>)" the running value (second float) is collected.
    Returns (epochs, losses): epoch positions interpolated over the
    iterations, and the losses as a numpy array.
    """
    loss_pattern = re.compile(r"Loss:\s+\d+\.\d+\s*\(\s*\d+\.\d+\)")
    float_pattern = re.compile(r"\d+\.\d+")
    loss_values = []
    validation_count = 0
    with open(fpath, "r") as log:
        for line in log:
            if "Validation" in line:
                validation_count += 1
                continue
            matches = loss_pattern.findall(line)
            if len(matches) != 1:
                continue
            # second float in the match is the running loss (the parenthesized one)
            loss_values.append(float(float_pattern.findall(matches[0])[1]))
    total_epochs = validation_count if epoch_num is None else epoch_num
    epoches = np.linspace(0, total_epochs - 1, num=len(loss_values))
    return epoches, np.array(loss_values)
if __name__ == "__main__":
    # CLI usage: python plot_logs.py <log file> [epoch count]
    if len(sys.argv) >= 2:
        log_file = sys.argv[1]
        epoch_num = None
        # Optional second argument overrides the epoch count inferred
        # from "Validation" lines in the log.
        if len(sys.argv) == 3:
            epoch_num = int(sys.argv[2])
        epochs, losses = parse_loss(log_file, epoch_num)
        print(losses)
        print(len(losses))
        # Plot loss vs. epoch position.
        plt.plot(epochs, losses, "-b", label="Loss")
        plt.xlabel(r"# Epochs")
        plt.ylabel(r"Loss")
        plt.show()
    else:
        print("cmd <log file>")
| SaltedFishLZ/torchstream | tools/plot_logs.py | plot_logs.py | py | 1,418 | python | en | code | 2 | github-code | 90 |
70306868136 | from flask import Flask,render_template,request
from keras.models import load_model
import joblib
app = Flask(__name__)
# Create the Flask application instance.
@app.route('/',methods=['GET'])
def main():
    """Render the ozone prediction input form."""
    return render_template('ozone/input.html')
@app.route('/result',methods=['POST'])
def result():
    """Score the submitted measurements and render the result page.

    NOTE(review): the Keras model and the scaler are reloaded from disk on
    every request — consider loading them once at startup.
    """
    model = load_model('c:/workspace3/model/ozone/ozone.h5')
    # trained Keras model
    scaler = joblib.load('c:/workspace3/model/ozone/scaler.model')
    # fitted feature scaler
    a = float(request.form['a'])
    # form fields from the built-in request object
    b = float(request.form['b'])
    c = float(request.form['c'])
    test_set = [[a, b, c]]
    # wrap into a 2-D array (one row)
    test_set = scaler.transform(test_set)
    # apply the scaler
    rate= model.predict(test_set)
    # feed the model; rate[0][0] is the predicted probability
    if rate[0][0] >= 0.5:
        result='충분'
    else:
        result='부족'
    return render_template('ozone/result.html',
                           rate='{:.2f}%'.format(rate[0][0]*100), result=result, a=a, b=b, c=c)
if __name__ == '__main__':
    # When opening in a browser, http://localhost can be slow;
    # use http://127.0.0.1 instead.
    app.run(port=8000, threaded=False)
18487425639 | import sys
# Fast competitive-programming input helpers bound to buffered stdin.
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
in_n = lambda: int(readline())  # one integer on its own line
in_nn = lambda: map(int, readline().split())  # integers on one line (iterator)
in_nl = lambda: list(map(int, readline().split()))  # integers on one line (list)
in_na = lambda: map(int, read().split())  # all remaining integers (iterator)
in_s = lambda: readline().rstrip().decode('utf-8')  # one stripped string line
def main():
    """Brute-force search for a unique 'pyramid' center and height.

    Appears to model altitude at (x, y) as max(h - |x - cx| - |y - cy|, 0)
    for some center (cx, cy) and height h (AtCoder-style problem — TODO
    confirm). Every candidate center in [0, 100] x [0, 100] is tested for
    consistency with all N observations; the first consistent one is printed.
    """
    N = in_n()
    t = in_na()
    # zip(t, t, t) consumes three consecutive values from the same iterator,
    # grouping the flat input into (x, y, h) triples.
    xyh = list(zip(t, t, t))
    for cx in range(101):
        for cy in range(101):
            hmax = 10**9 + 7  # upper bound on height implied by h == 0 points
            pred_h = -1  # height predicted by the first h > 0 observation
            for i in range(N):
                x, y, h = xyh[i]
                xy = abs(x - cx) + abs(y - cy)  # Manhattan distance to center
                if h == 0:
                    # altitude 0 only says the true height is <= distance
                    hmax = min(hmax, xy)
                    if pred_h > hmax:
                        break
                else:
                    if pred_h == -1:
                        pred_h = h + xy
                    elif pred_h != h + xy:
                        # inconsistent with an earlier positive observation
                        break
            else:
                # no observation contradicted this candidate center
                print(cx, cy, pred_h)
                exit()
# Entry point guard: run the solver only when executed as a script.
if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03240/s372691529.py | s372691529.py | py | 1,021 | python | en | code | 0 | github-code | 90 |
41415567926 | from infill_modified_triangle_wave import infill_modified_triangle_wave
import fullcontrol as fc
# Configuration constants for full-control airfoil/wing G-code generation.
# Airfoil Parameters
naca_nums = ['2412', '2412'] # NACA airfoil numbers (for NACA airfoil method)
num_points = 256 # Resolution of airfoil - higher values give better quality but slower performance and larger file size for gcode
# Wing Parameters
z_positions = [0, 100] # Z-coordinates for each airfoil section
chord_lengths = [100, 75] # Chord length (mm) for each airfoil. May be different scale for extracted airfoils.
# File Extraction Parameters
file_extraction = False # Enable to use file extraction, disable for NACA airfoil method
filenames = ['naca2412.dat', 'naca2412.dat'] # File names for file extraction method. These have to be in the profiles folder.
interpolate=False # Enable if you want to multiply the amount of points the imported airfoil has
interpolate_airfoil_multiplier = 2 # Multiplier for how many times to multiply the amount of points
sort_point_order=True # Sorts the points of the airfoil to start and end at min_X or x=0 depending if you have move_leading_edge enabled or not.
reverse_points_sorting=False # Reverse the direction that sort point order sorts the points to start and end at. If enabled makes the points start and end at max_x or x=chord_length.
# Infill Parameters
generate_infill = True
infill_density = 6 # Density of infill (higher values = denser infill)
infill_reverse = False # Enable to reverse infill direction. Used if file_extraction makes the airfoil start at max x instead of min x.
infill_rise = False # Enable to raise infill by half layer height when returning to start point of infill. Makes the hop from layer to layer smaller.
infill_type = infill_modified_triangle_wave # Infill pattern type
# Fully filled layer
filled_layers_enabled = False
fill_angle = 45
filled_layers = [0, 0.3, 0.6, 24.6, 24.9, 25.2, 49.8, 50.1, 50.4]
# Circle Generation Parameters
generate_circle = False
circle_centers = [ # Center points for start and end of circle
    {"start_center": fc.Point(x=43.8, y=1.35, z=min(z_positions)), "end_center": fc.Point(x=43.8, y=1.35, z=max(z_positions))},
]
circle_radius = 4 # Radius of circle
circle_num_points = 24 # Number of points in circle
circle_offset = 0.75 # Offset for second circle
circle_segment_angle = 45 # Angle covered by each pass when drawing circle
circle_start_angle = 180 # Starting angle for circle. Started on the outer circle.
# Wing Curvature Settings
move_leading_edge = True # Allows movement for the leading edge. If true the edge moves if an chord_length upper in z is smaller.
move_trailing_edge = True # Allows movement for the trailing edge. If true the edge moves if an chord_length upper in z is smaller.
curved_wing = False # Right now uses quadratic interpolation. More options coming soon.
curve_amount = 1 # Ellipse curvature amount (1 = fully curved, 0 = not curved)
# Layer height and width settings
layer_height = 0.3
line_width = 0.4
# Offset
offset_wing = False
offset_x = 50 # Offset in mm (x-axis)
offset_y = 100 # Offset in mm (y-axis)
offset_z = 0 # Offset in mm (z-axis). Adjust if nozzle is digging into the bed during first layer print. The 3D printer's own z offset might not work when using fullcontrol.
# Pre-Print Settings
calibration_moves = False # Enable/disable extra travel move at the start. (Makes the maximum dimensions of the printer be shown in the plot)
bed_x_max = 300
bed_y_max = 300
# Post-Print Settings
z_hop_enabled = False # Makes z move up a set amount after print is done
z_hop_amount = 50 # Height to move extruder up after printing
# 3D Printing Configuration
gcode_generation = False # Enable gcode generation.
gcode_name = 'gcode_output' # Output filename for G-code
# Printer Specific Settings
printer_settings = {
    "extrusion_width": line_width, # Width of extrusion in mm
    "extrusion_height": layer_height, # Height of extrusion in mm
    "print_speed": 2000, # Print speed (acceleration)
    "travel_speed": 2000, # Travel speed (acceleration)
    "nozzle_temp": 210, # Nozzle temperature in degrees Celsius
    "bed_temp": 60, # Bed temperature in degrees Celsius
}
# Plot Settings
plot_neat_for_publishing = True # Hides travel moves and the coordinates so the plot is just a 3d view of the airfoil. Used in for example taking images for the documentation.
plot_style = "tube" # Options: "tube" and "line". Tube shows the lines in 3d as, well tubes. The line option shows the lines as 2d lines.
# Debug Settings
print_total_layers = True
print_rendering_plot = True
print_rendering_plot_done = True
print_generating_gcode = True
print_time_taken = True
print_generation_done = True
# SETTINGS END
25369954730 | import gym
import random
import itertools
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.style
import numpy as np
import pandas as pd
import sys
from collections import defaultdict
import plottings
matplotlib.style.use('ggplot')
import gym_Recsys1
import time
import sys
#sys.path.append('gym_Recsys1/gym_Recsys1/envs/')
import Recsys1_env as rcs
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import math
plt.figure(figsize=(20,10))
import seaborn as sns
class Model:
    """TF1 Q-network: maps a state vector to one Q-value per action.

    Two fully connected 50-unit ReLU hidden layers, mean-squared-error loss
    against the target Q-values, trained with Adam.
    """
    def __init__(self, num_states, num_actions, batch_size):
        self._num_states = num_states
        self._num_actions = num_actions
        self._batch_size = batch_size
        # define the placeholders
        self._states = None
        self._actions = None
        # the output operations
        self._logits = None
        self._optimizer = None
        self._var_init = None
        # now setup the model
        self._define_model()
    def _define_model(self):
        """Build the TF1 graph: placeholders, dense layers, loss, optimizer."""
        self._states = tf.placeholder(shape=[None, self._num_states], dtype=tf.float32)
        self._q_s_a = tf.placeholder(shape=[None, self._num_actions], dtype=tf.float32)
        # create a couple of fully connected hidden layers
        fc1 = tf.layers.dense(self._states, 50, activation=tf.nn.relu)
        fc2 = tf.layers.dense(fc1, 50, activation=tf.nn.relu)
        self._logits = tf.layers.dense(fc2, self._num_actions)
        loss = tf.losses.mean_squared_error(self._q_s_a, self._logits)
        self._optimizer = tf.train.AdamOptimizer().minimize(loss)
        self._var_init = tf.global_variables_initializer()
    def predict_one(self, state, sess):
        """Q-values for a single state (reshaped to a 1-row batch)."""
        return sess.run(self._logits, feed_dict={self._states: state.reshape(1, self._num_states)})
    def predict_batch(self, states, sess):
        """Q-values for a batch of states."""
        return sess.run(self._logits, feed_dict={self._states: states})
    def train_batch(self, sess, x_batch, y_batch):
        """One optimizer step on (states, target Q-values)."""
        sess.run(self._optimizer, feed_dict={self._states: x_batch, self._q_s_a: y_batch})
class Memory:
    """Fixed-capacity FIFO replay buffer for experience tuples."""

    def __init__(self, max_memory):
        self._max_memory = max_memory
        self._samples = []

    def add_sample(self, sample):
        """Append a sample, evicting the oldest one when over capacity."""
        self._samples.append(sample)
        while len(self._samples) > self._max_memory:
            self._samples.pop(0)

    def sample(self, no_samples):
        """Return a random batch of at most *no_samples* stored samples."""
        count = min(no_samples, len(self._samples))
        return random.sample(self._samples, count)
class RecomRunner:
    """Runs episodes of the recommendation environment with an eps-greedy
    DQN agent, storing transitions in replay memory and training each step.

    NOTE(review): the eps decay in run() and the episode bookkeeping read the
    module-level globals MIN_EPSILON, MAX_EPSILON, LAMBDA and env instead of
    the instance fields (_min_eps, _max_eps, _decay, _env) — confirm this is
    intentional before reuse outside this script.
    """
    def __init__(self, sess, model, env, memory, max_eps, min_eps,
                 decay, render=False):
        self._sess = sess
        self._env = env
        self._model = model
        self._memory = memory
        self._render = render
        self._max_eps = max_eps
        self._min_eps = min_eps
        self._decay = decay
        self._eps = self._max_eps  # current exploration rate
        self._steps = 0  # global step counter across episodes
        self._reward_store = []  # total reward per finished episode
        self._max_x_store = []
        self.doc_consume = []  # documents consumed across episodes
        self.ltv = []  # accumulated max-Q ("engagement") per episode
    def run(self):
        """Play one episode: act, store transition, replay-train, decay eps."""
        state = self._env.reset()
        tot_reward = 0
        tot_ltv = 0
        #max_x = -100
        done = False
        while not done:
            if self._render:
                self._env.render()
            action = self._choose_action(state)
            next_state, reward, done, info = self._env.step(action)
            """if next_state[0] >= 0.1:
                reward += 10
            elif next_state[0] >= 0.25:
                reward += 20
            elif next_state[0] >= 0.5:
                reward += 100
            if next_state[0] > max_x:
                max_x = next_state[0]"""
            # is the game complete? If so, set the next state to
            # None for storage sake
            if done:
                next_state = None
            self._memory.add_sample((state, action, reward, next_state))
            tot_ltv += self._replay()
            # exponentially decay the eps value (uses module globals — see class note)
            self._steps += 1
            self._eps = MIN_EPSILON + (MAX_EPSILON - MIN_EPSILON) \
                * math.exp(-LAMBDA * self._steps)
            # move the agent to the next state and accumulate the reward
            state = next_state
            tot_reward += reward
            # if the game is done, break the loop
            if done:
                self._reward_store.append(tot_reward)
                #print("reward store",self._reward_store)
                self.doc_consume += env.historic
                self.ltv.append(tot_ltv)
                #print(doc_consume)
                #self._max_x_store.append(max_x)
                break
        print("Step {}, Total reward: {}, Eps: {}".format(self._steps, tot_reward, self._eps))
    def _choose_action(self, state):
        """Eps-greedy: random action with prob eps, else argmax of Q-values."""
        if random.random() < self._eps:
            return random.randint(0, self._model._num_actions - 1)
        else:
            return np.argmax(self._model.predict_one(state, self._sess))
    def _replay(self):
        """Sample a batch, build Q-learning targets, train; returns the max
        next-state Q-value of the batch's last item (used as an engagement proxy)."""
        batch = self._memory.sample(self._model._batch_size)
        states = np.array([val[0] for val in batch])
        next_states = np.array([(np.zeros(self._model._num_states)
                                 if val[3] is None else val[3]) for val in batch])
        # predict Q(s,a) given the batch of states
        q_s_a = self._model.predict_batch(states, self._sess)
        # predict Q(s',a') - so that we can do gamma * max(Q(s'a')) below
        q_s_a_d = self._model.predict_batch(next_states, self._sess)
        # setup training arrays
        x = np.zeros((len(batch), self._model._num_states))
        y = np.zeros((len(batch), self._model._num_actions))
        for i, b in enumerate(batch):
            state, action, reward, next_state = b[0], b[1], b[2], b[3]
            # get the current q values for all actions in state
            current_q = q_s_a[i]
            # update the q value for action
            if next_state is None:
                # in this case, the game completed after action, so there is no max Q(s',a')
                # prediction possible
                current_q[action] = reward
            else:
                current_q[action] = reward + 1 * np.amax(q_s_a_d[i])
            x[i] = state
            y[i] = current_q
        self._model.train_batch(self._sess, x, y)
        return np.max(q_s_a_d[i])
# Build a small synthetic user population; the argument meanings follow
# rcs.User's constructor (id, age, topic interests, segment, income —
# TODO confirm against Recsys1_env).
u1=rcs.User(1000,20,rcs.associateTopicInterest(),1,10000)
u2=rcs.User(1001,21,rcs.associateTopicInterest(),2,10000)
u3=rcs.User(1002,22,rcs.associateTopicInterest(),1,10000)
u4=rcs.User(1003,40,rcs.associateTopicInterest(),2,40000)
u5=rcs.User(1004,41,rcs.associateTopicInterest(),1,40000)
u6=rcs.User(1005,42,rcs.associateTopicInterest(),2,40000)
u7=rcs.User(1006,60,rcs.associateTopicInterest(),1,70000)
u8=rcs.User(1007,61,rcs.associateTopicInterest(),2,70000)
u9=rcs.User(1008,62,rcs.associateTopicInterest(),1,70000)
users=[u1,u2,u3,u4,u5,u6,u7,u8,u9]
#users=rcs.geNerNuser(10)
#random.seed(30)
docs=rcs.geNerNdocument(50)
docu=rcs.Document(888,1,4,10.22589655899)
docs.append(docu)
# Train one agent per user and dump report text + plots to Recsys1/result/.
for i in range(len(users)):
    env = gym.make('Recsys1-v0',user=users[i],alldocs=docs)
    num_states = env.observation_space.shape[1]
    num_actions = env.action_space.n
    BATCH_SIZE=50
    model = Model(num_states, num_actions, BATCH_SIZE)
    mem = Memory(50000)
    with tf.Session() as sess:
        LAMBDA=0.9999
        sess.run(model._var_init)
        num_episodes = 50
        MAX_EPSILON=0.9
        MIN_EPSILON=0.1
        gr = RecomRunner(sess, model, env, mem, MAX_EPSILON, MIN_EPSILON,
                         LAMBDA)
        cnt = 0
        rs=open("Recsys1/result/result_"+"user_"+str(i)+".txt",'w')
        rs.write("Interest user before consume docs : "+ str(sorted(users[i].associate_topic_interet.items(), key=lambda x: x[1], reverse=True))+"\n")
        while cnt < num_episodes:
            if cnt % 10 == 0:
                print('Episode {} of {}'.format(cnt+1, num_episodes))
            gr.run()
            cnt += 1
        rs.write("Average Reward : "+ str(sum(gr._reward_store)/len(gr._reward_store))+"\n")
        from collections import Counter
        rs.write("user after consume docs : "+ str(sorted(users[i].associate_topic_interet.items(), key=lambda x: x[1], reverse=True))+"\n")
        rs.write("total document consomme : "+str(len(gr.doc_consume))+"\n")
        z=Counter(gr.doc_consume)
        y=Counter([gr.doc_consume[k].id for k in range(len(gr.doc_consume)) ])
        o=Counter([gr.doc_consume[k].topic for k in range(len(gr.doc_consume)) ])
        rs.write("Les Documents consommés : "+"\n")
        rest=sorted(z.items(), key=lambda x: x[1],reverse=True)
        reste=sorted(y.items(), key=lambda x: x[1],reverse=True)
        rester=sorted(o.items(), key=lambda x: x[1],reverse=True)
        doc = list(zip(*reste))[0]
        consom = list(zip(*reste))[1]
        x_pos = np.arange(len(doc))
        plt.bar(x_pos, consom,align='center')
        plt.xticks(x_pos, doc)
        plt.xlabel('Doc ID')
        plt.ylabel('Nb Consum Doc')
        plt.title("ConsumDoc_"+"user_"+str(i)+"by ID")
        plt.savefig("Recsys1/result/ConsumDoc_"+"user_"+str(i)+"by ID")
        plt.clf()
        doce = list(zip(*rester))[0]
        consome = list(zip(*rester))[1]
        x_pose = np.arange(len(doce))
        plt.bar(x_pose, consome,align='center')
        plt.xticks(x_pose, doce)
        plt.xlabel('Doc topic')
        plt.ylabel('Nb Consum Doc')
        plt.title("ConsumDoc_"+"user_"+str(i)+"by Topic")
        plt.savefig("Recsys1/result/ConsumDoc_"+"user_"+str(i)+"by Topic")
        plt.clf()
        plt.plot(gr._reward_store)
        plt.xlabel('Episode')
        plt.ylabel('Reward')
        plt.title("reward_"+"user_"+str(i))
        plt.savefig("Recsys1/result/reward_"+"user_"+str(i))
        plt.clf()
        plt.plot(gr.ltv)
        plt.title("user_"+str(i)+" engagement")
        plt.savefig("Recsys1/result/ltv_"+"user_"+str(i))
        plt.clf()
        #rest2=sorted(z.keys(), key=lambda x: x[1],reverse=True)
        for j in range(30):
            rs.write("Documents : "+ rest[j][0].__str__()+"Nombre de fois consommes : "+str(rest[j][1])+"\n")
        total_quality=sum([doc_.inhQuality for doc_ in gr.doc_consume])
        rs.write("Average qality of documents consume by user : "+str(users[i].id)+"is : "+str(total_quality/len(gr.doc_consume)) +"\n")
        # BUG FIX: was `rs.close` (bare attribute access, never invoked),
        # leaking the report file handle on every loop iteration.
        rs.close()
    #plt.plot(gr.max_x_store)
    #plt.show()
12631578012 | import os
from bisect import bisect
import numpy as np
import pandas as pd
import scipy.stats
import random
import datetime
import pickle
import os
import pathlib
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import IntVector, FloatVector
from sklearn.model_selection import train_test_split
import sklearn.metrics
from sklearn.base import BaseEstimator, TransformerMixin
# Define the function to save an array of samples to a file
def save_to_file(data, filename):
    """Write an array of samples to *filename*, one comma-separated row per line.

    Uses a context manager so the handle is closed even on write errors
    (the original leaked the handle on exception).
    """
    with open(filename, "w") as f:
        for row in data:
            f.write(",".join(str(x) for x in row) + "\n")
# Define a function to load a set of samples from a file
def load_from_file(filename):
    """Load samples written by save_to_file: a list of float lists.

    Uses a context manager (the original leaked the handle on a parse error)
    and streams lines instead of materializing them with readlines().
    """
    with open(filename, "r") as f:
        return [[float(x) for x in line.split(",")] for line in f]
# For calculating empirical CDF
def ecdf(x, sample):
    """Empirical CDF of *sample* evaluated at *x*.

    NOTE: sorts *sample* in place (bisect requires ascending order).
    """
    sample.sort()
    return bisect(sample, x) / len(sample)
# For "inverting" empirical CDF
def pseudoInvEcdf(sample, p):
    """Pseudo-inverse of the empirical CDF: the smallest element of *sample*
    whose ECDF value reaches *p* (assumes *sample* is sorted ascending)."""
    n = len(sample)
    step = 1.0 / n
    cumulative = step
    index = 0
    while cumulative < p and index < n:
        cumulative += step
        index += 1
    # clamp: the accumulation can push index up to n for p close to 1
    return sample[min(index, n - 1)]
# Define a function which maps samples to descriptors (this is the feature engineering step)
def createDescriptor(sample, q):
    """Map a sample to a feature vector (the feature-engineering step).

    The descriptor holds m = 1/q quantiles of the standardized sample at
    levels q, 2q, ..., 1, followed by [n, mean, sd, min, max, median].
    NOTE: sorts *sample* in place.
    """
    sample.sort()
    m = int(1 / q)
    n = len(sample)
    mean = np.mean(sample)
    sd = np.std(sample)
    standardized = [(value - mean) / sd for value in sample]
    # selected quantiles of the standardized sample...
    descriptor = [pseudoInvEcdf(standardized, step * q) for step in range(1, m + 1)]
    # ...plus sample size and plain descriptive statistics
    descriptor += [n, mean, sd, min(sample), max(sample), np.median(sample)]
    return descriptor
# For checking id xs contains only finite values
def onlyFinite(xs):
    """Return True iff every element of *xs* is finite (no inf or nan)."""
    return not any(np.isinf(x) or np.isnan(x) for x in xs)
# This function assigns names to the descriptor features
def names(v):
    """Feature names matching createDescriptor's output layout (*v* is unused)."""
    quantile_labels = ["p{}%".format(10 * i) for i in range(1, 11)]
    return quantile_labels + ["n", "mean", "sd", "max", "min", "med"]
# For the Lilliefors test
rstring = """
function(sample){
library(nortest)
tryCatch({
res <- lillie.test(sample)
p <- res[[2]]
return(p)
}, error = function(e){
return(0.0)
})
}
"""
lf=robjects.r(rstring)
# Define the decision-making functions based on statistical tests: True - sample is normal, False - sample comes from
# a non-normal distribution
def lilliefors(sample, alpha):
    """Lilliefors normality test (R 'nortest' via rpy2); True = looks normal.

    The R wrapper returns p = 0.0 on error, so failures classify as non-normal.
    """
    return lf(FloatVector(sample))[0] >= alpha
def shapiro_wilk(sample, alpha):
    """Shapiro-Wilk normality test; True = fail to reject normality at *alpha*."""
    p_value = scipy.stats.shapiro(sample)[1]
    return p_value >= alpha
def anderson_darling(sample, alpha):
    """Anderson-Darling normality test; True = fail to reject normality.

    Only alpha in {0.01, 0.05, 0.1} is supported — these map onto the 1%, 5%
    and 10% columns of scipy's critical-value table for the normal case.
    Raises ValueError for any other alpha (the original fell through with
    `c` unbound and crashed with a confusing NameError).
    """
    critical_index = {0.01: 4, 0.05: 2, 0.1: 1}
    if alpha not in critical_index:
        raise ValueError("alpha must be one of 0.01, 0.05 or 0.1, got %r" % (alpha,))
    result = scipy.stats.anderson(sample, dist='norm')
    statistic = result[0]
    critical_value = result[1][critical_index[alpha]]
    return statistic <= critical_value
def jarque_bera(sample, alpha):
    """Jarque-Bera normality test; True = fail to reject normality at *alpha*."""
    p_value = scipy.stats.jarque_bera(sample)[1]
    return p_value >= alpha
# Create the access point for the tests based on their codes
def get_test(code):
    """Return (decision_function, statistic_function) for a test code.

    Codes: 'SW' Shapiro-Wilk, 'LF' Lilliefors, 'AD' Anderson-Darling,
    'JB' Jarque-Bera. Raises ValueError on an unknown code (the original
    silently returned None, making callers fail later with a confusing error).
    """
    if code == 'SW':
        return shapiro_wilk, shapiro_wilk_statistic
    elif code == 'LF':
        return lilliefors, lilliefors_statistic
    elif code == 'AD':
        return anderson_darling, anderson_darling_statistic
    elif code == 'JB':
        return jarque_bera, jarque_bera_statistic
    raise ValueError("unknown test code: %r" % (code,))
# This class is a wraper around tests
class TestClassifier(object):
    """Wraps a normality test as a scikit-learn-style binary classifier.

    Predicted labels: 1 = test accepts normality, 0 = rejects it,
    2 = the test raised (numerical instability).
    """

    def __init__(self, test, statistic, alpha, class_label=1, opposite_label=0):
        super(TestClassifier, self).__init__()
        self.test = test
        self.alpha = alpha
        self.statistic = statistic
        self._estimator_type = 'classifier'
        self.classes_ = [0, 1]

    def predict(self, samples):
        """Label every sample in *samples* (see class docstring for codes)."""
        labels = []
        for sample in samples:
            try:
                labels.append(1 if self.test(sample, self.alpha) else 0)
            except Exception:  # numerical instability inside the wrapped test
                labels.append(2)
        return labels

    def calculate_statistic(self, samples):
        """Statistic of one flat sample, or an array of statistics for a batch
        (a batch is detected as 'every element is a list')."""
        if all(type(x) == list for x in samples):
            return np.array([self.statistic(item) for item in samples])
        return self.statistic(samples)
# This function creates a test-based classifier for given code and alpha
def get_standard_classifier(test_code, alpha):
    """Build a TestClassifier around the normality test named by *test_code*."""
    test_fn, statistic_fn = get_test(test_code)
    return TestClassifier(test_fn, statistic_fn, alpha, 1, 0)
# Define the functions that calculate the statistics of normality tests
rstring_lf_stat = """
function(sample){
library(nortest)
tryCatch({
res <- lillie.test(sample)
stat <- res[[1]]
return(stat)
}, error = function(e){
return(-10.0)
})
}
"""
lf_stat=robjects.r(rstring_lf_stat)
def lilliefors_statistic(sample):
    """Lilliefors test statistic via R; returns -10.0 when the R call errors."""
    return lf_stat(FloatVector(sample))[0]
def shapiro_wilk_statistic(sample):
    """Shapiro-Wilk W statistic of *sample*."""
    result = scipy.stats.shapiro(sample)
    return result[0]
def anderson_darling_statistic(sample):
    """Anderson-Darling statistic of *sample* against the normal distribution."""
    result = scipy.stats.anderson(sample, dist='norm')
    return result[0]
def jarque_bera_statistic(sample):
    """Jarque-Bera statistic of *sample*."""
    result = scipy.stats.jarque_bera(sample)
    return result[0]
# This function returns a randomly selected mean
def random_mean():
    """Draw a random mean uniformly from [-100, 100)."""
    return random.uniform(-100, 100)
# This function returns a randomly selected standard deviation
def random_sigma():
    """Draw a random standard deviation uniformly from [1, 20)."""
    return random.uniform(1, 20)
# This function generates normal samples
def generate_normal_samples(ns, L):
    """Generate L normal samples for each size in *ns*.

    Every sample gets its own random mean and standard deviation; samples
    are returned as plain Python lists.
    """
    raw_samples = []
    for size in ns:
        for _ in range(L):
            sample = np.random.normal(random_mean(), random_sigma(), size)
            raw_samples.append(sample.tolist())
    return raw_samples
# Create the R string for the Pearson family. Make sure to have gsl and PearsonDS installed.
pearsonstring = """
function(n, m1, m2, m3, m4){
library(gsl)
library(PearsonDS)
tryCatch({
sample <- rpearson(n,moments=c(mean=m1,variance=m2,skewness=m3,kurtosis=m4))
return(sample)
}, error = function(e){
return(FALSE)
})
}
"""
generate_pearson=robjects.r(pearsonstring)
# Generate non-normal samples
def generate_pearson_nonnormal_samples(s_range, k_range, ns, L):
    """Generate non-normal samples from the Pearson distribution system.

    For each sample size in *ns* and each feasible (skewness, kurtosis)
    pair — k - s**2 - 1 >= 0 and not the normal case (s == 0, k == 3) —
    draw up to L samples with a random mean and standard deviation. Draws
    for which the R generator returns FALSE are silently skipped.
    """
    # s_range is the set of skewness values to be supplied to generate_pearson
    # k_range is the set of kurtosis values to be supplied to generate_pearson
    h = 0  # count of feasible (n, s, k) configurations (diagnostic only)
    raw_samples = []
    for n in ns:
        for s in s_range:
            for k in k_range:
                if k - s**2 - 1 >= 0 and not(s==0 and k==3):
                    h = h + 1
                    for l in range(L):
                        mean = random_mean()
                        sd = random_sigma()
                        response = generate_pearson(n, mean, sd, s, k)
                        if not(response[0] == False):
                            sample = response
                            #confs[(n, mean, sd, skewness, kurtosis)] = True
                            raw_samples.append(list(sample))
    #print(n, h, len(raw_samples))
    return raw_samples
# Define the function to assign the same label to all the samples
def label_samples(samples, label):
    """Append *label* to each sample, returning fresh lists (inputs untouched)."""
    return [list(item) + [label] for item in samples]
# Define the function for splitting the sets
def split(samples, train_size, labels=None, random_state=0):
    """Stratified train/test split of labeled samples.

    When *labels* is None, each sample's last element is taken as its label.
    Returns (X_train, X_test, y_train, y_test).
    """
    if labels is None:
        labels = [sample[-1] for sample in samples]
    X_train, X_test, y_train, y_test = train_test_split(
        samples, labels, stratify=labels,
        train_size=train_size, random_state=random_state)
    return X_train, X_test, y_train, y_test
# This is deprecated.
def preprocess1(raw_samples, how=None):
    """Deprecated. Preprocess labeled samples.

    how['method'] == 'nothing' returns the input unchanged (the default);
    'descriptors' maps each sample (minus its trailing label) through
    createDescriptor with quantile step how['q'], keeping only rows whose
    descriptor is fully finite and re-appending the label. Any other method
    returns None, unchanged from the original.

    The original used a mutable dict as the default argument; it is now
    created per call.
    """
    if how is None:
        how = {'method': 'nothing'}
    if how['method'] == 'nothing':
        return raw_samples
    if how['method'] == 'descriptors':
        q = how['q']
        descriptors = [createDescriptor(sample[:-1], q) for sample in raw_samples]
        pruned = []
        for descriptor, sample in zip(descriptors, raw_samples):
            if onlyFinite(descriptor):
                pruned.append(descriptor + [sample[-1]])
        return pruned
# Create a function for generating nicely formatted LaTeX tables
mean_sd_merge=lambda x: '${:.3f}\pm{:.3f}$'.format(x[0], x[1])
def get_latex_table(df, sort_by=None, merge_instructions=None, renamer=None,
caption=None, label=None, float_format='$%.3f$', index=False):
if sort_by is not None:
df = df.sort_values(sort_by, axis='index')
if merge_instructions is not None:
for instruction in merge_instructions:
merge_function = instruction['merge_function']
new_column = instruction['new_column']
columns_to_merge = instruction['columns_to_merge']
df[new_column] = df[columns_to_merge].apply(merge_function, axis=1)
df = df.drop(columns_to_merge, axis=1)
if renamer is not None:
df = df.rename(columns=renamer)
latex = df.to_latex(index=index,
float_format=float_format, escape=False, caption=caption, label=label)
return latex
mean_sd_merge = lambda x: '${:.3f}\pm{:.3f}$'.format(x[0], x[1])
# Define the function for showing the current date and time in the selected format
def now(format='%Y-%m-%d %H-%M-%S.%f'):
    """Current local date/time formatted with *format*.

    BUG FIX: the original hard-coded the format string in the body, so the
    *format* parameter was silently ignored; it is now honored.
    """
    return datetime.datetime.now().strftime(format)
# Define the function that can calculate the following performance metrics: A - accuracy, TPR - true positive rate,
# PPV - positive predictive value, TNR - true negative rate, NPV - negative predictive value, F1 - the F1 measure,
# UR - the unresponsive rate (percentage of 2s in guesses that are caused by numerical instability)
def calculate_metrics(true_labels, guessed_labels, metrics=['A', 'TPR', 'PPV', 'TNR', 'NPV', 'F1', 'UR']):
    """Compute binary-classification metrics (label 1 = normal, 0 = non-normal).

    :param true_labels: ground-truth labels (0 or 1).
    :param guessed_labels: predicted labels; the value 2 marks an undecided
        guess and only contributes to the 'UR' metric.
    :param metrics: metric codes to compute; the returned list follows the
        fixed order A, TPR, PPV, TNR, NPV, F1, UR (restricted to the
        requested codes), not the order given in *metrics*.
    :return: list with one value per requested metric.

    NOTE(review): the divisions below assume both classes occur in
    true_labels (N_normal, N_nonnormal > 0) -- confirm upstream.
    """
    # Determine the elements of the confusion matrix
    # With labels=[0, 1], rows are true labels (0 then 1), columns predictions.
    matrix = sklearn.metrics.confusion_matrix(true_labels, guessed_labels, labels=[0, 1])
    T_nonnormal = matrix[0, 0]
    F_normal = matrix[0, 1]
    F_nonnormal = matrix[1, 0]
    T_normal = matrix[1, 1]
    # Determine the set specifics
    N_normal = sum(matrix[1, :]) # T_normal + F_nonnormal
    N_nonnormal = sum(matrix[0, :]) # T_nonnormal + F_normal
    N = N_normal + N_nonnormal
    scores = []
    # Calculate the chosen metrics
    if 'A' in metrics:
        # Accuracy.
        score = (T_normal + T_nonnormal) / N
        scores.append(score)
    if 'TPR' in metrics:
        # True positive rate (recall).
        score = T_normal / N_normal
        scores.append(score)
    if 'PPV' in metrics:
        # Positive predictive value (precision).
        score = T_normal / (T_normal + F_normal)
        scores.append(score)
    if 'TNR' in metrics:
        # True negative rate (specificity).
        score = T_nonnormal / N_nonnormal
        scores.append(score)
    if 'NPV' in metrics:
        # Negative predictive value.
        score = T_nonnormal / (T_nonnormal + F_nonnormal)
        scores.append(score)
    if 'F1' in metrics:
        # TPR/PPV are recomputed here so F1 works even when they were not
        # requested individually.
        TPR = T_normal / N_normal
        PPV = T_normal / (T_normal + F_normal)
        score = 2 * TPR * PPV / (TPR + PPV)
        scores.append(score)
    if 'UR' in metrics:
        # Unresponsive rate: fraction of guesses equal to 2 (undecided).
        total = len(guessed_labels)
        undecided = len([x for x in guessed_labels if x == 2])
        score = undecided / total
        scores.append(score)
    return scores
# Define the function that evaliates a classifer's performance on a set of samples
def evaluate(samples, true_labels, classifier, metrics=['A', 'TPR', 'PPV', 'TNR', 'NPV', 'F1', 'AUROC', 'U'],
    n_range=None):
    """Evaluate a fitted classifier on *samples*.

    :param samples: iterable of samples accepted by classifier.predict.
    :param true_labels: ground-truth labels parallel to *samples*.
    :param classifier: object exposing predict (and predict_proba when
        'AUROC' is requested).
    :param metrics: metric codes; all codes except 'AUROC' are delegated to
        calculate_metrics.
        NOTE(review): the default includes 'U', which calculate_metrics does
        not recognize (it computes 'UR') -- presumably a typo; confirm.
    :param n_range: optional collection of sample sizes; when given, scores
        are additionally computed per sample size.
    :return: a flat score list when n_range is None, otherwise a list with
        one score list per n in n_range followed by the overall scores.
    """
    # Guess the labels
    guessed_labels = classifier.predict(samples)
    # Calculate the performance metrics for the whole set
    total_scores = calculate_metrics(true_labels, guessed_labels, metrics=metrics)
    if 'AUROC' in metrics:
        # AUROC needs prediction scores, so it is computed here rather than
        # in calculate_metrics; NaN scores are dropped before scoring.
        prediction_scores = classifier.predict_proba(samples)[:, 1]
        where = np.where(~np.isnan(prediction_scores))[0]
        t = [true_labels[j] for j in where]
        ps = [prediction_scores[j] for j in where]
        auroc = sklearn.metrics.roc_auc_score(t, ps)
        # Splice AUROC into the slot it occupies in the metrics list.
        i = metrics.index('AUROC')
        total_scores = total_scores[:i] + [auroc] + total_scores[i:]
    if n_range is None:
        return total_scores
    else:
        # Bucket true/guessed labels (and scores) by sample size.
        guessed_by_n = {n : [] for n in n_range}
        true_by_n = {n : [] for n in n_range}
        prediction_scores_by_n = {n : [] for n in n_range}
        for i, sample in enumerate(samples):
            n = len(sample)
            if n not in n_range:
                continue
            true_label = int(true_labels[i])
            true_by_n[n].append(true_label)
            guessed_label = int(guessed_labels[i])
            guessed_by_n[n].append(guessed_label)
            if 'AUROC' in metrics:
                # prediction_scores exists here: it was created above under
                # the same 'AUROC' condition.
                prediction_score = prediction_scores[i]
                prediction_scores_by_n[n].append(prediction_score)
        all_scores = []
        for n in n_range:
            scores_for_n = calculate_metrics(true_by_n[n], guessed_by_n[n], metrics=metrics)
            if 'AUROC' in metrics:
                where = np.where(~np.isnan(prediction_scores_by_n[n]))[0]
                t = [true_by_n[n][j] for j in where]
                ps = [prediction_scores_by_n[n][j] for j in where]
                auroc_for_n = sklearn.metrics.roc_auc_score(t, ps)
                i = metrics.index('AUROC')
                scores_for_n = scores_for_n[:i] + [auroc_for_n] + scores_for_n[i:]
            all_scores.append(scores_for_n)
        return all_scores + [total_scores]
# Define a wrapper around evaluate that returns the results in the form of a Pandas dataframe
def evaluate_pretty(samples, true_labels, classifier,
    metrics=['A', 'TPR', 'PPV', 'TNR', 'NPV', 'F1', 'AUROC'], n_range=None, index=None
    ):
    """Evaluate *classifier* and return the scores as a pandas DataFrame.

    :param samples: samples accepted by evaluate().
    :param true_labels: ground-truth labels parallel to *samples*.
    :param classifier: fitted classifier passed through to evaluate().
    :param metrics: metric codes; used as the frame's column names.
    :param n_range: optional sample sizes; adds an 'n' column with one row
        per size plus a trailing 'overall' row.
    :param index: optional column to promote to the frame's index.
    :return: a DataFrame of scores.
    """
    rows = evaluate(samples, true_labels, classifier, metrics=metrics, n_range=n_range)
    if n_range is None:
        # A single flat score list becomes the frame's only row.
        frame = pd.DataFrame([rows], columns=metrics)
    else:
        # One labelled row per sample size, then the 'overall' summary row.
        labelled = [[size] + scores for size, scores in zip(n_range, rows)]
        labelled.append(['overall'] + rows[-1])
        frame = pd.DataFrame(labelled, columns=['n'] + metrics)
    if index is not None:
        frame = frame.set_index(index, drop=True)
    return frame
# https://stackoverflow.com/a/46730656/1518684
def get_activations(clf, X):
    """Return per-layer activations of a fitted sklearn MLP for input X.

    :param clf: fitted MLPClassifier-like estimator; relies on private
        sklearn API (_forward_pass, n_layers_, n_outputs_) and may break
        across sklearn versions.
    :param X: 2-D array of shape (n_samples, n_features).
    :return: list of arrays [input, hidden..., output] filled in place by
        clf._forward_pass.
    """
    hidden_layer_sizes = clf.hidden_layer_sizes
    # Normalize a scalar layer size to a one-element list.
    if not hasattr(hidden_layer_sizes, "__iter__"):
        hidden_layer_sizes = [hidden_layer_sizes]
    hidden_layer_sizes = list(hidden_layer_sizes)
    layer_units = [X.shape[1]] + hidden_layer_sizes + \
        [clf.n_outputs_]
    activations = [X]
    # Pre-allocate one buffer per non-input layer; _forward_pass fills them.
    for i in range(clf.n_layers_ - 1):
        activations.append(np.empty((X.shape[0],
                                     layer_units[i + 1])))
    clf._forward_pass(activations)
    return activations
# For separating samples by their labels
def separate_by_label(samples, labels):
    """Group *samples* into a dict keyed by their (int-cast) labels.

    :param samples: iterable of samples.
    :param labels: iterable of labels, parallel to *samples*.
    :return: dict mapping int(label) -> list of samples, in input order.
    """
    grouped = {}
    for sample, label in zip(samples, labels):
        grouped.setdefault(int(label), []).append(sample)
    return grouped
# For separating by size and, optinally, labels
def separate_by_size(samples, labels=None):
    """Group samples (and optionally their labels) by sample length.

    :param samples: iterable of sized samples.
    :param labels: optional sequence of labels parallel to *samples*.
    :return: dict n -> samples of length n; if *labels* is given, a pair of
        dicts (samples by size, labels by size) is returned instead.
    """
    samples_by_size = {}
    labels_by_size = {}
    for position, sample in enumerate(samples):
        size = len(sample)
        samples_by_size.setdefault(size, []).append(sample)
        if labels is not None:
            labels_by_size.setdefault(size, []).append(labels[position])
    if labels is None:
        return samples_by_size
    return samples_by_size, labels_by_size
# For filtering the samples by label and size
def filter_samples(samples, labels, target_label=None, n=None):
    """Return the samples that match an optional label and/or length filter.

    :param samples: iterable of sized samples.
    :param labels: sequence of labels parallel to *samples*.
    :param target_label: if given, keep only samples with this label.
    :param n: if given, keep only samples of this length.
    :return: list of matching samples (the input object itself when no
        filter is requested, mirroring the original behavior).
    """
    if target_label is None and n is None:
        return samples
    # Bug fix: filtered_samples was never initialized, so every filtering
    # branch raised NameError; additionally, the branch where both filters
    # were supplied ignored n and filtered by label only.
    filtered_samples = []
    for (sample, label) in zip(samples, labels):
        if target_label is not None and label != target_label:
            continue
        if n is not None and len(sample) != n:
            continue
        filtered_samples.append(sample)
    return filtered_samples
# Define the function that finds the lowest empirical quantile
# that is greater than 100q% of the sample (0 < q <= 1).
# The sample has to be sorted in the ascending order in order for this function to work.
def get_quantile(sample, q):
    """Return the smallest element whose empirical CDF value reaches *q*.

    :param sample: list sorted in ascending order (required precondition).
    :param q: quantile level, 0 < q <= 1.
    :return: the element of *sample* at the empirical q-quantile position.
    """
    size = len(sample)
    position = 0
    cumulative = 1.0 / size
    # Walk the empirical CDF until it reaches q (or the sample is exhausted).
    while cumulative < q and position < size:
        cumulative = cumulative + 1.0 / size
        position = position + 1
    if position == size:
        position = size - 1
    return sample[position]
# Define the function to construct a sample's descriptor.
def get_descriptor(sample, q, sorted=True):
    """Build a descriptor (standardized quantiles + summary statistics).

    :param sample: list of numbers; must already be in ascending order
        unless sorted=False is passed.
    :param q: quantile step, 0 < q <= 1; int(1/q) quantiles are produced.
    :param sorted: pass False when the input still needs sorting.
        (The parameter name shadows the builtin; kept for compatibility
        with existing keyword callers.)
    :return: [quantiles..., n, mean, sd, minimum, maximum, median].
    """
    # Make sure that the sample is sorted before calling this function
    if sorted == False:
        # Bug fix: sort a copy instead of mutating the caller's list in place.
        sample = list(sample)
        sample.sort()
    # Determine the descriptive statistics
    n = len(sample)
    maximum = max(sample)
    minimum = min(sample)
    mean = np.mean(sample)
    median = np.median(sample)
    sd = np.std(sample)
    # Standardize the sample
    # NOTE(review): a constant sample gives sd == 0 and divides by zero here.
    standardized_sample = [(x - mean) / sd for x in sample]
    # Determine the number of quantiles
    m = int(1 / q)
    # Determine the quantiles
    # Note: Optimize this step. All the quantiles can be found in one single pass.
    descriptor = [get_quantile(standardized_sample, j*q) for j in range(1, m + 1)]
    # Combine the quantiles and descriptive statistics to get the sample's descriptor
    descriptor = descriptor + [n, mean, sd, minimum, maximum, median]
    return descriptor
# Define a class to transform the samples to descriptors, that can be used in a sklearn pipeline
class DescriptorBuilder(TransformerMixin, BaseEstimator):
    """Sklearn-pipeline transformer that converts raw samples to descriptors.

    Each descriptor consists of int(1/q) standardized quantiles followed by
    n, mean, sd, minimum, maximum and median (see get_descriptor).
    """
    def __init__(self, q=0.1):
        """Store the quantile step *q* and precompute the feature names."""
        super(DescriptorBuilder, self).__init__()
        self.q = q
        # Set the names of the features in the descriptors
        self.features = ['q{:.2f}'.format(i * q) for i in range(1, int(1/q) + 1)]
        self.features += ['n', 'mean', 'sd', 'minimum', 'maximum', 'median']
    def fit(self, X, y=None):
        """No-op fit; the transformer is stateless."""
        # Not needed, but present for compatibility.
        return self
    def transform(self, X, y=None):
        """Transform a list of samples (or one sample) into descriptor form.

        Returns a DataFrame for a list of lists, a plain descriptor list
        for a single list, and (implicitly) None for unsupported inputs.
        """
        # Note: Currently works only on a list of lists or a single list.
        if isinstance(X, list):
            if all(isinstance(x, list) for x in X):
                X = [get_descriptor(x, q=self.q, sorted=False) for x in X]
                return pd.DataFrame(X)
            else:
                X = get_descriptor(X, q=self.q, sorted=False)
                return X
        else:
            # Pandas dataframes and numpy arrays are not supported for now.
            pass
# For completing all the steps in a pipe except for the last one (prediction)
# The neural net classifier consists of the descriptor builder, standard scaler, mean imputer, and the neural network.
def prepare_input(X, dbnn_pipe):
    """Run X through every preprocessing stage of the pipeline except the
    final classifier.

    :param X: raw samples.
    :param dbnn_pipe: pipeline exposing named 'descriptor_builder', 'scaler'
        and 'imputer' steps, each with a transform method.
    :return: the fully preprocessed input.
    """
    for stage in ('descriptor_builder', 'scaler', 'imputer'):
        X = dbnn_pipe[stage].transform(X)
    return X
# This functions traverses a dictionary to save all the figures in it to said directory
def traverse_and_save(dictionary, img_dir_path):
if type(dictionary) is not dict:
return
for key in dictionary:
if 'fig' in key:
pathlib.Path(*img_dir_path.split(os.sep)).mkdir(parents=True, exist_ok=True)
figure = dictionary[key]
for extension in ['pdf', 'eps']:
path = os.path.join(img_dir_path, img_dir_path.split(os.sep)[-1] + '_' + key) + '.' + extension
print('Saving', path)
if 'savefig' in dir(figure):
figure.savefig(path, bbox_inches='tight')
else:
figure.figure.savefig(path, bbox_inches='tight')
else:
traverse_and_save(dictionary[key], os.path.join(img_dir_path, key)) | milos-simic/statistical_classification | util.py | util.py | py | 21,392 | python | en | code | 0 | github-code | 90 |
18430427229 | n=int(input())
print(n*(n-1)//2-n//2)
if n%2:
wa=n
else:
wa=n+1
for i in range(1,n+1):
for ii in range(1,i):
if i+ii!=wa:
print(ii,i) | Aasthaengg/IBMdataset | Python_codes/p03090/s780762127.py | s780762127.py | py | 166 | python | en | code | 0 | github-code | 90 |
16252627437 | from django.shortcuts import render, redirect
from .models import Schools, SchoolData
from .forms import SchoolDataForm,SchoolForm
# Create your views here.
def home(request):
    """Render the landing page."""
    template_name = 'index.html'
    return render(request, template_name)
def schooldata_list(request):
    """List every SchoolData record."""
    records = SchoolData.objects.all()
    return render(request, 'schooldata_list.html', {'schooldatas': records})
def schooldata_edit(request, id):
    """Render the edit form for a single SchoolData record.

    :param id: primary key of the record to edit.
    """
    # Removed leftover debug print ('ohhhh').
    schooldata = SchoolData.objects.get(id=id)
    return render(request, 'schooldata_edit.html', {'schooldata': schooldata})
def test(request):
    """Render the static test page."""
    page = 'test.html'
    return render(request, page)
def schooldata_delete(request, id):
    """Delete a SchoolData record and return to the listing."""
    record = SchoolData.objects.get(id=id)
    record.delete()
    return redirect("/schooldata/show")
def schooldata_update(request, id):
    """Persist edits to a SchoolData record.

    On valid form data the record is saved and the user is redirected to
    the home page; otherwise the edit form is rendered again.

    :param id: primary key of the record being updated.
    """
    # Removed leftover debug prints ('ohhhh' / 'no_is_valid').
    schooldata = SchoolData.objects.get(id=id)
    form = SchoolDataForm(request.POST, instance=schooldata)
    if form.is_valid():
        form.save()
        return redirect("/")
    # NOTE(review): sibling views redirect to "/schooldata/show" after
    # changes; confirm whether redirecting to "/" here is intentional.
    return render(request, 'schooldata_edit.html', {'schooldata': schooldata})
# def schooldata_show(request):
# schools=Schools.objects.all()
# return render(request,'schooldata_creat.html',{'schools':schools})
# def school_list(request):
# schools=Schools.objects.all()
# return render(request, 'school_list.html',{'schools':schools})
# def add_show(request):
# return render(request, 'school_add.html')
def school_show(request):
    """List every school."""
    school_records = Schools.objects.all()
    return render(request, 'school_list.html', {'schools': school_records})
def add_view(request):
    """Render the school creation page."""
    page = 'school_create.html'
    return render(request, page)
def school_create(request):
    """Create a school from submitted form data.

    On a valid POST the school is saved and the user is redirected to the
    listing; otherwise (GET, invalid form, or a failed save) the creation
    form is rendered.
    """
    if request.method == "POST":
        form = SchoolForm(request.POST)
        if form.is_valid():
            try:
                form.save()
                return redirect("/school/list")
            except Exception:
                # Bare 'except:' narrowed to Exception so system-exiting
                # exceptions propagate. Saving failed; fall through and
                # re-render the form. NOTE(review): consider logging the
                # error instead of swallowing it silently.
                pass
    else:
        form = SchoolForm()
    return render(request, 'school_add.html', {'form': form})
def school_delete(request, id):
    """Delete a school and return to the listing."""
    record = Schools.objects.get(id=id)
    record.delete()
    return redirect("/school/list")
def school_edit(request, id):
    """Render the edit form for a single school.

    :param id: primary key of the school to edit.
    """
    # Removed leftover debug print ('ohhhh').
    school = Schools.objects.get(id=id)
    return render(request, 'school_edit.html', {'school': school})
def school_update(request, id):
print('ohhhh')
school=Schools.objects.get(id=id)
form = SchoolDataForm(request.POST)
if form.is_valid():
form.save()
print('no_is_valid')
return redirect("/school/list")
return render(request, 'school_edit.html', {'school': school}) | sdev3millions/schoolPPP | schoolppp/school/views.py | views.py | py | 2,616 | python | en | code | 1 | github-code | 90 |
3387462946 | #!/usr/bin/env python
from hippocampus_common.node import Node
import rospy
from std_msgs.msg import Float64
import threading
from mavros_msgs.msg import State
from depth_control.msg import DepthEKFStamped
from dynamic_reconfigure.server import Server
from gripper.cfg import DepthStabilizerConfig
class StabilizerNode(Node):
    """ROS node that publishes vertical thrust to either hold a depth
    setpoint or track a commanded vertical speed.

    While the vertical-thrust input stays within ``input_threshold`` the
    node holds the last latched depth; a larger input switches to speed
    control scaled by ``MAX_SPEED``.
    """
    def __init__(self, name, anonymous=False, disable_signals=False):
        """Set up state, the reconfigure server, publishers and subscribers."""
        super(StabilizerNode, self).__init__(name,
                                             anonymous=anonymous,
                                             disable_signals=disable_signals)
        # Guards all mutable state shared between subscriber callbacks,
        # the reconfigure server and the run() loop.
        self.data_lock = threading.RLock()
        self.throttle = 0.0
        self.armed = False
        self.hold_depth = True
        self.input_threshold = 0.1
        self.hz = 30.0
        self.current_depth = 0.0
        self.current_speed = 0.0
        self.depth_setpoint = 0.0
        self.speed_setpoint = 0.0
        self.depth_gain = 1.0
        self.speed_gain = 1.0
        self.MAX_SPEED = 0.2
        # NOTE(review): self.armed and self.speed_setpoint are written but
        # never read inside this class; confirm whether they are needed.
        self.reconfigure_server = Server(DepthStabilizerConfig,
                                         self.on_reconfigure)
        self.vertical_thrust_pub = rospy.Publisher("vertical_thrust",
                                                   Float64,
                                                   queue_size=1)
        self.depth_setpoint_pub = rospy.Publisher("depth_setpoint",
                                                  Float64,
                                                  queue_size=1)
        self.input_sub = rospy.Subscriber("vertical_thrust_input",
                                          Float64,
                                          self.on_input,
                                          queue_size=1)
        self.state_sub = rospy.Subscriber("mavros/state", State, self.on_state)
        self.depth_sub = rospy.Subscriber("depth", DepthEKFStamped,
                                          self.on_depth)
    def on_reconfigure(self, config, _):
        """Apply dynamic-reconfigure updates for max speed and gains."""
        with self.data_lock:
            self.MAX_SPEED = config["max_speed"]
            self.depth_gain = config["depth_gain"]
            self.speed_gain = config["speed_gain"]
        return config
    def on_input(self, msg):
        """Handle a vertical-thrust command.

        Inputs above the threshold switch the node to speed control; when
        the input returns below it, the current depth is latched as the new
        depth setpoint and depth hold resumes.
        """
        with self.data_lock:
            self.throttle = msg.data
            if abs(self.throttle) > self.input_threshold:
                self.hold_depth = False
            else:
                if not self.hold_depth:
                    self.depth_setpoint = self.current_depth
                self.hold_depth = True
    def on_depth(self, msg):
        """Cache the latest depth estimate and vertical speed."""
        with self.data_lock:
            self.current_depth = msg.depth
            self.current_speed = msg.z_vel
    def on_state(self, msg):
        """Track the vehicle's armed state from mavros."""
        with self.data_lock:
            self.armed = msg.armed
    def run(self):
        """Control loop at self.hz: run the active controller and publish
        the current depth setpoint each cycle."""
        r = rospy.Rate(self.hz)
        while not rospy.is_shutdown():
            with self.data_lock:
                if self.hold_depth:
                    self.do_depth_control()
                else:
                    self.do_speed_control()
                msg = Float64()
                msg.data = self.depth_setpoint
                self.depth_setpoint_pub.publish(msg)
            r.sleep()
    def do_depth_control(self):
        """Proportional control on the depth error; publishes thrust."""
        error = self.depth_setpoint - self.current_depth
        u = self.depth_gain * error
        msg = Float64()
        msg.data = u
        self.vertical_thrust_pub.publish(msg)
    def do_speed_control(self):
        """Proportional control tracking throttle * MAX_SPEED; publishes
        thrust."""
        error = self.throttle * self.MAX_SPEED - self.current_speed
        u = self.speed_gain * error
        msg = Float64()
        msg.data = u
        self.vertical_thrust_pub.publish(msg)
def main():
    """Instantiate the depth stabilizer node and run its control loop."""
    stabilizer = StabilizerNode("depth_stabilizer")
    stabilizer.run()
if __name__ == "__main__":
main()
| DavidHahn97/gripper | nodes/depth_stabilizer.py | depth_stabilizer.py | py | 3,782 | python | en | code | 0 | github-code | 90 |
73552257898 | import functools
import itertools
import struct
from . import command
from . import zwave
class DeserializeError(Exception):
    """Raised when a WBXML byte sequence does not match any known command."""
#----------------------------------------------------------------------
@functools.singledispatch
def serialize(cmd):
    """Serialize a command object into a list of WBXML bytes.

    The generic implementation emits only the command's signature; command
    types with payloads are handled by the registered overloads below.
    """
    return list(cmd.sig())
@serialize.register(command.AssociationGet)
@serialize.register(command.MultiChannelAssociationGet)
def _(cmd):
return list(cmd.sig()) + [cmd.group]
@serialize.register(command.BasicSet)
@serialize.register(command.BinarySwitchSet)
@serialize.register(command.MultilevelSwitchSet)
def _(cmd):
return list(cmd.sig()) + [cmd.value]
@serialize.register(command.ConfigurationSet)
def _(cmd):
return list(cmd.sig()) + \
[cmd.parameter, struct.calcsize(cmd.fmt)] + \
list(struct.pack(">%s" % cmd.fmt, cmd.value))
@serialize.register(command.ConfigurationGet)
def _(cmd):
return list(cmd.sig()) + [cmd.parameter]
@serialize.register(command.MultiChannelAssociationRemove)
@serialize.register(command.MultiChannelAssociationSet)
def _(cmd):
return list(cmd.sig()) + [cmd.group] + \
cmd.nodes + [zwave.MULTI_CHANNEL_ASSOCIATION_SET_MARKER_V2] + \
list(itertools.chain(*cmd.multi_channel_nodes))
@serialize.register(command.MultiChannelEncap)
def _(cmd):
return list(cmd.sig()) + [0, cmd.endpoint] + serialize(cmd.command)
#----------------------------------------------------------------------
class lookup:
    """Decorator dispatching on the two-byte signature of a command.

    The decorated function acts as the fallback for unknown signatures.
    Command classes are attached with register(), which also associates a
    decoder function for that signature.
    """
    def __init__(self, func):
        self.func = func        # fallback for unrecognized signatures
        self.class_dict = {}    # sig -> command class
        self.func_dict = {}     # sig -> decoder function
    def register(self, cmd):
        """Register *cmd* (a command class) and return a decorator that
        records the matching decoder function."""
        sig = cmd.sig()
        self.class_dict[sig] = cmd
        def decorate(decoder):
            self.func_dict[sig] = decoder
            return decoder
        return decorate
    def __call__(self, data):
        """Decode *data*: instantiate the registered command for its
        signature, or fall back to the wrapped function."""
        sig = (data[0], data[1])
        cls = self.class_dict.get(sig)
        if cls is None:
            return self.func(data)
        instance = cls()
        self.func_dict[sig](instance, data[2:])
        return instance
@lookup
def deserialize(data):
    """Decode *data* (a list of WBXML bytes) into a command object.

    Known two-byte signatures are dispatched via deserialize.register;
    anything else reaches this fallback, which raises DeserializeError.
    """
    raise DeserializeError
@deserialize.register(command.AssociationReport)
def _(cmd, data):
cmd.group = data[0]
cmd.max_nodes = data[1]
cmd.num_reports = data[2]
cmd.nodes = data[3:]
@deserialize.register(command.BasicReport)
@deserialize.register(command.BinarySwitchReport)
@deserialize.register(command.MultilevelSwitchReport)
def _(cmd, data):
cmd.value = data[0]
@deserialize.register(command.MeterReport)
def _(cmd, data):
pass
@deserialize.register(command.MultiChannelAssociationReport)
def _(cmd, data):
cmd.group = data[0]
cmd.max_nodes = data[1]
cmd.num_reports = data[2]
cmd.nodes = list(itertools.takewhile(
lambda x: x != zwave.MULTI_CHANNEL_ASSOCIATION_SET_MARKER_V2,
data[3:]))
n = len(cmd.nodes) + 4
cmd.multi_channel_nodes = list(zip(data[n::2], data[n+1::2]))
@deserialize.register(command.MultiChannelEncap)
def _(cmd, data):
cmd.endpoint = data[0]
cmd.command = deserialize(data[2:])
@deserialize.register(command.ConfigurationReport)
def _(cmd, data):
cmd.parameter = data[0]
size = data[1]
fmt = ">b" if size == 1 else ">h" if size == 2 else ">i"
cmd.value = struct.unpack(fmt, data[2:])[0]
| ahsparrow/zwave | zwave/serialize.py | serialize.py | py | 3,302 | python | en | code | 0 | github-code | 90 |
16268156658 | """Tests for the is_subset function in the sets module of the lists package"""
import pytest
from hypothesis import given
from hypothesis import settings
from hypothesis import Verbosity
from hypothesis.strategies import integers
from hypothesis.strategies import lists
from speedsurprises.lists import sets
@pytest.mark.benchmark
def test_issubset_benchmark(benchmark):
"""Benchmark the is_subset function"""
subset_found = benchmark(
sets.is_subset, first_list=[1, 2, 3], second_list=[1, 2, 3, 4]
)
assert subset_found is True
@given(
    first_list=lists(elements=integers(min_value=1, max_value=5), min_size=0),
    second_list=lists(elements=integers(min_value=1, max_value=5), min_size=1),
)
@settings(verbosity=Verbosity.verbose, deadline=None)
@pytest.mark.hypothesisworks
def test_issubset_hypothesis_integer_lists_yes(first_list, second_list):
    """Any list drawn from values 1..5 is a subset of [1, 2, 3, 4, 5]."""
    # (Previous docstring mentioned factorials -- a copy-paste leftover.)
    fully_containing_list = [1, 2, 3, 4, 5]
    determined_is_subset_first = sets.is_subset(first_list, fully_containing_list)
    determined_is_subset_second = sets.is_subset(second_list, fully_containing_list)
    assert determined_is_subset_first is True
    assert determined_is_subset_second is True
@given(
    first_list=lists(elements=integers(min_value=1, max_value=5), min_size=0),
    second_list=lists(elements=integers(min_value=1, max_value=5), min_size=1),
)
@settings(verbosity=Verbosity.verbose, deadline=None)
@pytest.mark.hypothesisworks
def test_issubset_hypothesis_integer_lists_no(first_list, second_list):
    """Non-empty lists of values 1..5 are not subsets of [11..15]."""
    # (Previous docstring mentioned factorials -- a copy-paste leftover.)
    not_fully_containing_list = [11, 12, 13, 14, 15]
    determined_is_subset_first = sets.is_subset(first_list, not_fully_containing_list)
    determined_is_subset_second = sets.is_subset(second_list, not_fully_containing_list)
    # Note that the empty set is a subset of all possible sets
    if not first_list:
        assert determined_is_subset_first is True
    else:
        assert determined_is_subset_first is False
    assert determined_is_subset_second is False
@pytest.mark.parametrize(
"first_list, second_list, expected_answer",
[([1, 2, 3], [1, 2, 3, 4], True), ([], [], True), ([], [1, 2, 3], True)],
)
def test_issubset_multiple(first_list, second_list, expected_answer):
"""Check the is_subset function with multiple inputs"""
computed_answer = sets.is_subset(first_list, second_list)
assert computed_answer is expected_answer
def test_issubset_single():
"""Check the is_subset function with a single input"""
should_be_valid = sets.is_subset([], [1, 2, 3, 4])
assert should_be_valid is True
| Tada-Project/speed-surprises | tests/test_issubset.py | test_issubset.py | py | 2,673 | python | en | code | 3 | github-code | 90 |
20456448994 | __author__ = 'di_shen_sh@163.com'
from DataCenter import *
from norlib.graphics import *
def Test():
    """Print a fixed marker string (invoked via exec below as a smoke test)."""
    message = "abcdefg"
    print(message)
exec("Test()")
datacenter = DataCenter("mongodb://localhost:27017/")
klinecol = datacenter.IF当月[300]
klines = klinecol.getdatas(20130101, 20140701)
pairs = [(i,k) for i, k in enumerate(klines) if k.SolidLength>= 15]
indexs, datas=zip(*pairs)
nextklines = [klines[i+1] for i in indexs]
nextlengths = [klines[i+1].SolidLength for i in indexs]
datetimes = [klines[i+1].Time for i in indexs]
def _onbar(a_index):
print(datetimes[a_index])
kl = nextklines[a_index]
print("Upper:{0}\r\nLower:{1}".format(kl.HighLength, kl.LowLength))
bar.draw(nextlengths, _onbar)
hist.draw(nextlengths, len(nextlengths)/3)
#print(len(datas))
#print(datas[0])
#print(klines.Count)
| norsd/PythonProjects | Quant/Test.py | Test.py | py | 814 | python | en | code | 0 | github-code | 90 |
17984793759 | from collections import Counter
s=input()
ch=set([c for c in s])
ans=101
for c in ch:
cur=s
cnt=0
while len(set([x for x in cur]))>1:
cnt+=1
tmp=''
for i in range(len(cur)-1):
if cur[i]==c or cur[i+1]==c:
tmp+=c
else:
tmp+=cur[i]
cur=tmp
ans=min(ans,cnt)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03687/s741561677.py | s741561677.py | py | 371 | python | en | code | 0 | github-code | 90 |
26337741410 | # -*- coding: utf-8 -*-
""" Granite FW's XML -> WBXML encoder.
Notes
The latest WAP Binary XML Content Format (WBXML) specification can be
found from here: http://www.w3.org/TR/wbxml/
"""
# ============================================================================
# Module Setup
# Python library module imports
import types
import core
from interfaces.c_srv.srv_kbd import kbd
from utils import include
font = include.Include('font_def_priv.h')
grn = include.Include('granite_p_isi.h')
# ============================================================================
class Wbxml:
# ----------------------------------------------------------------------------
data = []
def parse(self, testcase):
"""Add basic tokens:
wbxml version
Public Identifier
UTF-8 charset
String table length
"""
self.test = testcase
# some attributes are removed before sending step to server
# Thus, clone must be made here
teststep = testcase.teststep.clone()
self.data = []
self.splitData = []
self.data.extend([grn.GRN_WBXML_VERSION,
grn.GRN_WBXML_PUBLIC_ID,
grn.GRN_WBXML_CHARSET_UTF8,
0]) # GRN_WBXML_STRTAB_LENGTH is always 0
self.testStephandler(teststep)
self.splitData.append(self.data)
self.test = None
self.data = []
return self.splitData
def testStephandler(self, teststep):
"""Handle teststep/rootTag"""
if teststep.getAttributes() and teststep.getChildNodes():
self.data.append(grn.GRN_WBXML_C | grn.GRN_WBXML_A | grn.GRN_WBXML_TAG_teststep)
self.handleAttributes(teststep)
self.handleChildren(teststep)
else:
if teststep.getAttributes():
self.data.append(grn.GRN_WBXML_A | grn.GRN_WBXML_TAG_teststep)
self.handleAttributes(teststep)
else:
if teststep.getChildNodes():
self.data.append(grn.GRN_WBXML_C | grn.GRN_WBXML_TAG_teststep)
self.handleChildren(teststep)
else:
self.data.append(grn.GRN_WBXML_TAG_teststep)
self.data.append(grn.GRN_WBXML_END)
def handleChildren(self, node):
"""Handle given tag childNodes"""
for child in node.getChildNodes():
self.checkDataLength()
if child.getName() in ["key_in", "expect", "text", "keyblock",
"sx", "animation", "capture", "keypress",
"delay", "display", "config", "bitmap",
"block", "touch", "press", "move", "release"]:
self.handleTag(child)
else:
if child.getName() == "data":
self.handleDataTag(child)
else:
debug.err("unknown child: %s" % child)
def handleTag(self, tag):
"""Check tag and apply information to wbxml format"""
tag = self.checkUnsupportedAttributes(tag)
if tag.getName() in ['text', 'sx', 'animation']:
if tag.getAttributes():
self.data.append(grn.GRN_WBXML_C | grn.GRN_WBXML_A | \
getattr(grn, "GRN_WBXML_TAG_" + \
tag.getName()))
self.handleAttributes(tag)
self.data.append(grn.GRN_WBXML_END)
else:
self.data.append(grn.GRN_WBXML_C | \
getattr(grn, "GRN_WBXML_TAG_" + \
tag.getName()))
self.handleTextElement(tag['text'])
else:
if tag.getChildNodes() and tag.getAttributes():
self.data.append(grn.GRN_WBXML_C | grn.GRN_WBXML_A | \
getattr(grn, "GRN_WBXML_TAG_" + \
tag.getName()))
self.handleAttributes(tag)
self.handleChildren(tag)
else:
if tag.getChildNodes():
self.data.append(grn.GRN_WBXML_C | \
getattr(grn, "GRN_WBXML_TAG_" + \
tag.getName()))
self.handleChildren(tag)
else:
self.data.append(grn.GRN_WBXML_A | \
getattr(grn, "GRN_WBXML_TAG_" + \
tag.getName()))
self.handleAttributes(tag)
self.data.append(grn.GRN_WBXML_END)
def checkUnsupportedAttributes(self, tag):
"""This method checks all non supported attributes from tag."""
for attribute in tag.getAttributes():
if attribute == 'zoneName' or attribute == 'layoutName':
tag.removeAttribute(attribute)
if attribute == 'name':
if tag.getName() in ['display', 'animation', 'bitmap', 'capture']:
tag.removeAttribute(attribute)
return tag
def handleTextElement(self, value):
"""This method handle given text to wbxml format."""
if value.strip():
self.data.append(grn.GRN_WBXML_STR_I)
if type(value) == types.UnicodeType:
st = value.encode('utf-8')
else:
st = unicode(value, 'Latin-1').encode('utf-8')
for char in st:
self.data.append(ord(char))
self.data.append(0x00)
self.checkDataLength()
def handleAttributes(self, element):
"""This method handle given tag attributes"""
for attribute in element.getAttributes():
if attribute in ["length", "wait", "timeout", "repeat", "x", "y",
"height", "width", "format", "checksum",
"animation_id", "max_isi_data_length", "screen",
"offset", "gap", "repeat", "masku", "maskd",
"maskl", "maskr", "long_press_length",
"repeat_length", "keypress_length",
"keypress_wait", "short_press_length",
"keys_idle_length", "touch_id"]:
# NOTE: getattr None is done to ensure backwards compatibility
attrDefine = getattr(grn, "GRN_WBXML_ATTR_" + attribute, None)
if not attrDefine is None:
self.data.append(attrDefine)
self.mb_u_int32(element[attribute])
else:
debug.err('%s WAS NOT FOUND FROM GRANITE DEFINE FILES!' % \
attribute)
else:
if attribute == "key":
self.data.append(grn.GRN_WBXML_ATTR_key)
self.handleKeyAttribute(element[attribute])
else:
if attribute in ["highlighted", "valid", "truncated",
"items", "bitmap_capture", "dimmed",
"wildcard", "action", "background", "ntf_sending"]:
self.data.append(getattr(grn, 'GRN_WBXML_ATTR_' + \
attribute))
self.handleEnumeratedAttribute(element[attribute])
else:
if attribute == "name":
self.data.append(grn.GRN_WBXML_ATTR_name)
self.handleTextElement(element[attribute])
else:
if attribute == "font":
self.data.append(grn.GRN_WBXML_ATTR_font)
self.handleFontAttribute(element[attribute])
elif not attribute in ["save", "waitUntilNoUIevents", "dragHold"]:# Ignore save, waitUntilNoUIevents and dragHold attributes
debug.err("unknown attribute: %s" % attribute)
# FIXME: get rid of this if
if element.getChildNodes():
self.data.append(grn.GRN_WBXML_END)
def handleEnumeratedAttribute(self, value):
"""handle enumerated attributes to wbxml format"""
try:
self.data.append(getattr(grn, 'GRN_WBXML_ATTR_VALUE_' + value))
except AttributeError:
self.test.fail("Unknown attribute value: %r " % value)
def handleKeyAttribute(self, value):
self.mb_u_int32(getattr(kbd, value))
def mb_u_int32(self, value):
"""This method convert given value to mb_u_int32 format"""
c = long(value)
i = 0
if c <= 0x7F:
"1 byte encoding."
"Single byte value follows"
self.data.append(grn.GRN_WBXML_EXT_T_0)
self.data.append(c)
else:
"2 -> n byte encoding"
"Multi byte value follows"
self.data.append (grn.GRN_WBXML_EXT_T_0)
result = []
j = 0
while c > 0:
i = c & 0x7F
if j > 0:
i = i | 0x80
result.append (i)
c = c >> 7
j = j + 1
i = len(result) - 1;
while i >= 0:
self.data.append(result[i])
i = i - 1
def handleFontAttribute(self, value):
"""Handle font attribute to wbxml format"""
fontname = ""
fonttype = ""
if value.find('-') >= 0:
fontname = 'FONT_NAME_PRIV_' + value.split('-')[0]
fonttype = 'FONT_TYPE_PRIV_' + value.split('-')[1]
else:
fontname = 'FONT_NAME_PRIV_' + value
if not fonttype:
fontdata = getattr(font, fontname)
else:
fontdata = getattr(font, fontname) + (getattr(font, fonttype) << 16)
self.mb_u_int32(fontdata)
def handleDataTag(self, dataTag):
"""Handle Data tag"""
firstChildsValue = dataTag.getChildNodes[0]
self.checkDataLength(len(firstChildsValue) / 2)
self.data.append(grn.GRN_WBXML_C | grn.GRN_WBXML_TAG_data)
self.opaque(firstChildsValue) # content
self.data.append(grn.GRN_WBXML_END)
def opaque(self, value):
"""Handle opaque data
value must be string.
Ex. 0x80 = '80'
"""
opaqueData = []
for i in range(0, len(value), 2):
opaqueData.append(int(value[i:i + 2], 16))
self.data.append(grn.GRN_WBXML_OPAQUE)
self.mb_u_int32(len(opaqueData))
self.data.extend(opaqueData)
def checkDataLength(self, value=None):
"""Check data length.
Value must be len()-value."""
if not value: value = 0
if len(self.data) + value >= core.FW_conf['encoder_isi_length'] - 300:
self.splitData.append(self.data)
self.data = []
def parse(testcase):
    """Encode *testcase* into WBXML message chunks with a fresh encoder."""
    encoder = Wbxml()
    return encoder.parse(testcase)
| slimsymphony/astt | framework/interfaces/wbxml_encoder.py | wbxml_encoder.py | py | 11,510 | python | en | code | 3 | github-code | 90 |
12844833140 | import config
import os
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from api import elasticsearch
from api.models import NotificationConfig, NotificationConfigDetail
from api.schema import NotificationModel, NotificationConfigModel, NotificationConfigDetailModel
from api.email import send_new_response_notification
from api.exports_service import export_file, remove_file_from_s3
from api.email import dnd_check, send_notification_email
from sqlalchemy import func, Enum, or_, and_, not_, extract, cast, Integer, text
from datetime import datetime, timedelta, tzinfo
engine = create_engine(config.SQLALCHEMY_DATABASE_URI)
Session = sessionmaker(bind=engine)
session = Session()
class simple_utc(tzinfo):
def tzname(self,**kwargs):
return "UTC"
def utcoffset(self, dt):
return timedelta(0)
# CHECK prospects and life events count according to notifications configs and notify to users.
# Get all configs according to their lust run, interval and DND settings (tricky part)
# For each config get their details
# For each detail search in elastic according to their type, filterSet (if it has) and count
# If the count is reached, then create notifications according to:
# 1. if app: save notification in DB (w/ read = false)
# 2. if email: send email (use user email)
# 3. if sms: send sms (PENDING)
def check_for_notifications():
    """Scan all due notification configs and fan out app/email/sms alerts.

    A config is due when notifications are allowed and it either has never
    run or more than ``interval`` minutes have elapsed since its last run.
    For every config detail the matching document count is fetched from
    Elasticsearch; when it exceeds the configured threshold, a notification
    is emitted on each enabled channel.  Returns True when the sweep
    completes.

    Bug fix: the loop variable was named ``config``, shadowing the imported
    ``config`` settings module, so ``config.SITE_TITLE`` in the email subject
    was looked up on the ORM row instead of the application settings.  The
    loop variable is renamed; the local ``text`` (which shadowed
    ``sqlalchemy.text``) is renamed to ``message``.
    """
    current_date = datetime.utcnow()
    print(f'Checking for notifications. Current datetime: {current_date}')
    # Due when never run, or when more than `interval` minutes have passed.
    intervalCheck = or_(NotificationConfigModel.last_run == None, func.extract('epoch', current_date - NotificationConfigModel.last_run) > (cast(NotificationConfigModel.interval, Integer) * 60))
    notif_configs = session.query(NotificationConfig).filter(and_(NotificationConfigModel.notifications_allowed == True, intervalCheck)).all()
    for notif_config in notif_configs:
        if dnd_check(notif_config.user):
            for detail in notif_config.details:
                args = {}
                # Only count documents newer than the previous run (or the
                # last 30 minutes on the very first run).
                newer = notif_config.last_run if notif_config.last_run else current_date - timedelta(minutes=30)
                args["newer"] = newer.replace(tzinfo=simple_utc()).isoformat()
                print(f'datetime: {args["newer"]}')
                count = elasticsearch.execute_notifications_count(notif_config.user, args, detail.filter_set.filters if detail.filter_set else None, detail.set_type)
                print(f'count: {count}')
                if count > detail.count:
                    message = f'You have new {detail.set_type} data.'
                    if notif_config.app:
                        # In-app notification row (unread) persisted below.
                        notification = NotificationModel(user_id=notif_config.user.id, read=False, date=datetime.utcnow(), notification_type='NOTIFICATION', text=message)
                        session.add(notification)
                    if notif_config.email:
                        # `config` now correctly refers to the settings module.
                        send_notification_email(notif_config, detail, message, subject=f"{config.SITE_TITLE} - New notification")
                    if notif_config.sms:
                        print('PENDING: sending sms notification')
        # Preserved behavior: last_run advances even when DND suppressed
        # the checks, so the config is not rescanned immediately.
        notif_config.last_run = current_date
    session.commit()
    return True
| akash-cis/PROJECTS | socialai/WebApp-API-develop/notifications/data_service.py | data_service.py | py | 3,214 | python | en | code | 0 | github-code | 90 |
def estPrem(x):
    """Return True if x is a prime number (trial division up to sqrt(x))."""
    if x <= 1:
        return False
    divisor = 2
    while divisor * divisor <= x:
        if x % divisor == 0:
            return False
        divisor += 1
    return True
# Project Euler #46: find the smallest odd composite that cannot be written
# as a prime plus twice a square (Goldbach's other conjecture).
n = 3
while True:
    if not estPrem(n):
        representable = False
        m = 1
        while 2 * m * m < n:
            if estPrem(n - 2 * m * m):
                representable = True
                break
            m += 1
        if not representable:
            print(n)
            break
    n += 2
| AmdaUwU/Projet_Euler | python/#46.py | #46.py | py | 625 | python | en | code | 0 | github-code | 90 |
def longestWord(words: [str]) -> str:
    """Return the longest word that can be built by concatenating at least
    two words from *words* (ties broken lexicographically); '' if none.

    Note: *words* is sorted in place, as in the original implementation.
    """
    # Longest first; equal lengths in ascending lexicographic order.
    words.sort(key=lambda w: (-len(w), w))
    vocab = set(words)

    def can_split(s: str, start: int, pieces: int) -> bool:
        # True when s[start:] splits entirely into vocab words and the
        # total piece count exceeds one (so the word isn't "made of itself").
        if start >= len(s):
            return pieces > 1
        for end in range(start + 1, len(s) + 1):
            if s[start:end] in vocab and can_split(s, end, pieces + 1):
                return True
        return False

    for candidate in words:
        if can_split(candidate, 0, 0):
            return candidate
    return ''
if __name__ == '__main__':
    # Demo: "dogwalker" splits into "dog" + "walker".
    print(longestWord(["cat", "banana", "dog", "nana", "walk", "walker", "dogwalker"]))
| Lycorisophy/LeetCode_python | 中等难度/面试题 17.15. 最长单词.py | 面试题 17.15. 最长单词.py | py | 622 | python | en | code | 1 | github-code | 90 |
# Read N values and choose K of them so that (max - min) over the chosen
# values is minimized; print that minimum difference.
N, K = map(int, input().split())
H = []
for _ in range(N):
    H.append(int(input()))
# After sorting, an optimal choice of K values is always a contiguous window.
H = list(sorted(H))
r_min = 10 ** 10  # sentinel: larger than any achievable difference here
for i in range(N-K+1):
    n_min = H[i]       # smallest value in the window
    n_max = H[i+K-1]   # largest value in the window
    r_min = min(r_min, n_max-n_min)
print(r_min)
| Aasthaengg/IBMdataset | Python_codes/p03208/s797595887.py | s797595887.py | py | 233 | python | en | code | 0 | github-code | 90 |
11564430728 | from __future__ import absolute_import, division, unicode_literals
import socket
from typing import Union
from .base import StatsClientBase, PipelineBase
class StatsClient(StatsClientBase):
    """A UDP client for statsd."""

    _maxudpsize: int

    def __init__(
        self,
        host: str = "localhost",
        port: int = 8125,
        prefix: Union[str, None] = None,
        maxudpsize: int = 512,
        ipv6: bool = False,
        telegraf: bool = False,
        separator: str = '.'
    ):
        """Create a new client, resolving *host* once up front."""
        preferred = socket.AF_INET6 if ipv6 else socket.AF_INET
        # Take the first getaddrinfo result: (family, type, proto, canonname, addr).
        info = socket.getaddrinfo(host, port, preferred, socket.SOCK_DGRAM)[0]
        family = info[0]
        addr = info[4]
        self._addr = addr
        self._sock = socket.socket(family, socket.SOCK_DGRAM)
        self._prefix = prefix
        self._maxudpsize = maxudpsize
        self._telegraf = telegraf
        self._separator = separator

    def _send(self, data: str):
        """Send data to statsd; metrics are best-effort, errors are swallowed."""
        try:
            self._sock.sendto(data.encode("ascii"), self._addr)
        except (socket.error, RuntimeError):
            # No time for love, Dr. Jones!
            pass

    def close(self):
        """Close and clear the underlying socket, if it is still open."""
        if self._sock and hasattr(self._sock, "close"):
            self._sock.close()
            self._sock = None

    def pipeline(self):
        """Return a Pipeline that batches stats for this client."""
        return Pipeline(self)
class Pipeline(PipelineBase):
    """Pipeline that flushes buffered stats in UDP-datagram-sized batches."""

    def __init__(self, client: StatsClient):
        super(Pipeline, self).__init__(client)
        self._maxudpsize = client._maxudpsize

    def _send(self):
        """Drain the queued stats, packing as many per datagram as will fit."""
        packet = self._stats.popleft()
        while self._stats:
            # popleft preserves the order in which stats were recorded.
            stat = self._stats.popleft()
            if len(stat) + len(packet) + 1 >= self._maxudpsize:
                # Adding this stat would overflow the datagram: flush first.
                self._client._after(packet)
                packet = stat
            else:
                packet += "\n" + stat
        self._client._after(packet)
| Arun-chaitanya/posthog-vite | env/lib/python3.10/site-packages/statshog/client/udp.py | udp.py | py | 1,957 | python | en | code | 0 | github-code | 90 |
42198896008 | #+++++++++++++++++++++++++ STUDENT RECORD ++++++++++++++++++++++++++++++++++++++++++++++++
class StudentRecord:
    """Student record keyed by a numeric id; the payload stored in both
    hash-table implementations below."""

    def __init__(self, i, name):
        self.studentId = i        # hash key; -1 marks a logically deleted slot
        self.studentName = name

    def Get_Student_Id(self):
        """Return the record's key."""
        return self.studentId

    def Set_Student_Id(self, i):
        """Overwrite the key (used by the probing table to tombstone a slot)."""
        self.studentId = i

    def __str__(self):
        # Bug fix: id and name were joined with an empty string, printing
        # e.g. "7Alice"; separate them with a space.
        return str(self.studentId) + " " + self.studentName
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#--------------------------------------Linear Probing------------------------------------
class Hashtable:
    """Open-addressing hash table with linear probing.

    Deleted records are tombstoned by setting their id to -1 so probe
    chains for other keys are not broken.

    Bug fix: Insert/Search/delete probed with ``range(1, m)``, examining
    only m-1 slots, so a table with exactly one free slot reported "full"
    and lookups could miss a record stored in the last probe position.
    All loops now probe every slot ``(h + i) % m`` for i in range(m).
    The dead ``location = self.Hashh(location + 1)`` duplicate-key branch
    in Insert was removed; duplicates still land in the next free slot.
    """

    def __init__(self, tableSize):
        self.m = tableSize          # number of slots
        self.arr = [None] * self.m  # slot -> StudentRecord or None
        self.n = 0                  # number of live records

    def Hashh(self, key):
        """Primary hash: simple modulo on the table size."""
        return (key % self.m)

    def Insert(self, newRecord):
        """Place newRecord in the first empty or tombstoned slot."""
        key = newRecord.Get_Student_Id()
        h = self.Hashh(key)
        for i in range(self.m):
            location = (h + i) % self.m
            if self.arr[location] is None or self.arr[location].Get_Student_Id() == -1:
                self.arr[location] = newRecord
                self.n += 1
                return
        print("Table is Full : Record can't be Inserted ")

    def Search(self, key):
        """Return the record with the given key, or None."""
        h = self.Hashh(key)
        for i in range(self.m):
            location = (h + i) % self.m
            if self.arr[location] is None:
                # An empty (never-used) slot terminates the probe chain.
                return None
            if self.arr[location].Get_Student_Id() == key:
                return self.arr[location]
        return None

    def Display_Table(self):
        """Print every slot as "[i]" followed by the record or "____"."""
        for i in range(self.m):
            print("[", end="")
            print(i, end="")
            print("]", end="")
            if self.arr[i] is not None and self.arr[i].Get_Student_Id() != -1:
                print(self.arr[i])
            else:
                print("____")

    def delete(self, key):
        """Tombstone the record with the given key; return it, or None."""
        h = self.Hashh(key)
        for i in range(self.m):
            location = (h + i) % self.m
            if self.arr[location] is None:
                return None
            if self.arr[location].Get_Student_Id() == key:
                temp = self.arr[location]
                self.arr[location].Set_Student_Id(-1)
                self.n -= 1
                return temp
        return None
#-----------------------------------SEPERATE CHAINING--------------------------------------
class Node:
    """Singly-linked-list node holding an arbitrary payload."""

    def __init__(self, value):
        self.Value = value  # payload (a StudentRecord in this program)
        self.next = None    # successor node, or None at the tail
class SinglyLinkedList:
    """Minimal singly linked list used as a hash bucket (separate chaining).

    Payloads are compared via their Get_Student_Id() key.
    """

    def __init__(self):
        self.start = None  # head node, or None when the list is empty

    def Display_List(self):
        """Print payloads front to back; '_______' when the list is empty."""
        if self.start is None:
            print("_______")
            return
        node = self.start
        while node is not None:
            print(node.Value, " ", end="")
            node = node.next
        print()

    def Search(self, x):
        """Return the payload whose Get_Student_Id() equals x, else None."""
        node = self.start
        while node is not None:
            if node.Value.Get_Student_Id() == x:
                return node.Value
            node = node.next
        return None

    def Insert_First(self, data):
        """Prepend data in O(1)."""
        node = Node(data)
        node.next = self.start
        self.start = node

    def delete_node(self, x):
        """Unlink the first node whose payload id equals x; print on failure."""
        if self.start is None:
            print("List is Empty")
            return
        # Head deletion is a special case: just advance the head pointer.
        if self.start.Value.Get_Student_Id() == x:
            self.start = self.start.next
            return
        # Walk with a trailing pointer so we can relink around the match.
        prev = self.start
        while prev.next is not None:
            if prev.next.Value.Get_Student_Id() == x:
                prev.next = prev.next.next
                return
            prev = prev.next
        print("Element ", x, " not in list")
class HashTabe:
    """Hash table using separate chaining (one SinglyLinkedList per slot).

    Bug fix: ``delete`` decremented ``self.n`` whenever the bucket existed,
    even if the key was absent (``delete_node`` only printed a message),
    silently corrupting the size count.  The key's presence is now verified
    before unlinking.
    """

    def __init__(self, tableSize):
        self.m = tableSize          # number of slots
        self.arr = [None] * self.m  # slot -> SinglyLinkedList or None
        self.n = 0                  # number of stored records

    def Hash(self, key):
        """Primary hash: simple modulo on the table size."""
        return (key % self.m)

    def Display_Table(self):
        """Print every slot's bucket contents; '____' for unused slots."""
        for i in range(self.m):
            print("[", i, " ] --> ", end=' ')
            if self.arr[i] != None:
                self.arr[i].Display_List()
            else:
                print("____")

    def Search(self, key):
        """Return the record with the given key, or None."""
        h = self.Hash(key)
        if self.arr[h] != None:
            return self.arr[h].Search(key)
        return None

    def insert(self, newRecord):
        """Prepend newRecord to its bucket, creating the bucket lazily."""
        key = newRecord.Get_Student_Id()
        h = self.Hash(key)
        if self.arr[h] == None:
            self.arr[h] = SinglyLinkedList()
        self.arr[h].Insert_First(newRecord)
        self.n += 1

    def delete(self, key):
        """Remove the record with the given key; print when it is absent."""
        h = self.Hash(key)
        # Only unlink (and shrink the count) when the key is really present.
        if self.arr[h] is not None and self.arr[h].Search(key) is not None:
            self.arr[h].delete_node(key)
            self.n -= 1
        else:
            print("Value", key, "not present")
#================================DRIVER CODE==========================================
# Interactive menu exercising the linear-probing table.
# NOTE(review): the local name `id` shadows the `id` builtin throughout.
print("=====================BY LINEAR PROBING======================")
size=int(input("Enter size of table:"))
table=Hashtable(size)
while True:
    print("1. Insert a record")
    print("2. Search a Record")
    print("3. Delete a Record")
    print("4. Display Table")
    print("5. Exit")
    choice =int(input("Enter your choice : "))
    if choice == 1:
        # Build a record from user input and insert it.
        id =int(input("Enter student id : "))
        name=input("Enter Student Name : ")
        aRecord =StudentRecord(id, name)
        print("")
        table.Insert(aRecord)
    elif choice == 2:
        id=int(input("Enter a Key to be Searched : "))
        aRecord =table.Search(id)
        if aRecord is None:
            print("Key Not Found")
        else:
            print(aRecord)
    elif choice == 3:
        id=int(input("Enter a key to be deleted : "))
        table.delete(id)
    elif choice == 4:
        table.Display_Table()
    elif choice == 5:
        print("Thank you")
        break
    else:
        print("Wrong Option")
    print()
# Interactive menu exercising the separate-chaining table (same flow as the
# linear-probing menu, but calling HashTabe's lowercase insert()).
print("=====================BY SEPERATE CHAINING======================")
size=int(input("Enter size of table:"))
table=HashTabe(size)
while True:
    print("1. Insert a record")
    print("2. Search a Record")
    print("3. Delete a Record")
    print("4. Display Table")
    print("5. Exit")
    choice =int(input("Enter your choice : "))
    if choice == 1:
        id =int(input("Enter student id : "))
        name=input("Enter Student Name : ")
        aRecord =StudentRecord(id, name)
        print("")
        table.insert(aRecord)
    elif choice == 2:
        id=int(input("Enter a Key to be Searched : "))
        aRecord =table.Search(id)
        if aRecord is None:
            print("Key Not Found")
        else:
            print(aRecord)
    elif choice == 3:
        id=int(input("Enter a key to be deleted : "))
        table.delete(id)
    elif choice == 4:
        table.Display_Table()
    elif choice == 5:
        print("Thank you")
        break
    else:
        print("Wrong Option")
    print()
| MehadBinNaveed/HashTable_Implementation | implementation_HashTable.py | implementation_HashTable.py | py | 7,555 | python | en | code | 0 | github-code | 90 |
5100845078 | from collections import deque
# Competitive-programming task: read n values and accumulate differences by
# repeatedly taking the current largest and smallest remaining values from
# the two ends of a sorted deque (appears to maximize a sum of pairwise
# gaps — confirm against the original problem statement).
n=int(input())
a=[]
for i in range(n):
    a.append(int(input()))
a.sort()
a=deque(a)
ans=0
big=a.pop()        # current largest remaining value
small=a.popleft()  # current smallest remaining value
ans+=big-small
#print(ans,a,big,small)
pre_big=big
pre_small=small
checked_a=deque([small,big])  # NOTE(review): built but never read afterwards
while(len(a)>1):
    big=a.pop()
    small=a.popleft()
    # Each round pairs the new extremes against the previous round's
    # opposite extremes, contributing two gaps.
    ans+=pre_big-small
    ans+=big-pre_small
    # print(ans,a,big,small,pre_big,pre_small)
    pre_big=big
    pre_small=small
if(len(a)==1):
    # One value left (odd n): attach it to whichever side gains more.
    last_a=a.pop()
    ans+=max(pre_big-last_a,last_a-pre_small)
print(ans)
| WAT36/procon_work | procon_python/src/atcoder/corporate/C_Tenka1Beginner.py | C_Tenka1Beginner.py | py | 526 | python | en | code | 1 | github-code | 90 |
21827466829 | '''
A calculator
@author: Appu13
'''
a = float(input("Enter the first number "))
b = float(input("Enter the second number "))
ops = input("Enter the operation ")
if ops == '+':
print("Sum = ", (a+b))
elif ops == '-' :
print("Difference = ", (a-b))
elif ops == '*':
print("product = ", (a*b))
elif ops == '/':
print("Quotient = ", round((a/b),2))
else:
print("Wrong operation")
| Appu13/Python | Calculator.py | Calculator.py | py | 421 | python | en | code | 0 | github-code | 90 |
71507917737 | from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
# from tdw.collisions import Collisions
from tdw.output_data import OutputData, Bounds, Images
from tdw.librarian import ModelLibrarian
import tdw.output_data as output_data
# from tdw.collisions import Collisions
import random
import math
import pandas as pd
import numpy as np
import json
import argparse
# CLI: the scene is reconstructed from a previously generated scenario
# description stored as JSON.
parser = argparse.ArgumentParser(description="Few required data generation parameters")
parser.add_argument(
    "-jold",
    "--jsonold",
    type=str,
    required=True,
    help="sample json file path with name",
)
args = parser.parse_args()
with open(args.jsonold, "r") as handle:
    data = json.load(handle)
# Create the avatar.
c = Controller()
# Build an empty room with a kinematic camera avatar above the origin,
# looking down at it, and fix render size/quality and frame rate so the
# captured image sequence is reproducible.
resp = c.communicate(
    [
        {"$type": "set_render_quality", "render_quality": 5},
        {"$type": "set_screen_size", "width": 512, "height": 512},
        TDWUtils.create_empty_room(25, 25),
        {"$type": "create_avatar", "type": "A_Img_Caps_Kinematic", "id": "a"},
        {"$type": "set_target_framerate", "framerate": 25},
        {"$type": "set_pass_masks", "avatar_id": "a", "pass_masks": ["_img"]},
        {
            "$type": "teleport_avatar_to",
            "avatar_id": "a",
            "position": {"x": 0, "y": 10, "z": 10},
        },
        {
            "$type": "look_at_position",
            "avatar_id": "a",
            "position": {"x": 0, "y": 0, "z": 0},
        },
    ]
)
# Disable post-processing, so the changes to the material aren't blurry.
# c.communicate({"$type": "set_post_process",
#                "value": False})
# Tune the directional light and shadows for the captured images.
c.communicate(
    [
        {"$type": "adjust_directional_light_intensity_by", "intensity": 0.25},
        {"$type": "set_anti_aliasing", "mode": "none"},
        {"$type": "rotate_directional_light_by", "angle": 20, "axis": "yaw"},
        {"$type": "rotate_directional_light_by", "angle": 65, "axis": "pitch"},
        {"$type": "set_shadow_strength", "strength": 10},
    ]
)
# Flatten the JSON scenario into plain dicts: one list for static objects
# and one for dynamic (initially moving) objects.  Object ids are assigned
# sequentially, with dynamic ids offset past the static ones.
all_idx = []
static_objs = []
for i, obj in enumerate(data["initial_static_objects"]):
    all_idx.append(i)
    tmp = {
        "id": i,
        "name": obj["name"],
        "color": obj["color"],
        "material": obj["material"],
        "scale": obj["scale"],
        "loc": obj["loc"],
        "mass": obj["mass"],
    }
    static_objs.append(tmp)
dynamic_objs = []
for i, obj in enumerate(data["iniital_dynamic_objects"]):
    # NOTE(review): "iniital_dynamic_objects" is a typo carried over from
    # the generator's JSON schema — it must match the key in the input file.
    all_idx.append(len(data["initial_static_objects"]) + i)
    tmp = {
        "id": i + len(data["initial_static_objects"]),
        "name": obj["name"],
        "color": obj["color"],
        "material": obj["material"],
        "scale": obj["scale"],
        "loc": obj["loc"],
        "mass": obj["mass"],
        "target_object_id": obj["target_object_id"],
        "directional_force": obj["directional_force"],
        "initial_acc": obj["initial_acc"],
    }
    dynamic_objs.append(tmp)
# initialize static objects
for obj1 in static_objs:
    # Look up the flex model record to get its asset URL and category.
    record = ModelLibrarian("models_flex.json").get_record(obj1["name"])
    c.communicate(
        {
            "$type": "add_object",
            "name": obj1["name"],
            "url": record.get_url(),
            "scale_factor": obj1["scale"],
            "position": obj1["loc"],
            "rotation": TDWUtils.VECTOR3_ZERO,
            "category": record.wcategory,
            "id": obj1["id"],
        }
    )
    c.communicate(
        TDWUtils.set_visual_material(
            c, record.substructure, obj1["id"], obj1["material"], quality="low"
        )
    )
    # Configured mass/color, with frictionless physics on every object.
    c.communicate(
        [
            {"$type": "set_mass", "id": obj1["id"], "mass": obj1["mass"]},
            {
                "$type": "set_physic_material",
                "id": obj1["id"],
                "dynamic_friction": 0,
                "static_friction": 0,
            },
            {"$type": "set_color", "color": obj1["color"], "id": obj1["id"]},
        ]
    )
# initialize dynamic objects
for obj2 in dynamic_objs:
    record = ModelLibrarian("models_flex.json").get_record(obj2["name"])
    c.communicate(
        {
            "$type": "add_object",
            "name": obj2["name"],
            "url": record.get_url(),
            "scale_factor": obj2["scale"],
            "position": obj2["loc"],
            "rotation": TDWUtils.VECTOR3_ZERO,
            "category": record.wcategory,
            "id": obj2["id"],
        }
    )
    c.communicate(
        TDWUtils.set_visual_material(
            c, record.substructure, obj2["id"], obj2["material"], quality="low"
        )
    )
    # Same setup as static objects, plus the scenario's initial push.
    c.communicate(
        [
            {"$type": "set_mass", "id": obj2["id"], "mass": obj2["mass"]},
            {
                "$type": "set_physic_material",
                "id": obj2["id"],
                "dynamic_friction": 0,
                "static_friction": 0,
            },
            {"$type": "set_color", "color": obj2["color"], "id": obj2["id"]},
            {
                "$type": "apply_force_to_object",
                "force": obj2["directional_force"],
                "id": obj2["id"],
            },
        ]
    )
## Get the output data
# Ask the build to stream images, bounds, rigidbody states, transforms and
# collision events for every object on every subsequent frame.
c.communicate(
    [
        {"$type": "send_images", "frequency": "always"},
        {
            "$type": "send_bounds",
            "ids": [i for i in range(data["total_objects"])],
            "frequency": "always",
        },
        {
            "$type": "send_rigidbodies",
            "ids": [i for i in range(data["total_objects"])],
            "frequency": "always",
        },
        {
            "$type": "send_transforms",
            "ids": [i for i in range(data["total_objects"])],
            "frequency": "always",
        },
        {
            "$type": "send_collisions",
            "collision_types": ["obj", "env"],
            "enter": True,
            "exit": True,
            "stay": False,
        },
    ]
)
# Accumulator mirroring the input scenario plus per-frame state.
output_json = {
    "total_objects": data["total_objects"],
    "total_initial_moving_objects": data["total_initial_moving_objects"],
    "initial_static_objects": static_objs,
    "iniital_dynamic_objects": dynamic_objs,
    "frames": [],
    "collisions": [],
}
for i in range(125):  # number of frames
    # Step the simulation one frame and save the rendered image to ./dist.
    resp = c.communicate([])
    images = Images(resp[0])
    TDWUtils.save_images(images, filename="{:04d}".format(i), output_directory="./dist")
    positions = output_data.Bounds(resp[1])
    vel = output_data.Rigidbodies(resp[2])
    rot = output_data.Transforms(resp[3])
    r_id = OutputData.get_data_type_id(resp[4])
    tmp_frame = {
        "frame": i,
    }
    # Record per-object position/velocity/rotation for this frame.
    for j in range(data["total_objects"]):
        tmp_frame[j] = {
            "location": positions.get_bottom(j),
            "velocity": vel.get_velocity(j),
            "rotation": rot.get_rotation(j),
        }
    output_json["frames"].append(tmp_frame)
# NOTE(review): output_json is populated but never serialized to disk, and
# r_id / the "collisions" list are never used — confirm whether a json.dump
# and collision-recording step were intended before terminating.
c.communicate({"$type": "terminate"})
| Maitreyapatel/CRIPP-VQA | dataset/recreate.py | recreate.py | py | 6,872 | python | en | code | 7 | github-code | 90 |
17653089777 | from odoo import fields, models, api, _
from collections import defaultdict
from odoo.exceptions import UserError
class SaleContractedOrder(models.Model):
    """Extend sale.contracted.order with project-tracking fields."""
    _inherit = 'sale.contracted.order'
    # Project this contracted order belongs to; copied onto the sale orders
    # created by the wizard below.
    project_id = fields.Many2one('project.project',string='Project',)
    # Reference number copied onto the generated sale orders.
    job_order_number = fields.Char(string='Job Order Number')
class contractedOrderWizard(models.TransientModel):
    _inherit = 'sale.contracted.order.wizard'
    _description = 'Contracted order wizard'

    def create_sale_order(self):
        """Create sale orders from the wizard's contracted-order lines.

        Lines are grouped per customer; each requested quantity is validated
        against the remaining contracted quantity before any order is made.
        Returns an act_window action listing the created sale orders.

        NOTE(review): the two branches below (active_model being
        'sale.contracted.order' vs 'sale.contracted.order.line') are
        near-duplicates; consider extracting the shared logic into a helper.
        """
        order_lines_by_customer = defaultdict(list)
        # 0 means "not set yet"; False means "conflicting values were seen".
        currency_id = 0
        pricelist_id = 0
        user_id = 0
        payment_term_id = 0
        active_id = False
        if self._context.get('active_model') == 'sale.contracted.order':
            active_id = self.env['sale.contracted.order'].browse(self._context.get('active_id'))
            # for line in active_id.line_ids.filtered(lambda l: l.original_uom_qty != 0.0):
            for line in self.line_ids.filtered(lambda l: l.qty != 0.0):
                if line.remaining_uom_qty < line.qty:
                    raise UserError(
                        _('You can\'t order more than the remaining quantities'))
                # Sale-order-line values carried over from the contracted line.
                vals = {'product_id': line.product_id.id,
                        'name': line.product_id.name,
                        'product_uom': line.product_uom.id,
                        'sequence': line.contracted_line_id.sequence,
                        'price_unit': line.price_unit,
                        'job_cost_type':line.job_cost_type,
                        'contracted_order_line': line.contracted_line_id.id,
                        'product_uom_qty': line.qty,
                        'tax_id': [(6, 0, line.taxes_id.ids)]}
                order_lines_by_customer[active_id.partner_id.id].append((0, 0, vals))
            #active_id = self.env['sale.contracted.order'].browse(self._context.get('active_id'))
            if currency_id == 0:
                currency_id = active_id.currency_id.id
            elif currency_id != active_id.currency_id.id:
                currency_id = False
            if pricelist_id == 0:
                pricelist_id = active_id.pricelist_id.id
            elif pricelist_id != active_id.pricelist_id.id:
                pricelist_id = False
            # NOTE(review): user_id is first filled from partner_id and then
            # immediately compared against user_id below — the second block
            # can never see user_id == 0, so this looks like a copy-paste
            # slip (a separate partner-tracking variable was probably
            # intended).  Confirm the intended behavior.
            if user_id == 0:
                user_id = active_id.partner_id.id
            elif user_id != active_id.partner_id.id:
                user_id = False
            if user_id == 0:
                user_id = active_id.user_id.id
            elif user_id != active_id.user_id.id:
                user_id = False
            if payment_term_id == 0:
                payment_term_id = active_id.payment_term_id.id
            elif payment_term_id != active_id.payment_term_id.id:
                payment_term_id = False
        elif self._context.get('active_model') == 'sale.contracted.order.line':
            active_id = self.env['sale.contracted.order.line'].browse(self._context.get('active_id'))
            for line in self.line_ids.filtered(lambda l: l.qty != 0.0):
                if line.remaining_uom_qty < line.qty:
                    raise UserError(
                        _('You can\'t order more than the remaining quantities'))
                vals = {'product_id': line.product_id.id,
                        'name': line.product_id.name,
                        'product_uom': line.product_uom.id,
                        'sequence': line.contracted_line_id.sequence,
                        'price_unit': line.price_unit,
                        'job_cost_type': line.job_cost_type,
                        'contracted_order_line': line.contracted_line_id.id,
                        'product_uom_qty': line.qty,
                        'tax_id': [(6, 0, line.taxes_id.ids)]}
                order_lines_by_customer[active_id.partner_id.id].append((0, 0, vals))
            if currency_id == 0:
                currency_id = active_id.currency_id.id
            elif currency_id != active_id.currency_id.id:
                currency_id = False
            if pricelist_id == 0:
                pricelist_id = active_id.pricelist_id.id
            elif pricelist_id != active_id.pricelist_id.id:
                pricelist_id = False
            # NOTE(review): same user_id/partner_id pattern as in the first
            # branch — see the note above.
            if user_id == 0:
                user_id = active_id.partner_id.id
            elif user_id != active_id.partner_id.id:
                user_id = False
            if user_id == 0:
                user_id = active_id.user_id.id
            elif user_id != active_id.user_id.id:
                user_id = False
            if payment_term_id == 0:
                payment_term_id = active_id.payment_term_id.id
            elif payment_term_id != active_id.payment_term_id.id:
                payment_term_id = False
        if not order_lines_by_customer:
            raise UserError(_('An order can\'t be empty'))
        if not currency_id:
            raise UserError(_('Can not create Sale Order from Contracted '
                              'Order lines with different currencies'))
        res = []
        # One sale order per customer (partner).
        for customer in order_lines_by_customer:
            order_vals = {
                'partner_id': customer,
                'origin': self.contracted_order_id.name,
                'user_id': user_id,
                'currency_id': currency_id,
                'project_id': active_id.project_id.id,
                'job_order_number': active_id.job_order_number,
                'pricelist_id': pricelist_id,
                'payment_term_id': payment_term_id,
                'order_line': order_lines_by_customer[customer],
            }
            sale_order = self.env['sale.order'].create(order_vals)
            res.append(sale_order.id)
        # Open the newly created orders in a tree/form view.
        return {
            'domain': [('id', 'in', res)],
            'name': _('Sales Orders'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'sale.order',
            'context': {'from_sale_order': True},
            'type': 'ir.actions.act_window'
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | myat90thu/test4 | joborder_project_bpc/models/contract_sale.py | contract_sale.py | py | 6,283 | python | en | code | 0 | github-code | 90 |
# Move from position 0 to position n in steps of at most m, never landing
# on a cell whose character is '1'; print the chosen step sizes, or -1 when
# no path exists.  The board string is reversed so the greedy longest-step
# choice can be made walking forward through the reversed indices.
n,m=map(int,input().split())
l=list(input())
l=l[::-1]
now=0
ans=[]
while now<n:
    num=min(n-now,m)  # largest step we may attempt from here
    b=True
    for i in range(num):
        num1=num-i
        if l[now+num1]!="1":
            # Greedily take the longest legal step.
            ans.append(str(num1))
            now+=num1
            b=False
            break
    if b:
        # No legal step from this position: force the loop to exit.
        now=n+1
if b:
    print(-1)
else:
    ans=ans[::-1]
    print(" ".join(ans))
28932437319 | from mxnet import nd
from mxnet.gluon import nn
class MLP(nn.Block):
    """Two-layer perceptron built from Gluon Blocks: a 256-unit ReLU hidden
    layer followed by a 10-unit output layer."""
    def __init__(self, prefix=None, params=None):
        super().__init__(prefix, params)
        # name_scope() makes the child layers' names carry this block's prefix.
        with self.name_scope():
            self.hidden = nn.Dense(256, activation="relu")
            self.output = nn.Dense(10)
    def forward(self, x):
        return self.output(self.hidden(x))
net = MLP()
net.initialize()
# Batch of 2 samples with 20 features each.
x = nd.random.uniform(shape=(2, 20))
print(net.forward(x))
# Show the auto-generated prefixes on the child layers' names.
print('hidden layer name with default prefix:', net.hidden.name)
print('output layer name with default prefix:', net.output.name)
class MLP_NO_NAMESCOPE(nn.Block):
    """Same MLP as above, but the child layers are created outside
    name_scope(), for comparing the resulting layer names."""
    def __init__(self, **kwargs):
        super(MLP_NO_NAMESCOPE, self).__init__(**kwargs)
        self.hidden = nn.Dense(256, activation='relu')
        self.output = nn.Dense(10)
    def forward(self, x):
        return self.output(self.hidden(x))
# An explicit prefix is supplied; the prints compare the child-layer names
# against the default-prefix case above.
net = MLP_NO_NAMESCOPE(prefix='my_mlp_')
print('hidden layer name without prefix:', net.hidden.name)
print('output layer name without prefix:', net.output.name)
class MySequential(nn.Block):
    """Hand-rolled equivalent of nn.Sequential: children run in add order."""
    def __init__(self, **kwargs):
        super(MySequential, self).__init__(**kwargs)
    def add(self, block):
        # NOTE(review): assumes Block._children is a list; in newer MXNet
        # versions _children is a dict and .append would fail — confirm the
        # pinned mxnet version.
        self._children.append(block)
    def forward(self, x):
        # Feed the output of each child into the next.
        for block in self._children:
            x = block(x)
        return x
nets = MySequential()
with nets.name_scope():
    nets.add(nn.Dense(256, activation='relu'))
    nets.add(nn.Dense(10))
nets.initialize()
nets(x)
class FancyMLP(nn.Block):
    """Block mixing a fixed random matrix with a Dense layer that is applied
    twice, sharing its parameters between both calls."""
    def __init__(self, **kwargs):
        super(FancyMLP, self).__init__(**kwargs)
        # Created outside name_scope as a plain NDArray, so it is a constant
        # rather than a registered trainable parameter.
        self.rand_weight = nd.random_uniform(shape=(10, 20))
        with self.name_scope():
            self.dense = nn.Dense(10, activation='relu')
    def forward(self, x):
        x = self.dense(x)
        x = nd.relu(nd.dot(x, self.rand_weight) + 1)
        # The same Dense layer is reused here (weight sharing).
        x = self.dense(x)
        return x
net = FancyMLP()
net.initialize()
net(x)
class NestMLP(nn.Block):
    """Block that nests an nn.Sequential inside a custom Block."""
    def __init__(self, **kwargs):
        super(NestMLP, self).__init__(**kwargs)
        self.net = nn.Sequential()
        with self.name_scope():
            self.net.add(nn.Dense(64, activation='relu'))
            self.net.add(nn.Dense(32, activation='relu'))
        # Created outside name_scope, unlike the Sequential's children above.
        self.dense = nn.Dense(16, activation='relu')
    def forward(self, x):
        return self.dense(self.net(x))
# Blocks compose freely: a custom Block can itself sit inside a Sequential.
net = nn.Sequential()
net.add(NestMLP())
net.add(nn.Dense(10))
net.initialize()
print(net(x))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.