seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
23175990731 | from bergen.console import console
from bergen.schema import Node
from bergen.graphical import GraphicalBackend
class Interaction:
    """Async context manager wrapping a graphical assignment session for a node."""
    def __init__(self, node: Node) -> None:
        # Node this interaction operates on.
        self.node = node
    async def graphical_assign(self):
        # Imported lazily so the UI stack is only required when actually assigning.
        from bergen.ui.assignation import AssignationUI
        with console.status("[bold green]Using Graphical Assignment"):
            form = AssignationUI()
            # exec_() blocks until the dialog closes and yields its result code.
            nana = form.exec_()
        return nana
    async def __aenter__(self):
        console.log("Interaction started")
        # Enter the graphical backend for the lifetime of this interaction.
        self.graphical_backend = await GraphicalBackend().__aenter__()
        return self
async def __aexit__(self, *args, **kwargs):
console.log("Interaction Done") | jhnnsrs/bergen | bergen/contracts/interaction.py | interaction.py | py | 713 | python | en | code | 0 | github-code | 90 |
18235028162 | from django.shortcuts import redirect
from django.utils.encoding import smart_str
from django.contrib.sites.shortcuts import get_current_site
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.urls import reverse
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.core.paginator import Paginator
import pytz
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.generics import ListAPIView
from rest_framework import status, generics
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.parsers import JSONParser
from rest_framework.generics import GenericAPIView
from rest_framework_simplejwt.views import TokenObtainPairView
from affiliates.models import Redirect, Url
from affiliates.serializers import RedirectSerializer
from product.serializers import ProductSerializer
from product.models import Product
from rest_framework.permissions import AllowAny
from event_notification.views import refund_requested_nofication
from core.serializers import DeviceSerializer
from product.serializers import ProductImgSerializer
from product.models import ProductImg
from django.utils.encoding import smart_str, force_bytes
from core.permissions import EcommerceAccessPolicy
from core.utilities import auth_token, methods, send_mail, TokenGenerator
from payment.models import Order
from core.models import Recent, Review, User, Wishlist, Address
from django_user_agents.utils import get_user_agent
from core.serializers import (
AddressSerializer,
CustomTokenObtainPairSerializer,
WishlistPostSerializer,
RecentsPostSerializer,
RecentsSerializer,
RefundsSerializer,
ReviewsPostSerializer,
ReviewsSerializer,
SetNewPasswordSerializer,
UserSerializer,
WishlistSerializer,
RefreshToken,
)
from payment.serializers import OrdersSerializer
@permission_classes((EcommerceAccessPolicy,))
@api_view(['GET'])
def landing_page_products(request):
    """Return the three product carousels shown on the landing page."""
    if request.method != methods["get"]:
        return
    # Response key -> queryset for each carousel.
    # NOTE(review): 'average_rating' sorts ascending (lowest rated first) — confirm intent.
    sections = {
        'newest_products': Product.objects.order_by('created')[:10],
        'highest_rated_products': Product.objects.order_by('average_rating')[:10],
        'best_selling_products': Product.objects.order_by('sales')[:10],
    }
    payload = {key: ProductSerializer(qs, many=True).data for key, qs in sections.items()}
    return Response(payload, status=status.HTTP_200_OK)
@api_view([methods["post"]])
@permission_classes((EcommerceAccessPolicy,))
def create_user(request):
    """Register a new user; returns the profile with an auth token header."""
    if request.method == methods["post"]:
        data = JSONParser().parse(request)
        if User.objects.filter(email=data["email"]).exists():
            return Response(
                {"error": "Email already registered"},
                status=status.HTTP_400_BAD_REQUEST)
        else:
            user = User.objects.create_user(**data)
            serializer = UserSerializer(user)
            # Build an email-activation link (uid + one-time token).
            uidb64 = urlsafe_base64_encode(force_bytes(user.id))
            token = TokenGenerator().make_token(user)
            current_site = get_current_site(request).domain
            relativeLink = reverse(
                "activate", kwargs={"uidb64": uidb64, "token": token}
            )
            absolute_url = f"http://{current_site}{relativeLink}"
            # NOTE(review): `token` is overwritten with the auth token here, and
            # the onboarding mail below is commented out, so `absolute_url` is
            # currently unused — confirm whether activation mail should be sent.
            token = auth_token(user)
            # send_mail("onboarding-user", user.email,
            #           data={"firstname": user.first_name, "absolute_url": absolute_url})
            return Response(
                serializer.data,
                headers={"Authorization": token},
                status=status.HTTP_201_CREATED,
            )
@api_view([methods["put"]])
@permission_classes((EcommerceAccessPolicy,))
def edit_user_detail(request, userId):
    """Partially update the user identified by path id + posted email."""
    data = JSONParser().parse(request)
    try:
        user = User.objects.get(pk=userId, email=data.get("email"))
    except:
        # Bare except: any lookup failure (not just DoesNotExist) becomes a 404.
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == methods["put"]:
        serializer = UserSerializer(user, partial=True, data=data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view([methods["delete"]])
@permission_classes((EcommerceAccessPolicy,))
def delete_user_account(request, userId):
    """Delete the account matching both the path id and the posted email."""
    payload = JSONParser().parse(request)
    try:
        account = User.objects.get(pk=userId, email=payload.get("email"))
    except:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method != methods["delete"]:
        return
    account.delete()
    return Response(status=status.HTTP_202_ACCEPTED)
@api_view([methods["get"]])
@permission_classes((EcommerceAccessPolicy,))
def get_user(request, userId):
    """Fetch a single user's serialized profile by primary key."""
    try:
        account = User.objects.get(pk=userId)
    except:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == methods["get"]:
        return Response(UserSerializer(account).data, status=status.HTTP_200_OK)
@permission_classes((EcommerceAccessPolicy,))
@api_view([methods["post"]])
def create_wishlist(request, userId, productId):
    """Toggle a product on/off the user's wishlist."""
    try:
        wishlist = Wishlist.objects.get(productId=productId, user=userId)
    except:
        # Not wishlisted yet: create the entry and return immediately.
        data = {"productId": productId, "user": userId}
        serializer = WishlistPostSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
            return Response(
                {"success": "Added to wishlist"}, status=status.HTTP_201_CREATED
            )
        # NOTE(review): invalid payload falls through to the delete branch with
        # `wishlist` unbound (NameError -> 500) — confirm intended handling.
    # Already wishlisted: a POST removes it.
    if request.method == methods["post"]:
        wishlist.delete()
        return Response(
            {"success": "Removed from wishlist"}, status=status.HTTP_202_ACCEPTED
        )
@permission_classes((EcommerceAccessPolicy,))
@api_view([methods["get"]])
def get_wishlist(request, userId):
    """Page through a user's liked wishlist entries, newest first."""
    try:
        wishlist = Wishlist.objects.filter(
            user=userId, liked=True).order_by("created").reverse()
    except:
        # NOTE(review): filter() does not raise for an empty result, so this
        # 404 path looks effectively dead — confirm.
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == methods["get"]:
        # "offset" is a 1-based page number, "limit" the page size.
        page_number = request.GET.get("offset", 1)
        per_page = request.GET.get("limit", 15)
        paginator = Paginator(wishlist, per_page=per_page)
        items = paginator.get_page(number=page_number)
        serializer = WishlistSerializer(
            items, many=True, context={"request": request})
        return Response(serializer.data, status=status.HTTP_200_OK)
@permission_classes((EcommerceAccessPolicy,))
@api_view([methods["get"]])
def get_reviews(request, userId):
    """Page through a user's reviews, newest first."""
    try:
        user_reviews = Review.objects.filter(
            user=userId).order_by("created").reverse()
    except:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method != methods["get"]:
        return
    # "offset" is a 1-based page number, "limit" the page size (defaults 1/15).
    paginator = Paginator(user_reviews, per_page=request.GET.get("limit", 15))
    page = paginator.get_page(number=request.GET.get("offset", 1))
    return Response(ReviewsSerializer(page, many=True).data,
                    status=status.HTTP_200_OK)
@permission_classes((EcommerceAccessPolicy,))
@api_view([methods["post"]])
def create_review(request):
    """Create a product review, or update the user's existing one.

    Returns 201 on a first review, 202 on an update, 400 on invalid payload.
    """
    if request.method != methods["post"]:
        return
    data = JSONParser().parse(request)
    user = data.get("user")
    productId = data.get("productId")
    serializer = ReviewsPostSerializer(data=data)
    if serializer.is_valid():
        try:
            review = Review.objects.get(user=user, productId=productId)
        except Review.DoesNotExist:
            # BUG FIX: the original re-ran the same failing .get() here
            # (raising again) instead of persisting the new review.
            review = serializer.save()
            # Refresh the product's cached average rating.
            review.set_avg_rating()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        # An earlier review exists: overwrite it with the new payload.
        serializer = ReviewsPostSerializer(review, data=data)
        if serializer.is_valid():
            serializer.save()
            review.set_avg_rating()
            return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view([methods["delete"]])
@permission_classes((EcommerceAccessPolicy,))
def delete_review(request, userId, productId):
    """Remove a user's review of a product."""
    try:
        target = Review.objects.get(user=userId, productId=productId)
    except:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method != methods["delete"]:
        return
    target.delete()
    return Response(status=status.HTTP_202_ACCEPTED)
@api_view([methods["post"]])
@permission_classes((EcommerceAccessPolicy,))
def create_recent(request, productId, userId):
    """Record that a user recently viewed a product."""
    if request.method != methods["post"]:
        return
    serializer = RecentsPostSerializer(
        data={"productId": productId, "user": userId})
    if not serializer.is_valid():
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    serializer.save()
    return Response(status=status.HTTP_202_ACCEPTED)
@api_view([methods["get"]])
@permission_classes((EcommerceAccessPolicy,))
def get_recents(request, userId):
    """Page through a user's recently-viewed products, newest first."""
    try:
        recent = Recent.objects.filter(
            user=userId).order_by("created").reverse()
    except:
        # NOTE(review): filter() does not raise for empty results; likely dead.
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == methods["get"]:
        # "offset" is a 1-based page number, "limit" the page size.
        page_number = request.GET.get("offset", 1)
        per_page = request.GET.get("limit", 15)
        paginator = Paginator(recent, per_page=per_page)
        items = paginator.get_page(number=page_number)
        serializer = RecentsSerializer(items, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class Orders(ListAPIView):
    """List a user's orders; searchable and orderable by date."""
    permission_classes = (EcommerceAccessPolicy,)
    serializer_class = OrdersSerializer
    # BUG FIX: DRF's SearchFilter reads `search_fields` (plural);
    # the original `search_field` attribute was silently ignored.
    search_fields = (
        "id",
        "status",
        "orderId",
        "ordered_date",
    )
    filter_backends = [SearchFilter, OrderingFilter]
    ordering_fields = ["ordered_date"]
    paginate_by = 20

    def get_queryset(self):
        # BUG FIX: Manager.all() accepts no keyword arguments and raised a
        # TypeError at runtime; filtering by user requires .filter().
        return Order.objects.filter(user=self.request.data["userId"])

    def get_serializer_context(self):
        # Expose the request so serializers can build absolute URLs.
        return {"request": self.request}
@api_view([methods["post"]])
@permission_classes((EcommerceAccessPolicy,))
def create_address(request):
    """Create an address; if flagged default, demote the previous default first."""
    if request.method == methods["post"]:
        data = JSONParser().parse(request)
        serializer = AddressSerializer(data=data)
        if serializer.is_valid():
            if serializer.validated_data.get("is_default"):
                try:
                    # Demote the user's existing default address, then save.
                    default_address = Address.objects.get(
                        user=serializer.validated_data["user"], is_default=True
                    )
                    default_address.is_default = False
                    default_address.save()
                    serializer.save()
                    return Response(status=status.HTTP_202_ACCEPTED)
                except:
                    # No existing default (or any lookup error): just save.
                    serializer.save()
                    return Response(status=status.HTTP_202_ACCEPTED)
            else:
                serializer.save()
                return Response(status=status.HTTP_202_ACCEPTED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view([methods["put"]])
@permission_classes((EcommerceAccessPolicy,))
def edit_address(request, userId):
    """Partially update one of the user's addresses."""
    data = JSONParser().parse(request)
    try:
        address = Address.objects.get(user=userId, id=data["id"])
        # The user's current default address, used to gate the save below.
        addresses = Address.objects.get(user=userId, is_default=True)
    except:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == methods["put"]:
        serializer = AddressSerializer(address, data=data, partial=True)
        if serializer.is_valid():
            # NOTE(review): changes are persisted only when editing the current
            # default with is_default=True; all other edits return 202 WITHOUT
            # saving — confirm this is intended.
            if serializer.validated_data["is_default"] and addresses.id == address.id:
                serializer.save()
                return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
            return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view([methods["get"]])
@permission_classes((EcommerceAccessPolicy,))
def get_address(request, userId):
    """Return the user's address."""
    try:
        # NOTE(review): .get() raises MultipleObjectsReturned if the user has
        # several addresses, which the bare except converts into a 404 —
        # confirm the one-address-per-user assumption.
        address = Address.objects.get(user=userId)
    except:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == methods["get"]:
        serializer = AddressSerializer(address)
        # NOTE(review): 201 Created on a GET — 200 OK seems intended.
        return Response(serializer.data, status=status.HTTP_201_CREATED)
@api_view([methods["delete"]])
@permission_classes((EcommerceAccessPolicy,))
def delete_address(request, userId, AddressId):
    """Delete one of the user's saved addresses."""
    try:
        record = Address.objects.get(user=userId, id=AddressId)
    except:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method != methods["delete"]:
        return
    record.delete()
    return Response(status=status.HTTP_202_ACCEPTED)
@api_view([methods["post"]])
@permission_classes((EcommerceAccessPolicy,))
def request_refund(request):
    """File a refund request, email an acknowledgement, and notify staff."""
    data = JSONParser().parse(request)
    serializer = RefundsSerializer(data=data)
    if serializer.is_valid():
        serializer.save()
        user = User.objects.get(id=serializer.validated_data["user"])
        email = user.email
        # Payload for the acknowledgement email template.
        data = {
            "firstname": user.first_name,
            "order": serializer.validated_data["order"],
            "product": [],
            "created": serializer.validated_data["created"],
        }
        send_mail("refund-request-acknowledged", email, data=data)
        # Push an internal notification so staff can process the refund.
        refund_requested_nofication()
        return Response(
            {
                "message": "Refund accepted, this may take 3 to 7 business days before you get a response"
            },
            status=status.HTTP_201_CREATED,
        )
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ResetPassword(GenericAPIView):
    """Email the user a password-reset link containing a uid + one-time token."""
    permission_classes = (EcommerceAccessPolicy,)

    def post(self, request):
        email = self.request.data["email"]
        if User.objects.filter(email=email).exists():
            user = User.objects.get(email=email)
            # BUG FIX: `user.id.to_bytes()` requires explicit length/byteorder
            # before Python 3.11 (and defaults to a single byte after), so it
            # crashed/truncated at runtime; encode the id the same way
            # create_user does.
            uidb64 = urlsafe_base64_encode(force_bytes(user.id))
            token = PasswordResetTokenGenerator().make_token(user)
            current_site = get_current_site(request).domain
            relativeLink = reverse(
                "password_reset_confirmation", kwargs={"uidb64": uidb64, "token": token}
            )
            abs_url = f"http://{current_site}{relativeLink}"
            data = {
                "absolute_url": abs_url,
                "email": email,
            }
            send_mail("password-reset", email, data=data)
            return Response(
                {"success": "We have sent you a mail to reset your password"},
                status=status.HTTP_200_OK,
            )
        return Response(
            {"error": "Email is not registered"},
            status=status.HTTP_400_BAD_REQUEST,
        )
class ActivateEmailTokenCheckAPI(GenericAPIView):
    """Confirm an email-activation link and mark the user verified."""
    permission_classes = (EcommerceAccessPolicy,)

    def get(self, request, uidb64, token):
        try:
            # BUG FIX: the original referenced `user` before assignment and
            # called the *encoder*; recover the id by decoding uidb64, exactly
            # as PasswordTokenCheckAPI does.
            id = smart_str(urlsafe_base64_decode(uidb64))
            user = User.objects.get(id=id)
            if not TokenGenerator().check_token(user, token):
                return Response(
                    {"error": "Token is not valid, please request a new one"},
                    status=status.HTTP_401_UNAUTHORIZED,
                )
            user.is_verified = True
            user.save()
            return Response(
                {
                    "success": True,
                    "message": "Email verified, you can now login",
                },
                status=status.HTTP_200_OK,
            )
        except:
            # BUG FIX: the 401 Response was constructed but never returned.
            return Response(
                {"error": "Token is not valid, please request a new one"},
                status=status.HTTP_401_UNAUTHORIZED,
            )
class PasswordTokenCheckAPI(GenericAPIView):
    """Validate a password-reset uid/token pair before allowing a new password."""
    permission_classes = (EcommerceAccessPolicy,)

    def get(self, request, uidb64, token):
        try:
            id = smart_str(urlsafe_base64_decode(uidb64))
            user = User.objects.get(id=id)
            if not PasswordResetTokenGenerator().check_token(user, token):
                return Response(
                    {"error": "Token is not valid, please request a new one"},
                    status=status.HTTP_401_UNAUTHORIZED,
                )
            # Echo the credentials back so the client can submit the new password.
            return Response(
                {
                    "success": True,
                    "message": "Credentials Valid",
                    "uidb64": uidb64,
                    "token": token,
                },
                status=status.HTTP_200_OK,
            )
        except:
            # BUG FIX: the 401 Response was constructed but never returned,
            # so decode/lookup failures fell through and returned None.
            return Response(
                {"error": "Token is not valid, please request a new one"},
                status=status.HTTP_401_UNAUTHORIZED,
            )
class SetNewPassword(generics.GenericAPIView):
    """PATCH endpoint that sets the user's new password after token validation."""
    serializer_class = SetNewPasswordSerializer
    permission_classes = (EcommerceAccessPolicy,)

    def patch(self, request):
        serializer = self.serializer_class(data=request.data)
        # Raises 400 automatically on an invalid payload.
        serializer.is_valid(raise_exception=True)
        # BUG FIX: the success Response was built but never returned, so the
        # client got an empty 200 body.
        # NOTE(review): "sucess" key typo kept as-is — clients may depend on it.
        return Response(
            {"sucess": True, "message": "Password reset successful"},
            status=status.HTTP_200_OK,
        )
class EmailTokenObtainPairView(TokenObtainPairView, generics.GenericAPIView):
    """JWT login view issuing token pairs keyed on email via the custom serializer."""
    serializer_class = CustomTokenObtainPairSerializer
    permission_classes = (EcommerceAccessPolicy,)
    # NOTE(review): disabled device-tracking hook kept for reference.
    # def post(self, request, *args, **kwargs):
    #     agent = get_user_agent(self.request)
    #     type = agent.os.family
    #     version = agent.os.version_string
    #     serializer = DeviceSerializer(
    #         data=request.data.pop("password", "email"))
    #     ip = self.request.META.get('HTTP_X_FORWARDED_FOR')
    #     if ip:
    #         ip = ip.split(',')[0]
    #     else:
    #         ip = self.request.META.get('REMOTE_ADDR')
    #     serializer.validated_data["device_ip"] = ip
    #     serializer.validated_data["type"] = type
    #     serializer.validated_data["version"] = version
    #     serializer.save()
class UserLogout(GenericAPIView):
    """Blacklist the supplied refresh token, ending the session."""
    permission_classes = (EcommerceAccessPolicy,)

    def post(self, request, *args, **kwargs):
        try:
            RefreshToken(request.data["refresh"]).blacklist()
        except:
            # Missing, invalid, or already-blacklisted tokens all map to 400.
            return Response(status=status.HTTP_400_BAD_REQUEST)
        return Response(status=status.HTTP_205_RESET_CONTENT)
@permission_classes((EcommerceAccessPolicy,))
def redirect_url(request, marketerId, productId, identifier):
    """Record an affiliate click and redirect to the product URL.

    NOTE(review): unlike the other views there is no @api_view decorator here —
    confirm this is deliberately wired as a plain Django view.
    """
    if request.method == methods["get"]:
        data = {"marketer": marketerId,
                "product": productId, "identifier": identifier}
        serializer = RedirectSerializer(data=data)
        if serializer.is_valid():
            try:
                url = Url.objects.get(
                    marketer=marketerId, product=productId, identifier=identifier
                )
            except:
                return Response(
                    "Sorry link is broken or unable to get product :(",
                    status=status.HTTP_403_FORBIDDEN,
                )
            # Persist the click record before redirecting.
            serializer.save()
            # NOTE(review): django.shortcuts.redirect() takes `permanent`, not
            # a `status` kwarg — confirm this call behaves as intended.
            return redirect(
                url.product_url,
                permanent=True,
                status=status.HTTP_308_PERMANENT_REDIRECT,
            )
@permission_classes((EcommerceAccessPolicy,))
@api_view([methods["get"]])
def product_image(request, productId, imageId):
    """Return a single image belonging to a product."""
    try:
        product = ProductImg.objects.get(id=imageId, productId=productId)
    except:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == methods["get"]:
        serializer = ProductImgSerializer(product)
        # NOTE(review): 201 Created on a GET — 200 OK seems intended.
        return Response(serializer.data, status=status.HTTP_201_CREATED)
@permission_classes((EcommerceAccessPolicy,))
@api_view([methods["get"]])
def product_images(request, productId):
    """Return every image attached to a product."""
    try:
        product = ProductImg.objects.filter(productId=productId)
    except:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == methods["get"]:
        # BUG FIX: filter() returns a queryset, so the serializer needs
        # many=True (single-instance serialization raised at runtime).
        serializer = ProductImgSerializer(product, many=True)
        # NOTE(review): 201 Created on a GET — 200 OK seems intended.
        return Response(serializer.data, status=status.HTTP_201_CREATED)
| samadaderinto/e-commerce-backend | codematics/core/views.py | views.py | py | 21,024 | python | en | code | 1 | github-code | 90 |
1608358740 | from kivymd.app import MDApp
from kivy.lang.builder import Builder
from kivy.metrics import dp
from kivy.properties import StringProperty, ListProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import ScreenManager, Screen
from kivymd.theming import ThemableBehavior
from kivymd.uix.datatables import MDDataTable
from kivymd.uix.list import MDList, OneLineIconListItem
class LoginScreen(Screen):
    """Login screen; layout is defined in main.kv."""
    pass
class StockScreen(Screen):
    """Screen showing the stock data table."""

    def data_table_set(self):
        """Build and return the stock MDDataTable.

        BUG FIX: the original built the table and then discarded it (no
        return), so callers could never attach it to a layout.
        """
        data_table = MDDataTable(
            pos_hint={'center_x': 0.5, 'center_y': 0.5},
            size_hint=(0.9, 0.6),
            check=True,
            rows_num=10,
            column_data=[
                ("No.", dp(18)),
                ("Food", dp(20)),
                ("Calories", dp(20))
            ],
            row_data=[
                ("1", "Burger", "300"),
                ("2", "Oats", "200"),
                ("3", "Oats", "200"),
                ("4", "Oats", "200"),
                ("5", "Oats", "200"),
                ("6", "Oats", "200"),
                ("7", "Oats", "200"),
                ("8", "Oats", "200")
            ]
        )
        # NOTE(review): binding the *unbound* DemoApp methods shifts the
        # callback arguments (the table instance arrives as `self`) — confirm
        # whether a running-app instance method was intended instead.
        data_table.bind(on_row_press=DemoApp.on_row_press)
        data_table.bind(on_check_press=DemoApp.on_check_press)
        return data_table
class UploadScreen(Screen):
    """Upload screen; layout is defined in main.kv."""
    pass
class ContentNavigationDrawer(BoxLayout):
    """Container for the navigation drawer content; layout in main.kv."""
    pass
class DrawerList(ThemableBehavior, MDList):
    """Navigation-drawer menu list that highlights the active item."""
    def set_color_item(self, instance_item):
        '''Called when tap on a menu item.'''
        # Reset the previously-highlighted item (at most one), then highlight
        # the tapped one with the theme's primary color.
        for item in self.children:
            if item.text_color == self.theme_cls.primary_color:
                item.text_color = self.theme_cls.text_color
                break
        instance_item.text_color = self.theme_cls.primary_color
class ItemDrawer(OneLineIconListItem):
    """Single drawer menu entry: an icon plus a one-line label."""
    # Icon name displayed next to the label.
    icon = StringProperty()
    # RGBA label color; defaults to opaque black.
    text_color = ListProperty((0, 0, 0, 1))
# Create the screen manager
# NOTE(review): built at import time, before DemoApp loads main.kv — confirm
# these widgets are actually used by the running app.
sm = ScreenManager()
sm.add_widget(LoginScreen(name='login'))
sm.add_widget(StockScreen(name='stock'))
sm.add_widget(UploadScreen(name='upload'))
class DemoApp(MDApp):
    """Application entry point; root layout comes from main.kv."""
    def __init__(self, **kwargs):
        # Window/taskbar title.
        self.title = "My Material Application"
        super().__init__(**kwargs)
    def build(self):
        """Load the KV layout and return the root widget."""
        self.theme_cls.primary_palette = "Red"
        screen = Builder.load_file('main.kv')
        return screen
    def on_row_press(self, instance_table, instance_row):
        # Debug hook for MDDataTable row taps.
        print(instance_table, instance_row)
    def on_check_press(self, instance_table, current_row):
        # Debug hook for MDDataTable checkbox toggles.
        print(instance_table, current_row)
DemoApp().run() | simofane4/transapp | mangement/main.py | main.py | py | 3,219 | python | en | code | 0 | github-code | 90 |
42888975081 | import os, re, torch
from collections import defaultdict, OrderedDict
from mmsc.datasets.base_dataset import BaseDataset
from mmsc.utils.dataset import load_video
_CONSTANTS = {
'dataset_name': 'voxceleb2',
'wave_ext': 'wav',
'face_ext': 'jpg',
'face_folder': 'aligned_faces',
'video_ext': 'mp4',
'video_folder': 'dev/mp4',
'video_list': 'train.list',
'test_list': 'test.list'
}
class VoxCeleb2Dataset(BaseDataset):
def __init__(self, config, dataset_type, *args, **kwargs):
    """Set up data paths/limits from `config` and build the processor pipeline."""
    super().__init__(_CONSTANTS['dataset_name'], config, dataset_type)
    self._data_dir = config.data_dir
    # Cap on frames sampled per clip and the per-snippet frame count.
    self._max_frames = config.max_frame
    self._snippet_length = config.snippet_length
    if not os.path.exists(self._data_dir):
        raise RuntimeError(
            f"Data folder {self._data_dir} for VoxCeleb2 does not exist."
        )
    # Ordered attribute names of processors applied to each loaded sample.
    self._processors_map = []
    processor_config = config.get('processors', None)
    if processor_config is not None:
        self._init_processor_map(processor_config)
def __getitem__(self, indices):
    """`indices` is an iterable of clip-index pairs; returns the loaded pairs."""
    samples = [[self._get_one_item_(index) for index in pair] for pair in indices]
    return samples
def __len__(self):
    # Dataset length is the number of distinct speakers, not clips.
    return self.num_speakers
def _get_one_item_(self, index):
    """Load one clip, run it through the processor pipeline, attach label/meta."""
    sample = load_video(self.idx_db[index],
                        self._snippet_length,
                        self._max_frames,
                        compressed=False)
    # Apply each configured processor (by attribute name) in order.
    for processor_name in self.processor_map:
        processor = getattr(self, processor_name)
        sample = processor(sample)
    sample.targets = self._get_target(index)
    sample.meta.update(self._get_meta(index))
    return sample
def load(self):
    """Populate the index db; only the train/val splits are supported."""
    if self.dataset_type not in ('train', 'val'):
        raise RuntimeError("{} {}".format(_CONSTANTS['dataset_name'],
                                          self.dataset_type), "set not found")
    self._load_dataset()
def _load_dataset(self):
    """Parse the train/val clip list into the index db and speaker groupings."""
    video_path = os.path.join(self._data_dir,
                              _CONSTANTS['video_folder'])
    if self.dataset_type == 'train':
        video_list = os.path.join(self._data_dir,
                                  _CONSTANTS['video_list'])
    elif self.dataset_type == 'val':
        video_list = os.path.join(self._data_dir,
                                  _CONSTANTS['test_list'])
    else:
        raise TypeError(f'invalid {self.dataset_type} dataset type')
    # speaker id -> list of clip indices belonging to that speaker.
    self._pid_db_group = defaultdict(list)
    with open(video_list, 'r') as f:
        lines = f.readlines()
    # Lines look like ".../idXXXXX/<url>/<clip>.mp4"; capture pid/url/clip.
    pattern = re.compile(r'.*?(id\d{5})/(.*?)/(\d{5}).mp4\n')
    self._idx_db = []
    self._pid_label = []
    for i, line in enumerate(lines):
        if not line.strip().endswith(_CONSTANTS['video_ext']):
            continue
        [pid, url, clip] = list(pattern.search(line).groups())
        key = '{}.{}'.format(clip, _CONSTANTS['video_ext'])
        video = '{}/{}/{}/{}'.format(video_path, pid, url, key)
        # Glob pattern for the pre-extracted aligned face crops of this clip.
        faces = '{}/{}/{}/{}/{}/*.{}'.format(self._data_dir, _CONSTANTS['face_folder'], pid,
                                             url, clip, _CONSTANTS['face_ext'])
        self._idx_db.append((video, faces, key, pid))
        self._pid_label.append(pid)
        self._pid_db_group[pid].append(i)
    # Map each distinct speaker id to a contiguous integer class label.
    self._pid_label = sorted(list(set(self._pid_label)))
    self._pid_label = { pid: i for i, pid in enumerate(self._pid_label) }
def _load_test_dataset(self):
    """Build the index db from the clips referenced by the trial-pair list."""
    video_path = os.path.join(self._data_dir,
                              _CONSTANTS['video_folder'])
    test_list = os.path.join(self._data_dir,
                             _CONSTANTS['test_list'])
    with open(test_list, 'r') as f:
        pairs = f.readlines()
    keep = []
    self._idx_db = []
    # Each pair line is "<label> <clip_a> <clip_b>"; collect unique clip paths.
    for p in pairs:
        keep.extend(p.split()[1:])
    keep = list(set(keep))
    for db in keep:
        pid, url, key = db.split('/')
        video = '{}/{}/{}/{}'.format(video_path, pid, url, key)
        # NOTE(review): test entries are 3-tuples (no faces glob), unlike the
        # 4-tuples produced by _load_dataset — confirm downstream handling.
        self._idx_db.append((video, key, pid))
def _get_target(self, index):
    # Map the clip's speaker id string to its integer class label tensor.
    pid = self.idx_db[index][-1]
    return torch.LongTensor([self.pid_label[pid]])
def _get_meta(self, index):
    """Per-sample metadata; currently an empty placeholder dict."""
    meta = {}
    return meta
def _init_processor_map(self, config):
    '''
    Build the ordered processor-name list from config, skipping augmentation
    processors for non-training splits.
    '''
    for p in OrderedDict(config).keys():
        # Augmentations only apply when training.
        if self.dataset_type != 'train' and 'augment' in p:
            continue
        self._processors_map.append(p)
@property
def num_speakers(self):
    # One entry per distinct speaker id.
    return len(self.pid_db_group.keys())
@property
def pid_db_group(self):
    # speaker id -> list of clip indices belonging to that speaker.
    return self._pid_db_group
@property
def idx_db(self):
    # List of per-clip tuples (video path, faces glob, key, pid).
    return self._idx_db
@property
def pid_label(self):
    # speaker id -> contiguous integer class label.
    return self._pid_label
@property
def processor_map(self):
return self._processors_map | zcxu-eric/AVA-AVD | model/mmsc/datasets/builders/voxceleb2/dataset.py | dataset.py | py | 5,233 | python | en | code | 32 | github-code | 90 |
19586753164 | #!/usr/bin/python
##################################
# getrange.py - Get distance reading from ultrasonic range detector
# on GPIO pins GPIO_TRIGGER_F and GPIO_ECHO_F
#
# HISTORICAL INFORMATION -
#
# 2016-xx-xx Eric/Mike Created for Raspberry pi
# 2017-02-04 msipin Adapted to C.H.I.P. by replacing GPIO library,
# editing pins and adding "settling time" before
# the ultrasonic sensor is triggered.
# 2021-01-24 msipin Changed definition for "main" ultrasonic sensor pins.
# 2021-01-25 msipin Added left and right ultrasonic sensors and parameterized "distance" function
# 2021-02-04 msipin Improved debugging in ultrasonic sensor distance function
# 2021-02-06 msipin Increased allowable range of sensors (one spec says up to 500cm!). Also returned
# "max" when sensor doesn't pickup anything, to allow failover on bad reading
##################################
#Libraries
#import CHIP_IO.GPIO as GPIO
import Adafruit_GPIO as gpio
GPIO = gpio.get_platform_gpio()
import time
import sys
# Global flag polled by the echo-wait loops in distance(); allows another
# thread to abort a measurement by setting it False.
run=True
# Import the pin definition (a symbolic link to MyPins.<RobotName>.py)
# for your particular robot -
from MyPins import *
#set GPIO direction (IN / OUT): trigger pins drive the ping, echo pins listen.
GPIO.setup(GPIO_TRIGGER_L, gpio.OUT)
GPIO.setup(GPIO_ECHO_L, gpio.IN)
#
GPIO.setup(GPIO_TRIGGER_F, gpio.OUT)
GPIO.setup(GPIO_ECHO_F, gpio.IN)
#
GPIO.setup(GPIO_TRIGGER_R, gpio.OUT)
GPIO.setup(GPIO_ECHO_R, gpio.IN)
# Settle each trigger to zero
GPIO.output(GPIO_TRIGGER_L, False)
GPIO.output(GPIO_TRIGGER_F, False)
GPIO.output(GPIO_TRIGGER_R, False)
def distance(trigger_gpio,echo_gpio):
    """Fire one ultrasonic ping and return the measured distance in inches.

    Returns 999 ("max distance") for out-of-range or invalid readings so
    callers can fail over on a bad sensor.
    """
    # Uncomment the following line if THIS THREAD
    # will need to modify the "run" variable. DO NOT
    # need to uncomment it to just READ it...
    global run
    # Send trigger pulse
    # set Trigger to HIGH
    GPIO.output(trigger_gpio, True)
    # set Trigger after 0.01ms to LOW
    time.sleep(0.00001)
    GPIO.output(trigger_gpio, False)
    now = time.time()
    StartTime = now
    # Wait (max ~0.1 s) for the echo line to go high; timestamp the rise.
    while run and GPIO.input(echo_gpio) == 0 and ((now - StartTime) < 0.1):
        now = time.time()
        # NOTE(review): printing inside a timing loop slows the poll and can
        # skew the measurement — confirm this is debug-only output.
        print("\t\tE.T. %02d StartTime: %0.4f" % (trigger_gpio, (now - StartTime)))
        StartTime = now
    StopTime = now
    # save time of arrival
    while run and GPIO.input(echo_gpio) == 1 and ((now - StopTime) < 0.1):
        now = time.time()
        print("\t\tE.T. %02d StopTime: %0.4f" % (trigger_gpio, (now - StopTime)))
        StopTime = now
    # time difference between start and arrival
    TimeElapsed = StopTime - StartTime
    # multiply with the sonic speed (34300 cm/s)
    # and divide by 2, because there and back
    # (the trailing /2.54 converts centimetres to inches)
    dist = ((TimeElapsed * 34300) / 2 / 2.54)
    # If distance is beyond the range of this device,
    # consider it "invalid", and set it to "max. distance" (999)
    if (dist > 120):
        dist = 999
    if (dist <= 0):
        dist = 999
    return dist
def main():
    """Poll the left/front/right sensors once, or repeatedly with -c."""
    continuous=False
    for arg in sys.argv[1:]:
        #print arg
        if (arg == "-c"):
            continuous=True
    while (True):
        try:
            # NOTE(review): a bare `print` is a no-op expression in Python 3
            # (a Python-2 leftover) — presumably meant print().
            print
            # Ensure triggers have time to settle
            time.sleep(0.25)
            for NAME,TRIG,ECHO in [ ["LEFT",GPIO_TRIGGER_L, GPIO_ECHO_L],\
                    ["FWD",GPIO_TRIGGER_F, GPIO_ECHO_F],\
                    ["RIGHT",GPIO_TRIGGER_R, GPIO_ECHO_R] ]:
                dist = distance(TRIG,ECHO)
                print ("%s - %d in" % (NAME,dist))
        # Reset by pressing CTRL + C
        except KeyboardInterrupt:
            print("\n\nMeasurement stopped by User")
            break
        if (not continuous):
            break
if __name__ == "__main__":
main()
GPIO.cleanup()
print
| rhazzed/potatoCHIP | getrange.py | getrange.py | py | 3,598 | python | en | code | 0 | github-code | 90 |
26496706878 | # L)백준2447_별 찍기 - 10
# https://www.acmicpc.net/problem/2447
# 문제
# 재귀적인 패턴으로 별을 찍어 보자. N이 3의 거듭제곱(3, 9, 27, ...)이라고 할 때, 크기 N의 패턴은 N×N 정사각형 모양이다.
# 크기 3의 패턴은 가운데에 공백이 있고, 가운데를 제외한 모든 칸에 별이 하나씩 있는 패턴이다.
# ***
# * *
# ***
# N이 3보다 클 경우, 크기 N의 패턴은 공백으로 채워진 가운데의 (N/3)×(N/3) 정사각형을 크기 N/3의 패턴으로 둘러싼 형태이다. 예를 들어 크기 27의 패턴은 예제 출력 1과 같다.
#
# 입력
# 첫째 줄에 N이 주어진다. N은 3의 거듭제곱이다. 즉 어떤 정수 k에 대해 N=3k이며, 이때 1 ≤ k < 8이다.
#
# 출력
# 첫째 줄부터 N번째 줄까지 별을 출력한다.
def star(a,b,n):
    """Recursively draw the size-n Sierpinski-carpet star pattern centred at
    row a / column b: blank the middle ninth, then recurse into the 9 cells."""
    if n == 1:
        return
    # Out-of-grid guard for the recursion.
    if a > l*l or b > l*l:
        return
    # Recurse into each of the 3x3 sub-squares of size n/3.
    for i in range(3):
        for j in range(3):
            star((a-n//3)+(i*n//3),(b-n//3)+(j*n//3),n//3)
    star_blank(a,b,n)
# For size n, blank out the central (n/3) x (n/3) box.
def star_blank(a,b,n):
    m = n//3
    # Shift a and b from the centre to the box's top-left starting point.
    a -= m//2
    b -= m//2
    for i in range(m):
        for j in range(m):
            list[a+i][b+j] = ' '
n = int(input())
# NOTE: `list` shadows the builtin; star()/star_blank() mutate this global grid.
list = [["*"for _ in range(n)]for _ in range(n)]
l = len(list)
# Draw the pattern from the centre of the grid, then print row by row.
star(n//2,n//2,n)
for i in list:
    print(''.join(i))
| hamin2065/PnP-Algorithm | Week 2/L)백준2447.py | L)백준2447.py | py | 1,363 | python | ko | code | 0 | github-code | 90 |
43940853851 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from tqdm import tqdm
import os
# 存储数据的根目录
ROOT_PATH = "./data"
# 比赛数据集路径
DATASET_PATH = ROOT_PATH + '/wechat_algo_data1/'
# 训练集
USER_ACTION = DATASET_PATH + "user_action.csv"
FEED_INFO = DATASET_PATH + "feed_info.csv"
FEED_EMBEDDINGS = DATASET_PATH + "feed_embeddings.csv"
# 测试集
TEST_FILE = DATASET_PATH + "test_a.csv"
# 初赛待预测行为列表
ACTION_LIST = ["read_comment", "like", "click_avatar", "forward"]
FEA_COLUMN_LIST = ["read_comment", "like", "click_avatar", "forward", "comment", "follow", "favorite"]
END_DAY = 15
# 视频feed 本身的信息
FEA_FEED_LIST = ['feedid', 'authorid', 'videoplayseconds', 'bgm_song_id', 'bgm_singer_id']
# 负样本下采样比例(负样本:正样本)
ACTION_SAMPLE_RATE = {"read_comment": 5, "like": 5, "click_avatar": 5, "forward": 10, "comment": 10, "follow": 10,
"favorite": 10}
def process_embed(train):
    """Expand the space-separated `feed_embedding` column into 512 float columns.

    Missing or empty embeddings become all-zero rows.  Returns `train` with
    the new `embed0..embed511` columns concatenated on the right.
    """
    feed_embed_array = np.zeros((train.shape[0], 512))
    for i in tqdm(range(train.shape[0])):
        x = train.loc[i, 'feed_embedding']
        # BUG FIX: the original tested `x != np.nan`, which is always True
        # (NaN never compares equal), so missing embeddings fell through to
        # float-parsing instead of being zero-filled.
        if pd.notna(x) and str(x).strip() != '':
            y = [float(v) for v in str(x).strip().split(" ")]
        else:
            y = np.zeros((512,)).tolist()
        feed_embed_array[i] += y
    temp = pd.DataFrame(columns=[f"embed{i}" for i in range(512)], data=feed_embed_array)
    train = pd.concat((train, temp), axis=1)
    return train
def prepare_data():
    """Join user actions with feed/user historical features, apply log
    transforms and id shifting, then write the test file and one
    negatively-downsampled training file per action."""
    feed_info_df = pd.read_csv(FEED_INFO)
    # user action df col : userid, date_, feedid, "read_comment", "like", "click_avatar", "forward", "comment", "follow", "favorite"
    #user_action_df = pd.read_csv(USER_ACTION)[["userid", "date_", "feedid"] + FEA_COLUMN_LIST]
    user_action_df = pd.read_csv(USER_ACTION)[["userid", "date_", "feedid", "device"] + FEA_COLUMN_LIST]
    feed_embed = pd.read_csv(FEED_EMBEDDINGS)  # NOTE(review): loaded but unused here
    test = pd.read_csv(TEST_FILE)
    # userid, date_, feedid, device, "read_comment", "like", "click_avatar", "forward", "comment", "follow", "favorite"
    # add feed feature
    train = pd.merge(user_action_df, feed_info_df[FEA_FEED_LIST], on='feedid', how='left')
    # ['userid', 'date_', 'feedid', device, 'read_comment', 'like', 'click_avatar', 'forward', 'comment', 'follow', 'favorite',
    # 'authorid', 'videoplayseconds', 'bgm_song_id', 'bgm_singer_id']
    test = pd.merge(test, feed_info_df[FEA_FEED_LIST], on='feedid', how='left')
    # ['userid', 'feedid', 'device', 'authorid', 'videoplayseconds', 'bgm_song_id', 'bgm_singer_id']
    stage_day = 14  # NOTE(review): assigned but unused
    # Counts of historical actions aggregated per userid (sliding window)
    user_date_feature_path = os.path.join(ROOT_PATH, "feature", "userid_feature.csv")
    user_date_feature = pd.read_csv(user_date_feature_path)
    user_date_feature = user_date_feature.set_index(["userid", "date_"])
    # Counts of historical actions aggregated per feedid (sliding window)
    feed_date_feature_path = os.path.join(ROOT_PATH, "feature", "feedid_feature.csv")
    feed_date_feature = pd.read_csv(feed_date_feature_path)
    feed_date_feature = feed_date_feature.set_index(["feedid", "date_"])
    train = train.join(feed_date_feature, on=['feedid', 'date_'], how='left', rsuffix="_feed")
    train = train.join(user_date_feature, on=['userid', 'date_'], how='left', rsuffix='_user')
    # ['userid', 'date_', 'feedid', device, 'read_comment', 'like', 'click_avatar', 'forward', 'comment', 'follow', 'favorite',
    # 'authorid', 'videoplayseconds', 'bgm_song_id', 'bgm_singer_id', read_commentsum, likesum, click_avatarsum, forwardsum,
    # read_commentsum_user, likesum_user, click_avatarsum_user, forwardsum_user]
    FEA_COL = ["read_comment", "like", "click_avatar", "forward"]
    feed_feature_col = [b + "sum" for b in FEA_COL]
    user_feature_col = [b + "sum_user" for b in FEA_COL]
    # Missing history counts -> 0, then smooth with log1p-style transform.
    train[feed_feature_col] = train[feed_feature_col].fillna(0.0)
    train[user_feature_col] = train[user_feature_col].fillna(0.0)
    train[feed_feature_col] = np.log(train[feed_feature_col] + 1.0)
    train[user_feature_col] = np.log(train[user_feature_col] + 1.0)
    train[["authorid", "bgm_song_id", "bgm_singer_id"]] += 1  # shift all ids by 1 first, then fill unknown with 0
    train[["authorid", "bgm_song_id", "bgm_singer_id", "videoplayseconds"]] = \
        train[["authorid", "bgm_song_id", "bgm_singer_id", "videoplayseconds"]].fillna(0)
    # Whole-history aggregates used for the test set (no date_ index available).
    user_sum_feature_path = os.path.join(ROOT_PATH, "feature", "userid_sum_feature.csv")
    user_sum_feature = pd.read_csv(user_sum_feature_path)
    user_sum_feature = user_sum_feature.set_index(['userid'])
    feed_sum_feature_path = os.path.join(ROOT_PATH, "feature", "feedid_sum_feature.csv")
    feed_sum_feature = pd.read_csv(feed_sum_feature_path)
    feed_sum_feature = feed_sum_feature.set_index(['feedid'])
    test = test.join(feed_sum_feature, on=['feedid'], how='left', rsuffix="_feed")
    test = test.join(user_sum_feature, on=['userid'], how='left', rsuffix='_user')
    test[feed_feature_col] = test[feed_feature_col].fillna(0.0)
    test[user_feature_col] = test[user_feature_col].fillna(0.0)
    test[feed_feature_col] = np.log(test[feed_feature_col] + 1.0)
    test[user_feature_col] = np.log(test[user_feature_col] + 1.0)
    test[["authorid", "bgm_song_id", "bgm_singer_id"]] += 1  # shift all ids by 1 first, then fill unknown with 0
    test[["authorid", "bgm_song_id", "bgm_singer_id", "videoplayseconds"]] = \
        test[["authorid", "bgm_song_id", "bgm_singer_id", "videoplayseconds"]].fillna(0)
    test["videoplayseconds"] = np.log(test["videoplayseconds"] + 1.0)
    test.to_csv(ROOT_PATH + f'/test_data.csv', index=False)
    for action in tqdm(ACTION_LIST):
        # Keep the following two lines for now
        print(f"prepare data for {action}")
        tmp = train.drop_duplicates(['userid', 'feedid', action], keep='last')
        # Negative downsampling per ACTION_SAMPLE_RATE
        df_neg = tmp[tmp[action] == 0]
        df_neg = df_neg.sample(frac=1.0 / ACTION_SAMPLE_RATE[action], random_state=42, replace=False)
        # Merge positive and (downsampled) negative samples
        df_all = pd.concat([df_neg, tmp[tmp[action] == 1]])
        df_all["videoplayseconds"] = np.log(df_all["videoplayseconds"] + 1.0)
        df_all.to_csv(ROOT_PATH + f'/train_data_for_{action}.csv', index=False)
def check_file():
    """Verify that the required input data files exist.

    Returns (ok, missing): ok is True when every file is present,
    missing lists the paths that were not found (in declaration order).
    """
    required = [USER_ACTION, FEED_INFO, TEST_FILE]
    missing = [path for path in required if not os.path.exists(path)]
    return len(missing) == 0, missing
def create_dir():
    """Create the data directory tree used by the pipeline (idempotent)."""
    # Top-level data directory.
    if not os.path.exists(ROOT_PATH):
        print('Create dir: %s' % ROOT_PATH)
        os.mkdir(ROOT_PATH)
    # Sub-directories under the data directory; parents are listed before
    # their children so a plain os.mkdir always succeeds.
    subdirs = ["offline_train", "online_train", "evaluate", "submit",
               "feature", "model", "model/online_train", "model/offline_train"]
    for sub in subdirs:
        target = os.path.join(ROOT_PATH, sub)
        if not os.path.exists(target):
            print('Create dir: %s' % target)
            os.mkdir(target)
'''
为DeepFM 增加一些特征输入 用户过去各种行为次数sum feed被交互行为次数
'''
def statistic_feature(start_day=1, before_days=4, agg='sum'):
    """
    Aggregate user/feed actions over a sliding window of `before_days` days
    and save them as dense features for DeepFM.
    (NOTE(review): the original docstring said a 5-day window while the
    default parameter is 4 — confirm the intended window length.)
    :param start_day: Int. first day of the window sweep
    :param before_days: Int. window length in days
    :param agg: String. aggregation method
    :return:
    """
    # Actions considered in the preliminary round; count them over windows.
    action_features = ["read_comment", "like", "click_avatar", "forward"]
    history_data = pd.read_csv(USER_ACTION)[["userid", "date_", "feedid"] + action_features]
    feature_dir = os.path.join(ROOT_PATH, "feature")
    for dim in ["userid", "feedid"]:
        print(dim)
        user_data = history_data[[dim, "date_"] + action_features]
        res_arr = []
        # Slide a [start, start + before_days) window across the training days.
        for start in range(start_day, END_DAY-before_days+1):
            temp = user_data[((user_data["date_"]) >= start) & (user_data["date_"] < (start + before_days))]
            temp = temp.drop(columns=['date_'])
            temp = temp.groupby([dim]).agg([agg]).reset_index()
            # Flatten the (column, agg) MultiIndex into e.g. "likesum".
            temp.columns = list(map(''.join, temp.columns.values))
            # The features describe the day right after the window.
            temp["date_"] = start + before_days
            res_arr.append(temp)
        dim_feature = pd.concat(res_arr)
        feature_path = os.path.join(feature_dir, dim+"_feature.csv")
        print('Save to: %s'%feature_path)
        dim_feature.to_csv(feature_path, index=False)
    # Also aggregate the actions over the full 14-day history (used at test time).
    history_data = pd.read_csv(USER_ACTION)[["userid", "date_", "feedid"] + action_features]
    for dim in ['userid', 'feedid']:
        print(dim)
        user_data = history_data[[dim, 'date_'] + action_features]
        temp = user_data.drop(columns=['date_'])
        temp['seen'] = 1
        temp = temp.groupby([dim]).agg(
            [agg]).reset_index()  # per id: total count of each action over the whole history
        temp.columns = list(map(''.join, temp.columns.values))
        _path = os.path.join(ROOT_PATH, "feature", dim + "_sum_feature.csv")
        # print('Save to: $s'%_path)
        temp.to_csv(_path, index=False)
# def generate_sample():
# day = 14
# for action in ACTION_LIST:
# # action_df =
# pass
# 采用的是基本特征(离散特征:{'userid', 'feedid', 'authorid', 'bgm_song_id', 'bgm_singer_id'},连续特征:{'videoplayseconds'})
def main():
    """Entry point: build directories, validate inputs, then generate the
    statistical features and the train/test data files."""
    create_dir()
    ok, missing = check_file()
    if not ok:
        print("请检查目录中是否存在下列文件: ", ",".join(missing))
        return
    statistic_feature()
    prepare_data()
if __name__ == "__main__":
main()
| crayonyon/wechat-big-data-game | prepare_data.py | prepare_data.py | py | 9,889 | python | en | code | 0 | github-code | 90 |
9769635982 | from django.contrib import admin
from .models import (Challenge, Solution,)
from .forms import (ChallengeForm, SolutionForm,)
class SolutionInline(admin.TabularInline):
    # Inline editor so solutions can be managed directly on the Challenge
    # admin page.
    model = Solution
    fields = ['text','is_correct',]
    extra = 0  # do not render blank extra solution forms by default
class ChallengeAdmin(admin.ModelAdmin):
    """Admin configuration for Challenge with inline solution editing."""
    form = ChallengeForm
    list_display = ('title', 'level', 'status', 'last_modified')
    list_filter = ['status', 'creation', 'last_modified']
    search_fields = ['title']
    inlines = [
        SolutionInline,
    ]
    def save_model(self, request, obj, form, change):
        # Record the creator only on first save; the previous version
        # overwrote created_by with the current editor on every edit, and
        # called form.save() directly instead of delegating to the admin.
        if not change:
            obj.created_by = request.user
        super().save_model(request, obj, form, change)
admin.site.register(Challenge, ChallengeAdmin) | saraivaufc/askmath | competition/admin.py | admin.py | py | 630 | python | en | code | 0 | github-code | 90 |
35024302777 | #!/usr/bin/env python3
"""Somewhat automated crawler using the YTCrawl library."""
# pylama:ignore=E501
# Note that the common import also checks for Python 3
from common import youtube_id_from_cmdline, log, rel_path
# Make sure these parameters can be imported from another script
BATCH_FILE = rel_path("batch_ytid.txt")
OUTPUT_DIR = rel_path("output")
def main():
    """Entry point: write the sorted YouTube-id batch file, then crawl it."""
    video_ids = youtube_id_from_cmdline()
    log("Creating batch file: %s", BATCH_FILE)
    with open(BATCH_FILE, "w") as batch:
        batch.write('\n'.join(sorted(video_ids)) + '\n')
    log("Will use output directory: %s", OUTPUT_DIR)
    log("Starting batch process")
    # Imported lazily so the crawler dependency is only needed at run time.
    from ytcrawl.crawler import Crawler
    crawler = Crawler()
    # Speed up crawling; these are private knobs of the YTCrawl library.
    crawler._crawl_delay_time = 1
    crawler._cookie_update_delay_time = 1
    crawler.batch_crawl(BATCH_FILE, OUTPUT_DIR)
    log("COMPLETED")
if __name__ == '__main__':
main()
| CraigKelly/youtube-data | do_crawl.py | do_crawl.py | py | 907 | python | en | code | 4 | github-code | 90 |
75053607656 | # -*- coding: utf-8 -*-
import pandas as pd
import os
from rdkit import Chem
from tqdm import tqdm
from autotemplate.run_utils import clearIsotope, RemoveReagent
from autotemplate.extract_utils import canon_remap
import matplotlib.pyplot as plt
import CGRtools
plt.rcParams["figure.dpi"] = 400
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
def make_dict(data):
    """Build a {reaction_id: reaction_smiles} map from tab-separated lines."""
    entries = (line.strip().split('\t') for line in data)
    return {rxn_id: smiles for smiles, rxn_id in entries}
def CGR_smarts(reaction_smiles, display_figure=False):
    """Return the condensed-graph-of-reaction (CGR) SMARTS string for a
    reaction SMILES, after stripping isotope labels."""
    reaction_smiles_new = clearIsotope(reaction_smiles)
    r = CGRtools.smiles(reaction_smiles_new)
    r.clean2d()
    cgr = ~r  # CGRtools: ~reaction composes the condensed graph of the reaction
    if display_figure:
        # NOTE(review): display() is an IPython builtin; this branch raises
        # NameError when run outside a notebook — confirm intended usage.
        display(cgr)
    return cgr.__str__()
def is_atommap_corrected(ori_rxn_smiles, proc_rxn_smiles):
    """True when processing changed the atom mapping, i.e. the two reactions
    produce different condensed reaction graphs."""
    return CGR_smarts(ori_rxn_smiles) != CGR_smarts(proc_rxn_smiles)
def is_curated(ori_rxn_smiles, proc_rxn_smiles):
    """If the processed reactant SMILES contains a molecule that the original
    reactants do not possess, the reaction has been curated.  Multiplicity
    matters, so compare as lists (with removal) rather than sets."""
    original = [canon_remap(s) for s in ori_rxn_smiles.split('>>')[0].split()]
    processed = [canon_remap(s) for s in proc_rxn_smiles.split('>>')[0].split()]
    for molecule in processed:
        if molecule not in original:
            return True
        original.remove(molecule)
    return False
# Reaction classes to analyse; one data directory is expected per class.
all_rxn_class = ["AdamsDecarboxylation",
"Baylis-HillmanReaction",
"Buchwald-HartwigCross-Coupling",
"Chan_LamCoupling",
"DielsAlder",
"FischerIndoleSynthesis",
"Friedel-CraftsAcylation",
"Friedel-CraftsAlkylation",
"GrignardReaction",
"HiyamaCoupling",
"HuisgenCycloaddition",
"Hydrogenation",
"Kabachnik-FieldsReaction",
"KumadaCoupling",
"MannichReaction",
"NegishiCoupling",
"PausonKhandReaction",
"ReductiveAmination",
"SuzukiCoupling",
"WittigReaction",
]
# Per-class fractions: [no change, mapping curated, reactant curated, removed].
records = []
statistics_df = pd.DataFrame(columns=["Reaction type", "No. of reactions", "No. of universal templates", "Residual rate", "Time elapsed"])
for k, rxn_class in enumerate(all_rxn_class):
    print('Current rxn: {}, number {}'.format(rxn_class, k+1))
    data_dir = './data/{}'.format(rxn_class)
    unprocessed_path = os.path.join(data_dir, 'MappingResult_{}.txt'.format(rxn_class))
    processed_path = os.path.join(data_dir, 'MappingResult_{}.txt.processed'.format(rxn_class))
    failed_path = os.path.join(data_dir, 'MappingResult_{}.txt.failed'.format(rxn_class))
    template_path = os.path.join(data_dir, 'all_templates_used.csv')
    output_dir = './job/process/'
    output_path = os.path.join(output_dir, "{}.sh.o".format(rxn_class))
    number_templates = len(pd.read_csv(template_path))
    with open(unprocessed_path, 'r') as f:
        unprocessed = f.readlines()
    with open(processed_path, 'r') as f:
        processed = f.readlines()
    with open(failed_path, 'r') as f:
        failed = f.readlines()
    with open(output_path, 'r') as f:
        outputs = f.readlines()
    # NOTE(review): the elapsed-time line is assumed to be third from the
    # end of the job log — confirm against the job script's output format.
    time_used = outputs[-3]
    total = len(unprocessed)
    same = 0
    curated = 0
    mapping_curated = 0
    failed = len(failed)
    unprocessed = make_dict(unprocessed)
    processed = make_dict(processed)
    # Classify each processed reaction against its original version.
    for rxn_id, rxn_smiles_1 in tqdm(processed.items()):
        rxn_smiles_2 = unprocessed[rxn_id]
        rxn_smiles_2 = RemoveReagent(rxn_smiles_2)
        try:
            smarts_1 = CGR_smarts(rxn_smiles_1)
            smarts_2 = CGR_smarts(rxn_smiles_2)
        except:
            # CGR construction failed; count as an atom-mapping curation.
            print(rxn_id)
            print(rxn_smiles_1)
            print(rxn_smiles_2)
            mapping_curated += 1
            continue
        if is_curated(rxn_smiles_1, rxn_smiles_2):
            curated += 1
        elif is_atommap_corrected(rxn_smiles_1, rxn_smiles_2):
            mapping_curated += 1
            print(rxn_id)
            print(rxn_smiles_1) # corrected
            print(rxn_smiles_2) # original
        else:
            same += 1
    records.append([same/total, mapping_curated/total, curated/total, failed/total])
    # Reformat the elapsed minutes as "H hrs M mins" / "M mins".
    mins = float(time_used.split(" min")[0])
    hrs = mins // 60
    mins = mins % 60
    if hrs:
        time_used = str(int(hrs))+" hrs "+str(int(mins))+" mins"
    else:
        time_used = str(int(mins))+" mins"
    statistics_df = pd.concat([statistics_df, pd.DataFrame({
        "Reaction type":[rxn_class],
        "No. of reactions":[total],
        "No. of universal templates":[number_templates],
        "Residual rate":["{:.1f}%".format(100*(1-(failed)/total))],
        "Time elapsed":[time_used]
    })], axis=0)
# Save the per-class fractions and render them as a stacked bar chart.
total_data = pd.DataFrame(records, columns = ['no change', 'mapping curated' ,'reactant curated', 'removed'], index = all_rxn_class)
total_data = total_data.iloc[::-1]
total_data.to_csv('docs/analyze_results.csv')
plot = total_data.plot(kind='barh',stacked=True)
fig = plot.get_figure()
fig.savefig("docs/output.svg", format="svg")
statistics_df.reset_index(drop=True).to_csv('docs/statistics_df.csv')
| Lung-Yi/AutoTemplate | post_analysis.py | post_analysis.py | py | 5,677 | python | en | code | 2 | github-code | 90 |
44772195734 | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from search_func import search_query, get_data_by_id
app = FastAPI()
# Configure CORS
app.add_middleware(
CORSMiddleware,
allow_origins=['*'], # Allow all origins
allow_credentials=True,
allow_methods=["*"], # Allow all methods
allow_headers=["*"], # Allow all headers
)
@app.get("/")
async def root():
    """Health-check endpoint returning a static greeting."""
    return {"message": "Hello World"}
@app.get("/search")
async def search(query: str ,skip: int = 0, limit: int = 10, location: str = None, start_time: str = None, end_time: str = None):
    """Full-text search with pagination and optional location/time filters.

    Returns the backend search results, or {'message': 'Server error'} if
    the query fails.
    """
    try:
        return await search_query(query, skip, limit, location, start_time, end_time)
    except Exception:
        # Catch Exception rather than a bare `except:` so task cancellation
        # (CancelledError) and interpreter shutdown signals still propagate.
        return {'message': 'Server error'}
@app.get("/search/{id}")
async def search_by_id(id: str):
    """Fetch a single document by its id.

    Returns the document, or {'message': 'Server error'} if the lookup fails.
    """
    try:
        return await get_data_by_id(id)
    except Exception:
        # Catch Exception rather than a bare `except:` so task cancellation
        # (CancelledError) and interpreter shutdown signals still propagate.
        return {'message': 'Server error'}
| bhattaraisushma/DataB | backend/main.py | main.py | py | 1,099 | python | en | code | 0 | github-code | 90 |
73579975978 | import time
import numpy as np
import dfibers.numerical_utilities as nu
import itertools as it
import matplotlib.pyplot as plt
import dfibers.fixed_points as fx
class FiberTrace:
    """
    A record of fiber traversal. Has fields:
    status: "Terminated" | "Closed loop" | "Max steps" | "Timed out" | "Critical"
    c: direction vector as N x 1 np.array
    x: current fiber point as (N+1) x 1 np.array
    DF: current fiber Jacobian as N x (N+1) np.array
    z: current fiber tangent vector as (N+1) x 1 np.array
    z_initial: initial fiber tangent vector as (N+1) x 1 np.array
    points[p]: the p^th fiber point as (N+1) x 1 np.array
    residuals[p]: the p^th residual error as float
    step_amounts[p]: the p^th step amount as float
    step_data[p]: additional data for the p^th step (user defined)
    candidates[p]: True if p^{th} point is candidate root
    """
    def __init__(self, c):
        # Traversal starts in the "Traversing" state; updated on termination.
        self.status = "Traversing"
        self.c = c
        self.x = None
        self.DF = None
        self.z = None
        self.z_initial = None
        self.points = []
        self.tangents = []
        self.residuals = []
        self.step_amounts = []
        self.step_data = []
        # Boolean indices over points, grown lazily by index_candidates().
        self.candidates = np.empty(0, dtype=bool)
        self.sign_changes = np.empty(0, dtype=bool)
        self.alpha_mins = np.empty(0, dtype=bool)
    def index_candidates(self, abs_alpha_min = True):
        """Extend the candidate index over points added since the last call
        and return (candidates, sign_changes, alpha_mins)."""
        # Only the alpha components (last coordinate) of the new points matter.
        alpha = np.array([p[-1,0]
            for p in self.points[len(self.candidates):]])
        fixed_index, sign_changes, alpha_mins = fx.index_candidates(
            alpha, abs_alpha_min)
        self.candidates = np.concatenate((self.candidates, fixed_index))
        self.sign_changes = np.concatenate((self.sign_changes, sign_changes))
        self.alpha_mins = np.concatenate((self.alpha_mins, alpha_mins))
        return self.candidates, self.sign_changes, self.alpha_mins
    def halve_points(self, abs_alpha_min = True):
        """Bound memory: keep all candidate roots but discard every other
        non-candidate point from the trace history."""
        # Update candidate index
        self.index_candidates(abs_alpha_min)
        # Keep all candidates and half non-candidates
        keep = self.candidates.copy()
        non_candidates = np.flatnonzero(self.candidates == False)
        keep_non_candidates = non_candidates[::2]
        keep[keep_non_candidates] = True
        # keep leading and last points for closed loop detection
        keep[[0,1,2,-1]] = True
        # Set up pruning
        def prune(l):
            return [l[k] for k in range(len(l)) if keep[k]]
        # Do pruning
        self.points = prune(self.points)
        self.tangents = prune(self.tangents)
        self.residuals = prune(self.residuals)
        self.step_amounts = prune(self.step_amounts)
        self.step_data = prune(self.step_data)
        self.candidates = self.candidates[keep]
        self.sign_changes = self.sign_changes[keep]
        self.alpha_mins = self.alpha_mins[keep]
def compute_step_amount_factory(f2, f3):
    """
    Build a compute_step_amount function for traversal.
    f2(x) should bound |d^2f_i(x)/dx_j dx_k| for all i,j,k.
    f3 should bound |d^3f_i(x)/dx_j dx_k dx_l| for all x,i,j,k,l.
    returns compute_step_amount, a step function suitable for traverse.
    The function signature is compute_step_amount(trace),
    where trace includes fields DF, and z:
    DF is the derivative of F(x), and z is the fiber tangent.
    the first return value is the step amount
    the second return value is the minimum singular value of Dg at x
    the third return value is True only if x is a critical point
    """
    def compute_step_amount(trace):
        # lambda: smallest singular value of the Jacobian augmented with z^T;
        # low_rank signals a (near-)critical point where the fiber is ill-defined.
        Dg = np.concatenate((trace.DF, trace.z.T), axis=0)
        sv_min, low_rank = nu.minimum_singular_value(Dg)
        step_amount = 0
        if not low_rank:
            # delta: step bound from the curvature estimates A (2nd order)
            # and B (3rd order), scaled by dimensionality N
            N = trace.x.shape[0]-1
            A = N*f2(trace.x[:N])
            B = N**1.5 * f3
            delta = (2*A - (4*A**2 + 12*sv_min*B)**.5)/(-6*B)
            # theta: shrink delta by the estimated loss of conditioning mu
            mu = A + B*delta
            step_amount = delta * ( 1 - delta * mu / sv_min)
        return step_amount, sv_min, low_rank
    return compute_step_amount
def eF(x, c, f, ef):
    """
    Forward error bound for F(x) = f(v) - a*c, where x = (v; a).
    x: point on fiber as (N+1) x 1 np.array
    c, f, ef: as in traverse_fiber
    """
    point, alpha = x[:-1, :], x[-1]
    scaled_c = alpha * c
    # Accumulate rounding error from the subtraction, f itself, and the
    # products involving alpha and c.
    return nu.eps(f(point) - scaled_c) + ef(point) + nu.eps(scaled_c) + nu.eps(alpha) * c
def refine_initial(f, Df, ef, x, c, max_solve_iterations):
    """
    Drive an approximate starting point x onto the fiber with Newton's method.
    f, Df, ef, c: as in traverse_fiber
    x: initial (N+1) x 1 point (v; a)
    Returns (x, residuals): the refined point and the Newton residual history.
    """
    x, _, residuals = nu.nr_solve(x,
        f = lambda x: f(x[:-1]) - x[-1]*c,
        Df = lambda x: np.concatenate((Df(x[:-1])[0], -c), axis=1),
        ef = lambda x: eF(x, c, f, ef),
        max_iterations=max_solve_iterations)
    return x, residuals
def compute_tangent(DF, z=None):
    """
    Compute the tangent vector to the directional fiber.
    DF: Jacobian of F after the step (N by N+1 numpy.array)
    z: previous tangent before the step, or None (N+1 by 1 numpy.array)
    Returns the new unit tangent (N+1 by 1 numpy.array); when z is given,
    the sign is chosen for positive dot product with z.
    """
    num_rows = DF.shape[0]
    if z is None:
        # Random previous tangent supplies the initial sign.
        z = np.random.randn(num_rows + 1, 1)
    # Append z^T to make the system square; solving against e_{N+1} yields
    # the null-space direction of DF with positive projection onto z.
    square_system = np.concatenate((DF, z.T), axis=0)
    rhs = np.concatenate((np.zeros((num_rows, 1)), [[1]]), axis=0)
    tangent = nu.solve(square_system, rhs)
    return tangent / np.sqrt((tangent ** 2).sum())
def take_step(f, Df, ef, c, z, x, step_amount, max_solve_iterations):
    """
    Advance one step along the fiber: predict by moving step_amount along the
    tangent z from x, then Newton-correct back onto the fiber constrained to
    the hyperplane z.T.(x - x0) == step_amount.
    Returns (x, residuals): the new point and the Newton residual history.
    """
    x0 = x
    x, _, residuals = nu.nr_solve(
        x0 + z*step_amount, # fast first step
        f = lambda x: np.concatenate((
            f(x[:-1]) - x[-1]*c,
            z.T.dot(x - x0) - step_amount), axis=0),
        Df = lambda x: np.concatenate((
            np.concatenate((Df(x[:-1])[0], -c), axis=1),
            z.T), axis=0),
        ef = lambda x: np.concatenate((
            eF(x, c, f, ef),
            nu.eps(z.T.dot(x)) + z.T.dot(nu.eps(x)) + nu.eps(z*x).sum()),
            axis = 0),
        max_iterations=max_solve_iterations)
    return x, residuals
def traverse_fiber(
    f,
    Df,
    ef,
    compute_step_amount,
    v=None,
    c=None,
    z=None,
    N=None,
    terminate=None,
    logger=None,
    stop_time = None,
    max_traverse_steps=None,
    max_step_size=None,
    max_solve_iterations=None,
    abs_alpha_min=True,
    max_history=None,
    check_closed_loop=True,
    ):
    """
    Traverses a directional fiber.
    Traversal state is maintained in a FiberTrace object
    The user provides functions f(v), Df(v), ef(v) where v is an N x 1 np.array:
    f is the function, Df is its derivative, and ef is its forward error.
    The user-provided function compute_step_amount(trace) should return:
    step_amount: signed step size at point x along fiber with derivative DF and tangent z
    step_data: output for any additional data that will be saved for post-traversal analysis
    critical: True only if x is a critical point
    v is an approximate starting point for traveral (defaults to the origin).
    c is a direction vector (defaults to random).
    z is an approximate initial tangent direction (automatically computed by default).
    N is the dimensionality of the dynamical system (defaults to shape of v, c, or z).
    At least one of v, c, and N should be provided.
    If provided, the function terminate(trace) should return True when trace meets a custom termination criterion.
    If provided, progress is written to the Logger object logger.
    If provided, traversal terminates at stop_time.
    If provided, traversal terminates after max_traverse_steps.
    If provided, step sizes are truncated to max_step_size.
    Each step is computed with Newton's method.
    Residual error is measured by the maximum norm of G.
    If provided, each step uses at most max_solve_iterations of Newton's method.
    If abs_alpha_min is True, local minima of alpha magnitude are candidate roots.
    If provided, max_history is the most fiber points saved in the trace.
    When exceeded, half the non-root-candidate points, evenly spaced, are discarded.
    If check_closed_loop is True, traversal terminates if a closed loop is detected.
    Returns the FiberTrace object for the traversal
    """
    # Set defaults
    if v is not None: N = v.shape[0]
    if c is not None: N = c.shape[0]
    if c is None:
        c = np.random.randn(N,1)
        c = c/np.sqrt((c**2).sum())
    x = np.zeros((N+1,1))
    if v is not None:
        x[:N,:] = v
        # Initial alpha estimate: average of f(v)/c over nonzero entries of c.
        x[N,:] = (f(v)[c != 0] / c[c != 0]).mean()
    # Drive initial va to fiber in case of residual error
    x, initial_residuals = refine_initial(f, Df, ef, x, c, max_solve_iterations)
    # Initialize trace
    trace = FiberTrace(c)
    trace.x = x
    trace.points.append(x)
    trace.residuals.append(initial_residuals[-1])
    # Traverse
    for step in it.count(0):
        # Update DF
        DF = np.concatenate((Df(x[:N])[0], -c), axis=1)
        # Update tangent
        z = compute_tangent(DF, z)
        # Update trace
        trace.DF = DF
        trace.z = z
        if step == 0: trace.z_initial = z
        # Get step size (possibly clipped to max_step_size, preserving sign)
        step_amount, step_data, step_critical = compute_step_amount(trace)
        if max_step_size is not None:
            step_amount = np.sign(step_amount)*min(np.fabs(step_amount), max_step_size)
        # Check for critical fiber
        if step_critical:
            trace.status = "Critical"
            break
        # Update x
        x, step_residuals = take_step(f, Df, ef, c, z, x,
            step_amount, max_solve_iterations)
        # Log and store progress
        trace.x = x
        trace.points.append(x)
        trace.tangents.append(z)
        trace.residuals.append(step_residuals[-1])
        trace.step_amounts.append(step_amount)
        trace.step_data.append(step_data)
        if logger is not None and step % 10 == 0:
            logger.log("step %d: residual %f, theta %f, step data %s...\n"%(
                step, step_residuals[-1], step_amount, step_data))
        # Check for early termination criteria
        if max_traverse_steps is not None and step + 1 >= max_traverse_steps:
            trace.status = "Max steps"
            break
        if stop_time is not None and time.perf_counter() >= stop_time:
            trace.status = "Timed out"
            break
        # Check custom termination criteria
        if terminate is not None and terminate(trace):
            trace.status = "Terminated"
            break
        # Check for closed loop: the latest point is closer to the start than
        # the first few traversal points were.
        if check_closed_loop and len(trace.points) > 2:
            current_distance = np.fabs(trace.points[-1]-trace.points[0]).max()
            initial_distance = np.fabs(trace.points[2]-trace.points[0]).max()
            if current_distance < initial_distance:
                trace.status = "Closed loop"
                break
        # Check for maximum fiber history
        if max_history is not None and len(trace.points) > max_history:
            trace.halve_points(abs_alpha_min)
    # final output
    if logger is not None: logger.log("Status: %s\n"%trace.status)
    return trace
def plot_fiber(X, Y, V, f, ax=None, scale_XY=1, scale_V=1, fiber_color='k'):
    """
    Plots a fiber within a 2d state space
    pt.show still needs to be called separately
    X, Y: 2d ndarrays as returned by np.meshgrid
    V: (2,P) ndarray of P points along the fiber
    f: as in traverse_fiber
    ax: axis on which to draw
    scale_XY, scale_V: quiver scales for background / fiber arrows
    fiber_color: matplotlib color for the fiber curve
    """
    # Calculate direction vectors
    XY = np.array([X.flatten(), Y.flatten()])
    C_XY = f(XY)
    C_V = f(V)
    # Set up draw axes
    if ax is None: ax = plt.gca()
    # Draw ambient direction vectors (gray background field)
    ax.quiver(XY[0,:],XY[1,:],C_XY[0,:],C_XY[1,:],color=0.5*np.ones((1,3)),
        scale=scale_XY,units='xy',angles='xy')
    # Draw fiber with incident direction vectors (black arrows along the curve)
    ax.plot(V[0,:],V[1,:],color=fiber_color, linestyle='-', linewidth=1)
    ax.quiver(V[0,:],V[1,:],C_V[0,:],C_V[1,:],color=0.0*np.ones((1,3)),
        scale=scale_V,units='xy',angles='xy')
    # Set limits based on grid and show
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(1)
    ax.set_xlim((X.min(),X.max()))
    ax.set_ylim((Y.min(),Y.max()))
| garrettkatz/directional-fibers | dfibers/traversal.py | traversal.py | py | 12,381 | python | en | code | 1 | github-code | 90 |
18543606289 | def main():
    # N items at positions x (with value v) on a counter of circumference C;
    # rx = C - x below measures distance when approaching from the other side.
    N, C = map(int, input().split())
    sushis = [(0, 0) for _ in range(N)]
    for i in range(N):
        x, v = map(int, input().split())
        sushis[i] = (x, v)
    ls, rs = 0, 0
    # left[i] / right[i]: best net value taking the i nearest items on that
    # side; pair element 0 is a one-way trip (cost = distance), element 1 a
    # round trip back to the start (cost = 2 * distance).
    left = [(0, 0) for _ in range(N+1)]
    right = [(0, 0) for _ in range(N+1)]
    for i in range(N):
        ls += sushis[i][1]
        lx = sushis[i][0]
        rs += sushis[-i-1][1]
        rx = C - sushis[-i-1][0]
        left[i+1] = (max(ls-lx, left[i][0]),
                     max(ls-lx*2, left[i][1]))
        right[i+1] = (max(rs-rx, right[i][0]),
                      max(rs-rx*2, right[i][1]))
    ans = 0
    # Combine: round trip on one side plus a one-way trip on the other
    # (taking i items on the left and N - i on the right).
    for i in range(N+1):
        ans = max([left[i][0] + right[N-i][1],
                   left[i][1] + right[N-i][0],
                   ans])
    print(ans)
if __name__ == "__main__":
main()
| Aasthaengg/IBMdataset | Python_codes/p03372/s015911268.py | s015911268.py | py | 817 | python | en | code | 0 | github-code | 90 |
40384396780 | import onnx_graphsurgeon as gs
import numpy as np
import onnx
scale_const = gs.Constant(name="scale_const_1", values=np.ones(shape=(63,), dtype=np.float32))
bias_const = gs.Constant(name="b_const_1", values=np.zeros(shape=(63,), dtype=np.float32))
@gs.Graph.register()
def replace_with_instancenormalization(self, inputs, outputs):
    """Splice a fused LayerNorm node between `inputs` and `outputs`,
    disconnecting the decomposed normalization subgraph in between.

    Registered on gs.Graph, so `self` is the graph being modified.
    """
    print("inputs : ", inputs)
    print("outputs : ", outputs)
    # Disconnect output nodes of all input tensors
    for inp in inputs:
        # Keep consumers other than the ReduceMean/Sub nodes that form the
        # decomposed normalization pattern being replaced.
        inp.outputs = [o for o in inp.outputs if not o.name.startswith("ReduceMean_") and not o.name.startswith("Sub_")]
        # inp.outputs.clear()
    # # Disconnet input nodes of all output tensors
    for out in outputs:
        out.inputs.clear()
    # Insert the new node.
    print("inputs[0].shape: ", inputs[0])
    node = self.layer(op="LayerNorm", inputs=[inputs[0]], outputs=outputs, attrs={"epsilon": 0.00001})
    # node = self.layer(op="InstanceNormalization", inputs=[inputs[0], scale_const, bias_const], outputs=outputs)
    print("node: ", node)
    return node
def merge_instance_norm(graph):
    """Find each decomposed normalization pattern (ReduceMean/Sub/.../Div
    chain) in the graph and replace it with a single fused node."""
    # ReduceMean nodes whose first consumer is the mean-subtraction Sub node.
    reducemean_nodes = [node for node in graph.nodes if node.name.startswith("ReduceMean") and node.outputs[0].outputs[0].name.startswith("Sub_")]
    # Div nodes dividing a Sub result by a Sqrt result, i.e. (x - mean) / std.
    div_nodes = [node for node in graph.nodes if node.name.startswith("Div_") and node.inputs[0].inputs[0].name.startswith("Sub_") and node.inputs[1].inputs[0].name.startswith("Sqrt")]
    # Downstream consumers of each Div; presumably scale (Mul) then bias (Add).
    mul_nodes = [ node.outputs[0].outputs[0] for node in div_nodes]
    add_nodes = [ node.outputs[0].outputs[0] for node in mul_nodes]
    print("len(reducemean_nodes): ", len(reducemean_nodes))
    print("len(div_nodes): ", len(div_nodes))
    print("len(mul_nodes): ", len(mul_nodes))
    print("len(add_nodes): ", len(add_nodes))
    assert len(reducemean_nodes) == len(div_nodes), "reducemean_nodes len is not equal with div_nodes len"
    # Pair each pattern's entry (ReduceMean inputs) with its exit (Div outputs)
    # and splice in the fused node.  NOTE(review): assumes the two lists are
    # in matching graph order — confirm for the target model.
    for rn, dn in zip(reducemean_nodes, div_nodes):
        inputs = [inp for inp in rn.inputs]
        outputs = [out for out in dn.outputs]
        # scale = mn.inputs[1]
        # bias = an.inputs[1]
        graph.replace_with_instancenormalization(inputs, outputs)
25828981553 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
class ResNet(nn.Module):
    """ResNet-backed classifier with a single-label head and, optionally, an
    additional multi-label head.

    Args:
        dataset: unused here; kept for interface compatibility.
        multi_loss: when True, also build the multi-label head used by
            forward_multi().
        base_model: 'resnet18' or 'resnet50'.
        out_dim: number of output classes / labels.
    """
    def __init__(self, dataset, multi_loss, base_model, out_dim):
        super(ResNet, self).__init__()
        self.resnet_dict = {"resnet18": models.resnet18(pretrained=False),
                            "resnet50": models.resnet50(pretrained=False)}
        resnet = self._get_basemodel(base_model)
        num_ftrs = resnet.fc.in_features
        # Backbone without the final fully-connected classifier.
        self.features = nn.Sequential(*list(resnet.children())[:-1])
        if multi_loss:
            # Multi-label head (sigmoid outputs), used by forward_multi().
            self.l1 = nn.Linear(num_ftrs, num_ftrs)
            self.l2 = nn.Linear(num_ftrs, out_dim)
        # Single-label head: created unconditionally because forward() always
        # uses it. Previously it was only built when multi_loss was True, so
        # forward() crashed with AttributeError otherwise.
        self.l3 = nn.Linear(num_ftrs, num_ftrs)
        self.l4 = nn.Linear(num_ftrs, out_dim)
    def _get_basemodel(self, model_name):
        """Look up the backbone by name, raising ValueError on unknown names.

        Fix: the original `raise ("...")` raised a plain string, which is a
        TypeError in Python 3; raise a proper exception instead.
        """
        try:
            model = self.resnet_dict[model_name]
            print("Feature extractor:", model_name)
            return model
        except KeyError:
            raise ValueError("Invalid model name. Check the config file and pass one of: resnet18 or resnet50")
    def forward(self, x):
        """Single-label logits of shape (batch, out_dim)."""
        h = self.features(x)
        h = torch.flatten(h, start_dim=1)
        # single-label
        x = self.l3(h)
        x = F.relu(x)
        x = self.l4(x)
        # x = F.softmax(x, dim=-1)
        return x
    def forward_multi(self, x):
        """Multi-label probabilities (sigmoid) of shape (batch, out_dim).

        Requires the model to have been built with multi_loss=True.
        """
        h = self.features(x)
        h = torch.flatten(h, start_dim=1)
        # multi-label
        x = self.l1(h)
        x = F.relu(x)
        x = self.l2(x)
        x = torch.sigmoid(x)
        return x
| mozzielol/Hybrid | Hybrid_as_aug/models/resnet.py | resnet.py | py | 1,613 | python | en | code | 0 | github-code | 90 |
22550662416 | import jinja2
import requests
from time import sleep
from PIL import Image
import urllib.request
import qrcode
from io import BytesIO
from base64 import b64encode
ENV = jinja2.Environment(extensions=['jinja2.ext.loopcontrols'])
class NFT:
    """Wraps an OpenSea NFT (contract address + token id) and renders it as
    inline HTML: a QR code linking to the listing, or the asset image."""
    def __init__(self, contract_addr, id):
        self.contract_addr = contract_addr
        self.id = id
        self.url = f'https://opensea.io/assets/{contract_addr}/{id}'
    def qrc(self, w=100, h=100):
        """Return an <img> tag (w x h) with a base64-encoded PNG QR code of
        the listing URL."""
        qr = qrcode.QRCode(version=1, box_size=15, border=5)
        qr.add_data(self.url)
        qr.make(fit=True)
        img = qr.make_image(fill='black', back_color='white')
        buf = BytesIO()
        img.save(buf)
        qrc_html = f'<img width={w} height={h} src="data:image/png;base64,{b64encode(buf.getvalue()).decode()}" />'
        return qrc_html
    def img(self, classes='', w=100, h=100):
        """Fetch the asset's image via the OpenSea API and return it inlined
        in an <img> tag; on API errors, return a 1x1 transparent GIF."""
        sleep(0.5)  # crude rate limiting against the OpenSea API
        req = requests.get(f'https://api.opensea.io/api/v1/asset/{self.contract_addr}/{self.id}/', headers={'User-Agent': 'Mozilla/5.0'})
        data = req.json()
        try:
            img_path = data['image_url']
        except KeyError:
            print(data)
            return f'<img src="data:image/gif;base64,R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs=" width="{w}" height="{h}" alt="" />'
        if img_path.startswith("http"):
            req = urllib.request.Request(img_path, headers={'User-Agent': 'Mozilla/5.0'})
            img_bytes = urllib.request.urlopen(req).read()
        else:
            # NOTE(review): this branch produces a PIL Image, not bytes, so
            # b64encode() below would raise TypeError — confirm the intended
            # handling of local image paths.
            img_bytes = Image.open(img_path)
        img_html = f'<img class="{classes}" width={w} height={h} src=\"data:image/png;base64,{b64encode(img_bytes).decode()}\" />'
        return img_html
ENV.filters = {
'NFT': NFT,
} | realjohnward/NFT-Framer | NFT_Framer/app/template_filters/filters.py | filters.py | py | 1,735 | python | en | code | 1 | github-code | 90 |
13090914335 | class Solution:
def maxProfit(self, prices: List[int]) -> int:
minSoFar = prices[0]
maxProfit = 0
for price in prices:
if (price < minSoFar):
minSoFar = price
currProfit = price - minSoFar
if (currProfit > maxProfit):
maxProfit = currProfit
return maxProfit
| magdumsuraj07/data-structures-algorithms | questions/striever_SDE_sheet/6_best_time_to_buy_and_sell_stock.py | 6_best_time_to_buy_and_sell_stock.py | py | 362 | python | en | code | 0 | github-code | 90 |
18494277069 | import sys
input = sys.stdin.buffer.readline
from collections import defaultdict
import copy
def main():
    """Count ordered N-tuples of positive integers whose product is M, mod
    1e9+7: factorize M and, for each prime exponent e, distribute e among the
    N positions in C(e + N - 1, N - 1) ways (stars and bars); multiply over
    all primes."""
    N,M = map(int,input().split())
    d = defaultdict(int)
    MOD = 10**9+7
    R = 10**5+100
    # Precompute factorials, modular inverses, and inverse factorials up to R.
    fac = [0 for _ in range(R+1)]
    fac[0],fac[1] = 1,1
    inv = copy.deepcopy(fac)
    invfac = copy.deepcopy(fac)
    for i in range(2,R+1):
        fac[i] = (fac[i-1]*i)%MOD
        # Standard recurrence for the modular inverse of i modulo a prime.
        inv[i] = MOD-(MOD//i)*inv[MOD%i]%MOD
        invfac[i] = (invfac[i-1]*inv[i])%MOD
    def coef(x,y):
        # Binomial coefficient C(x+y, y) modulo MOD.
        num = (((fac[x+y]*invfac[y])%MOD)*invfac[x]%MOD)
        return num
    # Trial-division factorization of M into d: prime -> exponent.
    while M%2 == 0:
        d[2] += 1
        M //= 2
    f = 3
    while f ** 2 <= M:
        if M % f == 0:
            d[f] += 1
            M //= f
        else:
            f += 2
    if M != 1:
        d[M] += 1
    l = list(d.values())
    ans = 1
    # Multiply the stars-and-bars counts over all prime exponents.
    for num in l:
        ans *= coef(num,N-1)
        ans %= MOD
    print(ans)
if __name__ == "__main__":
main() | Aasthaengg/IBMdataset | Python_codes/p03253/s901379470.py | s901379470.py | py | 974 | python | en | code | 0 | github-code | 90 |
1495599402 | #!/usr/bin/env python
import csv
import numpy as np
from threading import Thread, current_thread
from subprocess32 import Popen, PIPE
from datetime import datetime
import matplotlib.pyplot as plt
from scipy.signal import convolve2d
from skimage.transform import rotate
from skimage import filters
import time
import cv2
import math
from sklearn.metrics import confusion_matrix
class TorchOptim:
    """Black-box objective for a pySOT-style hyperparameter optimiser.

    Exposes the bounds (``xlow``/``xup``), dimensionality and an
    ``objfunction`` in the shape such optimisers expect, and appends every
    evaluation to a CSV log under ``logs/``.

    NOTE(review): parts of this class look half-ported between an
    image-edge-detection objective and a Torch/MNIST objective; see the
    notes inside ``__init__`` and ``objfunction``.
    """
    bestResult = 1000
    f_eval_count = 0
    seed = 139
    server = 'unset'
    # Hyperparameter to optimise:
    hyper_map = {
        'w' : 0,
    }
    def __init__(self, seed, server, dim=1):
        """Store optimiser metadata and the search-space bounds."""
        self.seed = seed
        self.server = server
        self.f_eval_count = 0
        m = self.hyper_map
        # NOTE(review): plt.imread() is called without a path here and will
        # raise TypeError; a filename argument appears to be missing.
        self.ground_truth = plt.imread()
        # Lower/upper bounds per dimension; only 'w' (filter width) is used.
        self.xlow = np.zeros(dim)
        self.xup = np.zeros(dim)
        # human value 0.0000
        self.xlow[m['w']] = 1
        self.xup[m['w']] = 20
        self.dim = dim
        self.info = 'Optimise a simple MLP network over MNIST dataset'
        # self.continuous = np.arange(0, 4)
        # All dimensions are declared integer-valued for the optimiser.
        self.integer = np.arange(0, dim)
    def print_result_directly(self, x, result):
        """Log an externally computed result for point x to this run's CSV."""
        self.f_eval_count = self.f_eval_count + 1
        experimentId = 'p-'+str(len(x))+'-'+str(self.f_eval_count)+'-'+self.seed+'-'+self.server
        fileId = 'p-'+str(len(x))+'-'+self.seed+'-'+self.server
        millis = int(round(time.time() * 1000))
        if self.bestResult > result:
            self.bestResult = result
        # Row layout: best-so-far, eval-time(-1), result, test-result(-1),
        # eval counter, timestamp, then all coordinates of x.
        row = [self.bestResult, -1, result, -1, self.f_eval_count, millis]
        for xi in range(0, len(x)):
            row.append(x[xi])
        with open('logs/'+fileId+'-output.csv', 'a') as f:
            writer = csv.writer(f)
            writer.writerow(row)
    def objfunction(w):
        """Evaluate one filter width and return its balanced error rate.

        NOTE(review): defined without ``self`` yet the tail of the method
        references ``self`` and an undefined ``x`` — that code path raises
        NameError if reached; it looks like a partial copy from a
        Torch/MNIST objective and needs reconciling.
        """
        millis = int(round(time.time() * 1000))
        print('Started: ' + str(datetime.now()) + ' (' + str(millis) + ')')
        # Read original image and edge ground_truth
        image = plt.imread('./original.jpg').mean(axis=-1)
        ground_truth = plt.imread('./best_map.png').mean(axis=-1)
        # use dilation on ground truth with kernel of dimensions i x i
        i = 7
        kernel = np.ones((i, i), np.uint8)
        ground_truth = cv2.dilate(ground_truth, kernel, iterations=1)
        # If width is odd, make it even
        # NOTE(review): the comment above contradicts the code — this turns
        # an *even* w into an odd one; confirm which parity is intended.
        if(w%2 == 0):
            w+=1
        # Function to create the filter
        def createFilter_v2(w=None, k1=None, k2=None, o='vert'):
            if(w != None):
                k1 = k2 = math.ceil(w/2)
            if(k1 != k2):
                print(f"k1 has size of {k1} and k2 has size of {k2}. k1 and k2 must be equal")
                return None
            # Sobel-like kernel: -1 columns on the left, +1 on the right.
            f = np.zeros((2*w+1, 2*w+1))
            for i in range(k1):
                f[:, i] = -1
            for i in reversed(range(k1)):
                i = i+1
                f[:, -i] = 1
            if(o=='horz'):
                f = np.transpose(f)
            return f
        # Get vertical and horizontal sobels
        vert_sobel = createFilter_v2(w)
        horz_sobel = createFilter_v2(w, o='horz')
        # Apply the filter on the image
        vert_edge_pred = convolve2d(image, vert_sobel, mode='same')
        horz_edge_pred = convolve2d(image, horz_sobel, mode='same')
        edge_pred = np.add(vert_edge_pred, horz_edge_pred)
        #Use otsu histogram thresholding to find an optimal threshold to binarize the edge prediction image
        # vert_val = filters.threshold_otsu(vert_edge_pred)
        # vert_final_map = np.zeros_like(vert_edge_pred)
        # horz_val = filters.threshold_otsu(horz_edge_pred)
        # horz_final_map = np.zeros_like(horz_edge_pred)
        val = filters.threshold_otsu(edge_pred)
        final_map = np.zeros_like(edge_pred)
        print(final_map.shape)
        print(ground_truth.shape)
        # Binarize the image using the optimal threshold found using otsu
        # vert_final_map[vert_edge_pred > vert_val] = 1
        # horz_final_map[horz_edge_pred > horz_val] = 1
        final_map[edge_pred > val] = 1
        # Compute BER
        cm = confusion_matrix(ground_truth.argmax(axis=-1).ravel(), final_map.argmax(axis=-1).ravel())
        tn, fp, fn, tp = cm[0][0], cm[0][1], cm[1][0], cm[1][1]
        ## if statement to avoid division by zero error
        if tp + fn > 0 and tn + fp > 0:
            BER = 1 - 0.5 * (tp / (tp + fn) + tn / (tn + fp))
        else:
            BER = 1
        # Plot ground truth and final map
        fig,ax = plt.subplots(1,2)
        ax[1].cla()
        ax[0].cla()
        ax[0].imshow(ground_truth,'Greys_r')
        ax[0].set_title(f"Ground Truth (Dilated by {i} x {i})")
        ax[1].imshow(final_map,'Greys_r')
        ax[1].set_title(f"Final Map with kernel width of {w}")
        fig.suptitle(f"Ground Truth vs. Final Map\nBER: {BER}")
        fig.tight_layout()
        fig.subplots_adjust(top=0.88)
        plt.show(block=False)
        plt.pause(1.5)
        plt.savefig(f'comparisons/{w}_{w}')
        end_millis = int(round(time.time() * 1000))
        print('Ended: ' + str(datetime.now()) + ' (' + str(end_millis) + ')')
        print(f"Time taken is {(end_millis - millis) / 1000.0000} secs")
        # put a threshold, to binarize it
        # otsu thresholding
        # histogram threshsholding
        # maximum distances from the peaks
        # edge greater than val is one, anything below dont change value
        # cm = confusion_matrix(new_GT.argmax(axis=-1).ravel(), final_pred.ravel())
        # make sure tp and fn exist, if not then return 1, min is 0
        # tn, fp, fn, tp = cm[0][0], cm[0][1], cm[1][0], cm[1][1]
        # BER = 1 - 0.5 * (tp / (tp + fn) + tn / (tn + fp))
        # # after filter is obtained apply filter with convolution on the image, edge map, compare to ground truth
        # #compare that prediction to the ground truth,
        # perform closing algorithm or smoothing on the edge map image
        # use morphological operations from opencv python
        # use dilation ******* using open cv2
        # manually create filter and find various metrics to see which one is best
        #BER metric (Balanced error rate) to measure performance
        # don't do 1 - BER, minimize itself
        #Mean IOU metric (Amount of alignment of edges between ground truth and prediction)
        # return (1 - mean IOU)
        #assess precision, recall -> High f1 for success
        #1 - f1 score
        #compare final_map to ground truth
        # find boundary pixel accuracy metric
        # # return 1- accuracy
        # NOTE(review): everything below references `self` and an undefined
        # `x` and cannot run as written (see docstring).
        self.f_eval_count = self.f_eval_count + 1
        experimentId = 'p-'+str(len(x))+'-'+str(self.f_eval_count)+'-'+self.seed+'-'+self.server
        fileId = 'p-'+str(len(x))+'-'+self.seed+'-'+self.server
        m = self.hyper_map
        exp_arg = []
        exp_arg.append('th'),  # NOTE(review): stray trailing comma (no-op tuple)
        exp_arg.append('eval_mnist_GPU.lua')
        exp_arg.append('--mean')
        exp_arg.append(str(x[m['mean']]))
        exp_arg.append('--std')
        exp_arg.append(str(x[m['std']]))
        exp_arg.append('--learnRate')
        exp_arg.append(str(x[m['learnRate']]))
        exp_arg.append('--momentum')
        exp_arg.append(str(x[m['momentum']]))
        exp_arg.append('--epochs')
        exp_arg.append(str(x[m['epochs']]))
        exp_arg.append('--hiddenNodes')
        exp_arg.append(str(x[m['hiddenNodes']]))
        exp_arg.append('--experimentId')
        exp_arg.append(experimentId)
        exp_arg.append('--seed')
        exp_arg.append(self.seed)
        millis_start = int(round(time.time() * 1000))
        proc = Popen(exp_arg, stdout=PIPE)
        out, err = proc.communicate()
        if proc.returncode == 0:
            # The Lua script prints 'trainResult###testResult'.
            results = out.split('###')
            result = float(results[0])
            testResult = float(results[1])
            millis = int(round(time.time() * 1000))
            f_eval_time = millis - millis_start
            if self.bestResult > result:
                self.bestResult = result
            row = [self.bestResult, f_eval_time, result, testResult, self.f_eval_count, millis]
            for xi in range(0, len(x)):
                row.append(x[xi])
            with open('logs/'+fileId+'-output.csv', 'a') as f:
                writer = csv.writer(f)
                writer.writerow(row)
            return result
        else:
            print(err)
            raise ValueError('Function evaluation error')
    def print_parameters(self, x):
        """Print each hyperparameter name/value of x, one per line."""
        print(current_thread())
        m = self.hyper_map
        print('')
        for p in m:
            print(p+'\t : %g' % float(x[m[p]]))
| arihanv/croplands_optimization | pySOT_torch.py | pySOT_torch.py | py | 8,715 | python | en | code | 0 | github-code | 90 |
22383454857 | import pickle
class DiskIO:
    """Persist inverted-index postings with gap + variable-byte encoding."""

    def __compress_1_number_vbe(self, number):
        """Encode one integer; return (encoded value, byte count)."""
        # Peel 7-bit groups off the low end.  Continuation groups carry a
        # clear flag bit; the most-significant group carries a set flag bit.
        groups = []
        while number > 0x7F:
            groups.append(number & 0x7F)
            number >>= 7
        groups.append(0x80 | number)
        encoded = 0
        for group in reversed(groups):
            encoded = (encoded << 8) | group
        return encoded, len(groups)

    def __compress_vbe(self, numbers):
        """Concatenate the variable-byte encodings of all numbers."""
        stream = 0
        for number in numbers:
            encoded, size = self.__compress_1_number_vbe(number)
            stream = (stream << (size * 8)) | encoded
        return stream

    def __decompress_vbe(self, compressed_int):
        """Split a variable-byte stream back into the original integers."""
        decoded = []
        while compressed_int:
            value = 0
            shift = 0
            while True:
                byte = compressed_int & 0xFF
                compressed_int >>= 8
                value |= (byte & 0x7F) << shift
                shift += 7
                if byte & 0x80:  # flag bit marks the final (top) group
                    break
            decoded.append(value)
        # Numbers are recovered from the tail of the stream backwards.
        decoded.reverse()
        return decoded

    def __compress_gap(self, numbers):
        """Replace an ascending list by its first element plus deltas."""
        deltas = [b - a for a, b in zip(numbers, numbers[1:])]
        return [numbers[0]] + deltas

    def __decompress_gap(self, compressed):
        """Undo gap encoding by accumulating a running total."""
        restored = [compressed[0]]
        for delta in compressed[1:]:
            restored.append(restored[-1] + delta)
        return restored

    def __string_to_nbr(self, doc_id):
        """Pack a string into a big integer, one character per byte."""
        packed = 0
        for char in doc_id:
            packed = (packed << 8) + ord(char)
        return packed

    def __nbr_to_string(self, nbr):
        """Inverse of __string_to_nbr."""
        chars = []
        while nbr > 0:
            chars.append(chr(nbr & 0xFF))
            nbr >>= 8
        return ''.join(reversed(chars))

    def __compress(self, dictionary):
        """Flatten {term: postings} into one gap+VBE encoded integer."""
        flat = []
        previous = 0
        # Terms are visited in ascending order of their integer encoding so
        # the term keys themselves can be gap-encoded too.
        for int_key in sorted(self.__string_to_nbr(k) for k in dictionary):
            flat.append(int_key - previous)
            previous = int_key
            postings = dictionary[self.__nbr_to_string(int_key)]
            flat.append(len(postings))
            flat.extend(self.__compress_gap(postings))
        return self.__compress_vbe(flat)

    def decompress(self, vbe_compressed):
        """Rebuild the {term: postings} mapping from one encoded integer."""
        numbers = self.__decompress_vbe(vbe_compressed)
        result = {}
        int_key = 0
        pos = 0
        while pos < len(numbers):
            # Layout per term: key-delta, postings length, postings deltas.
            int_key += numbers[pos]
            length = numbers[pos + 1]
            pos += 2
            result[self.__nbr_to_string(int_key)] = self.__decompress_gap(
                numbers[pos:pos + length])
            pos += length
        return result

    def __compress_all(self, dictionary):
        """Compress every sub-dictionary of the index."""
        return {key: self.__compress(sub) for key, sub in dictionary.items()}

    def __decompress_all(self, compressed):
        """Decompress every entry produced by __compress_all."""
        return {key: self.decompress(value)
                for key, value in compressed.items()}

    def write(self, index, file_name):
        """Compress the index and pickle it to file_name."""
        with open(file_name, 'wb') as outfile:
            pickle.dump(self.__compress_all(index), outfile)

    def read_and_decompress(self, file_name):
        """Load a pickled compressed index and decompress it."""
        with open(file_name, 'rb') as infile:
            return self.__decompress_all(pickle.load(infile))

    def read(self, file_name):
        """Load a pickled compressed index without decompressing it."""
        with open(file_name, 'rb') as infile:
            return pickle.load(infile)
| borchaniz/search_index_compression | DiskIO.py | DiskIO.py | py | 3,919 | python | en | code | 0 | github-code | 90 |
22813410929 | #!/usr/bin/env python3
# encoding: utf-8
import os
from nose import with_setup
from nose.plugins.skip import SkipTest
from tests.utils import *
SOURCE = '''
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char *argv[]) {
puts("Hello rmlint. Why were you executing this?");
return EXIT_SUCCESS;
}
'''
def create_binary(path, stripped=False):
    """Compile SOURCE to an executable inside TESTDIR_NAME.

    The output name gets a '.stripped' or '.nonstripped' suffix; the
    compiler is invoked with '-s' (strip symbols) or '-ggdb3' (debug info).
    """
    suffix = '.stripped' if stripped else '.nonstripped'
    full_path = os.path.join(TESTDIR_NAME, path + suffix)
    command = '{cc} -o {path} {option} -std=c99 -xc -'.format(
        cc=os.environ.get('CC', 'gcc'),
        path=full_path,
        option='-s' if stripped else '-ggdb3',
    )
    # The program text is fed to the compiler on stdin ('-xc -').
    subprocess.run(command, input=SOURCE, shell=True,
                   universal_newlines=True, check=True)
@with_setup(usual_setup_func, usual_teardown_func)
def test_negative():
    """A stripped binary must not be reported by the 'nonstripped' lint."""
    if has_feature('nonstripped') is False:
        return
    create_file(SOURCE, 'source.c')
    create_binary('source.c', stripped=True)
    _head, *data, footer = run_rmlint('-T "none +nonstripped"')
    assert footer['total_files'] == 2
    assert footer['total_lint_size'] == 0
    assert not data
@with_setup(usual_setup_func, usual_teardown_func)
def test_positive():
    """A binary built with debug info must be reported as 'nonstripped'."""
    if has_feature('nonstripped') is False:
        return
    create_file(SOURCE, 'source.c')
    create_binary('source.c', stripped=False)
    _head, *data, footer = run_rmlint('-T "none +nonstripped"')
    assert footer['total_files'] == 2
    # We cannot determine exact lint size.
    assert footer['total_lint_size'] == 0
    assert data[0]['type'] == 'nonstripped'
# regression test for GitHub issue #555
@with_setup(usual_setup_func, usual_teardown_func)
def test_executable_fifo():
    """An executable FIFO must neither hang rmlint nor be reported."""
    if has_feature('nonstripped') is False:
        raise SkipTest("needs 'nonstripped' feature")
    fifo_path = os.path.join(TESTDIR_NAME, 'fifo')
    os.mkfifo(fifo_path)
    os.chmod(fifo_path, 0o755)
    # rmlint must finish within the timeout instead of blocking on the FIFO.
    _head, *data, footer = run_rmlint('-T nonstripped', timeout=5)
    assert footer['total_files'] == 0
    assert footer['total_lint_size'] == 0
    assert not data
| sahib/rmlint | tests/test_types/test_nonstripped.py | test_nonstripped.py | py | 2,125 | python | en | code | 1,672 | github-code | 90 |
19368287658 | # coding: utf-8
# modified from https://github.com/minzwon/self-attention-music-tagging
import os
import numpy as np
from torch.utils import data
import pickle as pkl
import librosa
import warnings
import math
class AudioFolder(data.Dataset):
    """Million Song Dataset mel-spectrogram dataset for auto-tagging.

    Yields ``(spectrogram, tag_vector)`` pairs, the tag vector holding 50
    binary labels.  Spectrograms are computed from audio on first access and
    cached under ``spec_path`` as ``.npy`` files; later accesses memory-map
    the cached array.
    """
    def __init__(self,
                 input_length,  # [s]
                 spec_path,
                 audio_path,
                 path_to_repo,
                 mode):
        self.spec_path = os.path.expanduser(spec_path)
        self.audio_path = os.path.expanduser(audio_path)
        self.path_to_repo = os.path.expanduser(path_to_repo)
        self.mode = mode
        # Mel-spectrogram extraction parameters.
        self.fs = 16000
        self.window = 512
        self.hop = 256
        self.mel = 96
        # Excerpt length converted from seconds to spectrogram frames.
        self.input_length = math.floor(input_length * self.fs / self.hop)
        self.get_songlist()
        # MSD id -> 7digital id (locates audio/spectrogram files on disk).
        with open(os.path.join(self.path_to_repo,
                  "training/msd_metadata/MSD_id_to_7D_id.pkl"), 'rb') as f:
            self.idmsd_to_id7d = pkl.load(f)
        # MSD id -> 50-dimensional binary tag vector.
        with open(os.path.join(self.path_to_repo,
                  "training/msd_metadata/msd_id_to_tag_vector.cP"), 'rb') as f:
            self.tags = pkl.load(f)

    def __getitem__(self, index):
        """Return (spectrogram, tags) as float32; resample on broken audio."""
        spec = None
        while spec is None:
            try:
                spec, tag_binary = self.get_spec(index)
            except Exception:
                # Audio not found or broken (very very rare): log the id and
                # retry with a random other track.  (Was a bare `except:`,
                # which also swallowed KeyboardInterrupt.)
                print(self.fl[index])
                index = np.random.randint(0, high=len(self.fl))
                spec = None
        return spec.astype('float32'), tag_binary.astype('float32')

    def get_songlist(self):
        """Load the MSD id list for the configured train/valid/test split."""
        if self.mode in ('train', 'valid'):
            with open(os.path.join(self.path_to_repo,
                      "training/msd_metadata/filtered_list_train.cP"), 'rb') as f:
                train = pkl.load(f)
        if self.mode == 'train':
            self.fl = train[:201680]
        elif self.mode == 'valid':
            self.fl = train[201680:]
        elif self.mode == 'test':
            with open(os.path.join(self.path_to_repo,
                      "training/msd_metadata/filtered_list_test.cP"), 'rb') as f:
                self.fl = pkl.load(f)
        else:
            raise Exception("mode must be 'train', 'valid' or 'test'")

    def compute_melspectrogram(self, audio_fn):
        """Load audio at 16 kHz and return a log-amplitude mel spectrogram."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            x, _ = librosa.core.load(audio_fn, sr=self.fs, res_type='kaiser_fast')
            spec = librosa.core.amplitude_to_db(librosa.feature.melspectrogram(x,
                                                sr=self.fs,
                                                n_fft=self.window,
                                                hop_length=self.hop,
                                                n_mels=self.mel))
        return spec

    def get_spec(self, index):
        """Return (spectrogram excerpt(s), tag vector) for one track.

        Train mode returns one random crop with a leading channel axis;
        valid/test modes return a stack of 16 evenly spaced crops
        (Won's configuration, as in https://arxiv.org/abs/2006.00751).
        """
        fn = self.fl[index]
        id7d = self.idmsd_to_id7d[fn]
        audio_fn = os.path.join(self.audio_path, *id7d[:2], id7d + ".clip.mp3")
        spec_fn = os.path.join(self.spec_path, *id7d[:2], id7d + '.npy')
        if not os.path.exists(spec_fn):
            whole_spec = self.compute_melspectrogram(audio_fn)
            # exist_ok: another worker (or a previous run) may have created it.
            os.makedirs(os.path.dirname(spec_fn), exist_ok=True)
            # Bug fix: this used to save the undefined name `spec` (NameError
            # on every cache miss) and leaked the open file handle.
            with open(spec_fn, 'wb') as f:
                np.save(f, whole_spec)
        else:
            whole_spec = np.load(spec_fn, mmap_mode='r')
        if self.mode == 'train':
            # Random crop from the first 29 s of the clip.
            upper_idx = math.floor(29 * self.fs / self.hop) - self.input_length
            random_idx = np.random.randint(0, high=upper_idx)
            spec = whole_spec[:, random_idx:random_idx + self.input_length][np.newaxis]
        elif self.mode in ['valid', 'test']:
            n_chunks = 16
            hop = (whole_spec.shape[1] - self.input_length) // n_chunks
            spec = np.zeros((n_chunks, whole_spec.shape[0], self.input_length))
            for i in range(n_chunks):
                spec[i] = whole_spec[:, i * hop:i * hop + self.input_length]
        tag_binary = self.tags[fn].astype(int).reshape(50)
        return spec, tag_binary

    def __len__(self):
        """Number of tracks in the split."""
        return len(self.fl)
def get_DataLoader(batch_size=32,
                   input_length=15,  # [s]
                   spec_path='/import/c4dm-datasets/rmri_self_att/msd',
                   audio_path='/import/c4dm-03/Databases/songs/',
                   path_to_repo='~/dl4am/',
                   mode='train',
                   num_workers=20):
    """Build a shuffled DataLoader over AudioFolder.

    Validation and test modes require ``batch_size=1`` (each item is
    already a stack of excerpt chunks).
    """
    if mode in ('valid', 'test') and batch_size != 1:
        raise Exception("Validation and test modes only allow batch_size=1")
    dataset = AudioFolder(input_length,
                          spec_path,
                          audio_path,
                          path_to_repo,
                          mode)
    # Pinned memory only pays off for the (larger) CUDA training batches.
    return data.DataLoader(dataset=dataset,
                           batch_size=batch_size,
                           shuffle=True,
                           pin_memory=(mode == 'train'),
                           num_workers=num_workers)
| marinelliluca/transformer-based-music-auto-tagging | training/data_loader.py | data_loader.py | py | 5,802 | python | en | code | 3 | github-code | 90 |
class Solution:
    def isValidSudoku(self, board: List[List[str]]) -> bool:
        """Check that no row, column or 3x3 box holds a duplicate digit.

        Empty cells ("." entries) are ignored; only filled cells are
        validated against what has been seen so far.
        """
        seen_rows = collections.defaultdict(set)
        seen_cols = collections.defaultdict(set)
        seen_boxes = collections.defaultdict(set)
        for r, row_values in enumerate(board):
            for c, digit in enumerate(row_values):
                if digit == ".":
                    continue
                box = (r // 3, c // 3)
                if (digit in seen_rows[r]
                        or digit in seen_cols[c]
                        or digit in seen_boxes[box]):
                    return False
                seen_rows[r].add(digit)
                seen_cols[c].add(digit)
                seen_boxes[box].add(digit)
        return True
| Tettey1/A2SV | leetcode-solutions/valid-sudoku.py | valid-sudoku.py | py | 879 | python | en | code | 0 | github-code | 90 |
302796985 | import myokit
import myokit.gui
from myokit.gui import Qt, QtCore, QtGui, QtWidgets
# GUI components
# Constants
SPACE = ' '
TABS = 4
INDENT = SPACE * TABS
# Maps every bracket character to its partner (both directions).
BRACKETS = {
    '(': ')',
    ')': '(',
    '[': ']',
    ']': '['
}
BRACKETS_CLOSE = (')', ']')
# Default editor font: monospace, 11pt.
FONT = myokit.gui.qtMonospaceFont()
FONT.setPointSize(11)
# Syntax-highlighting character formats; their colors are assigned later by
# _adapt_for_dark_mode() depending on the active palette.
# Component and model headers
STYLE_HEADER = QtGui.QTextCharFormat()
# Comments
STYLE_COMMENT = QtGui.QTextCharFormat()
# Model annotations (including meta, labels, and units)
STYLE_ANNOT_KEY = QtGui.QTextCharFormat()
STYLE_ANNOT_VAL = QtGui.QTextCharFormat()
# Language keywords
STYLE_KEYWORD_1 = QtGui.QTextCharFormat()
STYLE_KEYWORD_2 = QtGui.QTextCharFormat()
# Literals: Numbers in model/protocol, Also booleans and strings in script
STYLE_LITERAL = QtGui.QTextCharFormat()
STYLE_INLINE_UNIT = QtGui.QTextCharFormat()
# Matching brackets are highlighted
COLOR_BRACKET = QtGui.QColor(240, 100, 0)
# Selected line is highlighted (may be replaced by _adapt_for_dark_mode)
COLOR_SELECTED_LINE = QtGui.QColor(238, 238, 238)
def _adapt_for_dark_mode(palette):
    """
    Checks the default editor background color, and adjusts the color scheme
    if it looks like dark-mode is enabled.
    """
    # Don't mess with these directly: Use the SVG in myokit-docs
    if myokit.gui._in_dark_mode(palette):
        STYLE_HEADER.setForeground(QtGui.QColor(98, 178, 255))
        STYLE_COMMENT.setForeground(QtGui.QColor(153, 153, 153))
        STYLE_ANNOT_KEY.setForeground(QtGui.QColor(179, 179, 179))
        STYLE_ANNOT_VAL.setForeground(QtGui.QColor(171, 177, 205))
        STYLE_KEYWORD_1.setForeground(QtGui.QColor(10, 195, 87))
        STYLE_KEYWORD_1.setFontWeight(QtGui.QFont.Weight.Bold)
        STYLE_KEYWORD_2.setForeground(QtGui.QColor(10, 195, 87))
        STYLE_LITERAL.setForeground(QtGui.QColor(255, 223, 12))
        STYLE_INLINE_UNIT.setForeground(QtGui.QColor(168, 152, 33))
        # The line-highlight color is module-level state read by Editor.
        global COLOR_SELECTED_LINE
        COLOR_SELECTED_LINE = QtGui.QColor(70, 70, 70)
    else:
        STYLE_HEADER.setForeground(QtGui.QColor(0, 31, 231))
        STYLE_COMMENT.setForeground(QtGui.QColor(103, 161, 107))
        STYLE_ANNOT_KEY.setForeground(QtGui.QColor(0, 31, 231))
        STYLE_ANNOT_VAL.setForeground(QtGui.QColor(57, 115, 214))
        STYLE_KEYWORD_1.setForeground(QtGui.QColor(0, 128, 0))
        STYLE_KEYWORD_1.setFontWeight(QtGui.QFont.Weight.Bold)
        STYLE_KEYWORD_2.setForeground(QtGui.QColor(0, 128, 128))
        STYLE_LITERAL.setForeground(QtGui.QColor(255, 20, 215))
        STYLE_INLINE_UNIT.setForeground(QtGui.QColor(128, 0, 128))
# Classes & methods
class Editor(QtWidgets.QPlainTextEdit):
"""
Source code editor used in Myokit.
Provides the signal ``find_action(str)`` which is fired everything a find
action occurred with a description that can be used in an application's
status bar.
"""
    def __init__(self, parent=None):
        """Create a new editor widget, optionally inside ``parent``."""
        super().__init__(parent)
        # Current style
        self._palette = QtGui.QGuiApplication.palette()
        _adapt_for_dark_mode(self._palette)
        # Apply default settings
        self._default_settings()
        # Add line number area (LineNumberArea is defined elsewhere in this
        # module)
        self._line_number_area = LineNumberArea(self)
        self._line_number_area.update_width(0)
        # Add current line highlighting and bracket matching
        self.cursorPositionChanged.connect(self.cursor_changed)
        self.cursor_changed()
        # Line position: pixel offset of the 79-character guide line drawn
        # in paintEvent().
        try:
            # https://doc.qt.io/qt-5/qfontmetrics.html#horizontalAdvance
            # Qt 5.5.11 and onwards
            self._line_offset = self.fontMetrics().horizontalAdvance(' ' * 79)
        except AttributeError:
            self._line_offset = self.fontMetrics().width(' ' * 79)
        # Number of blocks in page up/down
        self._blocks_per_page = 1
        # Last position in line, used for smart up/down buttons
        self._last_column = None
        self.textChanged.connect(self._text_has_changed)
    def cursor_changed(self):
        """ Slot: Called when the cursor position is changed.

        Highlights the current line and, when the cursor touches a bracket,
        highlights that bracket and its matching partner.
        """
        # Highlight current line
        extra_selections = []
        selection = QtWidgets.QTextEdit.ExtraSelection()
        selection.format.setBackground(COLOR_SELECTED_LINE)
        selection.format.setProperty(
            QtGui.QTextFormat.Property.FullWidthSelection, True)
        selection.cursor = self.textCursor()
        selection.cursor.clearSelection()
        extra_selections.append(selection)
        # Bracket matching
        cursor = self.textCursor()
        if not cursor.hasSelection():
            # Test if in front of or behind an opening or closing bracket
            pos = cursor.position()
            bracket = None
            if not cursor.atEnd():
                cursor.setPosition(
                    pos + 1, QtGui.QTextCursor.MoveMode.KeepAnchor)
                text = cursor.selectedText()
                if text in BRACKETS:
                    bracket = cursor
            # NOTE(review): because this is an `elif` on `not cursor.atEnd()`
            # the backward check only runs when the cursor is at the very end
            # of the document; possibly a separate `if` was intended.
            elif bracket is None and not cursor.atStart():
                cursor.setPosition(pos - 1)
                cursor.setPosition(pos, QtGui.QTextCursor.MoveMode.KeepAnchor)
                text = cursor.selectedText()
                if text in BRACKETS:
                    bracket = cursor
            if bracket:
                # Find matching partner: scan in the bracket's direction,
                # counting nested same-type brackets via `depth`.
                doc = self.document()
                depth = 1
                start = bracket.position()
                while depth > 0:
                    if text in BRACKETS_CLOSE:
                        other = doc.find(
                            text, start - 1,
                            QtGui.QTextDocument.FindFlag.FindBackward)
                        match = doc.find(
                            BRACKETS[text], start - 1,
                            QtGui.QTextDocument.FindFlag.FindBackward)
                    else:
                        other = doc.find(text, start)
                        match = doc.find(BRACKETS[text], start)
                    if match.isNull():
                        break
                    if other.isNull():
                        depth -= 1
                        start = match.position()
                    elif text in BRACKETS_CLOSE:
                        if other.position() < match.position():
                            depth -= 1
                            start = match.position()
                        else:
                            depth += 1
                            start = other.position()
                    else:
                        if match.position() < other.position():
                            depth -= 1
                            start = match.position()
                        else:
                            depth += 1
                            start = other.position()
                if depth == 0:
                    # Apply formatting to both brackets of the matched pair
                    selection = QtWidgets.QTextEdit.ExtraSelection()
                    selection.cursor = bracket
                    selection.format.setBackground(self._palette.mid())
                    selection.format.setForeground(COLOR_BRACKET)
                    extra_selections.append(selection)
                    selection = QtWidgets.QTextEdit.ExtraSelection()
                    selection.cursor = match
                    selection.format.setBackground(self._palette.mid())
                    selection.format.setForeground(COLOR_BRACKET)
                    extra_selections.append(selection)
        if extra_selections:
            self.setExtraSelections(extra_selections)
def cursor_position(self):
"""
Returns a tuple ``(line, char)`` with the current cursor position. If
a selection is made only the left position is used.
Line and char counts both start at zero.
"""
cursor = self.textCursor()
line = cursor.blockNumber()
char = cursor.selectionStart() - cursor.block().position()
return (line, char)
def _default_settings(self):
""" Applies this editor's default settings. """
# Set font
self.setFont(FONT)
# Set frame
self.setFrameStyle(
QtWidgets.QFrame.Shape.WinPanel | QtWidgets.QFrame.Shadow.Sunken)
# Disable wrapping
self.setLineWrapMode(QtWidgets.QPlainTextEdit.LineWrapMode.NoWrap)
# Set tab width (if ever seen) to 4 spaces
try:
# https://doc.qt.io/qt-5/qtextedit-obsolete.html
# https://doc.qt.io/qt-5/qfontmetrics.html#horizontalAdvance
# Qt 5.10/5.11 and onwards
ts = self.fontMetrics().horizontalAdvance(' ' * 4)
self.setTabStopDistance(ts)
except AttributeError:
ts = self.fontMetrics().width(' ' * 4)
self.setTabStopWidth(ts)
    def get_text(self):
        """ Returns the full text in this editor as a plain string. """
        return self.toPlainText()
def jump_to(self, line, char):
""" Jumps to the given line and row (with indices starting at 0). """
block = self.document().findBlockByNumber(line)
cursor = self.textCursor()
cursor.setPosition(block.position() + char)
self.setTextCursor(cursor)
self.centerCursor()
    def keyPressEvent(self, event):
        """ Qt event: A key was pressed.

        Implements the editor's custom keyboard handling: tab/backtab
        (un)indenting, auto-indenting enter, smart home, Alt+Up/Down line
        moving, column-preserving vertical navigation and insert/overwrite
        toggling; all other keys fall through to QPlainTextEdit.
        """
        K = Qt.Key
        KM = Qt.KeyboardModifier
        MM = QtGui.QTextCursor.MoveMode
        MO = QtGui.QTextCursor.MoveOperation
        # Get key and modifiers
        key = event.key()
        mod = event.modifiers()
        # Possible modifiers:
        # NoModifier
        # ShiftModifier, ControlModifier, AltModifiier
        # MetaModifier (i.e. super key)
        # KeyPadModifier (button is part of keypad)
        # GroupSwitchModifier (x11 thing)
        # Ignore the keypad modifier, we don't care!
        if mod & KM.KeypadModifier:
            mod = mod ^ KM.KeypadModifier # xor!
        # Actions per key/modifier combination
        if key == K.Key_Tab and mod == KM.NoModifier:
            # Indent
            cursor = self.textCursor()
            start, end = cursor.selectionStart(), cursor.selectionEnd()
            if cursor.hasSelection():
                # Add single tab to all lines in selection
                cursor.beginEditBlock() # Undo grouping
                doc = self.document()
                b = doc.findBlock(start)
                e = doc.findBlock(end).next()
                while b != e:
                    cursor.setPosition(b.position())
                    cursor.insertText(TABS * SPACE)
                    b = b.next()
                cursor.endEditBlock()
            else:
                # Insert spaces until next tab stop
                pos = cursor.positionInBlock()
                cursor.insertText((TABS - pos % TABS) * SPACE)
        elif key == K.Key_Backtab and mod == KM.ShiftModifier:
            # Dedent all lines in selection (or single line if no selection)
            '''
            cursor = self.textCursor()
            start, end = cursor.selectionStart(), cursor.selectionEnd()
            cursor.beginEditBlock() # Undo grouping
            doc = self.document()
            # Get blocks in selection
            blocks = []
            b = doc.findBlock(start)
            while b.isValid() and b.position() <= end:
                blocks.append(b)
                b = b.next()
            # Dedent
            for b in blocks:
                t = b.text()
                p1 = b.position()
                p2 = p1 + min(4, len(t) - len(t.lstrip()))
                c = self.textCursor()
                c.setPosition(p1)
                c.setPosition(p2, MM.KeepAnchor)
                c.removeSelectedText()
            cursor.endEditBlock()
            '''
            # This silly method is required because of a bug in qt5 (and 6?)
            cursor = self.textCursor()
            start, end = cursor.selectionStart(), cursor.selectionEnd()
            first = self.document().findBlock(start)
            q = 0
            new_text = []
            new_start, new_end = start, end
            b = QtGui.QTextBlock(first)
            while b.isValid() and b.position() <= end:
                t = b.text()
                p = min(4, len(t) - len(t.lstrip()))
                new_text.append(t[p:])
                if b == first:
                    new_start -= p
                new_end -= p
                q += p
                b = b.next()
            last = b.previous()
            new_start = max(new_start, first.position())
            new_end = max(new_end, new_start)
            if q > 0:
                # Cut text, replace with new
                cursor.beginEditBlock()
                cursor.setPosition(first.position())
                cursor.setPosition(
                    last.position() + last.length() - 1, MM.KeepAnchor)
                cursor.removeSelectedText()
                cursor.insertText('\n'.join(new_text))
                cursor.endEditBlock()
            # Set new cursor
            cursor.setPosition(new_start)
            cursor.setPosition(new_end, MM.KeepAnchor)
            self.setTextCursor(cursor)
        elif key == K.Key_Enter or key == K.Key_Return:
            # Enter/Return with modifier is overruled here to mean nothing
            # This is very important as the default for shift-enter is to
            # start a new line within the same block (this can't happen with
            # copy-pasting, so it's safe to just catch it here).
            if mod == KM.NoModifier:
                # "Smart" enter:
                # - If selection, selection is deleted
                # - Else, autoindenting is performed
                cursor = self.textCursor()
                cursor.beginEditBlock()
                if cursor.hasSelection():
                    # Replace selection with newline,
                    cursor.removeSelectedText()
                    cursor.insertBlock()
                else:
                    # Insert new line with correct indenting
                    b = self.document().findBlock(cursor.position())
                    t = b.text()
                    i = t[:len(t) - len(t.lstrip())]
                    i = i[:cursor.positionInBlock()]
                    cursor.insertBlock()
                    cursor.insertText(i)
                cursor.endEditBlock()
                # Scroll if necessary
                self.ensureCursorVisible()
        elif key == K.Key_Home and (
                mod == KM.NoModifier or mod == KM.ShiftModifier):
            # Plain home button: move to start of line
            # If Control is used: Jump to start of document
            # Ordinary home button: Jump to first column or first
            # non-whitespace character
            cursor = self.textCursor()
            block = cursor.block()
            cp = cursor.position()
            bp = block.position()
            if cp != bp:
                # Jump to first column
                newpos = bp
                # Smart up/down:
                self._last_column = 0
            else:
                # Already at first column: Jump to first non-whitespace or
                # end of line if all whitespace
                t = block.text()
                indent = len(t) - len(t.lstrip())
                newpos = bp + indent
                # Smart up/down:
                self._last_column = indent
            # If Shift is used: only move position (keep anchor, i.e. select)
            cursor.setPosition(
                newpos,
                MM.KeepAnchor if mod == KM.ShiftModifier else MM.MoveAnchor)
            self.setTextCursor(cursor)
        elif key == K.Key_Home and (
                mod == KM.ControlModifier
                or mod == KM.ControlModifier & KM.ShiftModifier):
            # NOTE(review): `ControlModifier & ShiftModifier` is a bitwise
            # AND of two distinct flags, i.e. NoModifier — so Ctrl+Shift+Home
            # falls through to the default handler; `|` was probably meant,
            # and the KeepAnchor branch below is then unreachable.
            # Move to start of document
            # If Shift is used: only move position (keep anchor, i.e. select)
            cursor = self.textCursor()
            cursor.setPosition(
                0, MM.KeepAnchor if mod == KM.ShiftModifier else MM.MoveAnchor)
            self.setTextCursor(cursor)
        elif key in (K.Key_Up, K.Key_Down) and mod == KM.AltModifier:
            # Move selected lines up or down
            # Get current selection
            doc = self.document()
            cursor = self.textCursor()
            start, end = cursor.selectionStart(), cursor.selectionEnd()
            block1 = doc.findBlock(start)
            if start == end:
                block2 = block1
            else:
                block2 = doc.findBlock(end)
            # Whole line selection? Then move end back 1 position
            if end == block2.position():
                end -= 1
                block2 = block2.previous() # always valid
            block2 = block1 if start == end else doc.findBlock(end)
            # Check if we can move
            if key == K.Key_Up:
                if not block1.previous().isValid():
                    return
            elif not block2.next().isValid():
                return
            # Select full line(s)
            b1pos = block1.position()
            cursor.beginEditBlock()
            cursor.setPosition(b1pos)
            cursor.setPosition(end, MM.KeepAnchor)
            cursor.movePosition(MO.EndOfLine, MM.KeepAnchor)
            line = cursor.selectedText()
            size = cursor.selectionEnd() - cursor.selectionStart()
            cursor.removeSelectedText()
            if key == K.Key_Up:
                cursor.deletePreviousChar()
                cursor.movePosition(MO.StartOfLine)
                cursor.insertText(line + '\n')
                cursor.movePosition(MO.Left)
            else:
                cursor.deleteChar()
                cursor.movePosition(MO.EndOfLine)
                cursor.insertText('\n' + line)
            cursor.endEditBlock()
            # Cursor is at the end of the moved lines.
            # Set moved lines as selection
            cursor.movePosition(MO.Left, MM.KeepAnchor, size)
            self.setTextCursor(cursor)
        elif key in (K.Key_Up, K.Key_Down, K.Key_PageUp, K.Key_PageDown) \
                and (mod == KM.NoModifier or mod == KM.ShiftModifier):
            # Move cursor up/down
            # Maintain the column position, even when the current row doesn't
            # have as many characters. Reset this behavior as soon as a
            # left/right home/end action is made or whenever the text is
            # changed.
            # Set up operation
            anchor = (
                MM.KeepAnchor if mod == KM.ShiftModifier else MM.MoveAnchor)
            operation = (MO.PreviousBlock if key in (K.Key_Up, K.Key_PageUp)
                         else MO.NextBlock)
            n = 1 if key in (K.Key_Up, K.Key_Down) else (
                self._blocks_per_page - 3)
            # Move
            cursor = self.textCursor()
            if self._last_column is None:
                # Update "smart" column
                self._last_column = cursor.positionInBlock()
            if cursor.movePosition(operation, anchor, n):
                column = min(cursor.block().length() - 1, self._last_column)
                cursor.setPosition(cursor.position() + column, anchor)
            else:
                # Up/Down beyond document start/end? Move cursor to document
                # start/end and update last column
                if operation == MO.NextBlock:
                    cursor.movePosition(MO.EndOfBlock, anchor)
                else:
                    cursor.movePosition(MO.StartOfBlock, anchor)
                self._last_column = cursor.positionInBlock()
            self.setTextCursor(cursor)
        elif key in (K.Key_Left, K.Key_Right, K.Key_End) and not (
                mod & KM.AltModifier):
            # Allow all modifiers except alt
            # Reset smart up/down behavior
            self._last_column = None
            # Pass to parent class
            super().keyPressEvent(event)
        elif key == K.Key_Insert and mod == KM.NoModifier:
            # Insert/replace
            self.setOverwriteMode(not self.overwriteMode())
        else:
            # Default keyboard shortcuts / functions:
            # Backspace OK
            # Delete OK
            # Control+C OK
            # Control+V OK
            # Control+X OK
            # Control+Insert OK
            # Shift+Insert OK
            # Shift+Delete OK
            # Control+Z OK
            # Control+Y OK
            # LeftArrow Overwritten (maintained)
            # RightArrow Overwritten (maintained)
            # UpArrow Overwritten (maintained)
            # DownArrow Overwritten (maintained)
            # Control+RightArrow OK (Jump to next word)
            # Control+LeftArrow OK (Jump to previous word)
            # Control+UpArrow Removed
            # Control+Down Arrow Removed
            # PageUp Overwritten (maintained)
            # PageDown Overwritten (maintained)
            # Home Overwritten (maintained)
            # End Overwritten (maintained)
            # Control+Home Overwritten (maintained)
            # Control+End Overwritten (maintained)
            # Alt+Wheel OK (Horizontal scrolling)
            # Control+Wheel OK (Fast scrolling)
            # Control+K Removed
            # Not listed, but very important:
            # Shift-Enter Starts new line within the same block!
            # Definitely removed
            # Ctrl-i Undocumented, but inserts tab...
            ctrl_ignore = (K.Key_K, K.Key_I)
            if mod == KM.ControlModifier and key in ctrl_ignore:
                # Control-K: ignore
                pass
            elif key == K.Key_Up or key == K.Key_Down:
                # Up/down with modifiers: ignore
                pass
            else:
                # Let parent class handle it
                super().keyPressEvent(event)
def _line_number_area_width(self):
""" Returns the required width for the number area. """
text = str(max(1, self.blockCount()))
try:
# https://doc.qt.io/qt-5/qfontmetrics.html#horizontalAdvance
# Qt 5.5.11 and onwards
return 8 + self.fontMetrics().horizontalAdvance(text)
except AttributeError:
return 8 + self.fontMetrics().width(text)
    def _line_number_area_paint(self, area, event):
        """ Repaints the line number area.

        ``area`` is the :class:`LineNumberArea` widget to paint on;
        ``event`` is the paint event that requested the repaint.
        """
        # Area to repaint
        rect = event.rect()
        etop = rect.top()
        ebot = rect.bottom()
        # Font metrics
        metrics = self.fontMetrics()
        height = metrics.height()
        width = area.width()
        # Create painter, set font color
        painter = QtGui.QPainter(area)
        painter.fillRect(rect, self._palette.button())
        painter.setPen(self._palette.buttonText().color())
        # Get top and bottom of first visible block
        block = self.firstVisibleBlock()
        geom = self.blockBoundingGeometry(block)
        btop = int(geom.translated(self.contentOffset()).top())
        bbot = int(btop + geom.height())
        # Iterate over visible blocks
        # Increment before drawing: displayed numbers are 1-based while
        # blockNumber() is 0-based.
        count = block.blockNumber()
        while block.isValid() and btop <= ebot:
            count += 1
            if block.isVisible() and bbot >= etop:
                painter.drawText(0, btop, width - 4, height,
                                 Qt.AlignmentFlag.AlignRight, str(count))
            block = block.next()
            # The next block starts where the current one ended
            btop = bbot
            bbot += int(self.blockBoundingRect(block).height())
def paintEvent(self, e):
""" Paints this editor. """
# Paint the editor
super().paintEvent(e)
# Paint a line between the editor and the line number area
x = int(
self.contentOffset().x()
+ self.document().documentMargin()
+ self._line_offset
)
p = QtGui.QPainter(self.viewport())
p.setPen(QtGui.QPen(QtGui.QColor('#ddd')))
rect = e.rect()
p.drawLine(x, rect.top(), x, rect.bottom())
    def replace(self, text):
        """
        Replaces the current text with the given text, in a single operation
        that does not reset undo/redo.
        """
        self.selectAll()
        cursor = self.textCursor()
        # Group removal and insertion into a single undo step
        cursor.beginEditBlock()
        cursor.removeSelectedText()
        # NOTE(review): insertion goes via appendPlainText() rather than
        # cursor.insertText(); presumably so the widget's own cursor and
        # scroll state update as well -- confirm before changing.
        self.appendPlainText(str(text))
        cursor.endEditBlock()
def resizeEvent(self, event):
""" Qt event: Editor is resized. """
super().resizeEvent(event)
# Update line number area
rect = self.contentsRect()
self._line_number_area.setGeometry(
rect.left(), rect.top(),
self._line_number_area_width(), rect.height())
# Set number of "blocks" per page
font = self.fontMetrics()
self._blocks_per_page = int(rect.height() / font.height())
def set_cursor(self, pos):
"""
Changes the current cursor to the given position and scrolls so that
its visible.
"""
cursor = self.textCursor()
cursor.setPosition(pos)
self.setTextCursor(cursor)
self.centerCursor()
def set_text(self, text):
""" Replaces the text in this editor. """
if text:
self.setPlainText(str(text))
else:
# Bizarre workaround for bug:
# https://bugreports.qt.io/browse/QTBUG-42318
self.selectAll()
cursor = self.textCursor()
cursor.removeSelectedText()
doc = self.document()
doc.clearUndoRedoStacks()
doc.setModified(False)
def _text_has_changed(self):
"""
Called whenever the text has changed, resets the smart up/down
behavior.
"""
self._last_column = None
    def toggle_comment(self):
        """ Comments or uncomments the selected lines """
        # Comment or uncomment selected lines
        cursor = self.textCursor()
        start, end = cursor.selectionStart(), cursor.selectionEnd()
        doc = self.document()
        first, last = doc.findBlock(start), doc.findBlock(end)
        # Determine minimum indent and adding or removing
        block = first
        blocks = [first]
        while block != last:
            block = block.next()
            blocks.append(block)
        lines = [block.text() for block in blocks]
        # Common indent = minimum indentation over all non-empty lines
        indent = [len(t) - len(t.lstrip()) for t in lines if len(t) > 0]
        indent = min(indent) if indent else 0
        # Uncomment only if every selected line has '#' at the common
        # indent; note that an empty line in the selection fails this test,
        # so the selection is commented instead.
        remove = True
        for line in lines:
            if line[indent:indent + 1] != '#':
                remove = False
                break
        cursor.beginEditBlock()  # Group all edits into one undo step
        if remove:
            # Delete the single '#' at the common indent of each line
            for block in blocks:
                p = block.position() + indent
                cursor.setPosition(p)
                cursor.setPosition(
                    p + 1, QtGui.QTextCursor.MoveMode.KeepAnchor)
                cursor.removeSelectedText()
        else:
            for block in blocks:
                p = block.position()
                n = len(block.text())
                if len(block.text()) < indent:
                    # Line shorter than the common indent (e.g. whitespace
                    # only): replace it with padding plus '#'
                    cursor.setPosition(p)
                    cursor.setPosition(
                        p + n, QtGui.QTextCursor.MoveMode.KeepAnchor)
                    cursor.removeSelectedText()
                    cursor.insertText(' ' * indent + '#')
                else:
                    # Insert a '#' at the common indent
                    cursor.setPosition(p + indent)
                    cursor.insertText('#')
        cursor.endEditBlock()
def trim_trailing_whitespace(self):
""" Trims all trailing whitespace from this document. """
block = self.document().begin()
cursor = self.textCursor()
cursor.beginEditBlock() # Undo grouping
while block.isValid():
t = block.text()
a = len(t)
b = len(t.rstrip())
if a > b:
cursor.setPosition(block.position() + b)
cursor.setPosition(block.position() + a,
QtGui.QTextCursor.MoveMode.KeepAnchor)
cursor.removeSelectedText()
block = block.next()
cursor.endEditBlock()
class LineNumberArea(QtWidgets.QWidget):
    """
    Widget that shows line numbers for an :class:`Editor`.

    It lives in the left viewport margin of the editor; the actual painting
    and width calculations are delegated back to the editor.
    """
    def __init__(self, editor):
        super().__init__(editor)
        self._editor = editor
        # Track content changes so the numbers stay in sync
        editor.blockCountChanged.connect(self.update_width)
        editor.updateRequest.connect(self.update_contents)

    def paintEvent(self, event):
        """ Qt event: Paint this area. """
        self._editor._line_number_area_paint(self, event)

    def sizeHint(self):
        """ Qt event: Suggest a size for this area. """
        return QtCore.QSize(self._editor._line_number_area_width(), 0)

    def update_contents(self, rect, scroll):
        """
        Slot: the editor's view changed; scroll or repaint the numbers.
        """
        if scroll:
            # View was scrolled vertically: scroll along with it
            self.scroll(0, scroll)
        else:
            self.update()

    def update_width(self, count):
        """
        Slot: the number of lines changed; update the editor's left viewport
        margin so this area keeps fitting all line numbers.
        """
        self._editor.setViewportMargins(
            2 + self._editor._line_number_area_width(), 0, 0, 0)
class FindReplaceWidget(QtWidgets.QWidget):
    """
    Find/replace widget for :class:`Editor`.

    Searches are run directly on the editor; results and status updates are
    reported through the :attr:`find_action` signal.
    """
    # Signal: Find action happened, update with text
    # Attributes: (description)
    find_action = QtCore.Signal(str)
    def __init__(self, parent, editor):
        super().__init__(parent)
        self._editor = editor
        # Create widgets
        self._replace_all_button = QtWidgets.QPushButton('Replace all')
        self._replace_all_button.clicked.connect(self.action_replace_all)
        self._replace_button = QtWidgets.QPushButton('Replace')
        self._replace_button.clicked.connect(self.action_replace)
        self._find_button = QtWidgets.QPushButton('Find')
        self._find_button.clicked.connect(self.action_find)
        self._search_label = QtWidgets.QLabel('Search for')
        self._search_field = QtWidgets.QLineEdit()
        self._replace_label = QtWidgets.QLabel('Replace with')
        self._replace_field = QtWidgets.QLineEdit()
        self._case_check = QtWidgets.QCheckBox('Case sensitive')
        self._whole_check = QtWidgets.QCheckBox('Match whole word only')
        # Create layout
        text_layout = QtWidgets.QGridLayout()
        text_layout.addWidget(self._search_label, 0, 0)
        text_layout.addWidget(self._search_field, 0, 1)
        text_layout.addWidget(self._replace_label, 1, 0)
        text_layout.addWidget(self._replace_field, 1, 1)
        check_layout = QtWidgets.QBoxLayout(
            QtWidgets.QBoxLayout.Direction.TopToBottom)
        check_layout.addWidget(self._case_check)
        check_layout.addWidget(self._whole_check)
        button_layout = QtWidgets.QGridLayout()
        button_layout.addWidget(self._replace_all_button, 0, 1)
        button_layout.addWidget(self._replace_button, 0, 2)
        button_layout.addWidget(self._find_button, 0, 3)
        layout = QtWidgets.QBoxLayout(
            QtWidgets.QBoxLayout.Direction.TopToBottom)
        layout.addLayout(text_layout)
        layout.addLayout(check_layout)
        layout.addLayout(button_layout)
        layout.addStretch(1)
        self.setLayout(layout)
        # Accept keyboard focus on search and replace fields
        self._search_field.setEnabled(True)
        self._replace_field.setEnabled(True)
    def action_find(self):
        """ Qt slot: Find (next) item.

        Searches forward from the cursor; if nothing is found, wraps around
        once from the top of the document. Emits :attr:`find_action` with a
        status message either way.
        """
        query = self._search_field.text()
        if query == '':
            self.find_action.emit('No query set')
            return
        # Build search flags from the two check boxes
        flags = QtGui.QTextDocument.FindFlag(0)
        if self._case_check.isChecked():
            flags |= QtGui.QTextDocument.FindFlag.FindCaseSensitively
        if self._whole_check.isChecked():
            flags |= QtGui.QTextDocument.FindFlag.FindWholeWords
        if flags:
            found = self._editor.find(query, flags)
        else:
            found = self._editor.find(query)
        if found is False:
            # Not found? Try from top of document
            previous_cursor = self._editor.textCursor()
            previous_scroll = self._editor.verticalScrollBar().value()
            cursor = self._editor.textCursor()
            cursor.setPosition(0)
            self._editor.setTextCursor(cursor)
            if flags:
                found = self._editor.find(query, flags)
            else:
                found = self._editor.find(query)
            if found is False:
                # Still nothing: restore cursor and scroll position
                self._editor.setTextCursor(previous_cursor)
                self._editor.verticalScrollBar().setValue(previous_scroll)
                self.find_action.emit('Query not found.')
                return
        cursor = self._editor.textCursor()
        line = 1 + cursor.blockNumber()
        char = cursor.selectionStart() - cursor.block().position()
        self.find_action.emit(
            'Match found on line ' + str(line) + ' char ' + str(char) + '.')
    def action_replace(self):
        """ Qt slot: Replace found item with replacement.

        Only replaces if the current selection matches the query; in all
        cases it then jumps to the next match via :meth:`action_find`.
        """
        query = self._search_field.text()
        replacement = self._replace_field.text()
        if query == '':
            self.find_action.emit('No query set')
            return
        cursor = self._editor.textCursor()
        # Compare selection and query, case-insensitively if required
        a, b = cursor.selectedText(), query
        if not self._case_check.isChecked():
            a, b = a.lower(), b.lower()
        if a == b:
            cursor.insertText(replacement)
        self.action_find()
    def action_replace_all(self):
        """ Qt slot: Replace all found items with replacement """
        query = self._search_field.text()
        replacement = self._replace_field.text()
        if query == '':
            self.find_action.emit('No query set')
            return
        flags = QtGui.QTextDocument.FindFlag(0)
        if self._case_check.isChecked():
            flags |= QtGui.QTextDocument.FindFlag.FindCaseSensitively
        if self._whole_check.isChecked():
            flags |= QtGui.QTextDocument.FindFlag.FindWholeWords
        n = 0
        found = True
        # Remember scroll position; all edits are grouped into one undo step
        scrollpos = self._editor.verticalScrollBar().value()
        grouping = self._editor.textCursor()
        grouping.beginEditBlock()
        continue_from_top = True
        while found:
            if flags:
                found = self._editor.find(query, flags)
            else:
                found = self._editor.find(query)
            if not found and continue_from_top:
                # Not found? Try from top of document
                cursor = self._editor.textCursor()
                cursor.setPosition(0)
                self._editor.setTextCursor(cursor)
                if flags:
                    found = self._editor.find(query, flags)
                else:
                    found = self._editor.find(query)
                # Don't keep going round and round
                # (This can happen if you replace something with itself, or
                # with a different case version of itself in a case-insensitive
                # search).
                continue_from_top = False
            if found:
                cursor = self._editor.textCursor()
                cursor.insertText(replacement)
                n += 1
        grouping.endEditBlock()
        self._editor.setTextCursor(grouping)
        self._editor.verticalScrollBar().setValue(scrollpos)
        self.find_action.emit('Replaced ' + str(n) + ' occurrences.')
    def activate(self):
        """ Updates the contents of the search field and gives it focus. """
        cursor = self._editor.textCursor()
        if cursor.hasSelection():
            # Pre-fill the search field with the current selection
            self._search_field.setText(cursor.selectedText())
        self._search_field.selectAll()
        self._search_field.setFocus()
    def keyPressEvent(self, event):
        """ Qt event: A key-press reaches the widget.

        Enter/Return triggers a find; all other keys are handled normally.
        """
        key = event.key()
        if key == Qt.Key.Key_Enter or key == Qt.Key.Key_Return:
            self.action_find()
        else:
            super().keyPressEvent(event)
    def load_config(self, config, section):
        """
        Loads this search's configuration using the given :class:`ConfigParser`
        ``config``. Loads all settings from the section ``section``.
        """
        if config.has_section(section):
            # Find options: case sensitive / whole word
            if config.has_option(section, 'case_sensitive'):
                self._case_check.setChecked(
                    config.getboolean(section, 'case_sensitive'))
            if config.has_option(section, 'whole_word'):
                self._whole_check.setChecked(
                    config.getboolean(section, 'whole_word'))
    def save_config(self, config, section):
        """
        Saves this search's configuration using the given :class:`ConfigParser`
        ``config``. Stores all settings in the section ``section``.
        """
        config.add_section(section)
        # Find options: case sensitive / whole word
        # NOTE(review): booleans are passed to config.set(); the stdlib
        # ConfigParser only accepts string values here -- confirm that the
        # parser used by callers tolerates (or stringifies) non-strings.
        config.set(section, 'case_sensitive', self._case_check.isChecked())
        config.set(section, 'whole_word', self._whole_check.isChecked())
class ModelHighlighter(QtGui.QSyntaxHighlighter):
    """
    Syntax highlighter for ``mmt`` model definitions.

    Handles single and multi-line ``\"\"\"`` strings, comments, numbers,
    bracketed units, keywords, section headers and meta-data annotations.
    """
    # Keyword groups, each drawn in its own style
    KEYWORD_1 = ['use', 'as']
    KEYWORD_2 = ['and', 'or', 'not']
    ANNOT_KEYS = ['in', 'bind', 'label']
    def __init__(self, document):
        super().__init__(document)
        # Expressions used to find strings & comments
        R = QtCore.QRegularExpression
        self._string = R(r'"""')
        # Headers, e.g. [component] or [[model]]
        name = r'[a-zA-Z]+[a-zA-Z0-9_]*'
        # Fix: the closing part must be a raw string too, otherwise '\]' is
        # an invalid escape sequence (SyntaxWarning in recent Python).
        self._rule_head = R(r'^\s*(\[{1,2}' + name + r'\]{1,2})')
        # Simple (pattern, style) rules
        self._rules = []
        # Numbers
        pattern = R(r'\b[+-]?[0-9]*\.?[0-9]+([eE][+-]?[0-9]+)?\b')
        self._rules.append((pattern, STYLE_LITERAL))
        # Units in square brackets, e.g. [mV] or [m/s^2]
        unit = r'\[([a-zA-Z0-9/^-]|\*)+\]'
        self._rules.append((R(unit), STYLE_INLINE_UNIT))
        # Keywords
        for keyword in self.KEYWORD_1:
            self._rules.append((R(r'\b' + keyword + r'\b'), STYLE_KEYWORD_1))
        for keyword in self.KEYWORD_2:
            self._rules.append((R(r'\b' + keyword + r'\b'), STYLE_KEYWORD_2))
        # Meta-data coloring: "bind x" and "label x" annotations
        self._rules_labels = [
            R(r'(\s*)(bind)\s+(' + name + ')'),
            R(r'(\s*)(label)\s+(' + name + ')'),
        ]
        # Meta properties, e.g. "key: value", and variable units "in [x]"
        self._rule_meta = R(r'^\s*(' + name + r':)(\s*)(.+)')
        self._rule_var_unit = R(r'^(\s*)(in)(\s*)(' + unit + ')')
        # Comment
        self._comment = R(r'#')
    def _highlight_ok(self, strings, start, length):
        """ Checks if the string ``start`` to ``length`` needs formatted.

        Returns ``False`` when the span overlaps one of the (start, end)
        pairs in ``strings``, i.e. when it lies inside a string literal.
        """
        for lo, hi in strings:
            if lo <= start < hi or lo <= start + length < hi:
                return False
        return True
    def highlightBlock(self, text):
        """ Qt: Called whenever a block should be highlighted. """
        # To avoid formatting within strings each is stored as a (start, end).
        strings = []
        # If the start has been handled, set the offset.
        offset = 0
        # If the end has been handled, chop it off the string
        # Multi-line strings are done first, because they overrule a lot of
        # things and we can skip formatting if we're inside one.
        # Block states: 0=No string, 1=A """ string
        self.setCurrentBlockState(0)
        # Continuing a multi-line string?
        if self.previousBlockState() == 1:
            # Search for string stop
            ms = self._string.match(text)
            if ms.hasMatch():
                # Terminate the multi-line string
                offset = ms.capturedEnd(0)
                self.setFormat(0, offset, STYLE_ANNOT_VAL)
            else:
                # Whole line in the string
                self.setCurrentBlockState(1)
                self.setFormat(0, len(text), STYLE_ANNOT_VAL)
                return
        else:
            # Search for string start
            ms = self._string.match(text)
            if ms.hasMatch():
                # Potential start, but check that it's not commented out
                start = ms.capturedStart()
                mc = self._comment.match(text)
                if not (mc.hasMatch() and mc.capturedStart() < start):
                    # Definitely a string start. See if it ends on this line
                    me = self._string.match(text, offset=ms.capturedEnd())
                    if me.hasMatch():
                        # Terminate the single-line string
                        end = me.capturedEnd()
                        self.setFormat(start, end - start, STYLE_ANNOT_VAL)
                        strings.append((start, end))
                    else:
                        # Multi-line string
                        self.setCurrentBlockState(1)
                        self.setFormat(start, len(text), STYLE_ANNOT_VAL)
        # Comment: format to end of line, then stop considering that part
        i = self._comment.globalMatch(text, offset=offset)
        while i.hasNext():
            m = i.next()
            x = m.capturedStart()
            if self._highlight_ok(strings, x, 1):
                self.setFormat(x, len(text) - x, STYLE_COMMENT)
                text = text[:x]
                break
        # Rule-based formatting
        for (pattern, style) in self._rules:
            i = pattern.globalMatch(text, offset=offset)
            while i.hasNext():
                m = i.next()
                x, w = m.capturedStart(), m.capturedLength()
                if self._highlight_ok(strings, x, w):
                    self.setFormat(x, w, style)
        # Model and component headers (must be at start of string)
        if offset == 0:
            m = self._rule_head.match(text)
            if m.hasMatch():
                x, w = m.capturedStart(1), m.capturedLength(1)
                self.setFormat(x, w, STYLE_HEADER)
        # Variable units (must be at start of string)
        if offset == 0:
            m = self._rule_var_unit.match(text)
            if m.hasMatch():
                self.setFormat(
                    m.capturedStart(2), m.capturedLength(2), STYLE_ANNOT_KEY)
                self.setFormat(
                    m.capturedStart(4), m.capturedLength(4), STYLE_ANNOT_VAL)
        # Binds and labels
        for pattern in self._rules_labels:
            i = pattern.globalMatch(text, offset=offset)
            while i.hasNext():
                m = i.next()
                x, w = m.capturedStart(), m.capturedLength()
                if self._highlight_ok(strings, x, w):
                    self.setFormat(m.capturedStart(2), m.capturedLength(2),
                                   STYLE_ANNOT_KEY)
                    self.setFormat(m.capturedStart(3), m.capturedLength(3),
                                   STYLE_ANNOT_VAL)
        # Meta properties (must be at start of string)
        if offset == 0:
            m = self._rule_meta.match(text)
            if m.hasMatch():
                self.setFormat(
                    m.capturedStart(1), m.capturedLength(1), STYLE_ANNOT_KEY)
                # Don't reformat strings (or bits after string end!)
                if m.captured(3)[:3] != '"""':
                    self.setFormat(m.capturedStart(3), m.capturedLength(3),
                                   STYLE_ANNOT_VAL)
class ProtocolHighlighter(QtGui.QSyntaxHighlighter):
    """
    Syntax highlighter for ``mmt`` protocol definitions.
    """
    def __init__(self, document):
        super().__init__(document)
        R = QtCore.QRegularExpression
        # Section header, e.g. [[protocol]]
        self._rule_head = R(r'^\s*(\[\[[a-zA-Z0-9_]+\]\])')
        # Simple (pattern, style) pairs, applied in order to every line
        self._rules = [
            # Numbers (integer, decimal, scientific notation)
            (R(r'\b[+-]?[0-9]*\.?[0-9]+([eE][+-]?[0-9]+)?\b'), STYLE_LITERAL),
            # Keyword "next"
            (R(r'\bnext\b'), STYLE_KEYWORD_1),
            # Comments run from '#' to the end of the line
            (R(r'#[^\n]*'), STYLE_COMMENT),
        ]

    def highlightBlock(self, text):
        """ Qt: Called whenever a block should be highlighted. """
        # Apply the simple rules
        for pattern, style in self._rules:
            matches = pattern.globalMatch(text)
            while matches.hasNext():
                m = matches.next()
                self.setFormat(m.capturedStart(), m.capturedLength(), style)
        # Protocol header (must be at the start of the line)
        m = self._rule_head.match(text)
        if m.hasMatch():
            self.setFormat(
                m.capturedStart(1), m.capturedLength(1), STYLE_HEADER)
class ScriptHighlighter(QtGui.QSyntaxHighlighter):
    """
    Syntax highlighter for ``mmt`` script files.

    Highlights Python keywords, essential built-in functions, literals,
    comments, and single/multi-line strings (both quote styles).
    """
    def __init__(self, document):
        super().__init__(document)
        # Script header
        R = QtCore.QRegularExpression
        self._rule_head = R(r'^\s*(\[\[[a-zA-Z0-9_]+\]\])')
        # Highlighting rules
        self._rules = []
        # Keywords
        import keyword
        for kw in keyword.kwlist:
            self._rules.append((R(r'\b' + kw + r'\b'), STYLE_KEYWORD_1))
        # Built-in essential functions
        # Note: each _PYFUNC entry ends in '()'; in the regex the
        # parentheses form an empty group, so only the name is matched.
        for func in _PYFUNC:
            self._rules.append((R(r'\b' + str(func) + r'\b'), STYLE_KEYWORD_2))
        # Literals: numbers, True, False, None
        # Override some keywords
        self._rules.append((R(r'\b[+-]?[0-9]*\.?[0-9]+([eE][+-]?[0-9]+)?\b'),
                            STYLE_LITERAL))
        self._rules.append((R(r'\bTrue\b'), STYLE_LITERAL))
        self._rules.append((R(r'\bFalse\b'), STYLE_LITERAL))
        self._rules.append((R(r'\bNone\b'), STYLE_LITERAL))
        # Strings: single and triple quoted, both quote characters
        self._s1 = R(r'"')
        self._s2 = R(r"'")
        self._ms1 = R(r'"""')
        self._ms2 = R(r"'''")
        self._s_start = R(r'"""|\'\'\'|"|\'')
        self._s_end = {
            '"': self._s1, "'": self._s2, '"""': self._ms1, "'''": self._ms2}
        # Comments
        self._comment = R(r'#')
    def _highlight_ok(self, strings, start, length):
        """ Checks if the string ``start`` to ``length`` needs formatted.

        Returns ``False`` when the span overlaps one of the (start, end)
        pairs in ``strings``, i.e. when it lies inside a string literal.
        """
        for lo, hi in strings:
            if lo <= start < hi or lo <= start + length < hi:
                return False
        return True
    def highlightBlock(self, text):
        """ Qt: Called whenever a block should be highlighted. """
        # To avoid formatting within strings each is stored as a (start, end).
        strings = []
        # If the start has been handled, set the offset.
        offset = 0
        # If the end has been handled, chop it off the string
        # Multi-line strings are done first, because they overrule a lot of
        # things and we can skip formatting if we're inside one.
        # Block states: 0=No string, 1=A " " " string, 2=A ' ' ' string
        self.setCurrentBlockState(0)
        # Continuing a multi-line string?
        previous = self.previousBlockState()
        if previous == 1 or previous == 2:
            # Search for string stop
            r = self._ms1 if previous == 1 else self._ms2
            ms = r.match(text)
            if ms.hasMatch():
                # Terminate the multi-line string, and increase global offset
                offset = ms.capturedEnd(0)
                self.setFormat(0, offset, STYLE_LITERAL)
            else:
                # Whole line in the string
                self.setCurrentBlockState(previous)
                self.setFormat(0, len(text), STYLE_LITERAL)
                return
        # Search for string starts (single or multi-line)
        stroff = offset  # Offset for string start/end searching
        m1 = self._s_start.match(text, offset=stroff)
        while m1.hasMatch():
            stroff = m1.capturedEnd()
            start = m1.capturedStart()
            # Are we in a comment?
            mc = self._comment.match(text)
            if (mc.hasMatch() and mc.capturedStart() < start):
                # No point searching for further string starts
                stroff = len(text)
            else:
                # Find string end (matching the same quote style)
                m2 = self._s_end[m1.captured()].match(text, offset=stroff)
                if m2.hasMatch():
                    stroff = m2.capturedEnd()
                    # Ignore if escaped
                    if text[m2.capturedStart() - 1] != '\\':
                        # Terminate the single line string and move on
                        self.setFormat(start, stroff - start, STYLE_LITERAL)
                        strings.append((start, stroff))
                elif m1.capturedLength() > 1:
                    # Multi-line string start. Block finished!
                    self.setCurrentBlockState(
                        1 if m1.captured() == '"""' else 2)
                    self.setFormat(start, len(text) - start, STYLE_LITERAL)
                    return
                # No Match? Then not a string so ignore and continue
            m1 = self._s_start.match(text, offset=stroff)
        # Comment: format from '#' to the end of the line
        i = self._comment.globalMatch(text, offset=offset)
        while i.hasNext():
            m = i.next()
            x = m.capturedStart()
            if self._highlight_ok(strings, x, 1):
                self.setFormat(x, len(text) - x, STYLE_COMMENT)
                text = text[:x]
                break
        # Script header (must be at start of string)
        if offset == 0:
            m = self._rule_head.match(text)
            if m.hasMatch():
                self.setFormat(
                    m.capturedStart(1), m.capturedLength(1), STYLE_HEADER)
        # Rule based formatting
        for (pattern, style) in self._rules:
            i = pattern.globalMatch(text, offset=offset)
            while i.hasNext():
                m = i.next()
                x, w = m.capturedStart(), m.capturedLength()
                if self._highlight_ok(strings, x, w):
                    self.setFormat(x, w, style)
# List of essential built-in python functions, used by ScriptHighlighter.
# Each entry includes '()'; in the regexes built from these the parentheses
# become an empty capture group, so effectively the bare name is matched as
# a whole word.
_PYFUNC = [
    'abs()',
    'aiter()',
    'all()',
    'any()',
    'anext()',
    'ascii()',
    'bin()',
    'bool()',
    'breakpoint()',
    'bytearray()',
    'bytes()',
    'callable()',
    'chr()',
    'classmethod()',
    'compile()',
    'complex()',
    'delattr()',
    'dict()',
    'dir()',
    'divmod()',
    'enumerate()',
    'eval()',
    'exec()',
    'filter()',
    'float()',
    'format()',
    'frozenset()',
    'getattr()',
    'globals()',
    'hasattr()',
    'hash()',
    'help()',
    'hex()',
    'id()',
    'input()',
    'int()',
    'isinstance()',
    'issubclass()',
    'iter()',
    'len()',
    'list()',
    'locals()',
    'map()',
    'max()',
    'memoryview()',
    'min()',
    'next()',
    'object()',
    'oct()',
    'open()',
    'ord()',
    'pow()',
    'print()',
    'property()',
    'range()',
    'repr()',
    'reversed()',
    'round()',
    'set()',
    'setattr()',
    'slice()',
    'sorted()',
    'staticmethod()',
    'str()',
    'sum()',
    'super()',
    'tuple()',
    'type()',
    'vars()',
    'zip()',
    '__import__()',
]
| myokit/myokit | myokit/gui/source.py | source.py | py | 52,152 | python | en | code | 29 | github-code | 90 |
25593921281 | from urllib.request import build_opener, HTTPCookieProcessor
from http.cookiejar import LWPCookieJar
# Restore the cookies saved earlier in LWP format, keeping cookies that are
# expired or flagged as discard-on-close as well
cookie = LWPCookieJar()
cookie.load('cookie2.txt', ignore_expires=True, ignore_discard=True)
# Build an opener that sends the loaded cookies with every request
handler = HTTPCookieProcessor(cookie)
opener = build_opener(handler)
# Present a desktop Chrome User-Agent header on all requests
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.67 Safari/537.36')]
response = opener.open('https://www.baidu.com')
print(response.read().decode('utf-8'))
11237399510 | # pylint: disable=missing-function-docstring, missing-module-docstring
# pylint: disable=missing-class-docstring, invalid-name, unused-argument
import http
import unittest
from unittest.mock import patch
from parameterized import parameterized
from shmelegram import app, db
from shmelegram.models import Chat, User, Message, ChatKind
from shmelegram.service import UserService, ChatService, MessageService
from shmelegram.rest_api import (
user as user_api, chat as chat_api, message as message_api
)
# Rebuild the schema so the test module starts from an empty database
db.drop_all()
db.create_all()
# Three users shared by all test cases below
user_1 = User(username='ruser1', password="rUser1_")
user_2 = User(username='ruser2', password="rUser2_")
user_3 = User(username='ruser3', password="rUser3_")
# One group chat (members: users 1 and 3) and one private chat (users 1 and 2)
chat_group = Chat(kind=ChatKind.GROUP, title='Group 1')
chat_private = Chat(kind=ChatKind.PRIVATE)
chat_group.add_member(user_1)
chat_group.add_member(user_3)
chat_private.add_member(user_2)
chat_private.add_member(user_1)
# Messages: two in the group chat, one in the private chat
msg_1 = Message(
    from_user=user_1, chat=chat_group, text='text1'
)
msg_2 = Message(
    from_user=user_3, chat=chat_group, text='text2'
)
msg_3 = Message(
    from_user=user_2, chat=chat_private, text='text2'
)
class ApiBaseTestCase(unittest.TestCase):
    """Shared base class: provides a Flask test client to every API test."""
    def setUp(self):
        # Fresh test client for each test method
        self.client = app.test_client()
class UserApiTestCase(ApiBaseTestCase):
    """Tests for the /api/users endpoints, with the service layer mocked."""
    def test_get_list(self):
        """Listing users returns the service result wrapped in 'users'."""
        mock_return_value = [
            UserService.to_json(user) for user in (
                user_1, user_2, user_3
            )
        ]
        with patch(
            'shmelegram.rest_api.user.UserService.get_list', autospec=True,
            return_value=mock_return_value
        ):
            response = self.client.get('/api/users?startwith=ruse')
        self.assertEqual(response.status_code, http.HTTPStatus.OK)
        self.assertEqual(response.json, {'users': mock_return_value})
    @parameterized.expand([(user_1, ), (user_2, ), (user_3, )])
    def test_get_user(self, mock_return_value: User):
        """Fetching a single user returns its JSON representation."""
        with patch(
            'shmelegram.rest_api.user.User.get', autospec=True,
            return_value=mock_return_value
        ):
            response = self.client.get('/api/users/1')
        self.assertEqual(response.status_code, http.HTTPStatus.OK)
        self.assertEqual(
            response.json, UserService.to_json(mock_return_value)
        )
    def test_get_user_failure(self):
        """An unknown user id yields 404 with the standard error payload."""
        with patch(
            'shmelegram.rest_api.user.User.get', autospec=True,
            side_effect=ValueError()
        ):
            response = self.client.get('/api/users/0')
        self.assertEqual(response.status_code, http.HTTPStatus.NOT_FOUND)
        self.assertEqual(
            response.json, user_api.UserApi.NOT_EXISTS_MESSAGE
        )
class ChatApiTestCase(ApiBaseTestCase):
    """Tests for the /api/chats endpoints, with the service layer mocked."""
    @parameterized.expand([(chat_group, ), (chat_private, )])
    def test_get_chat(self, mock_return_value: Chat):
        """Fetching a single chat returns its JSON representation."""
        with patch(
            'shmelegram.rest_api.chat.Chat.get', autospec=True,
            return_value=mock_return_value
        ):
            response = self.client.get('/api/chats/1')
        self.assertEqual(response.status_code, http.HTTPStatus.OK)
        self.assertEqual(
            response.json, ChatService.to_json(mock_return_value)
        )
    def test_get_chat_failure(self):
        """An unknown chat id yields 404 with the standard error payload."""
        with patch(
            'shmelegram.rest_api.chat.Chat.get', autospec=True,
            side_effect=ValueError()
        ):
            response = self.client.get('/api/chats/0')
        self.assertEqual(response.status_code, http.HTTPStatus.NOT_FOUND)
        self.assertEqual(
            response.json, chat_api.ChatApi.NOT_EXISTS_MESSAGE
        )
    def test_get_list(self):
        """Listing chats returns the service result wrapped in 'chats'."""
        mock_return_value = [ChatService.to_json(chat_group)]
        with patch(
            'shmelegram.rest_api.chat.ChatService.get_list', autospec=True,
            return_value=mock_return_value
        ):
            response = self.client.get('/api/chats?startwith=Gro')
        self.assertEqual(response.status_code, http.HTTPStatus.OK)
        self.assertEqual(response.json, {'chats': mock_return_value})
class MessageApiTestCase(ApiBaseTestCase):
    """Tests for the /api/messages endpoints, with the service layer mocked."""
    @parameterized.expand([(msg_1, ), (msg_2, ), (msg_3, )])
    def test_get_message(self, mock_return_value: Message):
        """Fetching a single message returns its JSON representation."""
        with patch(
            'shmelegram.rest_api.message.Message.get', autospec=True,
            return_value=mock_return_value
        ):
            response = self.client.get('/api/messages/1')
        self.assertEqual(response.status_code, http.HTTPStatus.OK)
        self.assertEqual(
            response.json, MessageService.to_json(mock_return_value)
        )
    def test_get_message_failure(self):
        """An unknown message id yields 404 with the standard error payload."""
        with patch(
            'shmelegram.rest_api.message.Message.get', autospec=True,
            side_effect=ValueError()
        ):
            response = self.client.get('/api/messages/0')
        self.assertEqual(response.status_code, http.HTTPStatus.NOT_FOUND)
        self.assertEqual(
            response.json, message_api.MessageApi.NOT_EXISTS_MESSAGE
        )
    @parameterized.expand([(chat_group, [msg_1, msg_2]), (chat_private, [msg_3])])
    def test_get_chat_messages(self, chat: Chat, mock_return_value: list[Message]):
        """Listing a chat's messages returns them wrapped in 'messages'."""
        mock_return_value = [
            MessageService.to_json(msg) for msg in mock_return_value
        ]
        with patch(
            'shmelegram.rest_api.message.ChatService.get_chat_messages',
            autospec=True, return_value=mock_return_value
        ):
            response = self.client.get('/api/messages/chat/1')
        self.assertEqual(response.status_code, http.HTTPStatus.OK)
        self.assertEqual(response.json, {'messages': mock_return_value})
| Hukyl/shmelegram | tests/test_api.py | test_api.py | py | 5,775 | python | en | code | 1 | github-code | 90 |
13128111659 | import sys
# Fast input: rebind the builtin name input to sys.stdin.readline
input= sys.stdin.readline
n=int(input())
answer=[]
# Remaining candidates 2..n; 1 is taken first
l=[i for i in range(2,n+1)]
# a[k] = step count attached to item k (1-indexed via the leading 0 pad)
a= [0]+list(map(int, input().split()))
answer.append(1)
index=0
m=a[1]
# Repeatedly advance 'index' by the current step and pop the next item.
# NOTE(review): the loop condition and bound checks use len(a), which never
# changes, while items are popped from l -- confirm this is intended.
while 1 != len(a):
    index+=m
    if len(a)<index:
        index=index//m
    b=l.pop(index)
    answer.append(b)
    m=a[index]
print(answer)
18543939849 | import numpy as np
# Read N items with positions x and values v on a loop of circumference C
# (competitive-programming input; presumably the "circular counter" style
# problem -- verify against the original statement)
N,C = list(map(int, input().split()))
XV = [list(map(int, input().split())) for _ in range(N)]
# Pad with a leading 0 so index 0 means "take nothing"
X = np.array([0] + [x for x,v in XV])
V = np.array([0] + [v for x,v in XV])
# Clockwise prefix sums of values; net gain when walking clockwise only
V_cw_cumsum = V.cumsum()
right_one_ways = V_cw_cumsum - X
# Counter-clockwise suffix sums; net gain when walking the other way only
V_ccw_cumsum = V[::-1].cumsum()[::-1]
left_one_ways = V_ccw_cumsum - (C-X)
def reduce_each(func, l):
    """Return a copy of ``l`` in which each element is the running fold of
    ``func`` over all elements up to and including that position."""
    out = l.copy()
    for i in range(1, len(out)):
        out[i] = func(out[i - 1], out[i])
    return out
# Best counter-clockwise continuation after each clockwise stop: running
# maxima of the one-way gains, shifted by one position
additional_left = np.roll(reduce_each(max, left_one_ways[::-1])[::-1], -1)
additional_left[-1] = 0
# Go clockwise first (paying the distance twice), then turn around
right_to_left = V_cw_cumsum - 2*X + additional_left
additional_right = np.roll(reduce_each(max, right_one_ways), 1)
additional_right[0] = 0
# Go counter-clockwise first, then turn around
left_to_right = V_ccw_cumsum - 2*(C-X) + additional_right
# Answer: best over all four strategies
print(max(max(right_one_ways),
          max(left_one_ways),
          max(right_to_left),
          max(left_to_right)))
18454349399 | s = int(input())
# Collatz-style iteration starting from s (read from stdin above): halve
# even terms, map odd terms to 3x+1, and stop at the first repeated value.
# NOTE(review): even terms use true division (/2), so stored values become
# floats; membership tests still compare equal numerically for small inputs.
lst = [s]
for i in range(10**6):
    if int(lst[-1]) & 1 == 0:
        next_a = lst[-1]/2
        # Seen before? The sequence has entered a cycle.
        if next_a in lst:
            m = i + 1
            break
        lst.append(next_a)
    else:
        next_aa = 3*lst[-1] +1
        if next_aa in lst:
            m = i + 1
            break
        lst.append(next_aa)
# 1-based index of the first term that equals an earlier term
print(m+1)
43514284403 | from django.urls import path
from myappF23 import views

# Application namespace, used in reverse() and {% url 'myappF23:...' %}
app_name = 'myappF23'

# Route table for the myappF23 application.
# (Removed an old, commented-out version of this table.)
urlpatterns = [
    path('', views.index, name='index'),
    path('about/', views.about, name='about'),
    path('<int:category_no>/', views.detail, name='detail'),
    path('instructorDetails/<int:instructor_id>/', views.instructor_courses, name='instructor_courses'),
    path('courses/', views.courses, name='courses'),
    path('courses/<int:course_id>/', views.coursedetail, name='coursedetail'),
    path('place_order/', views.place_order, name='place_order'),
    path('login/', views.user_login, name='login'),
    path('logout/', views.user_logout, name='logout'),
    path('myaccount/', views.myaccount, name='myaccount'),
    path('set/', views.set_test_cookie, name='set_test_cookie'),
    path('check/', views.check_test_cookie, name='check_test_cookie'),
    path('delete/', views.delete_test_cookie, name='delete_test_cookie'),
]
2179604531 | class Solution:
def mergeAlternately(self, word1: str, word2: str) -> str:
result = ""
for char1, char2 in zip(word1, word2):
result += char1 + char2
len1 = len(word1)
len2 = len(word2)
if len1 < len2:
result += word2[len1:len2]
elif len1 > len2:
result += word1[len2:len1]
return result
| wlyu1208/Leet-Code | 1769_Minimum_Number_of_Operations_to_Move_All_Balls_to_Each_Box/code.py | code.py | py | 411 | python | en | code | 1 | github-code | 90 |
18180636519 | #D
N=int(input())
X=str(input())
# CNT = number of 1-bits in the binary string X
CNT=0
for i in range(N):
    if X[i]=="1":
        CNT+=1
NUM=int(X,2)
# to0_cnt[k] = number of "replace k with k mod popcount(k)" steps needed to
# reach 0, precomputed for all k < N
to0_cnt=[0 for i in range(N)]
for i in range(1,N):
    ans=0
    num=i
    while True:
        # popcount of num
        cnt=0
        for j in bin(num)[2:]:
            if j=="1":
                cnt+=1
        if cnt==0:
            break
        r=num%cnt
        ans+=1
        if r==0:
            break
        num=r
    to0_cnt[i]=ans
# NUM modulo popcount+1 and popcount-1, for the two kinds of bit flips
# (guard against division by zero when CNT == 1)
R=[NUM%(CNT+1),NUM%(CNT-1) if CNT!=1 else 0]
# For each bit position, flip that bit and count steps to reach 0
for i in range(N):
    if X[i]=="0":
        # Flipping a 0 to 1: popcount becomes CNT+1
        cnt=CNT+1
        r=(R[0]+pow(2,N-i-1,cnt))%cnt
    else:
        # Flipping a 1 to 0: popcount becomes CNT-1
        cnt=CNT-1
        if NUM-pow(2,N-i-1)==0:
            print(0)
            continue
        r=(R[1]-pow(2,N-i-1,cnt))%cnt
    ans=1+to0_cnt[r]
    print(ans)
34842111844 | """ creates a binary search tree """
class Node:
    """A single binary-tree node holding a value and two child links."""

    def __init__(self, data):
        # Children start out absent; BST.insert wires them up later.
        self.left = None
        self.right = None
        self.data = data

    def __repr__(self):
        return f"{self.data}"
class BST:
    """Binary-search-tree operations over Node-like objects.

    The tree is represented by its root node; this class only bundles the
    insert and traversal algorithms. Traversals print each node rather
    than returning a list.
    """

    def __init__(self):
        pass

    def insert(self, root, node):
        """Insert `node` under `root`, keeping BST order (duplicates go right)."""
        # if there is no root
        if root is None:
            root = node
            print(f'{node.data} inserted at root')
        # if the data is less than root search for left node
        else:
            if root.data > node.data:
                if root.left is None:
                    root.left = node
                    print(f'{node.data} inserted at left')
                else:
                    self.insert(root.left, node)
            else:
                if root.right is None:
                    root.right = node
                    print(f'{node.data} inserted at right')
                else:
                    self.insert(root.right, node)

    def inorder(self, root):
        """Print nodes in sorted (left, root, right) order."""
        if root is None:
            return
        self.inorder(root.left)
        print(root)
        self.inorder(root.right)

    def postorder(self, root):
        """Print nodes in (left, right, root) order."""
        if root is None:
            return
        self.postorder(root.left)
        self.postorder(root.right)
        print(root)

    def preorder(self, root):
        """Print nodes in (root, left, right) order."""
        if root is None:
            return
        print(root)
        # BUG FIX: the original recursed into postorder() here, so the root
        # was printed first but the subtrees were traversed post-order.
        self.preorder(root.left)
        self.preorder(root.right)
if __name__ == "__main__":
    # Build a small demo tree rooted at n1 and print it with each traversal.
    n1 = Node(1)
    n2 = Node(2)
    n3 = Node(3)
    n4 = Node(4)
    n5 = Node(5)
    # print(n1)
    bst = BST()
    # All values are >= 1, so every insert lands on the right spine.
    bst.insert(n1, n2)
    bst.insert(n1, n3)
    bst.insert(n1, n4)
    bst.insert(n1, n5)
    bst.inorder(n1)
    print("*" * 5)
    bst.postorder(n1)
    print("*" * 5)
    bst.preorder(n1)
| ds-praveenkumar/python-tutorial | datastructures/bst.py | bst.py | py | 1,846 | python | en | code | 0 | github-code | 90 |
class Solution:
    def merge(self, intervals):
        """Merge all overlapping intervals and return the merged list.

        O(n log n) overall, dominated by the initial sort on start points.
        Note: sorts `intervals` in place and reuses its inner lists.
        """
        intervals.sort(key=lambda interval: interval[0])
        merged = [intervals[0]]
        for start, end in intervals[1:]:
            previous = merged[-1]
            if start <= previous[1]:
                # Overlaps the last merged interval: extend it in place.
                previous[1] = max(previous[1], end)
            else:
                merged.append([start, end])
        return merged
return output
# Quick smoke run: prints [[1, 6], [8, 10], [15, 18]].
sol = Solution()
print(sol.merge(intervals = [[1,3],[2,6],[8,10],[15,18]]))
6126119750 | from flask import Flask, render_template, request, redirect, url_for
app = Flask(__name__)
# Module-level store for the last signed-in e-mail address.
# NOTE(review): a global is shared across all clients/requests; Flask's
# session would be the request-safe alternative.
email=""
@app.route("/")
def index():
    """Render the landing page."""
    return render_template("index.html")
@app.route("/signin", methods=['POST', 'GET'])
def signin():
    """Sign-in form.

    On POST, record the submitted e-mail in the module-level `email` and
    redirect to /home; on GET or when a form field is missing, re-render
    the sign-in page.
    """
    if request.method == "POST":
        try:
            global email
            email = request.form["email"]
            password = request.form["password"]
            # password is read only to validate its presence; it is not
            # checked against anything yet.
            return redirect(url_for("home"), code=302)
        # BUG FIX: the original used a bare `except:` that swallowed every
        # error (even SystemExit/KeyboardInterrupt). Missing form fields
        # raise Werkzeug's BadRequestKeyError, a KeyError subclass, which
        # is the only recoverable failure here.
        except KeyError:
            return render_template("signin.html")
    # GET falls through here; the original's elif/else branches were
    # identical, so a single fallthrough return is equivalent.
    return render_template("signin.html")
@app.route("/home")
def home():
    """Post-sign-in page; shows the globally stored e-mail as the username."""
    # NOTE(review): `email` is a module-level global, so concurrent users
    # can see each other's address.
    print(f"email is {email}")
    return render_template("home.html", username=email)
@app.route("/register")
def register():
    """Placeholder endpoint; registration is not implemented yet."""
    return "you are now attempting to register a new user"
# NOTE(review): debug=True combined with host 0.0.0.0 exposes the Werkzeug
# debugger to the whole network -- disable debug outside local development.
app.run(host="0.0.0.0", debug=True)
45134205093 | import os
def create_data(data_dir_path, data_file_path):
    """Ensure the data directory and the data file inside it both exist.

    Creates the directory (and parents) if missing, then creates an empty
    data file if missing. Idempotent: existing files are left untouched.
    """
    # BUG FIX: the original called open(data_dir_path, 'w') -- i.e. tried
    # to open the freshly created *directory* as a file, which raises
    # IsADirectoryError -- and only created the data file when the
    # directory already existed.
    if not os.path.exists(data_dir_path):
        os.makedirs(data_dir_path)
    if not os.path.exists(data_file_path):
        open(data_file_path, 'w').close()
def mess_menue(full_course, data_dir_path):
    """Print the main menu: current recipe count and the available actions."""
    menu_lines = (
        " ===== You are in the main menue. =====",
        "# You currently have {} recipe(s) register.".format(len(full_course)),
        "# What would you like to do?",
        " - type 1 if you want to pass an Order.",
        " - type 2 if you want to update from the json file in {}.".format(data_dir_path),
        " - type 3 if you want to print the list of recipe.",
        ' - type 0 if you want to leave.',
        "\n",
    )
    for line in menu_lines:
        print(line)
def wrong_choice_loop(choice, exclude_list, error_message=None, input_message=None):
    """Keep prompting until `choice` is one of `exclude_list`; return it.

    `error_message` is printed (when it is a string) before each re-prompt;
    `input_message` is used as the input prompt when it is a string,
    otherwise a generic 'Enter input' prompt is shown.
    """
    while choice not in exclude_list:
        if isinstance(error_message, str):
            print(error_message)
        prompt = input_message if isinstance(input_message, str) else 'Enter input'
        choice = input(prompt)
    return choice
def lists_match(list1, list2):
    """True when every element of list2 appears in list1.

    Note: duplicates in list2 make this False, because the comparison is
    between len(list2) and the size of the (de-duplicated) intersection.
    """
    common = set(list2).intersection(list1)
    return len(common) == len(list2)
41586807948 | from jira import JIRA
from time import sleep
import datetime
from sys import exit
from tkinter.filedialog import askopenfilename, asksaveasfilename
import tkinter as tk
from tkinter import *
from openpyxl import Workbook
from openpyxl.styles import Font, PatternFill
import os
import json
# This should be your JIRA instance URL, if you don't want to use Proxy
# os.environ['NO_PROXY'] = 'jira.com'
# Excel configs
red_font = Font(color='00FF0000', italic=True)
header_font = Font(color='00000000', bold=True)
header_fill = PatternFill(fill_type="solid", fgColor="8db5e2")
hyperlink = Font(underline='single', color='0563C1')
# Default Excel columns configuration
# Each entry maps a logical field to its column position ('index'),
# visibility flag ('visible': 1 keeps the column, 0 removes it) and the
# header text written to the sheet ('name').
excel_columns = {'ID': {'index': 0, 'visible': 1, 'name': 'ID'},
                 'Type': {'index': 1, 'visible': 1, 'name': 'Type'},
                 'Summary': {'index': 2, 'visible': 1, 'name': 'Summary'},
                 'Components': {'index': 3, 'visible': 1, 'name': 'Component/s'},
                 'Status': {'index': 4, 'visible': 1, 'name': 'Status'},
                 'fixVersions': {'index': 5, 'visible': 1, 'name': 'Fix Versions'},
                 'Reporter': {'index': 6, 'visible': 1, 'name': 'Reporter'},
                 'Assignee': {'index': 7, 'visible': 1, 'name': 'Assignee'},
                 'Labels': {'index': 8, 'visible': 1, 'name': 'Labels'},
                 'Due Date': {'index': 9, 'visible': 1, 'name': 'Due Date'},
                 'Parent': {'index': 10, 'visible': 1, 'name': 'Parent'},
                 'Priority': {'index': 11, 'visible': 1, 'name': 'Priority'},
                 'Created': {'index': 12, 'visible': 1, 'name': 'Created'},
                 'Updated': {'index': 13, 'visible': 1, 'name': 'Updated'},
                 'Linked Issues': {'index': 14, 'visible': 1, 'name': 'Linked Issues'},
                 'Description': {'index': 15, 'visible': 1, 'name': 'Description'},
                 }
# For Aggregated Excel Sheet, if applicable
aggregated_sheet = {'name': 'Aggregated',
                    'visible': 0
                    }
# Program configs - defaults
override_checkbox = 0
config_file = 'config.json'
report_name = 'JIRA Export.xlsx'
zoom_scale = 90
jira_sheet_title = 'Items from JIRA'
jira_base_url = 'https://issuetracking.jira.com/jira'
jql = 'issuetype = Story'
# Creation of Excel in-memory
wb = Workbook()
# Name of the reporting Excel with creation time in UTC timezone
time_format = "%Y-%m-%dT%H:%M:%S"
now = datetime.datetime.strptime(str(datetime.datetime.utcnow().isoformat()).split(".", 1)[0], time_format)
default_output_excel = report_name.split('.xls')[0] + '_' + str(now).replace(':', '-').replace(' ', '_') + '_UTC.xlsx'
output_excel = default_output_excel
# Position of the JIRA key inside each per-issue row list.
JIRAs_column = 0
# Formatting functions
def get_str_from_lst(lst, sep=','):
    """Join the non-empty items of `lst` into one string for Excel export.

    Each item is stringified and stripped, empty items ('') are skipped,
    and the pieces are joined with `sep` followed by a space.
    """
    # The original built the string with repeated `+=` and then chopped the
    # last two characters (st[0:-2]) -- which silently assumed `sep` is
    # exactly one character and was quadratic. A single join fixes both and
    # handles the empty-list case for free.
    parts = [str(l).strip() for l in lst if l != '']
    return (sep + ' ').join(parts)
def get_visible_columns():
    """Return the indices of all columns whose config marks them visible."""
    return [v['index'] for v in excel_columns.values() if v['visible'] == 1]
# Working with Excel files
def select_output_file():
    """Ask the user (save dialog) where to write the report and sync the UI field."""
    global output_excel
    dir_name = os.getcwd()
    output_excel = asksaveasfilename(initialdir=dir_name, title="Select file", filetypes=(("JIRA list with details", ".xlsx"), ("all files", "*.*")))
    # Force the .xlsx extension so openpyxl can save the workbook.
    if not output_excel.endswith('.xlsx'):
        output_excel += '.xlsx'
    out_xls.delete(0, END)
    out_xls.insert(0, output_excel)
def create_excel_sheet(sheet_data, title):
    """Write `sheet_data` (a list of row lists) into a new worksheet `title`.

    Row 1 is styled as a header; ID-column values in data rows become
    hyperlinks to the corresponding JIRA issue. Values openpyxl rejects
    are re-written with unsupported characters replaced by '?'.
    """
    global jira_base_url
    wb.create_sheet(title)
    ws = wb.get_sheet_by_name(title)
    start_column = 1
    start_row = 1
    visible_cols = get_visible_columns()
    # Creating Excel sheet based on data
    for i in range(len(sheet_data)):
        for y in range(len(sheet_data[i])):
            if y in visible_cols:
                # Header row (start_row == 1) gets no hyperlink.
                if (y == excel_columns['ID']['index'] and start_row != 1 and sheet_data[i][y] is not None and sheet_data[i][y] != ''):
                    ws.cell(row=start_row, column=start_column+y).hyperlink = jira_base_url + '/browse/' + sheet_data[i][y]
                    ws.cell(row=start_row, column=start_column+y).font = hyperlink
                try:
                    ws.cell(row=start_row, column=start_column+y).value = sheet_data[i][y]
                except:
                    # openpyxl raised on this value: keep only alphanumerics
                    # and common punctuation, replace everything else by '?'.
                    converted_value = ''
                    for letter in sheet_data[i][y]:
                        if letter.isalpha() or letter.isnumeric() or letter in [' ', ',', '.', '&', ':', ';', '"', "'", '/']:
                            converted_value += letter
                        else:
                            converted_value += '?'
                    ws.cell(row=start_row, column=start_column+y).value = converted_value
        start_row += 1
    # Style the header row across all populated columns.
    for y in range(1, ws.max_column+1):
        ws.cell(row=1, column=y).fill = header_fill
        ws.cell(row=1, column=y).font = header_font
    ws.title = title
def remove_columns():
    """Drop hidden columns from every worksheet, auto-size column widths
    (capped at 80 characters), and enable an auto-filter on each sheet."""
    start_column = 1
    for ws in wb.worksheets:
        # Removing columns
        cols_to_remove = []
        for v in excel_columns.values():
            if v['visible'] == 0:
                cols_to_remove.append(start_column+v['index'])
        # Delete right-to-left so earlier deletions don't shift later indices.
        cols_to_remove.sort(reverse=True)
        for z in cols_to_remove:
            ws.delete_cols(z)
        # Updating width of columns
        for column_cells in ws.columns:
            length = max(len(str(cell.value)) for cell in column_cells)
            if length > 80:
                ws.column_dimensions[column_cells[0].column_letter].width = 80
            else:
                ws.column_dimensions[column_cells[0].column_letter].width = length + 4
        ws.auto_filter.ref = ws.dimensions
def save_file():
    """Remove empty sheets, save the workbook (with zoom applied) and exit.

    Falls back to a timestamped name derived from `input_excel` when no
    output name was chosen. Note: terminates the process via exit() in
    both the success and the failure path.
    """
    global input_excel, output_excel
    # Saving Excel file and removing not required sheets
    sheet_names = wb.sheetnames
    for s in sheet_names:
        ws = wb.get_sheet_by_name(s)
        # 'A1:A1' means the sheet was never written to.
        if ws.dimensions == 'A1:A1':
            wb.remove_sheet(wb[s])
    try:
        if output_excel == '':
            time_format = "%Y-%m-%dT%H:%M:%S"
            now = datetime.datetime.strptime(str(datetime.datetime.utcnow().isoformat()).split(".", 1)[0], time_format)
            output_excel = input_excel.split('.xls')[0] + '_' + str(now).replace(':', '-').replace(' ', '_') + '_UTC.xlsx'
        set_zoom(output_excel)
        print("File \"", output_excel, "\" successfully generated.", sep='')
        print()
        sleep(2)
        exit()
    except Exception as e:
        print()
        print("ERROR:", e)
        os.system("pause")
        exit()
def set_zoom(file, zoom_scale=90):
    """Apply `zoom_scale` to every sheet, then save the workbook to `file`."""
    for ws in wb.worksheets:
        ws.sheet_view.zoomScale = zoom_scale
    wb.save(file)
def get_columns():
    """Return the column header names ordered by their configured 'index'."""
    headers = [''] * len(excel_columns)
    for column in excel_columns.values():
        headers[column['index']] = column['name']
    return headers
def get_issues_by_jql(jql):
    """This function returns list of JIRA keys for provided list of JIRA JQL queries"""
    auth_jira = JIRA(jira_base_url)
    issues, items = ([], [])
    start_idx, block_num, block_size = (0, 0, 100)
    # Page through the results 100 at a time until an empty page comes back.
    while True:
        start_idx = block_num * block_size
        tmp_issues = auth_jira.search_issues(jql_str=jql, startAt=start_idx, maxResults=block_size, fields='key, issuetype')
        if len(tmp_issues) == 0:
            # Retrieve issues until there are no more to come
            break
        issues.extend(tmp_issues)
        block_num += 1
    # De-duplicate keys and wrap each in a single-element list, matching the
    # row shape main_program() expects (key at index JIRAs_column).
    items = list(set([i.key for i in issues]))
    items = [[i] for i in items]
    return items
def main_program():
    """Read the UI fields, fetch issues for the JQL, download per-issue
    metadata from JIRA, build the Excel sheets and save the report.

    Exits the process (after a 'pause') when the JIRA URL/JQL is missing
    or the JIRA connection fails.
    """
    global output_excel, issues, input_excel, report_name, jql, aggregated_sheet, jira_base_url
    output_excel = out_xls.get().strip()
    if not output_excel.endswith('.xlsx'):
        output_excel += '.xlsx'
    report_name = output_excel
    jira_base_url = jira_instance.get().strip()
    jql = j_query.get().strip()
    config_file = conf.get().strip().split('.json')[0] + '.json'
    # Persist the current settings first if the checkbox was ticked.
    if override_checkbox == 1:
        save_config(configfile=config_file)
    master.destroy()
    if jira_base_url == '':
        print("JIRA URL has not been entered. Program stopped.")
        os.system("pause")
        exit()
    try:
        jira = JIRA(jira_base_url)
    except Exception as er:
        print("Exception with JIRA connection: {}".format(er))
        print("Program stopped.")
        os.system("pause")
        exit()
    issues = {}
    if jql != '':
        issues[jira_sheet_title] = get_issues_by_jql(jql)
    else:
        print("JQL has not been entered. Program stopped.")
        os.system("pause")
        exit()
    # Each sheet's row list starts with the header row.
    updated_issues = {}
    for k, v in issues.items():
        updated_issues[k] = []
        try:
            updated_issues[k].append(get_columns())
        except:
            pass
    print("Metadata for Issues is downloading from JIRA...")
    for k, v in issues.items():
        n = 0
        print("Total JIRA issues for '{}' sheet to be processed: {}".format(k, len(v)))
        for i in range(len(v)):
            n += 1
            details = ['' for i in range(len(excel_columns))]
            if v[i][JIRAs_column] != '':
                details[excel_columns['ID']['index']] = v[i][JIRAs_column]
                try:
                    # One extra API round-trip per issue to pull full fields.
                    issue = jira.issue(v[i][JIRAs_column])
                    details[excel_columns['Type']['index']] = issue.fields.issuetype.name
                    details[excel_columns['Summary']['index']] = issue.fields.summary
                    details[excel_columns['Description']['index']] = str('' if issue.fields.description is None else issue.fields.description.replace('\\\\', '_x000D_'))
                    # Components update
                    components = get_str_from_lst([i.name for i in issue.fields.components])
                    details[excel_columns['Components']['index']] = components
                    # Labels update
                    labels = get_str_from_lst([i for i in issue.fields.labels])
                    details[excel_columns['Labels']['index']] = labels
                    details[excel_columns['Status']['index']] = issue.fields.status.name
                    details[excel_columns['fixVersions']['index']] = get_str_from_lst([i.name for i in issue.fields.fixVersions])
                    details[excel_columns['Reporter']['index']] = get_str_from_lst('' if issue.fields.reporter is None else issue.fields.reporter.displayName for i in range(1))
                    details[excel_columns['Assignee']['index']] = get_str_from_lst('' if issue.fields.assignee is None else issue.fields.assignee.displayName for i in range(1))
                    details[excel_columns['Due Date']['index']] = issue.fields.duedate
                    # Not every issue type has a parent field.
                    try:
                        parent = get_str_from_lst('' if issue.fields.parent is None else issue.fields.parent)
                    except:
                        parent = ''
                    details[excel_columns['Parent']['index']] = parent
                    details[excel_columns['Priority']['index']] = issue.fields.priority.name
                    details[excel_columns['Created']['index']] = issue.fields.created.split('T')[0]
                    details[excel_columns['Updated']['index']] = issue.fields.updated.split('T')[0]
                    l = []
                    for link in issue.fields.issuelinks:
                        if hasattr(link, "outwardIssue"):
                            l.append(link.type.outward + ' ' + link.outwardIssue.key)
                        if hasattr(link, "inwardIssue"):
                            l.append(link.type.inward + ' ' + link.inwardIssue.key)
                    details[excel_columns['Linked Issues']['index']] = get_str_from_lst(l)
                    # Extend list for Excel export
                    updated_issues[k].append(details)
                except Exception as e:
                    print("Exception '{}' for retrieving JIRA details for JIRA_ID: {}".format(e, v[i][JIRAs_column]))
            if n % 100 == 0:
                print("Processed {} issues out of {} so far.".format(n, len(v)))
    print("Metadata for issues was successfully downloaded from JIRA.")
    print()
    # First sheet - all data aggregated
    all_issues = [get_columns()]
    unique_issues = []
    duplicates = set()
    for k, v in updated_issues.items():
        dd = []
        for i in range(1, len(v)):
            if v[i][excel_columns['ID']['index']] not in unique_issues:
                unique_issues.append(v[i][excel_columns['ID']['index']])
                temp = v[i]
                temp.append(k)
                dd.append(temp)
            duplicates.add((v[i][excel_columns['ID']['index']], k))
        all_issues.extend(dd)
    if aggregated_sheet['visible'] == 1:
        create_excel_sheet(all_issues, aggregated_sheet['name'])
    # Existing sheets - placed after first one
    for k, v in updated_issues.items():
        create_excel_sheet(v, k)
    remove_columns()
    # Saving Excel file
    save_file()
def load_config(configfile=config_file):
    """Load settings from a JSON config file into the module globals.

    Creates a default config file when the given one is missing, or (if
    the override checkbox is set) when it fails to parse.
    """
    global jira_base_url, excel_columns, aggregated_sheet, jira_sheet_title, report_name, jql, output_excel
    if os.path.exists(configfile) is True:
        try:
            with open(configfile) as json_data_file:
                data = json.load(json_data_file)
            # Only known keys are applied; unknown keys are ignored.
            for k, v in data.items():
                if k == 'jira_base_url':
                    jira_base_url = v
                elif k == 'jql':
                    jql = v
                elif k == 'excel_columns':
                    excel_columns = v
                # elif k == 'aggregated_sheet': #TODO
                #     aggregated_sheet = v
                elif k == 'jira_sheet_title':
                    jira_sheet_title = v
                elif k == 'report_name':
                    report_name = v
                    output_excel = v
            print("Configuration file '{}' has been successfully loaded.".format(configfile))
        except Exception as er:
            print("Failed to load file '{}', due to Exception: '{}'".format(configfile, er))
            if override_checkbox == 1:
                print("Configuration file is corrupted. Default '{}' would be created instead.".format(configfile))
                print()
                save_config()
    else:
        print("Config File not found. Default '{}' would be created.".format(configfile))
        print()
        save_config()
def save_config(configfile=config_file):
    """Serialize the current settings to a JSON config file.

    When called with the degenerate name '.json' a timestamped
    'config_<UTC>.json' name is generated instead. Failures are printed,
    not raised.
    """
    data = {'jira_base_url': jira_base_url,
            'jql': jql,
            'jira_sheet_title': jira_sheet_title,
            # 'aggregated_sheet': aggregated_sheet,
            'report_name': report_name,
            'zoom_scale': zoom_scale,
            'excel_columns': excel_columns,
            }
    if configfile == '.json':
        time_format = "%Y-%m-%dT%H:%M:%S"
        now = datetime.datetime.strptime(str(datetime.datetime.utcnow().isoformat()).split(".", 1)[0], time_format)
        configfile = 'config' + '_' + str(now).replace(':', '-').replace(' ', '_') + '_UTC.json'
    try:
        with open(configfile, 'w') as outfile:
            json.dump(data, outfile)
    except PermissionError as er:
        print("ERROR: File '{}' has been opened for editing and can't be saved. Exception: {}".format(configfile, er))
        return
    except Exception as ex:
        print("ERROR: '{}' can't be saved. Exception: {}".format(configfile, ex))
        return
    print("Config file '{}' has been created.".format(configfile))
    print()
def add_aggrigate_list(*args):
    """Tk trace callback: mirror the 'aggregated sheet' checkbox into the config."""
    global aggregated_sheet
    aggregated_sheet['visible'] = add_aggregated.get()
def change_override(*args):
    """Tk trace callback: mirror the 'save config' checkbox into a global flag."""
    global override_checkbox
    override_checkbox = override.get()
# Open File dialog to open config file in the same location and refresh UI values
def open_file():
    """Let the user pick a JSON config file, load it, and refresh the UI fields."""
    global config_file, input_excel, report_name, aggregated_sheet
    dir_name = os.getcwd()
    filename = askopenfilename(initialdir=dir_name, title="Select file", filetypes=(("Configuration File", "*.json"),
                                                                                    ("all files", "*.*")))
    if filename != '':
        config_file = filename
        conf.delete(0, END)
        conf.insert(0, config_file.split('/')[-1])
        load_config(configfile=config_file)
        # Push the freshly loaded values back into the entry widgets.
        j_query.delete(0, END)
        j_query.insert(0, jql)
        jira_instance.delete(0, END)
        jira_instance.insert(0, jira_base_url)
        out_xls.delete(0, END)
        out_xls.insert(0, report_name)
        add_aggregated.set(aggregated_sheet['visible'])
    else:
        print("No config file was found.")
# ------------------ MAIN PROGRAM -----------------------------------
# load_config()
# Build the tkinter form: three entry fields (URL, JQL, report file), a
# browse button, a 'save config' checkbox + file name, and Quit/Generate
# buttons. 'Generate Report' runs main_program().
print("Program started. Please DO NOT CLOSE this window...")
print()
master = tk.Tk()
Title = master.title("JIRA Export Tool")
tk.Label(master, text="Please enter JIRA instance URL and required JQL for export. Specify Report File Name.", font=("Helvetica", 10)).grid(row=0, column=0, pady=5, columnspan=3)
tk.Label(master, text="JIRA Instance URL:").grid(row=2, column=0, pady=2, padx=3)
tk.Label(master, text="JQL for Export:", font=("Helvetica", 9)).grid(row=3, column=0, pady=2, padx=3)
tk.Label(master, text="Report File:").grid(row=4, column=0, pady=2, padx=3)
jira_instance = tk.Entry(master, width=70)
jira_instance.insert(END, jira_base_url)
jira_instance.grid(row=2, column=1, padx=0, sticky=W, columnspan=2)
j_query = tk.Entry(master, width=70)
j_query.insert(END, jql)
j_query.grid(row=3, column=1, pady=5, columnspan=2, sticky=W)
out_xls = tk.Entry(master, width=50)
out_xls.insert(END, output_excel)
out_xls.grid(row=4, column=1, padx=0, sticky=W)
tk.Button(master, text='Browse', command=select_output_file, width=15).grid(row=4, column=2, pady=3, padx=8)
tk.Label(master, text="________________________________________________________________________________________________").grid(row=5, columnspan=3)
add_aggregated = IntVar(value=aggregated_sheet['visible'])
# Checkbutton(master, text="Add Aggregated list as the first one", font=("Helvetica", 9, "italic"), variable=add_aggregated).grid(row=6, sticky=W, padx=20, columnspan=3, pady=0)
# add_aggregated.trace('w', add_aggrigate_list)
override = IntVar()
Checkbutton(master, text="Save current values in the file for future use:", font=("Helvetica", 9, "italic"), variable=override).grid(row=7, sticky=W, padx=20, columnspan=2, pady=0)
override.trace('w', change_override)
conf = tk.Entry(master, width=20)
conf.insert(END, config_file)
conf.grid(row=7, column=0, padx=135, columnspan=3, sticky=E)
tk.Button(master, text='Reload configs', command=open_file, width=15).grid(row=7, column=2, pady=0, padx=15, columnspan=1, stick=W)
tk.Button(master, text='Quit', font=("Helvetica", 9, "bold"), command=master.quit, width=20, heigh=2).grid(row=8, column=0, pady=5, padx=60, columnspan=2, sticky=W)
tk.Button(master, text='Generate Report', font=("Helvetica", 9, "bold"), state='active', command=main_program, width=20, heigh=2).grid(row=8, column=1, pady=10, padx=60, columnspan=2, sticky=E)
tk.mainloop()
| delsakov/JIRA_Tools | Export From JIRA.py | Export From JIRA.py | py | 19,410 | python | en | code | 3 | github-code | 90 |
15660278599 | """ Retrieve slp, uwnd and uflx data for MERRA via ftp from pre-defined
lists of urls. urls were created with the tool at:
http://disc.sci.gsfc.nasa.gov/daac-bin/FTPSubset.pl?LOOKUPID_List=MATMNXSLV
Files arrive in monthly chunks and are concatenated with cdo
.. moduleauthor:: Neil Swart <neil.swart@ec.gc.ca>
"""
import subprocess
import glob
import os
import cdo; cdo = cdo.Cdo()
import mv_to_dest
def get_merra_data(destination='.', src_path='./'):
    """Download MERRA slp/u10m/uflx monthly files via per-variable wget URL
    lists, time-merge each set with cdo, rename taux -> uflx, and move the
    merged files to `destination`.

    Expects URL list files named 'wget_merra_<var>' under `src_path` and
    works in the current directory (downloads and intermediates land there).
    """
    varnames = ['slp', 'u10m', 'uflx']
    for var in varnames:
        wget_file = os.path.join(src_path, 'wget_merra_' + var)
        subprocess.Popen(['wget', '--content-disposition', '-i', wget_file]).wait()
        # time-merge the data
        subprocess.Popen(['cdo', 'mergetime', 'MERRA*',
                          'MERRA_' + var + '.mon.mean.nc']).wait()
        # Remove input files
        files = glob.glob('*SUB.nc')
        for f in files:
            os.remove(f)
    # Change some variable names
    cdo.chname('taux,uflx', input='MERRA_uflx.mon.mean.nc', output='tmp1.nc')
    os.rename('tmp1.nc', 'MERRA_uflx.mon.mean.nc')
    # move to destination
    files = glob.glob('MERRA*.mon.mean.nc')
    mv_to_dest.mv_to_dest(destination, *files)
if __name__=='__main__':
    # Default destination used when run as a script.
    get_merra_data(destination='../data/')
| swartn/sam-vs-jet-paper | data_retrieval/merra/get_merra_data.py | get_merra_data.py | py | 1,373 | python | en | code | 3 | github-code | 90 |
72040022696 | # -*- coding: utf-8 -*-
"""
function top_binarizer() was previously used to create the binary matrix `bmc_mat`.
top_binarizer() can be built upon to make a more realistic binary matrix.
because currently it is based on rank of market cap.
in reality, however, it is more complex.
for example, nr 11 will have to beat nr 10 for three consecutive months in order to be a constituent.
"""
def top_binarizer(row, p):
"""
Return 1 if a coin's mcap is within top p
return 0 otherwise
"""
# convert to pandas series. maybe not needed but safer
row = pd.Series(row)
# sort row
row_sorted = row.sort_values(ascending=False)
# find threshold value (e.g 11th value)
threshold = row_sorted[p]
# give a zero if a coin's mcap is less than threshold:
binary = (row.values > threshold).astype('int')
return binary
#threshold = row_sorted[p]
#binary = (row.values > threshold).astype('int')
| jacolind/crinfu | py/function-top_binarizer.py | function-top_binarizer.py | py | 956 | python | en | code | 1 | github-code | 90 |
21655010852 | import requests
from quart import Quart, request
import json
import copy
from datetime import datetime
import moment
import asyncio
import os
from time import sleep
import uuid
from quart_cors import cors
import python_pachyderm
#////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#// Configuration
#////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
# NOTE(review): both values below are blank placeholders; Client creation
# below will fail until pachd_address is filled in.
pachd_address = "" # usually it'll be the one that ends with 30650
dblend_image = "" # a container image that has all the dependencies for distributed blender (psst! new york times maintains one!)
#////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#// Initializations
#////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
app = Quart(__name__)
app = cors(app, allow_origin="*")
# Cap request bodies at 100 MB (the uploaded .blend/.zip files).
app.config["MAX_CONTENT_LENGTH"] = 100 * 1000 * 1000
client = python_pachyderm.Client.new_from_pachd_address(pachd_address)
@app.route("/uploadBlend", methods=["POST"])
async def uploadBlend():
    """Accept .blend uploads; per file, mint a job UUID, push the blend into
    a new Pachyderm repo and create the whole pipeline chain
    (split -> render -> merge -> enczip/watermark -> unenczip -> megazip).
    Returns the list of created job ids as JSON."""
    jobs = []
    for name, file in (await request.files).items():
        print("Filename: " + name)
        # Drop the original extension; everything is keyed off a new UUID.
        t = name.split(".")
        t.pop()
        prejobfilename = "".join(t) + ".blend"
        new_job_id = str(uuid.uuid4())
        jobs.append(new_job_id)
        new_job_filename = new_job_id + ".blend"
        print(new_job_id)
        actual_file = file.read()
        print("Size: " + str(len(actual_file)))
        # Stage the upload on local disk, push it into the <job>-blends
        # repo, then remove the local copy.
        with open("/var/www/html/blends/" + new_job_filename, "wb") as reader:
            reader.write(actual_file)
        blends = new_job_id + "-blends"
        splitter = new_job_id + "-splitter"
        renderer = new_job_id + "-renderer"
        merger = new_job_id + "-merger"
        watermarker = new_job_id + "-watermarker"
        unenczipper = new_job_id + "-unenczipper"
        enczipper = new_job_id + "-enczipper"
        megazipper = new_job_id + "-megazipper"
        client.create_repo(blends)
        with client.commit(blends, "master") as commit:
            client.put_file_bytes(
                commit,
                "/" + new_job_id,
                open("/var/www/html/blends/" + new_job_filename, "rb"),
            )
        os.remove("/var/www/html/blends/" + new_job_filename)
        client.create_repo(splitter)
        client.create_pipeline(
            splitter,
            transform=python_pachyderm.Transform(
                cmd=["python3", "/brorender.py", "split", blends],
                image=dblend_image,
                image_pull_secrets=["laneonekey"],
            ),
            input=python_pachyderm.Input(
                pfs=python_pachyderm.PFSInput(glob="/*", repo=blends)
            ),
        )
        client.create_repo(renderer)
        # Renderer crosses the blend with every split chunk and requests
        # 2 CPUs per worker (coefficient-based parallelism).
        client.create_pipeline(
            renderer,
            transform=python_pachyderm.Transform(
                cmd=["python3", "/brorender.py", "render", blends, splitter],
                image=dblend_image,
                image_pull_secrets=["laneonekey"],
            ),
            input=python_pachyderm.Input(
                cross=[
                    python_pachyderm.Input(
                        pfs=python_pachyderm.PFSInput(glob="/*", repo=blends)
                    ),
                    python_pachyderm.Input(
                        pfs=python_pachyderm.PFSInput(glob="/*/*", repo=splitter)
                    ),
                ]
            ),
            parallelism_spec=python_pachyderm.ParallelismSpec(coefficient=1),
            resource_requests=python_pachyderm.ResourceSpec(cpu=2),
        )
        client.create_repo(merger)
        client.create_pipeline(
            merger,
            transform=python_pachyderm.Transform(
                cmd=["python3", "/brorender.py", "merge", renderer],
                image=dblend_image,
                image_pull_secrets=["laneonekey"],
            ),
            input=python_pachyderm.Input(
                pfs=python_pachyderm.PFSInput(glob="/*/", repo=renderer)
            ),
        )
        client.create_repo(enczipper)
        client.create_pipeline(
            enczipper,
            transform=python_pachyderm.Transform(
                cmd=["python3", "/brorender.py", "enczip", merger],
                image=dblend_image,
                image_pull_secrets=["laneonekey"],
            ),
            input=python_pachyderm.Input(
                pfs=python_pachyderm.PFSInput(glob="/*/", repo=merger)
            ),
        )
        client.create_repo(watermarker)
        client.create_pipeline(
            watermarker,
            transform=python_pachyderm.Transform(
                cmd=["python3", "/brorender.py", "watermark", merger],
                image=dblend_image,
                image_pull_secrets=["laneonekey"],
            ),
            input=python_pachyderm.Input(
                pfs=python_pachyderm.PFSInput(glob="/*/", repo=merger)
            ),
        )
        client.create_repo(unenczipper)
        client.create_pipeline(
            unenczipper,
            transform=python_pachyderm.Transform(
                cmd=["python3", "/brorender.py", "unenczip", watermarker],
                image=dblend_image,
                image_pull_secrets=["laneonekey"],
            ),
            input=python_pachyderm.Input(
                pfs=python_pachyderm.PFSInput(glob="/*/", repo=watermarker)
            ),
        )
        client.create_repo(megazipper)
        # Megazipper joins the encrypted and the watermarked/unencrypted
        # zips of the same job into one final artifact.
        client.create_pipeline(
            megazipper,
            transform=python_pachyderm.Transform(
                cmd=["python3", "/brorender.py", "megazip", enczipper, unenczipper],
                image=dblend_image,
                image_pull_secrets=["laneonekey"],
            ),
            input=python_pachyderm.Input(
                join=[
                    python_pachyderm.Input(
                        pfs=python_pachyderm.PFSInput(
                            glob="/*", repo=unenczipper, branch="master"
                        )
                    ),
                    python_pachyderm.Input(
                        pfs=python_pachyderm.PFSInput(
                            glob="/*", repo=enczipper, branch="master"
                        )
                    ),
                ]
            ),
        )
    # return "Ack!"
    return json.dumps(jobs)
@app.route("/uploadZip", methods=["POST"])
async def uploadZip():
    """Cleanup endpoint (despite the name): the uploaded file's base name is
    treated as a job id; the zip is saved to disk and every Pachyderm
    pipeline/repo created for that job is deleted."""
    for name, file in (await request.files).items():
        print("Filename: " + name)
        t = name.split(".")
        t.pop()
        prejobid = "".join(t)
        prejobfilename = "".join(t) + ".zip"
        actual_file = file.read()
        print("Size: " + str(len(actual_file)))
        with open("/var/www/html/zips/" + prejobfilename, "wb") as reader:
            reader.write(actual_file)
        blends = prejobid + "-blends"
        splitter = prejobid + "-splitter"
        renderer = prejobid + "-renderer"
        merger = prejobid + "-merger"
        watermarker = prejobid + "-watermarker"
        unenczipper = prejobid + "-unenczipper"
        enczipper = prejobid + "-enczipper"
        megazipper = prejobid + "-megazipper"
        # Tear down downstream-first so no pipeline is left reading a
        # just-deleted input repo.
        client.delete_repo(megazipper)
        client.delete_pipeline(megazipper)
        client.delete_repo(unenczipper)
        client.delete_pipeline(unenczipper)
        client.delete_repo(watermarker)
        client.delete_pipeline(watermarker)
        client.delete_repo(enczipper)
        client.delete_pipeline(enczipper)
        client.delete_repo(merger)
        client.delete_pipeline(merger)
        client.delete_repo(renderer)
        client.delete_pipeline(renderer)
        client.delete_repo(splitter)
        client.delete_pipeline(splitter)
        client.delete_repo(blends)
        print("Cleaned up job " + prejobid)
    return "Ack!"
# 'adhoc' generates a throwaway self-signed TLS certificate at startup.
app.run(host="0.0.0.0", ssl_context=("adhoc"))
43442611975 | import os
import glob
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import argparse
from torch.utils.data import DataLoader, Subset, Dataset
from torchvision import datasets, transforms
from sklearn.model_selection import train_test_split
from PIL import Image
class SimpleCNN(nn.Module):
    """Small two-conv-layer CNN over 32x32 RGB inputs with 8 output classes."""

    def __init__(self):
        super(SimpleCNN, self).__init__()
        # 3 -> 16 -> 32 feature maps, 3x3 kernels, no padding.
        self.conv1 = nn.Conv2d(3, 16, 3)
        self.conv2 = nn.Conv2d(16, 32, 3)
        # Two conv+pool stages reduce a 32x32 input to 32 maps of 6x6.
        self.fc1 = nn.Linear(32 * 6 * 6, 128)
        self.fc2 = nn.Linear(128, 8)

    def forward(self, x):
        relu = nn.functional.relu
        pool = nn.functional.max_pool2d
        x = pool(relu(self.conv1(x)), 2)
        x = pool(relu(self.conv2(x)), 2)
        x = x.view(-1, 32 * 6 * 6)
        x = relu(self.fc1(x))
        # Log-probabilities over the 8 classes.
        return nn.functional.log_softmax(self.fc2(x), dim=1)
class ImageTestDataset(Dataset):
    """Dataset over the .jpg files directly inside `root`.

    Yields (image, basename) pairs; the image is transformed when a
    transform was supplied.
    """

    def __init__(self, root, transform=None):
        self.root = root
        self.transform = transform
        # Top-level *.jpg files only (no recursion into subdirectories).
        self.image_files = glob.glob(os.path.join(root, "*.jpg"))

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, idx):
        path = self.image_files[idx]
        image = Image.open(path)
        if self.transform:
            image = self.transform(image)
        return image, os.path.basename(path)
def load_and_split_datasets(transform):
    """Load the development ImageFolder dataset and split it 80/20.

    Returns (train_subset, val_subset, idx_to_class_mapping), where the
    mapping inverts ImageFolder's class_to_idx.
    """
    dataset = datasets.ImageFolder(root=DATASET_DEVELOPMENT_DIR_PATH, transform=transform)
    # Fixed random_state keeps the split reproducible across runs.
    train_idx, val_idx = train_test_split(list(range(len(dataset))), test_size=0.2, random_state=42)
    train_dataset = Subset(dataset, train_idx)
    val_dataset = Subset(dataset, val_idx)
    return train_dataset, val_dataset, {v: k for k, v in dataset.class_to_idx.items()}
def train_and_validate_model(model, train_loader, val_loader):
    """Train for 5 epochs (Adam, lr=1e-3, cross-entropy), print validation
    accuracy after each epoch, and save the model to model.pth."""
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    n_epochs = 5
    for epoch in range(n_epochs):
        model.train()
        for batch_idx, (data, target) in enumerate(train_loader):
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            print(f"Epoch [{epoch+1}/{n_epochs}], Step [{batch_idx+1}/{len(train_loader)}], Loss: {loss.item():.4f}")
        # Validation pass (no gradients).
        model.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for data, target in val_loader:
                output = model(data)
                _, predicted = torch.max(output.data, 1)
                total += target.size(0)
                correct += (predicted == target).sum().item()
        print(f'Validation Accuracy: {100 * correct / total:.2f}%')
    # NOTE(review): this pickles the whole model object rather than its
    # state_dict, which ties the checkpoint to this exact class definition.
    torch.save(model, "model.pth")
def test_model_and_generate_predictions(model, test_loader, idx_to_class_mapping):
    """Run inference over `test_loader` and write '<filename> <class>' lines
    to predictions.txt (one line per test image)."""
    model.eval()
    with open('predictions.txt', 'w') as f:
        with torch.no_grad():
            for batch_idx, (data, filenames) in enumerate(test_loader):
                output = model(data)
                # argmax over class scores gives the predicted class index.
                _, predicted = torch.max(output.data, 1)
                for i in range(len(filenames)):
                    f.write(f"{filenames[i]} {idx_to_class_mapping[predicted[i].item()]}\n")
    print(f"Prediction file generated: predictions.txt")
if __name__ == "__main__":
    # Argument parsing
    parser = argparse.ArgumentParser(description='PyTorch Training Script')
    parser.add_argument('-d', '--development_dir', type=str, required=True, help='Path to the development dataset directory')
    parser.add_argument('-t', '--testing_dir', type=str, required=True, help='Path to the testing dataset directory')
    args = parser.parse_args()
    DATASET_DEVELOPMENT_DIR_PATH = args.development_dir
    DATASET_TESTING_DIR_PATH = args.testing_dir
    # All images are resized to the 32x32 input the model expects.
    transform = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
    ])
    train_dataset, val_dataset, idx_to_class_mapping = load_and_split_datasets(transform=transform)
    model = SimpleCNN()
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False)
    train_and_validate_model(model, train_loader, val_loader)
    # Inference over the unlabeled testing directory.
    test_dataset = ImageTestDataset(root=DATASET_TESTING_DIR_PATH, transform=transform)
    test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)
    test_model_and_generate_predictions(model, test_loader, idx_to_class_mapping)
| simula/presimal-sample-submission | run.py | run.py | py | 4,620 | python | en | code | 0 | github-code | 90 |
9088105650 | import pymysql.cursors
from datetime import datetime
class mysqlUtil(object):
    """Minimal pymysql helper for the local ``parking_system`` database.

    Connection settings are hard-coded in __init__. ``self.connection`` holds
    False when connecting failed, so every statement method is wrapped in
    try/except and degrades to a sentinel return value instead of raising.
    """
    def __init__(self):
        # Fixed connection parameters; could be promoted to constructor
        # arguments later without breaking existing callers.
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "123456"
        self.db = "parking_system"
        self.port = 3306
        self.charset = "utf8"
        # NOTE: this deliberately rebinds the attribute over the connection()
        # method (original design, kept for compatibility).
        self.connection = self.connection()
        if(self.connection):
            self.cursor = self.connection.cursor()
    # Open the database connection.
    def connection(self):
        """Return an open pymysql connection, or False when connecting fails."""
        try:
            return pymysql.Connect(
                host=self.host,
                user=self.user,
                password=self.password,
                db=self.db,
                port=self.port,
                charset=self.charset
            )
        except Exception as e:
            print(e)
            return False
    # Run a SELECT statement.
    def querySql(self, sql):
        """Execute *sql* and return all rows; "" when no rows or on error.

        An empty *sql* string falls through and returns None — original
        behaviour, kept because callers only ever compare against "".
        """
        try:
            if sql != "":
                rows_count = self.cursor.execute(sql)
                self.connection.commit()
                if rows_count > 0:
                    return self.cursor.fetchall()
                return ""
        except Exception as e:
            print(e)
            return ""
    # Run an INSERT statement.
    def insertSql(self, sql, parameter=None):
        """Execute a parameterized INSERT; True on success, False otherwise.

        Bug fix: the default used to be a shared mutable list ([]); it is now
        None and normalized to a fresh list per call.
        """
        if parameter is None:
            parameter = []
        try:
            if sql != '':
                self.cursor.execute(sql, parameter)
                self.connection.commit()
                return True
        except Exception as e:
            print(e)
            return False
    # Run an UPDATE statement.
    def updateSql(self, sql):
        """Execute an UPDATE; return a status message string (user-facing, Chinese)."""
        try:
            if sql != "":
                self.cursor.execute(sql)
                self.connection.commit()
                return "修改成功"
        except Exception as e:
            print(e)
            return "修改失败"
    # Run a DELETE statement.
    def deleteSql(self, sql):
        """Execute a DELETE; True on success, False on error."""
        try:
            self.cursor.execute(sql)
            self.connection.commit()
            return True
        except Exception as e:
            print(e)
            return False
    # Close cursor and connection.
    def closeConnec(self):
        """Close the cursor and connection if they were opened.

        Bug fix: the original compared ``type(...)`` against the *string*
        'object', which is always False, so nothing was ever closed.
        """
        try:
            cursor = getattr(self, 'cursor', None)
            if cursor is not None:
                cursor.close()
            if self.connection:
                self.connection.close()
            return True
        except Exception as e:
            print(e)
            return False
class Util():
    """High-level parking-lot operations: login checks, car entry/exit,
    tariff management and revenue reports.

    Every method opens its own mysqlUtil connection (stored on self.db) and
    closes it before returning.
    NOTE(review): SQL is assembled by string concatenation from caller
    input, which is injectable — parameterized queries would be safer.
    """
    # Verify a login against admin_table or tollman_table depending on is_admin.
    def check_password(self,username, password,is_admin):
        self.db = mysqlUtil()
        if(is_admin == True):
            sql = "select admin_password from admin_table where admin_name = \"" + username + "\""
            results = self.db.querySql(sql)
            self.db.closeConnec()
            if(results == ""):
                return False
            else:
                # Returns after comparing the first row's password only.
                for row in results:
                    userpassword = row[0]
                    if(userpassword == password):
                        return True
                    else:
                        return False
        else:
            sql = "select tollman_password from tollman_table where tollman_name = \"" + username + "\""
            results = self.db.querySql(sql)
            self.db.closeConnec()
            if(results == ""):
                return False
            else:
                for row in results:
                    userpassword = row[0]
                    if(userpassword == password):
                        return True
                    else:
                        return False
    # parking_car_table schema:
    # car_number varchar(10),
    # entry_time datetime,
    # Register a car entering the lot; returns False if it is already inside.
    def car_entry(self,car_number):
        self.db = mysqlUtil()
        nowtime = datetime.now()
        #str_to_date(\'%s\','%%Y-%%m-%%d %%H:%%i:%%s')
        sql = "select * from parking_car_table where car_number = \'" + car_number + "\'"
        results1 = self.db.querySql(sql)
        for row in results1 :
            get_car_number = row[0]
            entry_time = row[1]
            if(get_car_number == car_number):
                self.db.closeConnec()
                return False
        else:
            # for/else: the loop returns early on a match, so this branch runs
            # only when the car was not found (or the result set was empty).
            sql = "insert into parking_car_table(car_number,entry_time) values(%s,%s)"
            a = [car_number,nowtime.strftime("%Y-%m-%d %H:%M:%S")]
            re1 = self.db.insertSql(sql,a)
            self.db.closeConnec()
            return re1
    # parking_history_table schema:
    # history_car_number varchar(10),
    # history_entry_time varchar(20),
    # history_out_time varchar(20),
    # history_parking_time int(10),
    # history_charge int(10)
    # Check a car out: remove it from the live table, compute duration and fee
    # from charge_table, archive the stay into parking_history_table.
    def car_out(self,car_number):
        self.db = mysqlUtil()
        nowtime = datetime.now()
        print(nowtime.strftime("%Y-%m-%d %H:%M:%S"))
        sql = "select * from parking_car_table where car_number = \'" + car_number + "\'"
        results1 = self.db.querySql(sql)
        for row in results1 :
            car_number = row[0]
            entry_time = row[1]
        sql = "DELETE FROM parking_car_table WHERE car_number = \'" + car_number + "\'"
        self.db.deleteSql(sql)
        #str_to_date(\'%s\','%%Y-%%m-%%d %%H:%%i:%%s')
        # Compute the parking duration and the fee.
        # timedelta.seconds is only the sub-day remainder; .days carries whole days.
        entrytime = datetime.strptime(entry_time, "%Y-%m-%d %H:%M:%S")
        print((nowtime-entrytime).seconds)
        second = (nowtime-entrytime).seconds
        days = (nowtime-entrytime).days
        hours = second//(60*60)
        minites = second%(60*60)//60
        parking_history_time = str(days) + "days " + str(hours) + "huors " + str(minites) + "minites"
        # charge_table schema:
        # lower60 int(10),
        # lower300 int(10),
        # higher300 int(10)
        chargetime = 0
        if(minites>0):
            # Any started hour is billed as a full hour.
            chargetime = days * 24 + hours + 1
        else:
            chargetime = days * 24 + hours
        sql = "select * from charge_table"
        results2 = self.db.querySql(sql)
        for row in results2 :
            lower60 = row[0]
            lower300 = row[1]
            higher300 = row[2]
        # Tariff: first hour at lower60, hours 2-5 at lower300, beyond at higher300.
        if(chargetime <= 1):
            history_charge = lower60
        elif(chargetime<=5):
            history_charge = lower60 + (chargetime-1) * lower300
        elif(chargetime>5):
            history_charge = lower60 + lower300 * 4 + (chargetime-5) * higher300
        print(history_charge)
        sql = "insert into parking_history_table(history_car_number,history_entry_time,history_out_time,history_parking_time,history_charge) values(%s,%s,%s,%s,%s)"
        a = [car_number,entry_time,nowtime.strftime("%Y-%m-%d %H:%M:%S"),parking_history_time,history_charge]
        self.db.insertSql(sql,a)
        self.db.closeConnec()
        return str(days) + "天 " + str(hours) + "小时" + str(minites) + "分钟", history_charge
    # Set the tariff: <1h rate, hours 2-5 rate, >5h rate. Replaces any existing row.
    def set_charge(self,lower60,lower300,higher300):
        self.db = mysqlUtil()
        sql = "select * from charge_table"
        results = self.db.querySql(sql)
        re1 = False
        re2 = False
        if(results == ""):
            # No tariff stored yet: insert the first row.
            sql = "INSERT INTO charge_table(lower60,lower300,higher300) VALUES (%s, %s, %s);"
            a = [lower60,lower300,higher300]
            re1 = self.db.insertSql(sql,a)
        else:
            # NOTE(review): this loop rebinds the *parameter* lower60 to the
            # stored value before deleting, so the new row is inserted with the
            # OLD lower60 instead of the caller's value — confirm intent.
            for row in results :
                lower60 = row[0]
            sql = "DELETE FROM charge_table WHERE lower60=" + str(lower60)
            print(self.db.deleteSql(sql))
            sql = "INSERT INTO charge_table(lower60,lower300,higher300) VALUES (%s, %s, %s);"
            a = [lower60,lower300,higher300]
            re2 = self.db.insertSql(sql,a)
        self.db.closeConnec()
        if(re1 or re2):
            return True
        else:
            return False
    # Count cars and total revenue for stays fully inside [starttime, endtime].
    def query_timecar(self,starttime,endtime):
        self.db = mysqlUtil()
        strstarttime = starttime.strftime("%Y-%m-%d %H:%M:%S")
        strendtime = endtime.strftime("%Y-%m-%d %H:%M:%S")
        sql = "select * from parking_history_table";
        results = self.db.querySql(sql)
        car_count = 0
        charge = 0
        for row in results :
            car_number = row[0]
            entry_time = row[1]
            out_time = row[2]
            car_charge = row[4]
            # Lexicographic comparison works because all timestamps share the
            # fixed "%Y-%m-%d %H:%M:%S" format.
            if(entry_time>=strstarttime and out_time<=strendtime):
                car_count = car_count + 1
                charge = charge + car_charge
        self.db.closeConnec()
        print(car_count)
        print(charge)
        return car_count,charge
# #测试
# a=datetime.now()
# print('当前时间:',a)
# db = mysqlUtil()
# #插入测试
# #print("------插入测试---------")
# #sql1 = "INSERT INTO admin_table(admin_id,admin_password,admin_name) VALUES (%s, %s, %s);"
# #a = ["18","123456","admin"]
# #db.insertSql(sql1,a)
# #sql12 = "INSERT INTO tollman_table(tollman_id,tollman_password,tollman_name) VALUES (%s, %s, %s);"
# #b = ["18","123456","Alx"]
# #db.insertSql(sql12,b)
# #查询测试
# #print("------查询测试---------")
# #sql2 = "select * from admin_table"
# #results = db.querySql(sql2)
# #print("admin_id","admin_password","admin_name")
# ##遍历结果
# #for row in results :
# # id = row[0]
# # name = row[1]
# # password = row[2]
# # print(id,name,password)
# #删除测试
# #print("------删除测试---------")
# #sql3 = "DELETE FROM admin_table WHERE admin_id=\"18\""
# #db.deleteSql(sql3)
# #print(db.insertSql(sql1,a))
# #results = db.querySql(sql2)
# #print("admin_id","admin_password","admin_name")
# ##遍历结果
# #for row in results :
# # id = row[0]
# # name = row[1]
# # password = row[2]
# # print(id,name,password)
# #db.closeConnec()
# #check_password测试
# print("------check_password测试---------")
# ut = Util()
# t1 = ut.check_password("admin","123456",True)
# if(t1 == True):
# print("yes")
# else:
# print("no")
# t2 = ut.check_password("Alx","123456",False)
# if(t2 == True):
# print("yes")
# else:
# print("no")
# ut.set_charge(11,9,7)
# #测试car_entry
# print("------car_entry测试---------")
# ut.car_entry("113")
# #测试car_out
# print("------car_out测试---------")
# ut.car_out("113")
# #测试set_charge
# print("------set_charge测试---------")
# ut.set_charge(10,9,7)
# #测试query_timecar
# print("------query_timecar测试------")
# stime = datetime(2019, 10, 22, 0, 0, 0)
# print(stime)
# etime = datetime(2019, 10, 23, 0, 0, 0)
# print(etime)
# ut.query_timecar(stime,etime) | zhenglinyi/MyCarPlateRecognition | DataBase.py | DataBase.py | py | 8,682 | python | en | code | 1 | github-code | 90 |
35753854911 | #!/usr/bin/env python
# BOJ 2857: read five names and report the 1-based positions that contain
# "FBI"; masking with '*' first means a literal '*' in the input also matches
# (behaviour preserved from the original).
flagged = []
for position in range(5):
    name = input()
    if len(name) > 10:
        continue
    masked = name.replace("FBI", "*")
    if "*" in masked:
        flagged.append(position + 1)
if flagged:
    print(*flagged)
else:
    print("HE GOT AWAY!")
| hansojin/python | string/bj2857.py | bj2857.py | py | 259 | python | en | code | 0 | github-code | 90 |
26545500864 | # https://leetcode.com/problems/kth-largest-element-in-a-stream
import heapq
from typing import List
class KthLargest:
    """Maintain the k-th largest element of a stream with a min-heap of size k.

    The heap holds the k largest values seen so far; its root (``nums[0]``)
    is therefore the k-th largest overall.
    """

    def __init__(self, k: int, nums: List[int]):
        self.k = k
        self.nums = list(nums)  # copy so the caller's list is never mutated
        heapq.heapify(self.nums)
        # Shrink to k once up front instead of re-trimming on every add()
        # (the original kept the full heap and popped inside add).
        while len(self.nums) > k:
            heapq.heappop(self.nums)

    def add(self, val: int) -> int:
        """Insert *val* into the stream and return the current k-th largest."""
        if len(self.nums) < self.k:
            heapq.heappush(self.nums, val)
        else:
            # Single O(log k) operation replacing the push-then-pop loop;
            # also drops the unused `last` variable from the original.
            heapq.heappushpop(self.nums, val)
        return self.nums[0]
| peulsilva/leetcode-problems | problems/kth_largest_element_in_a_stream.py | kth_largest_element_in_a_stream.py | py | 473 | python | en | code | 0 | github-code | 90 |
5006342605 | # evolution.py
# (C)2015
# Scott Ernst
from __future__ import \
print_function, absolute_import, \
unicode_literals, division
import numpy as np
from scipy import stats
import pandas as pd
import plotly.plotly as plotly
from plotly import graph_objs as plotlyGraph
from plotly import tools as plotlyTools
#===============================================================================
# P U B L I C
#_______________________________________________________________________________
def generatePercentiles(batting, fields, doPlots =True):
    """ Calculates the percentiles and returns a new batting DataFrame with the
        additional percentile columns listed in the fields. Plotly plots are
        also generated if the doPlots argument is True.
        For every field key a 'per<key>' (league-year) and 'tper<key>'
        (team-year) percentile column is added.
        :param batting: DataFrame
        :param fields: dict
        :param doPlots: bool
        :return: DataFrame
        """
    batting = batting.copy()
    fieldData = []
    # Pre-create the percentile columns and the per-field plot bookkeeping.
    for key, title in fields.items():
        percentileColumnName = 'per' + key
        teamPercentileColumnName = 'tper' + key
        batting.loc[:, percentileColumnName] = 0.0
        batting.loc[:, teamPercentileColumnName] = 0.0
        fieldData.append(dict(
            sourceColumnName=key,
            evolution=dict(
                traces=[],
                title='Evolution: Yearly %s' % title,
                filename='MLB/Yearly-%s-evolution' % title.replace(' ', '-') ),
            yearly=dict(
                columnName=percentileColumnName,
                title='Yearly %s' % title,
                filename='MLB/Yearly-%s' % title.replace(' ', '-') ),
            team=dict(
                columnName=teamPercentileColumnName,
                title='Team Yearly %s' % title,
                filename='MLB/Team-Yearly-%s' % title.replace(' ', '-') )))
    #---------------------------------------------------------------------------
    # SLICING
    # For each year in the data calculate a percentile field for each field
    # definition. Also calculate for each year the percentile in the within
    # the player's team.
    for year in batting.yearID.unique():
        yearSlice = batting[batting.yearID == year].copy()
        for entry in fieldData:
            columnName = entry['yearly']['columnName']
            yearSlice = _calculatePercentilesInSlice(
                dataSlice=yearSlice,
                sourceColumnName=entry['sourceColumnName'],
                targetColumnName=columnName)
            if doPlots:
                entry['evolution']['traces'].append(plotlyGraph.Box(
                    y=list(yearSlice[columnName].values),
                    name='%s' % year))
            # columnName is rebound here to the team-level target column.
            for teamID in yearSlice.teamID.unique():
                columnName = entry['team']['columnName']
                teamSlice = _calculatePercentilesInSlice(
                    dataSlice=yearSlice[yearSlice.teamID == teamID].copy(),
                    sourceColumnName=entry['sourceColumnName'],
                    targetColumnName=columnName)
                # combine_first merges the team slice back, preferring its values.
                yearSlice = teamSlice.combine_first(yearSlice)
        batting = yearSlice.combine_first(batting)
    if not doPlots:
        return batting
    #---------------------------------------------------------------------------
    # PLOT RESULTS
    # Merge the percentile data back into the batting table and slice out
    # the MVP data for plotting
    mvps = batting[batting.MVP]
    for entry in fieldData:
        _plotEvolutionData(entry)
        _plotFieldData(columnData=entry, mvps=mvps)
    return batting
#===============================================================================
# P R O T E C T E D
#_______________________________________________________________________________
def _calculatePercentilesInSlice(dataSlice, sourceColumnName, targetColumnName):
""" Calculates the percentiles in the slice and adds those values to the
slice's target column. """
values = dataSlice[sourceColumnName].values
mn = values.mean()
std = values.std()
percentiles = []
for value in values:
percentiles.append(100.0*stats.norm.cdf((mn - value)/std))
dataSlice.loc[:, targetColumnName] = pd.Series(
data=np.array(percentiles),
index=dataSlice.index)
return dataSlice
#_______________________________________________________________________________
def _plotEvolutionData(columnData):
    """Upload the yearly box-plot evolution figure for one field and log its URL."""
    evolution = columnData['evolution']
    data = plotlyGraph.Data(evolution['traces'])
    layout = plotlyGraph.Layout(
        title=evolution['title'],
        showlegend=False)
    figure = plotlyGraph.Figure(data=plotlyGraph.Data(data), layout=layout)
    url = plotly.plot(
        figure,
        filename=evolution['filename'],
        auto_open=False)
    print('EVOLUTION[%s]: %s' % (columnData['sourceColumnName'], url))
#_______________________________________________________________________________
def _plotFieldData(columnData, mvps):
    """Build and upload the 2x3 subplot figure (three histograms on top,
    cumulative distributions below) for one field, logging its URL."""
    srcKey = columnData['sourceColumnName']
    fig = plotlyTools.make_subplots(
        rows=2, cols=3,
        print_grid=False,
        specs=[
            [{}, {}, {}],
            [{'colspan':2}, None, {}]])
    yearly_series = mvps[columnData['yearly']['columnName']]
    team_series = mvps[columnData['team']['columnName']]
    absolute_series = mvps[srcKey]
    traces = [
        _createHistogram(
            index=1, color='blue',
            label='Percentile in Year (%)', data=yearly_series),
        _createHistogram(
            index=2, color='purple',
            label='Percentile in Team (%)', data=team_series),
        _createHistogram(
            index=3, color='red',
            label='Absolute', data=absolute_series),
        # The three CDs share subplot 4 (the bottom-left colspan cell).
        _createCumulativeDistributionPlot(
            index=4, color='blue',
            label='Percentile CD', series=yearly_series),
        _createCumulativeDistributionPlot(
            index=4, color='purple',
            label='Team Percentile CD', series=team_series),
        _createCumulativeDistributionPlot(
            index=4, color='red',
            label='Absolute CD', series=absolute_series),
    ]
    fig['data'] += plotlyGraph.Data(traces)
    fig['layout'].update(title=columnData['yearly']['title'])
    url = plotly.plot(
        fig,
        filename=columnData['yearly']['filename'],
        auto_open=False)
    print('STATS[%s]: %s' % (srcKey, url))
#_______________________________________________________________________________
def _createHistogram(index, color, data, label):
    """Return a Histogram trace bound to subplot axes x<index>/y<index>."""
    axis_suffix = str(int(index))
    return plotlyGraph.Histogram(
        name=label,
        x=data,
        xaxis='x' + axis_suffix,
        yaxis='y' + axis_suffix,
        marker=plotlyGraph.Marker(
            color=color,
            line=plotlyGraph.Line(width=0)) )
#_______________________________________________________________________________
def _createCumulativeDistributionPlot(index, series, color, label):
    """Return a Scatter trace of the normalized cumulative distribution of
    *series* (20 bins), bound to subplot axes x<index>/y<index>."""
    counts, edges = np.histogram(a=series.values, bins=20)
    axis_suffix = str(int(index))
    return plotlyGraph.Scatter(
        name=label,
        x=edges[:-1],
        xaxis='x' + axis_suffix,
        yaxis='y' + axis_suffix,
        line=plotlyGraph.Line(color=color),
        y=counts.cumsum()/counts.sum())
| sernst/MVP_Analysis | src/mlb/analysis/evolution.py | evolution.py | py | 7,366 | python | en | code | 0 | github-code | 90 |
19414741112 | import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense , Conv2D, MaxPooling2D , Dropout, Input,Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
# Load MNIST (28x28 grayscale digits) as train/test numpy arrays.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# compute the number of labels (10 digit classes)
num_labels = len(np.unique(y_train))
# convert labels to one-hot vectors
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# reshape to NHWC (single channel) and normalize pixels to [0, 1]
image_size = x_train.shape[1]
x_train = np.reshape(x_train,[-1, image_size, image_size, 1])
x_test = np.reshape(x_test,[-1, image_size, image_size, 1])
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# network hyper-parameters
input_shape = (image_size, image_size, 1)
batch_size = 128
kernel_size = 3
filters = 64
dropout = 0.3
# functional API for building the CNN: three conv blocks, then dense softmax
inputs = Input(shape = input_shape)
"""
Y is the output tensor where as X is the input tensor
in let's say a function
Y = Conv2D(32)(X)
"""
y = Conv2D(filters = filters , kernel_size =kernel_size , activation = 'relu')(inputs)
y = MaxPooling2D()(y)
y = Conv2D(filters = filters , kernel_size =kernel_size , activation = 'relu')(y)
y = MaxPooling2D()(y)
y = Conv2D(filters = filters , kernel_size =kernel_size , activation = 'relu')(y)
# flatten the feature maps before the dense layer
y = Flatten()(y)
# regularize with dropout to reduce overfitting
y = Dropout(dropout)(y)
outputs = Dense(num_labels, activation = 'softmax')(y)
# build the model by supplying inputs and outputs
model = Model(inputs= inputs, outputs= outputs)
print(model.summary())
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# train for 20 epochs, validating on the test split each epoch
model.fit(x_train,
          y_train,
          validation_data=(x_test, y_test),
          epochs=20,
          batch_size=batch_size)
score = model.evaluate(x_test,
                       y_test,
                       batch_size=batch_size,
                       verbose=2)
print("\nTest accuracy: %.1f%%" % (100.0 * score[1]))
| Pavankunchala/Deep-Learning | Tensorflow_Basics/Functional-Model/cnn_functional.py | cnn_functional.py | py | 2,139 | python | en | code | 31 | github-code | 90 |
13386701439 | #!/bin/python
#
# Author : Ye Jinchang
# Date : 2016-04-14 11:06:13
# Title : 199 binary tree right side view
# Given a binary tree, imagine yourself standing on the right side of it, return the values of the nodes you can see ordered from top to bottom.
#
# For example:
# Given the following binary tree,
#
# 1 <---
# / \
# 2 3 <---
# \ \
# 5 4 <---
#
# You should return [1, 3, 4].
#
# Credits:
# Special thanks to @amrsaqr for adding this problem and creating all test cases.
#
# Subscribe to see which companies asked this question
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def rightSideView(self, root):
        """Return the values visible from the right side of the tree, top to bottom.

        :type root: TreeNode
        :rtype: List[int]
        """
        view = []
        if root is not None:
            self.dfs(root, view, 1)
        return view

    def dfs(self, node, res, level):
        """Right-first depth-first walk: the first node reached on each new
        level is the rightmost one, so record it."""
        if node is None:
            return
        if len(res) < level:
            res.append(node.val)
        for child in (node.right, node.left):
            self.dfs(child, res, level + 1)
| Alwayswithme/LeetCode | Python/199-binary-tree-right-side-view.py | 199-binary-tree-right-side-view.py | py | 1,252 | python | en | code | 1 | github-code | 90 |
74119952616 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 20:17:55 2018
@author: user
字串索引
"""
# Read one line and print each character together with its zero-based index.
text = input()
for index, ch in enumerate(text):
    print("Index of '{:}': {:}".format(ch, index))
12049370544 | import uuid
import logging
from django.conf import settings
from django.core.management.base import BaseCommand
from kombu import Connection, Exchange, Queue
from kombu.mixins import ConsumerMixin
from search_engine.crawlers import web_crawler, text_preprocess, process_text_metadata
LOGGER = logging.getLogger(__name__)
class RabbitConsumer(ConsumerMixin):
    """Kombu consumer draining the URLS, RAW_TEXT and PROC_TEXT queues and
    dispatching each message to the matching crawler stage."""
    def __init__(self, connection, queues):
        # connection: open kombu Connection; queues: [urls, raw_text, proc_text]
        self.connection = connection
        self.queues = queues
    def get_consumers(self, Consumer, channel):
        """Return one Consumer per queue, each with msgpack decoding, a unique
        consumer-tag prefix, and its stage callback."""
        # URL stage allows 5 unacked messages in flight; the heavier text
        # stages are limited to 1.
        url_consumer = Consumer(
            queues=self.queues[0],
            callbacks=[self.on_url],
            on_decode_error=self.on_decode_error,
            accept=['msgpack'],
            tag_prefix='URLS||seeker_consumer'+str(uuid.uuid4()),
            prefetch_count=5
        )
        raw_consumer = Consumer(
            queues=self.queues[1],
            callbacks=[self.on_raw],
            on_decode_error=self.on_decode_error,
            accept=['msgpack'],
            tag_prefix='RAW_TEXT||seeker_consumer'+str(uuid.uuid4()),
            prefetch_count=1
        )
        proc_consumer = Consumer(
            queues=self.queues[2],
            callbacks=[self.on_proc],
            on_decode_error=self.on_decode_error,
            accept=['msgpack'],
            tag_prefix='PROC_TEXT||seeker_consumer'+str(uuid.uuid4()),
            prefetch_count=1
        )
        return [url_consumer, raw_consumer, proc_consumer]
    def on_url(self, body, message):
        """Crawl the URL payload, acking only on success.

        NOTE(review): on failure the message is logged but neither acked nor
        rejected — presumably left for redelivery; confirm that is intended.
        """
        try:
            web_crawler(body)
            message.ack()
        except Exception as e:
            LOGGER.error(str(e))
    def on_raw(self, body, message):
        # Preprocess raw text; same ack-on-success policy as on_url.
        try:
            text_preprocess(body)
            message.ack()
        except Exception as e:
            LOGGER.error(str(e))
    def on_proc(self, body, message):
        # Extract metadata from preprocessed text; same ack-on-success policy.
        try:
            process_text_metadata(body)
            message.ack()
        except Exception as e:
            LOGGER.error(str(e))
class Command(BaseCommand):
    """Django management command: bind the three pipeline queues and run the
    RabbitConsumer until interrupted."""
    def handle(self, *args, **options):
        exchange = Exchange(
            settings.AMQP['routing']['exchange_name'],
            settings.AMQP['routing']['exchange_type']
        )
        # The chr()/>>2 arithmetic obfuscates the routing-key suffixes;
        # chr(46) is '.'. Decoded, these are "<prefix>.url",
        # "<prefix>.raw_text" and "<prefix>.proc_text".
        urls_rk = settings.AMQP['routing']['rk_prefix']+chr(46)+chr(468 >> 2)+chr(458 >> 2)+chr(433 >> 2)
        raw_rk = settings.AMQP['routing']['rk_prefix']+chr(46)+''.join(chr(i>>2) for i in [456, 388, 476, 380, 464, 404, 480, 467])
        proc_rk = settings.AMQP['routing']['rk_prefix']+chr(46)+''.join(chr(i>>2) for i in [451, 459, 447, 399, 383, 467, 407, 483, 467])
        # no_declare=True: the queues/bindings are expected to already exist
        # on the broker — TODO confirm provisioning happens elsewhere.
        urls_queue = Queue(
            'URLS',
            exchange=exchange,
            bindings=[urls_rk],
            no_declare=True
        )
        raw_text_queue = Queue(
            'RAW_TEXT',
            exchange=exchange,
            bindings=[raw_rk],
            no_declare=True
        )
        proc_text_queue = Queue(
            'PROC_TEXT',
            exchange=exchange,
            bindings=[proc_rk],
            no_declare=True
        )
        queues = [urls_queue, raw_text_queue, proc_text_queue]
        with Connection(**settings.AMQP['connection']) as conn:
            LOGGER.info('Initializing message consumption with the following config:')
            for q in queues:
                LOGGER.info('Queue: %s', str(q.name))
            consumer = RabbitConsumer(conn, queues)
            consumer.run()
| brunolcarli/seeker | search_engine/management/commands/amqp_consumer.py | amqp_consumer.py | py | 3,446 | python | en | code | 1 | github-code | 90 |
39203379176 | from tkinter import *
class MyWindow:
    """Tkinter form that concatenates given/middle/last name into a full name."""
    def __init__(self, window):
        # Title and field labels (absolute placement).
        self.label_title = Label(window, text = "My Full Name", fg = "red", font = "verdana")
        self.label_title.place(x=200, y=40)
        self.label_firstname = Label(window, text = "Enter Given Name:", fg = "red")
        self.label_firstname.place(x=100, y=80)
        self.label_middlename = Label(window, text = "Enter Middle Name:", fg = "red")
        self.label_middlename.place(x=100, y=110)
        self.label_lastname = Label(window, text = "Enter Last Name:", fg = "red")
        self.label_lastname.place(x=100, y=140)
        self.label_fullname = Label(window, text = "My Full Name is:", fg = "red")
        self.label_fullname.place(x=100, y=180)
        # Input fields; txt_fullname doubles as the (editable) output field.
        self.txt_firstname = Entry(window, bd=2)
        self.txt_firstname.place(x=275, y=80)
        self.txt_middlename = Entry(window, bd=2)
        self.txt_middlename.place(x=275, y=110)
        self.txt_lastname = Entry(window, bd=2)
        self.txt_lastname.place(x=275, y=140)
        self.txt_fullname = Entry(window, bd=2, width=30)
        self.txt_fullname.place(x=275, y=180)
        # Action buttons, centered horizontally.
        self.btn_dsplyflnm = Button(window, text="Show Full Name", command=self.display)
        self.btn_dsplyflnm.place(relx=0.5, y=230, anchor=CENTER)
        self.btn_clearall = Button(window, text="Clear All", command=self.clear_all)
        self.btn_clearall.place(relx=0.5, y=260, anchor=CENTER)
    def display(self):
        """Join the three name fields into txt_fullname (clears it first).

        NOTE(review): the joined string ends with a trailing space — confirm
        whether that is intended.
        """
        self.txt_fullname.delete(0, END)
        firstname = str(self.txt_firstname.get())
        middlename = str(self.txt_middlename.get())
        lastname = str(self.txt_lastname.get())
        fullname = firstname + " " + middlename + " " + lastname + " "
        self.txt_fullname.insert(END, str(fullname))
    def clear_all(self):
        """Empty all four entry fields."""
        self.txt_firstname.delete(0, END)
        self.txt_middlename.delete(0, END)
        self.txt_lastname.delete(0, END)
        self.txt_fullname.delete(0, END)
# Build the root window, attach the form, then hand control to the Tk loop.
root = Tk()
MyWindow(root)
root.geometry("500x300+10+10")
root.mainloop()
| SamanthaLapena/58002_OOP | Midterm Exam Problem 2_LAPEÑA.py | Midterm Exam Problem 2_LAPEÑA.py | py | 2,117 | python | en | code | 0 | github-code | 90 |
10935925743 | import os
import logging
import pandas as pd
import matplotlib.pyplot as plt
# Input/output locations (relative to the script's working directory).
PRE_PATH = '../data/preprocessed/'
LOGS_PATH = '../logs/'
PLOTS_PATH = './plots/'
# Log to file, truncating on each run.
logging.basicConfig(format='%(levelname)s - %(asctime)s: %(message)s',
                    datefmt='%d/%m/%Y %H:%M:%S',
                    filename=(LOGS_PATH + 'generate_graphs.log'),
                    filemode='w',
                    level=logging.INFO)
if not os.path.exists(PLOTS_PATH):
    logging.warning("Plots directory does not exist")
    logging.info("Creating plots dir")
    os.mkdir(PLOTS_PATH)
logging.info('Starting graphs.')
# Only files whose name contains 'cmd' are processed.
files_list = [x for x in os.listdir(PRE_PATH) if 'cmd' in x]
for file_ in files_list:
    try:
        logging.info('-'*20)
        # First column is the timestamp; counts per timestamp become the series.
        df = pd.read_csv((PRE_PATH + file_),
                         sep='\t',
                         usecols=[0])
        df['Timestamp'] = pd.to_datetime(df['Timestamp'])
        df = df['Timestamp'].value_counts().sort_index()
        logging.info(f"File {file_} loaded")
        # Assumes all rows share one calendar year — TODO confirm upstream.
        year = df.index.year.unique().values[0]
        str_template = "{year}-{month}-{day} {hour}:{minute}:{second}"
        # One figure per month: a 6x4 grid, one subplot per hour of the day.
        for month in df.index.month.unique().values:
            fig, ax = plt.subplots(nrows=6, ncols=4, figsize=(24, 16))
            plt.tight_layout()
            # NOTE(review): days come from the whole dataset, not this month;
            # absent combinations just hit the empty-slice branch below.
            for day in df.index.day.unique():
                for hour in range(24):
                    init = str_template.format(
                        year=year,
                        month=month,
                        day=day,
                        hour=hour,
                        minute='00',
                        second='00'
                    )
                    end = str_template.format(
                        year=year,
                        month=month,
                        day=day,
                        hour=hour,
                        minute='59',
                        second='59'
                    )
                    if not df[init:end].empty:
                        # NOTE(review): implicit concat lacks a space before the
                        # f-string, producing "...intervalN-M..." in the log.
                        logging.info("Plotting graph for interval"
                                     f"{hour}-{hour + 1} @ day {day}")
                        df[init:end].plot(
                            ax=ax[hour % 6][hour//6], x='index', y='Timestamp'
                        )
                    else:
                        logging.warning("No data for interval "
                                        f"{hour}-{hour + 1} @ day {day}")
            plt.savefig(
                PLOTS_PATH + file_ + '_' + str(month) + '_hourly_plots.png'
            )
            logging.info(f"Graph for {file_} created")
    except Exception as e:
        # Broad catch keeps one bad file from aborting the whole batch.
        logging.error(str(e))
logging.info("Process finished")
| Gonmeso/TFM_Anomaly_Detection | src/EDA/generate_graphs.py | generate_graphs.py | py | 2,728 | python | en | code | 0 | github-code | 90 |
72058832937 | repetitions = -1
numbers = [0, 12, 15]
answer = 0
_sum = 0
print(len(numbers))
print('if you want to stop the program just type 999 in the program')
while answer != 999:
numbers.append(answer)
_sum += answer
answer = int(input('Enter a integer: '))
repetitions += 1
print(f'{repetitions} numbers were typed, the sum of all these numbers is {_sum}')
| vytorrennan/Curso-de-python | ex/old 057 until 064/ex064.py | ex064.py | py | 365 | python | en | code | 0 | github-code | 90 |
20259307035 | import pygame
from pygame import display, event, key, draw
from pygame.constants import K_KP0, K_KP1, K_KP2, K_KP3, K_KP4, K_KP5, K_KP6, K_KP7, K_KP8, K_KP9
import math
import requests
from json import dumps
pygame.init()
display.set_caption("Place")
running = True
screen = display.set_mode((900,900))
c_souris = False
color = 1
cells = []
size_grille = (30,30)
size_cell = (30,30)
# Headers HTTP pour indiquer le format au serveur
headers = { 'content-type': 'application/json' }
CALL_LIMIT = 5000
i=0
COLORS = [
(255, 255, 255), # White
(0, 0, 0), # Black
(255, 0, 0), # Red
(51, 204, 51), # Green
(0, 153, 255), # Blue
(255, 51, 204), # Pink
(255, 153, 0), # Orange
(255, 255, 0), # Yellow
(153, 0, 204), # Purple
(128, 128, 128), # Gray
]
def update_grille():
    """Refresh the local grid from the server's full state.

    Bug fix: the original assigned the fetched grid to a *local* variable
    named ``cells``, so the module-level grid was never actually updated.
    """
    global cells
    cells = requests.get('http://127.0.0.1:5000/full').json()
    print("ok")
def init_grille(size_grille):
    """Fill the module-level ``cells`` grid with zeros (one row per column of
    size_grille) and clear the window to white."""
    width, height = size_grille
    for _ in range(width):
        cells.append([0] * height)
    screen.fill((255, 255, 255))
    display.update()
init_grille(size_grille)
# Main event loop: paint on click, sync to the server, pick colors via keypad.
while running :
    # NOTE(review): i counts loop frames, so the full-grid refresh fires every
    # CALL_LIMIT frames regardless of how many pixels were placed — confirm.
    if i == CALL_LIMIT:
        update_grille()
        i = 0
    for e in event.get():
        if e.type == pygame.QUIT:
            running = False
        if e.type == pygame.MOUSEBUTTONDOWN:
            # c_souris debounces the click: every other MOUSEBUTTONDOWN paints.
            if not c_souris:
                c_souris = True
                pos = pygame.mouse.get_pos()
                # Convert pixel position to a 30x30 cell coordinate.
                x_temp = pos[0] / 30
                y_temp = pos[1] / 30
                x_pos = math.trunc(x_temp)
                y_pos = math.trunc(y_temp)
                cells[x_pos][y_pos] = color
                # Push the single placed pixel to the server.
                body = {'x' : x_pos, 'y' : y_pos, 'color' : color}
                requests.post('http://127.0.0.1:5000/place',dumps(body),headers=headers)
                # Redraw the whole grid after the change.
                for x in range(size_grille[0]):
                    for y in range(size_grille[1]):
                        draw.rect(screen, COLORS[cells[x][y]],((x*size_cell[0],y*size_cell[1]),(size_cell[0],size_cell[1])))
                display.update()
                # paints the clicked cell with the current color
            else :
                c_souris = False
    # Keypad 0-9 selects the current drawing color (index into COLORS).
    if key.get_pressed()[K_KP0]:
        color = 0
    if key.get_pressed()[K_KP1]:
        color = 1
    if key.get_pressed()[K_KP2]:
        color = 2
    if key.get_pressed()[K_KP3]:
        color = 3
    if key.get_pressed()[K_KP4]:
        color = 4
    if key.get_pressed()[K_KP5]:
        color = 5
    if key.get_pressed()[K_KP6]:
        color = 6
    if key.get_pressed()[K_KP7]:
        color = 7
    if key.get_pressed()[K_KP8]:
        color = 8
    if key.get_pressed()[K_KP9]:
        color = 9
    i += 1
| RyroyNotFound/pixel | Place/main.py | main.py | py | 2,861 | python | en | code | 0 | github-code | 90 |
11584670283 | from typing import List
from ..domain.models import Channel, Subscription
from ..domain.repositories import (
ChannelRepository, DeviceRepository,
MessageRepository, SubscriptionRepository)
from ..domain.services import DeliveryService
from ..domain.common import RecordList
class SubscriptionManager:
    """Application service coordinating channels, devices, messages and
    subscriptions across their repositories and the delivery backend."""
    def __init__(self, channel_repository: ChannelRepository,
                 device_repository: DeviceRepository,
                 message_repository: MessageRepository,
                 subscription_repository: SubscriptionRepository,
                 delivery_service: DeliveryService) -> None:
        self.channel_repository = channel_repository
        self.device_repository = device_repository
        self.message_repository = message_repository
        self.subscription_repository = subscription_repository
        self.delivery_service = delivery_service
    async def create_channel(self, channel_dicts: RecordList) -> None:
        """Persist one Channel per input record."""
        channels = await self.channel_repository.add([
            Channel(**channel_dict)
            for channel_dict in channel_dicts])
    async def delete_channel(self, channel_ids: List[str]) -> bool:
        """Delete the given channels plus their messages and subscriptions.

        Returns False when none of the ids exist; otherwise the repository's
        remove result for the channels themselves.
        """
        channels = await self.channel_repository.search(
            [('id', 'in', channel_ids)])
        if not channels:
            return False
        # Cascade: channel-kind messages addressed to these channels...
        messages = await self.message_repository.search(
            [('recipient_id', 'in', [
                channel.id for channel in channels]),
                ('kind', '=', 'channel')])
        # ...and every subscription pointing at them.
        subscriptions = await self.subscription_repository.search(
            [('channel_id', 'in', [
                channel.id for channel in channels])])
        await self.message_repository.remove(messages)
        await self.subscription_repository.remove(subscriptions)
        return await self.channel_repository.remove(channels)
    async def subscribe(self, subscription_dicts: RecordList) -> None:
        """Register device→channel subscriptions.

        Looks up all referenced devices and channels in bulk, then, per
        record, subscribes the device locator to the channel code on the
        delivery backend and persists the Subscription.
        NOTE(review): unknown device/channel ids raise KeyError here —
        confirm validation happens upstream.
        """
        device_ids = [record["device_id"] for record in subscription_dicts]
        channel_ids = [record["channel_id"] for record in subscription_dicts]
        devices = {item.id: item for item in
                   await self.device_repository.search(
                       [('id', 'in', device_ids)])}
        channels = {item.id: item for item in
                    await self.channel_repository.search(
                        [('id', 'in', channel_ids)])}
        for subscription_dict in subscription_dicts:
            device = devices[subscription_dict["device_id"]]
            channel = channels[subscription_dict["channel_id"]]
            self.delivery_service.subscribe(channel.code, device.locator)
            await self.subscription_repository.add(
                Subscription(**subscription_dict))
    async def delete_subscribe(self, subscription_ids: List[str]) -> bool:
        """Remove the subscriptions with the given ids; returns the repository result."""
        subscriptions = await self.subscription_repository.search(
            [('id', 'in', subscription_ids)])
        return await self.subscription_repository.remove(subscriptions)
| knowark/instark | instark/application/managers/subscription_manager.py | subscription_manager.py | py | 2,995 | python | en | code | 2 | github-code | 90 |
13942285883 | import random
# NOTE(review): the computer's move is drawn once, before the loop, so it
# plays the same hand for all three rounds — confirm whether a fresh draw per
# round was intended.
pc = random.randint(1, 3)
# (player, computer) combinations the player wins: 1=rock 2=scissors 3=paper.
WINNING_PAIRS = {(1, 2), (2, 3), (3, 1)}
for _round in range(3):
    py = int(input("请输入1:石头 2:剪刀 3:布"))
    if not 1 <= py <= 3:
        print("输入不合法")
    elif py == pc:
        print("平局")
    elif (py, pc) in WINNING_PAIRS:
        print("玩家赢")
    else:
        print("电脑赢")
30237009236 | from scipy.linalg import hadamard
import numpy as np
import matplotlib.pyplot as plt
import time
import cv2
from run_generation import cali_gen
""" Plot the calibration images (Hadamard matrix) using open-cv, a faster way than using plt """
T_start = time.time()
N = 32
H = hadamard(N)
I_vector = np.ones((N, 1))
pic_size = 550
""" Horizontal """
for i in range(0, N):
T1 = time.time()
h_k = H[:, i]
X_k_1 = np.outer(h_k, I_vector)
X_k_2 = -X_k_1 # The negative image
X_k_1[X_k_1 == -1] = 0 # Set negative entries to 0
X_k_1[X_k_1 == 1] = 255
X_k_2[X_k_2 == -1] = 0
X_k_2[X_k_2 == 1] = 255
"""
It's important to choose the appropriate interpolation algorithm for resizing
The method using cv2 instead of plt is much faster and have better quality
"""
pic_1 = cv2.resize(X_k_1, (pic_size, pic_size), interpolation=cv2.INTER_AREA)
pic_2 = cv2.resize(X_k_2, (pic_size, pic_size), interpolation=cv2.INTER_AREA)
name_1 = "data/display/calibration/horizontal/" + str(i + 1) + "_h_1.png"
name_2 = "data/display/calibration/horizontal/" + str(i + 1) + "_h_2.png"
cv2.imwrite(name_1, pic_1)
cv2.imwrite(name_2, pic_2)
T2 = time.time()
print(name_1 + " and " + name_2 + " is saved, time consumed: " + str(T2 - T1) + "s")
""" Vertical """
for j in range(0, N):
T1 = time.time()
h_k = H[:, j]
X_k_1 = np.outer(I_vector, h_k)
X_k_2 = -X_k_1
X_k_1[X_k_1 == -1] = 0
X_k_1[X_k_1 == 1] = 255
X_k_2[X_k_2 == -1] = 0
X_k_2[X_k_2 == 1] = 255
pic_1 = cv2.resize(X_k_1, (pic_size, pic_size), interpolation=cv2.INTER_AREA)
pic_2 = cv2.resize(X_k_2, (pic_size, pic_size), interpolation=cv2.INTER_AREA)
name_1 = "data/display/calibration/vertical/" + str(j + 1) + "_v_1.png"
name_2 = "data/display/calibration/vertical/" + str(j + 1) + "_v_2.png"
cv2.imwrite(name_1, pic_1)
cv2.imwrite(name_2, pic_2)
T2 = time.time()
print(name_1 + " and " + name_2 + " is saved, time consumed: " + str(T2 - T1) + "s")
T_end = time.time()
print("Total time consumed: " + str(T_end - T_start) + "s")
""" Generate the image for display """
cali_gen(N)
| TTimelord/lensless | flatcam/picture_get_cv.py | picture_get_cv.py | py | 2,228 | python | en | code | 0 | github-code | 90 |
4397517602 | from pyspark.sql import SQLContext, Row
from pyspark import SparkContext
from pyspark.sql import SQLContext
sc=SparkContext()
sqlContext = SQLContext(sc)
lines = sc.textFile("/home/gpurama/Spark_task/spark-test/product.txt")
parts = lines.map(lambda l: l.split("|"))
people = parts.map(lambda p: Row(product_id=int(p[0]), product_name=p[1],product_type=p[2],product_version=p[3],product_price=p[4]))
schemaPeople = sqlContext.createDataFrame(people)
mode = "overwrite"
url = "jdbc:postgresql://localhost/gopal"
properties = {"user": "postgres","driver": "org.postgresql.Driver"}
schemaPeople.write.jdbc(url=url, table="product", mode=mode, properties=properties)
print("data imported succesfully")
| gopal354/practice | product.py | product.py | py | 701 | python | en | code | 0 | github-code | 90 |
22966186871 | import random
from tkinter import N
class Node:
def __init__(self, data, next=None):
self.data = data
self.next = next
class LinkedList:
def __init__ (self):
self.head = None
def __str__ (self):
node = self.head
while node is not None:
print (node.data)
node = node.next
def append(self, data):
if not self.head:
self.head = Node(data)
return
current = self.head
while current.next:
current = current.next
current.next = Node(data)
def search(self, target):
current = self.head
while current.next:
if current.data == target:
return True
else:
current == current.next
return False
def remove(self, target):
if self.head == target:
self.head = self.head.next
return
current = self.head
previous = None
while current:
if current.data == target:
previous.next = current.next
previous = current
current = current.next
def reverse_list(self):
current = self.head
previous = None
while current:
next = current.next
current.next = previous
previous = current
current = next
def detect_cycle(self):
slow = self.head
fast = self.head
while True:
try:
slow = slow.next
fast = fast.next.next
if slow is fast:
return True
except:
return False
a_list = LinkedList()
for i in range(0,20):
j = random.randint(1,30)
a_list.append(j)
print(j)
| brandopakel/Python-Data-Structure-and-Algorithm-Practice | linked_list.py | linked_list.py | py | 1,797 | python | en | code | 0 | github-code | 90 |
15863486576 | '''
Created on Mar 8, 2020
@author: ballance
'''
import os
from typing import Set
class RunCtxt(object):
"""Collects information about what is being run"""
def __init__(self):
self.rundir = None
self.launch_dir = None
self.project_cfg = None
self.engine = None
self.engine_info = None
self.tools : Set[str] = set()
self.tool_info = []
self.test_info = []
self.regress_mode = False
self.regress_id = "regression_id"
def add_tool(self, tool):
if tool not in self.tools:
self.tools.append(tool)
def get_builddir(self):
return os.path.join(
self.rundir,
self.project,
"none" if self.engine is None else self.engine)
def get_testdir(self, testname, id):
if self.regress_mode:
return os.path.join(
self.rundir,
self.project,
"regress",
self.regress_id,
("%s_%04d" % (testname,id)))
else:
return os.path.join(
self.rundir,
self.project,
"tests",
testname
)
| fvutils/testsuite-runner | src/tsr/run_ctxt.py | run_ctxt.py | py | 1,268 | python | en | code | 1 | github-code | 90 |
18072337119 | from math import factorial
h, w, a, b = map(int, input().split())
MOD = 10**9+7
fact = [1]
# 累積乗を作る
for i in range(1, h+w-1):
fact.append(fact[-1] * i % MOD)
# 累積乗の逆元
inv_fact = [pow(fact[-1], MOD-2, MOD)] # x^(-1) = x^(10^9+5) % (10^9+7), フェルマーの小定理
for i in range(h+w-2, 0, -1): # xが最大の場合を求め、後ろ向きに計算していく
inv_fact.append(inv_fact[-1] * i % MOD)
inv = list(reversed(inv_fact)) # 逆順に取得
#print(fact, inv_fact, inv)
def comb(x, y):
return (fact[x] * inv[y] * inv[x-y])
ans = 0
for x in range(b, w):
ans += ((comb((h-a-1 + x), x)) * (comb((a-1 + w-x-1), a-1)) % MOD)
ans %= MOD
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p04046/s184490188.py | s184490188.py | py | 705 | python | ja | code | 0 | github-code | 90 |
16947863161 | """
Count the number of prime numbers less than a non-negative number, n.
"""
class Solution(object):
def countPrimes(self, n):
"""
:type n: int
:rtype: int
"""
if n < 2:
return 0
sieve = [True] * n
sieve[0] = False
sieve[1] = False
for i in range(2, n):
if sieve[i]:
for j in range(i * i, n, i):
sieve[j] = False
count = 0
i = 0
while i < n:
if sieve[i]:
count = count + 1
i += 1
return count
A = 10
s = Solution()
print(s.countPrimes(A))
| iamsuman/algorithms | iv/Leetcode/easy/204_count_primes.py | 204_count_primes.py | py | 653 | python | en | code | 2 | github-code | 90 |
35480287371 | def month_name(month, language):
eng_months = { # можно было просто через список и искать по индексу
'1': 'january',
'2': 'february',
'3': 'march',
'4': 'april',
'5': 'may',
'6': 'june',
'7': 'july',
'8': 'august',
'9': 'september',
'10': 'october',
'11': 'november',
'12': 'december'
}
rus_months = {
'1': 'январь',
'2': 'февраль',
'3': 'март',
'4': 'апрель',
'5': 'май',
'6': 'июнь',
'7': 'июль',
'8': 'август',
'9': 'сентябрь',
'10': 'октябрь',
'11': 'ноябрь',
'12': 'декабрь'
}
if 'en' in language:
return eng_months.get(month, 'There is no month with this number.')
elif 'ru' in language:
return rus_months.get(month, 'There is no month with this number.')
print(month_name('8', 'english')) # looking for 8-th month in english
| VladaLukovskaya/Python | lesson19_return_in_functions/month_name.py | month_name.py | py | 1,078 | python | ru | code | 0 | github-code | 90 |
830381150 | import requests
import send_email
topic = 'tesla'
# Define API key and url of info from NewsAPI along with different parameters of API
api_key = "48f8f6d6299f4890a1a651935f6ae891"
url = f"https://newsapi.org/v2/everything?q={topic}&" \
"sortBy=publishedAt&" \
"apiKey=48f8f6d6299f4890a1a651935f6ae891&" \
"language=en"
# Make request
request = requests.get(url)
# Convert Request data into readable python dictionary
content = request.json()
# Assign variables to store the Topic number and the full text of the email
n = 0
full_email = ''
# Get a dictionary with data
for article in content['articles'][:20]:
n = n + 1
text = str(n) + '.' + str(article['title']) + '\n' \
+ str(article['description']) + '\n' \
+ str(article['url'])+ 2 * '\n'
full_email = full_email + text + '\n'
# Send the email using ssl and smptlib
send_email.send_mail(("Subject: Today's news" + '\n' + full_email).encode("utf-8"), reciever="nageee@hotmail.com")
| Odinroast/App5-simplewebapi- | main.py | main.py | py | 994 | python | en | code | 0 | github-code | 90 |
10024747505 | import numpy as np
import tensorflow as tf
from helpers import *
def submission_per_patch(session, graph, images, img_number, window_size, patch_size=16, stride=16, threshold=0.5):
'''
:param session: Give a tensorflow session to be run on.
:param graph: Give a default graph
:param images: test images
:param img_number: number of test images to be predicted
:param window_size: window size (be consistent with the graph)
:param patch_size: patch size
:param stride: stride
:param threshold: threshold at which the prediction is put to 1 (0.5 since balanced)
:return: yield prediction per patch of size patch, per image
'''
padding = (window_size - patch_size) // 2
# Access feed_dict and output
X = graph.get_tensor_by_name("X:0")
p = graph.get_tensor_by_name("p:0")
op_to_restore = graph.get_tensor_by_name("out:0")
# Processed and crop images
test_images = process(images, gt=False)
test_patches = np.array(img_crop(test_images[img_number - 1], patch_size, patch_size, stride, padding))
# Test augmentation
flip_ud = np.array([np.flipud(test_patches[i]) for i in range(test_patches.shape[0])])
flip_lr = np.array([np.fliplr(test_patches[i]) for i in range(test_patches.shape[0])])
flip_rot90 = np.array([np.rot90(test_patches[i], 1) for i in range(test_patches.shape[0])])
flip_rot180 = np.array([np.rot90(test_patches[i], 2) for i in range(test_patches.shape[0])])
flip_rot270 = np.array([np.rot90(test_patches[i], 3) for i in range(test_patches.shape[0])])
# Run each augmentation
Z = session.run(op_to_restore, feed_dict={X: test_patches, p: 1})
Z_ud = session.run(op_to_restore, feed_dict={X: flip_ud, p: 1})
Z_lr = session.run(op_to_restore, feed_dict={X: flip_lr, p: 1})
Z_rot90 = session.run(op_to_restore, feed_dict={X: flip_rot90, p: 1})
Z_rot180 = session.run(op_to_restore, feed_dict={X: flip_rot180, p: 1})
Z_rot270 = session.run(op_to_restore, feed_dict={X: flip_rot270, p: 1})
# Raw predictions
pred_fold = np.array([sigmoid(Z[i]) for i in range(test_patches.shape[0])])
pred_fold_ud = np.array([sigmoid(Z_ud[i]) for i in range(test_patches.shape[0])])
pred_fold_lr = np.array([sigmoid(Z_lr[i]) for i in range(test_patches.shape[0])])
pred_fold_rot90 = np.array([sigmoid(Z_rot90[i]) for i in range(test_patches.shape[0])])
pred_fold_rot180 = np.array([sigmoid(Z_rot180[i]) for i in range(test_patches.shape[0])])
pred_fold_rot270 = np.array([sigmoid(Z_rot270[i]) for i in range(test_patches.shape[0])])
# Average predictions to 0-1 by thresholding
pred_mean = (pred_fold + pred_fold_ud + pred_fold_lr + pred_fold_rot90 + pred_fold_rot180 + pred_fold_rot270) / 6
prediction = (pred_mean > threshold) * 1
nb = 0
print("Processing " + str(img_number - 1))
for j in range(0, images[img_number - 1].shape[1], patch_size):
for i in range(0, images[img_number - 1].shape[0], patch_size):
label = int(prediction[nb])
nb += 1
yield ("{:03d}_{}_{},{}".format(img_number, j, i, label))
def tf_restore_predict(filename_saver, images, window_size, to_submit_filename=None, threshold=0.5):
'''
Provide a submission csv
:param filename_saver: path for session access
:param images: test images
:param window_size: window size
:param to_submit_filename: csv filename for submission
:param threshold: threshold to predict 1
:return: submission.csv
'''
# Restore meta graph and weights
sess = tf.Session()
saver = tf.train.import_meta_graph('../models/' + filename_saver + '.ckpt.meta')
saver.restore(sess, '../models/' + filename_saver + '.ckpt')
graph = tf.get_default_graph()
# File submissions sample
with open(to_submit_filename, 'w') as f:
f.write('id,prediction\n')
for nb_test in range(1, TEST_SIZE + 1, 1):
print(nb_test)
f.writelines('{}\n'.format(s) for s in submission_per_patch(sess, graph, images, nb_test, window_size, threshold=threshold))
| KennethThNg/RoadSegmentation | restore_submission.py | restore_submission.py | py | 4,182 | python | en | code | 0 | github-code | 90 |
26317636194 | import logging
from semantic_kernel.orchestration.sk_context import SKContext
from semantic_kernel.skill_definition import sk_function, sk_function_context_parameter
from gptui.gptui_kernel.manager import auto_init_params
mylogger = logging.getLogger("mylogger")
class WriteFile:
def __init__(self, manager):
self.manager = manager
@auto_init_params("0")
@classmethod
def get_init_params(cls, manager) -> tuple:
return (manager,)
@sk_function(
description="Write a file.",
name="write_file",
)
@sk_function_context_parameter(
name="file_name",
description="The name of the file, including the extension.",
)
@sk_function_context_parameter(
name="file_content",
description="The content to be written into the file."
)
def write_file(self, context: SKContext) -> str:
file_name = context["file_name"]
file_content = context["file_content"]
self.manager.client.common_resources["temp_files_from_tube"] = {file_name: file_content}
return ""
| happyapplehorse/gptui | tests/unit_tests/gptui_kernel/plugins_test_data/FileIO.py | FileIO.py | py | 1,090 | python | en | code | 3 | github-code | 90 |
70591359658 | from index import db, bcrypt
import networkx as nx
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from collections import Counter
import operator
# class User(db.Model):
# id = db.Column(db.Integer(), primary_key=True)
# email = db.Column(db.String(255), unique=True)
# password = db.Column(db.String(255))
#
# def __init__(self, email, password):
# self.email = email
# self.active = True
# self.password = User.hashed_password(password)
#
# @staticmethod
# def hashed_password(password):
# return bcrypt.generate_password_hash(password)
#
# @staticmethod
# def get_user_with_email_and_password(email, password):
# user = User.query.filter_by(email=email).first()
# if user and bcrypt.check_password_hash(user.password, password):
# return user
# else:
# return None
class Network1:
# siec wygenerowana
g = None
size = 10
distance = 'random'
k = 1
def getProbabilityOfRanking(self, size):
prob = [1 / (sum([1 / float(k) for k in range(1, size)]) * i) for i in range(1, size)]
return prob
def generateDistanceMatrix(self, size, distance):
distanceMatrix = []
if (distance == 'random'):
distanceMatrix = np.random.uniform(low=0.0, high=1.0, size=(size, size))
elif(distance == 'degree'):
distanceMatrix = np.ones(shape=(size,size))
return distanceMatrix
def generateRankings(self, distanceMatrix):
np.fill_diagonal(distanceMatrix, 2)
rankMatrix = np.argsort(-distanceMatrix)
return rankMatrix[:, 1:]
def sampleFromRankMat(self, rankMat, k, size):
res = np.zeros((size, k))
for i in range(rankMat.shape[0]):
res[i, :] = list(np.random.choice(rankMat[i], size=k, replace=False, p=self.getProbabilityOfRanking(size)))
return res
def generateEdges(self, sampledRank, size):
nodeIds = range(size)
edges = [[i[0], int(i[1][j])] for i in zip(nodeIds, sampledRank) for j in range(len(i[1]))]
return edges
def __init__(self, size=50, distance='random', k=2):
self.distance = distance
self.size = size
self.k = k
self.g = nx.empty_graph(self.size, create_using=nx.Graph())
self.g.add_edges_from(self.generateEdges(
self.sampleFromRankMat(self.generateRankings(self.generateDistanceMatrix(size, distance)), k,
size), size))
def getNetwork(self):
return nx.node_link_data(self.g, {'link': 'edges', 'source': 'from', 'target': 'to'})
# def getNetwork(self):
# return nx.node_link_data(self.g, {'link': 'edges', 'source': 'from', 'target': 'to'})
def getDegreeDistr(self):
deg = nx.degree(self.g)
deg_val = [i[1] for i in deg]
a = self.discrete_histogram(deg_val)
a = list(map(lambda x: {'x': x[0], 'y': x[1]}, a.items()))
print(a)
return a
def getShortestPathDist(self):
sp = nx.shortest_path(self.g)
lens = [len(j) for i in sp for j in sp[i].values()]
a = self.discrete_histogram(lens)
a = list(map(lambda x: {'x': x[0], 'y': x[1]}, a.items()))
# a = np.histogram(lens, bins=np.max(lens), range=(0, np.max(lens)))
# return list(map(lambda x: {'x': x[0], 'y': x[1]}, list(zip(list(a[1]), [0] + list(a[0])))))
return a
def getClusteringCoeffDist(self):
clust = nx.clustering(self.g.to_undirected())
clust_val = list(clust.values())
a = np.histogram(clust_val, bins=10) # 'doane')
return list(
map(lambda x: {'x': float("{0:.2f}".format(x[0])), 'y': x[1]}, list(zip(list(a[1]), [0] + list(a[0])))))
def discrete_histogram(self, data):
hist = Counter(data)
hist = dict(hist)
# for i in range(min(data)):
# hist.update({i: 0})
for i in range(max(data)):
if(i not in hist):
hist.update({i: 0})
return hist
class Network:
g = None
size = 10
distance = 'random'
k = 1
attr1 = None
attr2 = None
def getProbabilityOfRanking(self, size):
prob = [1 / (sum([1 / float(k) for k in range(1, size)]) * i) for i in range(1, size)]
return prob
def generateDistanceMatrix(self, size, distance):
distanceMatrix = []
if (distance == 'random'):
distanceMatrix = np.random.uniform(low=0.0, high=1.0, size=(size, size))
elif (distance == 'degree'):
distanceMatrix = np.ones(shape=(size, size))
return distanceMatrix
def __init__(self, size=50, distance='random', k=2):
self.distance = distance
self.size = size
self.k = k
self.g = nx.empty_graph(self.size, create_using=nx.DiGraph())
# self.g.add_edges_from(self.generateEdges(
# self.sampleFromRankMat(self.generateRankings(self.generateDistanceMatrix(size, distance)), k,
# size), size))
self.attr1 = np.random.randint(0, 255, size)
self.attr2 = np.random.randint(0, 255, size)
# self.attr3 = np.random.randint(0, 255, size)
self.generateIterativelyEdges()
def getNetwork(self):
return nx.node_link_data(self.g, {'link': 'edges', 'source': 'from', 'target': 'to'})
# def getNetwork(self):
# return nx.node_link_data(self.g, {'link': 'edges', 'source': 'from', 'target': 'to'})
def getDegreeDistr(self):
deg = nx.degree(self.g)
deg_val = [i[1] for i in deg]
a = self.discrete_histogram(deg_val)
a = list(map(lambda x: {'x': x[0], 'y': x[1]}, a.items()))
return a
def getShortestPathDist(self):
sp = nx.shortest_path(self.g)
lens = [len(j) for i in sp for j in sp[i].values()]
a = self.discrete_histogram(lens)
a = list(map(lambda x: {'x': x[0], 'y': x[1]}, a.items()))
# a = np.histogram(lens, bins=np.max(lens), range=(0, np.max(lens)))
# return list(map(lambda x: {'x': x[0], 'y': x[1]}, list(zip(list(a[1]), [0] + list(a[0])))))
return a
def getClusteringCoeffDist(self):
clust = nx.clustering(self.g.to_undirected())
clust_val = list(clust.values())
a = np.histogram(clust_val, bins=10) # 'doane')
return list(
map(lambda x: {'x': float("{0:.2f}".format(x[0])), 'y': x[1]}, list(zip(list(a[1]), [0] + list(a[0])))))
def discrete_histogram(self, data):
hist = Counter(data)
hist = dict(hist)
# for i in range(min(data)):
# hist.update({i: 0})
for i in range(max(data)):
if (i not in hist):
hist.update({i: 0})
return hist
def calculateDistance(self, distance, node_i, node_j):
result = None
if (distance == 'random'):
result = np.random.uniform(low=0.0, high=1.0, size=1)[0]
elif (distance == 'degree'):
# print(self.g.degree(node_j))
result = 1/(self.g.degree(node_j)+0.001)
elif (distance == 'betweenness'):
bet=(nx.betweenness_centrality(self.g, k=int(len(list(self.g.nodes)) / 10)))
result = 1/(bet[node_j]+0.001)
#TODO zrobic optymalizacje ze zwracane jest k wartosci
elif (distance == 'closseness'):
result=1/(nx.closeness_centrality(self.g,u=node_j)+0.001)
elif (distance == 'page_rank'):
pag=nx.pagerank_scipy(self.g)
result = 1 / (pag[node_j] + 0.001)
elif (distance == 'cosine'):
result = 1-cosine_similarity(np.array([self.attr1[node_i],self.attr2[node_i]]).reshape(1, -1),
np.array([self.attr1[node_j],self.attr2[node_j]]).reshape(1, -1));
# print(self.attr1[node_i], self.attr1[node_j],result)
elif (distance == 'euclidean_1'):
result = np.abs(self.attr1[node_i]-self.attr1[node_j]);
# print(self.attr1[node_i], self.attr1[node_j],result)
elif (distance == 'euclidean_2'):
result = np.sqrt(np.power(self.attr1[node_i]-self.attr1[node_j], 2)+np.power(self.attr2[node_i]-self.attr2[node_j], 2));
# print(self.attr1[node_i], self.attr1[node_j], result)
elif (distance == 'aggregate'):
w1=0.7
w2=0.3
result = w1*np.abs(self.attr1[node_i] - self.attr1[node_j])+w2*np.abs(self.attr2[node_i] - self.attr2[node_j]);
# print(self.attr1[node_i], self.attr1[node_j], result)
return result
def getRandomNode(self):
return np.random.choice(list(self.g.nodes), size=1)[0]
def calculateDistances(self, distance, currentNode):
distances = {}
for i in self.g.nodes:
# print("edge in network", (currentNode, i) in self.g.edges)
if (i == currentNode or (currentNode, i) in self.g.edges):
# print("not addind distance",i,currentNode)
continue
else:
distances[i] = self.calculateDistance(distance, currentNode, i)
return {currentNode: distances}
def generateRanking(self, distances):
x = distances.values()[0]
sorted_x = sorted(x.items(), key=operator.itemgetter(1))
return sorted_x
def sampleFromRankMat(self, rankingDistance):
# rankMat2 = rankMat[wichInTurn + 1:]
rankingDistance = [i[0] for i in rankingDistance]
# print(len(rankMat2))
# print(self.getProbabilityOfRanking(len(rankMat2)))
res = np.random.choice(rankingDistance, size=1, replace=False, p=self.getProbabilityOfRanking(len(rankingDistance) + 1))[0]
return res
def generateIterativelyEdges(self):
for i in range(self.size):
for j in range(self.k):
if (len(list(self.g.edges)) < 1):
self.g.add_edge(i, self.getRandomNode())
else:
# print("i: "+str(i)+" j: "+str(j))
dist = self.calculateDistances(self.distance, i)
# print("distances",dist)
rank = self.generateRanking(dist)
# print("ranking",rank)
edgesToAdd = self.sampleFromRankMat(rank)
self.g.add_edge(i, edgesToAdd)
# print("edges in net",list(self.g.edges))
# struktura dla trzymania odleglosci i rankingow
# {1: {2: 0.34, 3: 0.11}, 2: {3: 0.12}, ...}
| kajdanowicz/priorityAttachmentWeb | application/models.py | models.py | py | 10,679 | python | en | code | 0 | github-code | 90 |
20732862712 | from record import Record
# work with file to save records
class fileWork:
def __init__(self, name):
self.name = name
self.setRecord()
# cancel current records and write to the file
def zeroFile(self):
with open(self.name, "w") as inFile:
for i in range(0, 3):
inFile.write("level - " + str(i + 1) + ": 0\n")
# update current records
def updateFile(self):
with open(self.name, "w") as inFile:
inFile.write("level - 1: " + str(self.record.record_lv1) + " \n")
inFile.write("level - 2: " + str(self.record.record_lv2) + " \n")
inFile.write("level - 3: " + str(self.record.record_lv3) + " \n")
# show file content
def showFile(self):
with open(self.name, "r") as outFile:
line = outFile.readlines()
print(line)
# get records from the file
def setRecord(self):
with open(self.name, "r") as outFile:
line = outFile.readline()
list = line.split()
l1 = int(list[-1])
line = outFile.readline()
list = line.split()
l2 = int(list[-1])
line = outFile.readline()
list = line.split()
l3 = int(list[-1])
self.record = Record(l1, l2, l3)
| VladTkach/Saper_Qt | fileWork.py | fileWork.py | py | 1,319 | python | en | code | 0 | github-code | 90 |
72683018858 | # 4 folds of OOF and a submission using Catboost by Yandex
import numpy as np
import pandas as pd
from scipy import sparse
from sklearn.model_selection import KFold
import csv
from catboost import CatBoostRegressor
pth = '../'
out_pth = '../OOF/Catboost6000/'
with open(pth + 'sparse-features/fnames.csv', 'r') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
fnames = row
deal_prob = pd.read_csv(pth + 'sparse-features/train_item_ids_target.csv')
train_arr = sparse.load_npz(pth + 'sparse-features/train_full_sparse.npz')
train_arr = train_arr[:, :244].todense() # the other features are the large TF-IDF part, which doesn't fit in RAM
train_arr = np.array(train_arr)
Y = deal_prob['deal_probability'].values
kf = KFold(n_splits=5, random_state=14)
n_folds_max = 4
depth = 7
lr = 0.03
onehotmax = 3
metric_period = 200
colsample_bylevel = 0.4
niter = 6000
counter = 0
for train_index, test_index in kf.split(train_arr):
if counter < n_folds_max:
counter += 1
reg_CB = CatBoostRegressor(learning_rate=lr, iterations=niter,
one_hot_max_size=onehotmax,
depth=depth,
colsample_bylevel=0.4,
metric_period=metric_period)
print('Training CB, fold', counter)
reg_CB.fit(train_arr[train_index, :], Y[train_index],
cat_features=list(range(12)))
print('Predicting')
pred_CB = reg_CB.predict(train_arr[test_index, :])
item_ids = deal_prob['item_id'][test_index]
pd_CB = {'item_id': item_ids, 'deal_probability': pred_CB}
pd_CB = pd.DataFrame(pd_CB)
pd_CB.to_csv(out_pth + 'CB_fold_' + str(counter) + '.csv', index=False)
reg_CB = CatBoostRegressor(learning_rate=lr, iterations=niter,
one_hot_max_size=onehotmax,
depth=depth,
colsample_bylevel=0.4,
metric_period=metric_period)
print('Training on full train set')
reg_CB.fit(train_arr, Y)
del train_arr
test_arr = sparse.load_npz(pth + 'sparse-features/test_full_sparse.npz')
test_item_ids = pd.read_csv(pth + 'sparse-features/test_item_ids.csv')
test_arr = test_arr[:, :244].todense()
test_arr = np.array(test_arr)
output = reg_CB.predict(test_arr)
output[output<0] = 0
output[output>1] = 1
sub = pd.DataFrame({'item_id': test_item_ids['item_id'], 'deal_probability': output})
sub.to_csv('submissions/CB6k.csv', index=False, header=True)
| knstmrd/avitodemandprediction | catboost_CV.py | catboost_CV.py | py | 2,443 | python | en | code | 0 | github-code | 90 |
29462929523 | class aerospace: #creating a class that defines features all aerospace vehicles may have.
vehicle_name = ""
vehicle_model = ""
vehicle_year = ""
engine_thrust = ""
exit_velocity = ""
reusablity = ""
def aerospace1(self):
make = input("please enter make of the aerospace vehicle\n>>> ")
model = input("please enter model of the aerospace vehicle\n>>> ")
year = input("please enter aersopace vehicles manufacturing year\n>>> ")
thrust = input("please enter rocket thrust\n>>> ")
velocity = input("please enter exit velocity\n>>> ")
reusability = input("please enter can or cannot\n>>> ").lower()
print(f"The {make} {model} was manufactured in the year {year}. It has a maximum engine thrust of {thrust} and exits the atmosphere at {velocity}. It {reusability} be reused.")
class planes(aerospace): #creating a child class that includes all the attributes of its parent class, aerospace, but also traits that are unique to planes.
vehicle_name = ""
vehicle_model = ""
vehicle_year = ""
wingspan = ""
engine = ""
maximum_altitude = ""
def aerospace1(self):
make = input("please enter make of the plane\n>>> ")
model = input("please enter model of the plane\n>>> ")
year = int(input("please enter plane's manufacturing year\n>>> "))
wingspan = int(input("please enter wingspan in feet!\n>>> "))
engine = input("please enter engine type\n>>> ")
maximum_altitude = int(input("please enter maximum flight altitude\n>>> "))
print(f"The {make} {model} was manufactured in the year {year}. It has a wingspan of {wingspan} feet and boasts a powerful {engine}. It can reach a maximum height of {maximum_altitude}")
class helicopters(aerospace): #creating a child class that includes all the attributes of its parent class, aerospace, but also traits that are unique to planes.
vehicle_name = ""
vehicle_model = ""
vehicle_year = ""
rotor_radius = ""
horsepower = ""
def aerospace1(self):
make = input("please enter make of the helicopter\n>>> ")
model = input("please enter model of the helicopter\n>>> ")
year = input("please enter helicopter's year\n>>> ")
radius = input("please enter blade radius of the vehicle\n>>> ")
horsepower = int(input("please enter can or cannot\n>>> "))
print(f"The {make} {model} was manufactured in the year {year}. It has a rotor radius of {radius}, which is powered by a {horsepower} horsepower engine!.")
if __name__ == "__main__":
rocket = aerospace()
print(rocket.aerospace1())
plane = planes()
print(plane.aerospace1())
helicopter = helicopters()
print(helicopter.aerospace1())
| taekionic/Python_Projects | Learning Files/parent-child classes.py | parent-child classes.py | py | 2,787 | python | en | code | 0 | github-code | 90 |
13662219152 | import pytest
from random_name_generator.constants import Descent, Sex
@pytest.fixture
def mock_first_names(monkeypatch):
first_names = {
Descent.ENGLISH: {
Sex.MALE: [
'John',
'Joseph'
],
Sex.FEMALE: [
'Ashley'
],
Sex.UNISEX: [
'Alex'
]
},
Descent.ITALIAN: {
Sex.MALE: [
'Enzo',
'Luca'
],
Sex.FEMALE: [
'Gianna',
],
Sex.UNISEX: [
'Cosme'
]
},
Descent.FRENCH: {},
Descent.GERMAN: {
Sex.MALE: []
},
Descent.RUSSIAN: {
Sex.MALE: [
'Viktor'
],
Sex.FEMALE: [
'Iryna'
],
}
}
monkeypatch.setattr(
'random_name_generator.selectors.FIRST_NAMES',
first_names
)
monkeypatch.setattr(
'random_name_generator.constants.FIRST_NAMES',
first_names
)
return first_names
@pytest.fixture
def mock_last_names(monkeypatch):
last_names = {
Descent.ENGLISH: [
'Abramson',
'Johnson'
],
Descent.ITALIAN: [
'Gotti'
],
Descent.RUSSIAN: {
Sex.MALE: [
'Ivanov',
'Petrov'
],
Sex.FEMALE: [
'Ivanova',
'Petrova'
]
}
}
monkeypatch.setattr(
'random_name_generator.selectors.LAST_NAMES',
last_names
)
monkeypatch.setattr(
'random_name_generator.constants.LAST_NAMES',
last_names
)
return last_names
@pytest.fixture
def mock_random_shuffle(monkeypatch):
def shuffle(*args, **kwargs):
pass
monkeypatch.setattr('random.shuffle', shuffle)
return shuffle
| diachkow/python-random-name-generator | tests/conftest.py | conftest.py | py | 2,000 | python | en | code | 1 | github-code | 90 |
28230799233 | '''
Модуль для работы с внутриигровыми объектами.
К оным относятся игрок и мобы.
'''
import json
def get_object(name):
'''
Возвращает игровой объект типа name : string в виде словаря:
'name' : string -- название игрового объекта.
'max hp' : int -- максимальный запас здоровья.
'hp' : int -- текущий запас здоровья.
'position' : int -- позиция объекта на игровой карте (по оси x).
'''
fin = open('./data/' + name + ".json")
obj = json.load(fin)
obj['position'] = 20 #ИСПРАВИТЬ!!
obj['hp'] = obj['max hp']
obj['name'] = name
fin.close()
return obj
| Mirovengil/RadZombie | class_object.py | class_object.py | py | 833 | python | ru | code | 0 | github-code | 90 |
340604860 | """
問題URL:
"""
import math
import sys
from collections import deque
from typing import Union, List
INF = 2 * 10 ** 14
CONST = 998244353
global g
global yen_dist
global snk_dist
global q
class Edge(object):
def __init__(self, to, yen, snk):
self.to = to
self.yen = yen
self.snk = snk
def __lt__(self, other):
if isinstance(other, Edge):
return
return self.to < other.to
def bfs(sp, g, dist, q: deque, yen=True):
dist[sp] = 0
q.append(sp)
while len(q) > 0:
v = q.popleft()
for nv in g[v]:
if yen:
if dist[nv.to] > dist[v] + nv.yen:
dist[nv.to] = dist[v] + nv.yen
q.append(nv.to)
else:
if dist[nv.to] > dist[v] + nv.snk:
dist[nv.to] = dist[v] + nv.snk
q.append(nv.to)
return dist
def main():
N, M, s, t = read_nums()
g = [[] for _ in range(N)]
for _ in range(M):
u, v, a, b = read_nums()
g[u - 1].append(Edge(v - 1, a, b))
g[v - 1].append(Edge(u - 1, a, b))
yen_dist = [INF] * N
snk_dist = [INF] * N
q = deque()
yen_dist = bfs(s - 1, g, yen_dist, q)
q = deque()
snk_dist = bfs(t - 1, g, snk_dist, q, yen=False)
total = list()
for y, s in zip(yen_dist, snk_dist):
total.append(y + s)
ans = []
tmp = INF
init = int(1e15)
for t in reversed(total):
tmp = min(tmp, t)
ans.append(init - tmp)
for a in ans.__reversed__():
print(a)
def split_without_empty(strs: str) -> List[str]:
"""
文字列を分割してlistに格納し返す
Args:
strs: 複数の文字
Returns: listに複数の文字列を格納されたもの
Examples: foo boo -> [foo, boo]
"""
return strs.split(' ')
def split2int(strs: List[str]) -> List[int]:
"""
文字列型のlistを数値型のlistに変換する
Args:
strs: 数値が文字列型のlist
Returns: 数値型のlist
Examples: ['100', '200'] -> [100, 200]
"""
return [int(n) for n in strs]
def split2str(ints: List[int]) -> List[str]:
    """
    Convert a list of ints into a list of their decimal strings.

    Args:
        ints: list of ints
    Returns: list of decimal strings
    Examples: [100, 200] -> ['100', '200']
    """
    return list(map(str, ints))
def s2i(s: str) -> int:
    # Parse a decimal string into an int.
    return int(s)
def i2s(i: int) -> str:
    # Render an int as its decimal string.
    return str(i)
def b2i(b: bool) -> int:
    # Map False/True to 0/1.
    return int(b)
def i2b(i: int) -> bool:
    # Map 0 to False, any non-zero int to True.
    return bool(i)
def read_str() -> str:
    """
    Read one line from stdin without the trailing newline.

    Note: rstrip() also removes any trailing spaces/tabs, not just '\\n'.
    e.g.)
        foo
    """
    return sys.stdin.readline().rstrip()
def read_strs() -> List[str]:
    """
    Read one stdin line and split it into whitespace-separated tokens.

    Returns: List[str]
    Examples:
        'foo boo' -> ['foo', 'boo']
    """
    return split_without_empty(read_str())
def read_num() -> Union[int, float]:
    """
    Read a single integer from one stdin line.

    NOTE(review): annotated Union[int, float] but s2i() always returns int —
    presumably float support was planned; confirm before relying on floats.
    e.g.)
        10
    """
    return s2i(read_str())
def read_nums() -> Union[List[int], List[float]]:
    """
    Read one stdin line of space-separated integers into a list.

    e.g.)
        '10 20' -> [10, 20]
    """
    return split2int(read_strs())
def aCb(a, b: int) -> int:
    """
    Binomial coefficient C(a, b): the number of ways to choose b items
    out of a.

    Args:
        a (int): total number of items
        b (int): number of items chosen
    Returns:
        C(a, b) as an exact int (0 when b < 0 or b > a)
    """
    # The previous hand-rolled product divided by zero on its very first
    # iteration (i started at 0) and used a wrong recurrence besides;
    # math.comb computes the coefficient exactly.
    if b < 0 or b > a:
        return 0
    return math.comb(a, b)
def get_distance(p1, p2: List[int]) -> Union[int, float]:
    """
    Euclidean distance between two points of equal dimension.

    Args:
        p1(List[int]): coordinates of the first point
        p2(List[int]): coordinates of the second point
    Returns:
        the straight-line distance
    """
    squared_sum = sum((x1 - x2) ** 2 for x1, x2 in zip(p1, p2))
    return math.sqrt(squared_sum)
if __name__ == "__main__":
    # Entry point when run as a script: reads the problem input from stdin.
    main()
| ktaroabobon/AtCoder | 練習問題/graph/Dijkstra/soundhound_2018summer_D.py | soundhound_2018summer_D.py | py | 3,755 | python | en | code | 0 | github-code | 90 |
42642339817 | import cv2
import os
import argparse
import numpy as np
from detection.core.detector_factory import get_detector
from detection.tensorpacks.viz import draw_final_outputs
def pick_best_faces(detection_results, num):
    """Select the ``num`` largest detected faces (by bounding-box area).

    Args:
        detection_results: detection objects exposing ``.box`` as
            (xmin, ymin, xmax, ymax).
        num: how many faces to keep; if <= 0 or >= len(detection_results),
            all detections are returned (matching the CLI help for
            --max_num_faces, which the previous code did not honor:
            np.argpartition raised for num > len and misbehaved for num < 0).

    Returns:
        list of the selected detection objects (order unspecified).
    """
    n = len(detection_results)
    if n == 0:
        return []
    if num <= 0 or num >= n:
        return list(detection_results)
    # Trivial heuristic: just pick the largest faces, which filters out
    # false positives such as faces printed on clothes.
    bbox_areas = np.array(
        [(r.box[2] - r.box[0]) * (r.box[3] - r.box[1]) for r in detection_results]
    )
    best_inds = np.argpartition(bbox_areas, -num)[-num:]
    # Collect selected faces
    return [detection_results[ind] for ind in best_inds]
def write_output(result_map, anno, skiplist):
    """Write detections to the annotation file; record face-less images.

    Annotation format per image (WIDER-face style):
        <image name>
        <number of faces>
        <xmin ymin xmax ymax>   -- one line per face, one decimal place

    Image names with no detections are appended to ``skiplist`` instead.
    """
    n_skipped = 0
    for img_name, results in result_map.items():
        if not results:
            print("Image {} doesn't have any face detected, ignored.".format(img_name))
            n_skipped += 1
            skiplist.write(img_name + '\n')
            continue
        lines = [img_name + '\n', str(len(results)) + '\n']
        lines.extend('{:.1f} {:.1f} {:.1f} {:.1f}\n'.format(*r.box) for r in results)
        for line in lines:
            anno.write(line)
    print("{} images are skipped due to no face detected.".format(n_skipped))
def generate_face_bbox(args):
    """Run the face detector over a dataset and write bounding-box ground truth.

    mode 'all': walk args.image_dir recursively and process every image.
    mode 'skip': re-process only the images previously listed in the skip list
    and append their results to the existing annotation file.
    """
    n_visualize = args.n_visualize
    face_detector = get_detector(args.face_model, args.face_ckpt, args.face_config)
    # Input/output folder and file paths
    image_dir = os.path.join(args.root, args.image_dir)
    outdir = os.path.join(args.root, args.out_dir)
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    output_file = os.path.join(args.root, args.out_dir, args.out_file)
    skiplist = os.path.join(args.root, args.out_dir, args.skip_list)
    def process_image(detector, img_path):
        # Detect faces
        image_bgr = cv2.imread(img_path)
        face_results = detector.detect(image_bgr, rgb=False)
        # Select best face to ignore false positives, such as faces on the clothes
        best_faces = pick_best_faces(face_results, args.max_num_faces)
        # Visualize detected faces for quick verification
        nonlocal n_visualize
        if n_visualize > 0:
            image_bgr = draw_final_outputs(image_bgr, best_faces, show_ids=face_detector.get_class_ids())
            cv2.imshow('face detection', image_bgr)
            cv2.waitKey(0)
            n_visualize -= 1
        return best_faces
    result_map = {}
    if args.mode == 'all':
        # Walk into every subdirectory of image directory and process every image files, preserving its relative path
        for root, dirs, files in os.walk(image_dir):
            for filename in files:
                if not filename.endswith(tuple(args.image_ext.split("|"))): # Ignore non image files
                    continue
                relative_path = os.path.join(root.replace(image_dir, ""), filename)[1:]
                print("Processing image {}".format(relative_path))
                result_map[relative_path] = process_image(face_detector, os.path.join(root, filename))
    elif args.mode == 'skip':
        # When dealing with skipped images, just read filename from skiplist file and process them one by one
        with open(skiplist, 'r') as f:
            for line in f.readlines():
                filename = line.strip()
                print("Processing image {}".format(filename))
                result_map[filename] = process_image(face_detector, os.path.join(image_dir, filename))
    else:
        raise Exception("Non supported mode {}".format(args.mode))
    print("Writing output")
    # The skip list is rewritten from scratch on every run.
    with open(skiplist, 'w') as s:
        if args.mode == 'all':
            with open(output_file, 'w') as o:
                write_output(result_map, o, s)
        elif args.mode == 'skip':
            # Append detection results of previously skipped images to the end of annotation file
            with open(output_file, 'a') as o:
                write_output(result_map, o, s)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run bounding-box generation.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--mode',
        type=str,
        default='all',
        help="all | skip"
    )
    parser.add_argument(
        '--root',
        required=True,
        type=str,
        help="Root directory to dataset")
    parser.add_argument(
        '--image_dir',
        default='Images',
        type=str,
        help="Image directory relative to root directory of dataset"
    )
    parser.add_argument(
        '--image_ext',
        default='jpg|png',
        type=str,
        help="Image extensions to be supported, separated by '|'."
    )
    parser.add_argument(
        '--out_dir',
        default='Annotations',
        type=str,
        help="Output directory relative to root directory of dataset"
    )
    parser.add_argument(
        '--out_file',
        default="bbox_gt.txt",
        type=str,
        help="Filename for output bounding box file"
    )
    parser.add_argument(
        '--skip_list',
        default="skiplist.txt",
        type=str,
        help="List files for skipped images (which means no face are detected)."
    )
    parser.add_argument(
        '--max_num_faces',
        default=1,
        type=int,
        help="Maximum number of faces to output for each image. If 0 or negative value is specified, all detected faces will be output"
    )
    parser.add_argument(
        '--n_visualize',
        default=0,
        type=int,
        help="Number of images to be visualized before going on."
    )
    parser.add_argument(
        '--face_model',
        type=str,
        help='tensorpack | s3fd | tf-model')
    parser.add_argument(
        '--face_ckpt',
        default='',
        type=str,
        help='Checkpoint of face detection model')
    parser.add_argument(
        '--face_config',
        default='',
        type=str,
        help='Configurations of face detection model',
        nargs='+'
    )
    args = parser.parse_args()
    generate_face_bbox(args)
| houweidong/models | detection/dataset/gen_face_bbox.py | gen_face_bbox.py | py | 6,275 | python | en | code | 0 | github-code | 90 |
32484673860 | from django.conf.urls import patterns, include, url
from group2 import views
# URL routes for the group2 app: student-facing pages plus their admin
# counterparts (profile, registration, school records, courses, drops).
urlpatterns = patterns('',
    url(r'^profile/$', views.profile, name='profile'),
    url(r'^dataStudent/$', views.data_student, name='data_student'),
    url(r'^dataStudentEdit/$', views.data_student_edit, name='data_student_edit'),
    url(r'^getdataStudentEdit/$', views.get_data_student_edit, name='get_data_student_edit'),
    url(r'^search_student/$', views.search_student, name='search_student'),
    url(r'^search_student/profile/(\d+)/$', views.profile_admin, name='profile_admin'),
    url(r'^search_student/regis_result/(\d+)/$', views.regis_result_admin, name='regis_result_admin'),
    url(r'^search_student/viyanipon_admin/(\d+)/$', views.viyanipon_admin, name='viyanipon_admin'),
    url(r'^search_student/admin_look_school_record/(\d+)/$', views.admin_look_school_record, name='admin_look_school_record'),
    url(r'^admin_look_school_record/$', views.admin_look_school_record, name='admin_look_school_record'),
    url(r'^registeration/$', views.registeration, name='registeration'),
    url(r'^regisResult/$', views.regis_result, name='regis_result'),
    url(r'^schoolRecord/$', views.school_record, name='school_record'),
    url(r'^school_record_admin/(\d+)/$', views.school_record_admin, name='school_record_admin'),
    url(r'^school_record_admin/$', views.school_record_admin_edit, name='school_record_admin_edit'),
    url(r'^search_course/$', views.search_course, name='search_course'),
    url(r'^Find_course/$', views.Find_course, name='Find_course'),
    url(r'^Find_course_admin/$', views.Find_course_admin, name='Find_course_admin'),
    url(r'^Edit_course_admin/$', views.Edit_course_admin, name='Edit_course_admin'),
    url(r'^Admin_check_register/$', views.Admin_check_register, name='Admin_check_register'),
    url(r'^Add_course_admin/$', views.Add_course_admin, name='Add_course_admin'),
    url(r'^Admin_drop/$', views.Admin_drop, name='Admin_drop'),
    url(r'^drop/$', views.drop, name='drop'),
    url(r'^drop_admin/$', views.drop_admin, name='drop_admin'),
    url(r'^registeration_admin/$', views.registeration_admin, name='registeration_admin'),
    url(r'^find_registeration_admin/$', views.find_registeration_admin, name='find_registeration_admin'),
    url(r'^Find_Admin_check_register/$', views.Find_Admin_check_register, name='Find_Admin_check_register'),
    url(r'^Update_check_admin/$', views.Update_check_admin, name='Update_check_admin'),
    url(r'^Admin_check_drop/$', views.Admin_check_drop, name='Admin_check_drop'),
    url(r'^Find_Admin_check_drop/$', views.Find_Admin_check_drop, name='Find_Admin_check_drop'),
    url(r'^Find_course_drop/$', views.Find_course_drop, name='Find_course_drop'),
    url(r'^Update_check_drop/$', views.Update_check_drop, name='Update_check_drop'),
    url(r'^Find_school_record_admin/$', views.Find_school_record_admin, name='Find_school_record_admin'),
    url(r'^Edit_school_record_admin/$', views.Edit_school_record_admin, name='Edit_school_record_admin'),
    url(r'^Add_register/$', views.Add_register, name='Add_register'),
    url(r'^viyanipon/$', views.viyanipon, name='viyanipon'),
    url(r'^edit_viyanipon/$', views.edit_viyanipon, name='edit_viyanipon'),
    url(r'^Add_edit_viyanipon/$', views.Add_edit_viyanipon, name='Add_edit_viyanipon'),
)
| tachagon/DB_Project | group2/urls.py | urls.py | py | 3,482 | python | en | code | 0 | github-code | 90 |
9051287540 | import mxnet as mx
import numbers
import os
import numpy as np
import torch
import cv2
from torch.utils.data import Dataset
from torchvision import transforms
from s_data.MaskTheFace.augment_mask import AugmentMask
default_trans_list = [
transforms.Resize((112, 112)),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
]
class MXFaceDataset(Dataset):
    """Face-recognition dataset backed by an MXNet RecordIO pack.

    Reads samples from <root_dir>/train.rec (+ train.idx), optionally
    overlays a synthetic face mask on a fraction of images, and applies a
    torchvision transform pipeline.
    """
    def __init__(self, root_dir, transform_list=None):
        """root_dir: directory containing train.rec/train.idx.
        transform_list: optional list of transforms applied after ToPILImage;
        defaults to default_trans_list (resize, flip, normalize)."""
        super(MXFaceDataset, self).__init__()
        self.root_dir = root_dir
        path_imgrec = os.path.join(root_dir, 'train.rec')
        path_imgidx = os.path.join(root_dir, 'train.idx')
        self.imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
        # Record 0 is a header when flag > 0; its label holds the index range.
        s = self.imgrec.read_idx(0)
        header, _ = mx.recordio.unpack(s)
        if header.flag > 0:
            self.header0 = (int(header.label[0]), int(header.label[1]))
            self.imgidx = np.array(range(1, int(header.label[0])))
        else:
            self.imgidx = np.array(list(self.imgrec.keys))
        # Randomly masks ~30% of faces (mask_rate=0.3) for augmentation.
        self.aug_mask = AugmentMask('./s_data/MaskTheFace', mask_rate=0.3)
        trans_list = [transforms.ToPILImage()]
        trans_list.extend(transform_list if transform_list is not None else default_trans_list)
        self.transform = transforms.Compose(trans_list)
    def __getitem__(self, index):
        """Return (transformed image tensor, long identity label)."""
        idx = self.imgidx[index]
        s = self.imgrec.read_idx(idx)
        header, img = mx.recordio.unpack(s)
        label = header.label
        if not isinstance(label, numbers.Number):
            label = label[0]
        label = torch.tensor(label, dtype=torch.long)
        sample = mx.image.imdecode(img).asnumpy()
        sample = self.aug_mask.mask(sample)
        # cv2.namedWindow('results')
        # cv2.imshow('results', cv2.cvtColor(sample, cv2.COLOR_BGR2RGB))
        # cv2.waitKey(1)
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, label
    def __len__(self):
        # Number of usable records (header record excluded).
        return len(self.imgidx)
| iChenning/face_project | s_data/dataset_mx.py | dataset_mx.py | py | 2,087 | python | en | code | 2 | github-code | 90 |
42210199235 | from pointraing import db
from flask import render_template, url_for, redirect, request, flash, abort, send_from_directory, current_app, Blueprint
from flask_login import current_user, login_required
from pointraing.students.forms import StudentActivityForm
from pointraing.models import Attendance, ActivityType, RateActivity, Activity, Subject
from pointraing.main.utils import get_education_student_by_subject, is_student
import uuid
import os
import secrets
students = Blueprint('students', __name__, template_folder='templates', url_prefix='/students')
def check_on_rights():
    # Deny access (HTTP 403) to any authenticated user who is not a student.
    if not is_student():
        abort(403)
@students.route("/education")
@students.route("/education/<string:subject_id>")
@login_required
def education(subject_id=None):
    """Render the student's education page for one subject.

    Without a subject_id, defaults to the first subject that has attendance
    records for the student's group.
    """
    check_on_rights()
    group = current_user.group
    # Subjects that have at least one attendance record for this group.
    subjects = Subject.query\
        .join(Attendance) \
        .filter_by(group_id=group.id)\
        .group_by(Subject.id).limit(100).all()
    if not subject_id and len(subjects) > 0:
        subject_id = subjects[0].id
    if not subject_id:
        # No subjects at all: render the page without statistics.
        return render_template('education.html',
                               active_tab='education',
                               right_group=subjects,
                               group_id=subject_id
                               )
    else:
        attendance_count_user, count_hours, attendance, labs_count_user, labs_count, labs, grade, auto_grade = \
            get_education_student_by_subject(current_user.id, subject_id)
        return render_template('education.html',
                               active_tab='education',
                               right_group=subjects,
                               group_id=subject_id,
                               count_hours=count_hours,
                               attendance_count_user=attendance_count_user,
                               attendance=attendance,
                               labs=labs,
                               labs_count=labs_count,
                               labs_count_user=labs_count_user,
                               grade=grade,
                               auto_grade=auto_grade
                               )
def get_students_activity():
    # All activity categories; used to build the sidebar tab list.
    return ActivityType.query.limit(100).all()
@students.route("/activity")
@students.route("/activity/<string:activity_id>")
@login_required
def activity(activity_id=None):
    """List the current student's submissions for one activity category.

    Defaults to the first category when none is given in the URL.
    """
    check_on_rights()
    activity_list = get_students_activity()
    if not activity_id and len(activity_list) > 0:
        activity_id = activity_list[0].id
    activity_by_user = Activity.query \
        .filter(Activity.user_id == current_user.id) \
        .filter(Activity.type_id == activity_id).limit(100).all()
    return render_template('activity.html',
                           active_tab='activity',
                           right_group=activity_list,
                           group_id=activity_id,
                           activity_by_user=activity_by_user
                           )
def save_file(form_file):
    """Persist the uploaded file under a random 8-byte-hex name.

    Returns the stored filename (random hex + original extension).

    NOTE(review): when the upload has an empty filename this returns a
    redirect response object instead of a filename, and callers store the
    result as Activity.file — confirm that path is intended.
    """
    random_hex = secrets.token_hex(8)
    file = request.files[form_file.name]
    if file.filename == '':
        flash('Нет выбранного файла')
        return redirect(request.url)
    _, f_ext = os.path.splitext(file.filename)
    picture_fn = random_hex + f_ext
    picture_path = os.path.join(current_app.config['UPLOAD_FOLDER'], picture_fn)
    file.save(picture_path)
    return picture_fn
def get_activity_sub_type_choices(activity_id):
    """Build select-field choices for the rates of one activity type.

    Returns a dict:
        'choices'     -- [(rate_id, sub_type_name), ...] for rates that have
                         a sub-type (shown in the form's select box);
        'sub_type_id' -- the rate id without a sub-type, if any (used as the
                         implicit rate when no choice is required).
    """
    choices = []
    sub_type_id = None
    for g in RateActivity.query.filter(RateActivity.activity_type_id == activity_id):
        if g.sub_type:
            choices.append((g.id, g.sub_type.name))
        else:
            sub_type_id = g.id
    return {
        'choices': choices,
        'sub_type_id': sub_type_id
    }
@students.route("/activity/<string:activity_id>/new", methods=['GET', 'POST'])
@login_required
def activity_new(activity_id):
    """Submit a new certificate (diploma) for an activity category."""
    check_on_rights()
    form = StudentActivityForm()
    sub_type_choices = get_activity_sub_type_choices(activity_id)
    choices = sub_type_choices['choices']
    sub_type_id = sub_type_choices['sub_type_id']
    form.sub_type_id.choices = choices
    if form.validate_on_submit():
        picture_file = save_file(form.file)
        db.session.add(Activity(
            id=uuid.uuid4().hex,
            name=form.name.data,
            file=picture_file,
            user_id=current_user.id,
            type_id=activity_id,
            # When the type has a rate without sub-types, it wins over the
            # form's (possibly empty) selection.
            rate_id=sub_type_id if sub_type_id else form.sub_type_id.data
        ))
        db.session.commit()
        flash('Ваша грамота принята на рассмотрение!', 'success')
        return redirect(url_for('students.activity', activity_id=activity_id))
    return render_template('activity_new.html',
                           title='Новая активная деятельность',
                           active_tab='activity',
                           right_group=get_students_activity(),
                           group_id=activity_id,
                           form=form
                           )
@students.route("/activity/<string:activity_id>/doc/<string:doc_id>/update", methods=['GET', 'POST'])
@login_required
def update_activity(activity_id, doc_id):
    """Edit a previously submitted certificate; only its owner may do so."""
    check_on_rights()
    doc = Activity.query.get_or_404(doc_id)
    if doc.user_id != current_user.id:
        abort(403)
    form = StudentActivityForm()
    sub_type_choices = get_activity_sub_type_choices(activity_id)
    choices = sub_type_choices['choices']
    sub_type_id = sub_type_choices['sub_type_id']
    form.sub_type_id.choices = choices
    if form.validate_on_submit():
        doc.name = form.name.data
        # A new file upload always replaces the stored one.
        picture_file = save_file(form.file)
        doc.file = picture_file
        doc.rate_id = sub_type_id if sub_type_id else form.sub_type_id.data
        db.session.commit()
        flash('Ваша грамота обновлена!', 'success')
        return redirect(url_for('students.activity', activity_id=activity_id))
    elif request.method == 'GET':
        form.name.data = doc.name
        form.file.data = send_from_directory(current_app.config['UPLOAD_FOLDER'], doc.file)
        if not sub_type_id:
            form.sub_type_id.data = doc.rate_id
    return render_template('activity_new.html',
                           title='Редактирование активной деятельности',
                           active_tab='activity',
                           right_group=get_students_activity(),
                           group_id=activity_id,
                           form=form
                           )
@students.route("/activity/<string:activity_id>/doc/<string:doc_id>/delete", methods=['GET'])
@login_required
def delete_activity(activity_id, doc_id):
    """Delete a submitted certificate; only its owner may do so."""
    check_on_rights()
    doc = Activity.query.get_or_404(doc_id)
    if doc.user_id != current_user.id:
        abort(403)
    db.session.delete(doc)
    db.session.commit()
    flash('Ваша грамота была удалена!', 'success')
    return redirect(url_for('students.activity', activity_id=activity_id))
| sumluxgirl/flaskProject | pointraing/students/routes.py | routes.py | py | 7,148 | python | en | code | 0 | github-code | 90 |
8059893419 | import telebot
import os
import inspect
import sys
from PIL import Image
import face_recognition
import numpy as np
from io import BytesIO
import random
import sqlite3
bot = telebot.TeleBot("2147259007:AAEVsREyP6oCv5-YCxIyk45DyoTtW-4ui1s", parse_mode=None)
ORDINATA = ['Ars longa, vita brevis.',
'Per aspera ad astra.',
'Usus est optimus magister.',
'Contra malum mortis non est medicamentum in hortis.',
'Alea jacta est.',
'Non ducor, duco.',
'Etiam si omnes, ego non.']
def show_faces(face_locations, image, message):
    """Crop each detected face and send it back to the chat as a photo.

    face_locations: (top, right, bottom, left) boxes from face_recognition.
    image: the source image as a numpy array.
    NOTE(review): the same temp file is reused for every face and the handle
    opened for send_photo is never closed — confirm acceptable.
    """
    for face_location in face_locations:
        top, right, bottom, left = face_location
        face_image = image[top:bottom, left:right]
        pil_image = Image.fromarray(face_image)
        pil_image.save('FindedFace.jpg')
        photo = open('FindedFace.jpg', 'rb')
        bot.send_photo(message.chat.id, photo, 'Лицо с этой фотографии:', message.id)
def get_my_dir(follow_symlinks=True):
    """Return the directory holding this module (or the frozen executable)."""
    frozen = getattr(sys, 'frozen', False)
    # PyInstaller-style bundles expose sys.frozen; fall back to this
    # function's own source file otherwise.
    location = os.path.abspath(sys.executable) if frozen else inspect.getabsfile(get_my_dir)
    if follow_symlinks:
        location = os.path.realpath(location)
    return os.path.dirname(location)
# Ensure the photo/voice storage directories exist next to the script;
# FileExistsError simply means they were created on a previous run.
try:
    os.mkdir(get_my_dir() + os.path.sep + 'photos')
except FileExistsError:
    pass
try:
    os.mkdir(get_my_dir() + os.path.sep + 'voice')
except FileExistsError:
    pass
@bot.message_handler(content_types=['photo'])
def handle_photo(message):
    """Detect faces in an incoming photo; save it server-side if any found,
    and reply with the crops of the detected faces."""
    try:
        # The last element of message.photo is the highest-resolution size.
        file_info = bot.get_file(message.photo[len(message.photo) - 1].file_id)
        downloaded_file = bot.download_file(file_info.file_path)
        bot.reply_to(message, "Сейчас поищу лица на этой фотографии...")
        image = np.array(Image.open(BytesIO(downloaded_file)))
        face_locations = face_recognition.face_locations(image)
        if len(face_locations) > 0:
            src = get_my_dir() + os.path.sep + file_info.file_path
            with open(src, 'wb') as new_file:
                new_file.write(downloaded_file)
            bot.reply_to(message, "Я вижу людей на этой фотографии, сохраню ее на сервере для дальнейших исследований")
            show_faces(face_locations, image, message)
        else:
            bot.reply_to(message, "Кажется, на этой фотографии нет лиц... ну или человеческих лиц")
    except Exception as e:
        bot.reply_to(message, str(e))
@bot.message_handler(content_types=['voice'])
def handle_voice(message):
    """Store an incoming voice message on disk and index it in SQLite,
    replying with a random Latin proverb."""
    try:
        con = sqlite3.connect('chatvoices.db')
        cur = con.cursor()
        # Create the table on first use; OperationalError means it exists.
        try:
            cur.execute('''CREATE TABLE voisemessages
            (uid integer, voicemessage text)''')
        except sqlite3.OperationalError:
            pass
        bot.reply_to(message, "Сохраню это на сервере и попрошу кого-нибудь перевести, ведь я понимаю только латинский "
                              "язык. " + ORDINATA[random.randint(0, len(ORDINATA) - 1)])
        file_info = bot.get_file(message.voice.file_id)
        downloaded_file = bot.download_file(file_info.file_path)
        # NOTE(review): mode=777 is decimal, not octal 0o777 — confirm intent.
        try:
            os.mkdir(get_my_dir() + os.path.sep + 'voice' + os.path.sep + str(message.chat.id), mode=777)
        except FileExistsError:
            pass
        # NOTE(review): f-string SQL interpolation; chat.id is numeric here,
        # but parameterized queries would be safer.
        cur.execute(f'SELECT * FROM voisemessages WHERE uid={message.chat.id}')
        results = cur.fetchall()
        src = get_my_dir() + os.path.sep + 'voice' + os.path.sep + str(message.chat.id) + os.path.sep + f'audio_message_{len(results)}.wav '
        with open(src, 'wb') as new_file:
            new_file.write(downloaded_file)
        cur.execute(
            f"INSERT INTO voisemessages (uid, voicemessage) VALUES ({message.chat.id}, 'audio_message_{len(results)}.wav ')")
        con.commit()
    except Exception as e:
        bot.reply_to(message, str(e))
bot.infinity_polling()
| yamaha3212/TelegramBot | BorschevickBot.py | BorschevickBot.py | py | 4,198 | python | en | code | 0 | github-code | 90 |
17932634580 | import torch
from torch.optim import Optimizer
#Custom Adam Optimizer - extension of Optimizer class
class CustomAdam(Optimizer):
    """
    A custom implementation of the Adam optimizer. Defaults used are as recommended in https://arxiv.org/abs/1412.6980
    See the paper or visit Optimizer_Experimentation.ipynb for more information on how exactly Adam works + mathematics behind it.
    Params:
        stepsize (float): the effective upperbound of the optimizer step in most cases (size of step). DEFAULT - 0.001.
        bias_m1 (float): bias for the first moment estimate. DEFAULT - 0.9
        bias_m2 (float): bias for the second uncentered moment estimate, DEFAULT - 0.999.
        epsilon (float): small number added to prevent division by zero, DEFAULT - 10e-8.
        bias_correction (bool): whether the optimizer should correct for the specified biases when taking a step. DEFAULT - TRUE.
    """
    #Initialize optimizer with parameters
    def __init__(self, params, stepsize = 0.001, bias_m1 = 0.9, bias_m2 = 0.999, epsilon = 10e-8, bias_correction = True):
        #Check if stepsize and biases are invalid (negative)
        if stepsize < 0:
            raise ValueError("Invalid stepsize [{}]. Choose a positive stepsize".format(stepsize))
        # Parenthesized explicitly: either bias being negative is invalid when
        # bias correction is on (the old check bound `and` only to bias_m2).
        if (bias_m1 < 0 or bias_m2 < 0) and bias_correction:
            raise ValueError("Invalid bias parameters [{}, {}]. Choose positive bias parameters.".format(bias_m1, bias_m2))
        #Declare dictionary of default values for optimizer initialization
        DEFAULTS = dict(stepsize = stepsize, bias_m1 = bias_m1, bias_m2 = bias_m2, epsilon = epsilon, bias_correction = bias_correction)
        #Initialize the optimizer
        super(CustomAdam, self).__init__(params, DEFAULTS)

    #Step method (for updating parameters)
    def step(self, closure = None):
        """Perform one Adam update over all parameter groups.

        Fixes over the previous version:
        - moment buffers and the step counter are kept per-parameter in
          self.state[param] (a single shared buffer breaks any model with
          more than one parameter tensor);
        - gradients are read with out-of-place ops (`pow_` used to square
          param.grad in place, corrupting the caller's gradients);
        - bias correction and the sqrt in the update use copies, so the
          running moment buffers are no longer divided/rooted in place;
        - parameters with no gradient are skipped via `param.grad is None`
          (`param.grad.data` raised AttributeError when grad was None).
        """
        #If a closure is given, re-evaluate the loss
        loss = closure() if closure is not None else None
        #Iterate over "groups" of parameters (layers of parameters in the network)
        for param_group in self.param_groups:
            #Iterate over individual parameters
            for param in param_group["params"]:
                #Skip parameters for which no gradient has been computed
                if param.grad is None:
                    continue
                gradients = param.grad.data
                #Per-parameter optimizer state; lazily initialized on first use
                state = self.state[param]
                if len(state) == 0:
                    state["step"] = 0
                    state["first_moment_estimate"] = torch.zeros_like(param.data)
                    state["second_moment_estimate"] = torch.zeros_like(param.data)
                state["step"] += 1
                t = state["step"]
                first_moment_estimate = state["first_moment_estimate"]
                second_moment_estimate = state["second_moment_estimate"]
                b1 = param_group["bias_m1"]
                b2 = param_group["bias_m2"]
                #First moment estimate: m_t = B_1 * m_{t-1} + (1-B_1) * grad
                first_moment_estimate.mul_(b1).add_(gradients * (1.0 - b1))
                #Second moment estimate: v_t = B_2 * v_{t-1} + (1-B_2) * grad^2
                #(gradients.pow(2) is out-of-place so grads stay intact)
                second_moment_estimate.mul_(b2).add_(gradients.pow(2) * (1.0 - b2))
                if param_group["bias_correction"]:
                    #Bias-corrected copies: m_hat = m_t / (1-B_1^t), v_hat = v_t / (1-B_2^t)
                    m_hat = first_moment_estimate / (1.0 - b1 ** t)
                    v_hat = second_moment_estimate / (1.0 - b2 ** t)
                else:
                    m_hat = first_moment_estimate
                    v_hat = second_moment_estimate
                #Update: theta_t = theta_{t-1} - a * m_hat / (sqrt(v_hat) + epsilon)
                param.data.add_(-param_group["stepsize"] * m_hat / (v_hat.sqrt() + param_group["epsilon"]))
        #Return the loss
        return loss
71996842858 | # def solve(string):
# for i in range (0, len(string)):
# if (string[i] == string[i + 1]):
# return True
# return False
# s = "afternoon"
# print(solve(s))
mySet = set()
def testFunction(ok, i):
if i <= 5:
ok.add(i)
testFunction(ok,i + 1)
testFunction(mySet, 0)
print(mySet) | limzhanrong/DSA | test.py | test.py | py | 317 | python | en | code | 0 | github-code | 90 |
# Read an N x N shortest-path matrix and compute the minimum total road
# length of a network consistent with it; print -1 when the matrix violates
# the triangle inequality. (First line restored: it had been corrupted by a
# dataset seq_id prefix fused onto the code.)
N = int(input())
A = [list(map(int, input().split())) for i in range(N)]
result = 0
flag = True
isbreak = False
for m in range(N - 1):
    for n in range(m + 1, N):
        list_t = list(range(N))
        list_t.remove(m)
        list_t.remove(n)
        for t in list_t:
            if A[m][n] > A[m][t] + A[t][n]:
                # Triangle inequality violated: no consistent road network.
                result = -1
                isbreak = True
                break
            elif A[m][n] == A[m][t] + A[t][n] and result != -1:
                # The (m, n) distance is realized via t: no direct road needed.
                flag = False
                break
        if flag and result != -1:
            result += A[m][n]
        else:
            flag = True
        if isbreak:
            break
    if isbreak:
        break
print(result)
| Aasthaengg/IBMdataset | Python_codes/p03600/s021799714.py | s021799714.py | py | 675 | python | en | code | 0 | github-code | 90 |
# Simple calculator menu: add, multiply, show the larger value, or read new
# operands, looping until the user picks option 5. (First line restored after
# a dataset seq_id prefix was fused onto it; option 4 now reads floats, for
# consistency with the initial float() reads.)
n1 = float(input('Primiro valor:'))
n2 = float(input('Segundo valor:'))
opcao = maior = 0
while opcao != 5:
    print(''' [1] somar
    [2] multiplicar
    [3] maior valor
    [4] novos numeros
    [5] sair''')
    opcao = int(input('Qual é a sua opção ?'))
    if opcao == 1:
        print('{} + {} = {}'.format(n1, n2, n1 + n2))
    elif opcao == 2:
        print('{} x {} = {}'.format(n1, n2, n1 * n2))
    elif opcao == 3:
        if n1 > n2:
            maior = n1
        else:
            maior = n2
        print('Entre {} e {} o MAIOR valor é {}'.format(n1, n2, maior))
    elif opcao == 4:
        print('Informe novos valores:')
        # float(), not int(): keeps the operand type consistent with the
        # initial reads above.
        n1 = float(input('Primeiro valor:'))
        n2 = float(input('Segundo valor:'))
    elif opcao == 5:
        print('Foi bom enquanto durou ... Espero que tenha gostado')
    else:
        print('OPCÃO INVÁLIDA ...')
    print('=-=' * 16)
print('Fim do programa ...')
# options menu
| celycodes/curso-python-exercicios | exercicios/ex059.py | ex059.py | py | 939 | python | pt | code | 2 | github-code | 90 |
31676541287 | ''' VAE model traning for MD data set
# the reference for the originial code:
Kingma, Diederik P., and Max Welling.
"Auto-Encoding Variational Bayes."
https://arxiv.org/abs/1312.6114
Michael Feig, Bercem Dutagaci
Michigan State University
2022
bioRxiv:
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from numpy import genfromtxt
from sklearn.preprocessing import LabelEncoder
import tensorflow as tf
from tensorflow.python.keras.layers import Lambda, Input, Dense, Flatten, Embedding, Dropout, Attention
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.datasets import mnist
from tensorflow.python.keras.losses import mse
from tensorflow.python.keras.losses import binary_crossentropy
from tensorflow.keras.utils import to_categorical, plot_model
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import Sequential
from tensorflow.python.keras import optimizers
from tensorflow.keras.optimizers import Adam, Adagrad
import tensorflow.keras.losses
import matplotlib.pyplot as plt
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_dir',type=str, default='model',
help='Base directory for the model.')
parser.add_argument(
'--train_epochs', type=int, default=10, help='Number of training epochs.')
parser.add_argument(
'--batch_size', type=int, default=4, help="Number of batches.")
parser.add_argument(
'--sample_number', type=int, default=135, help="Number of samples.")
parser.add_argument(
'--input_dim', type=int, default=62, help="Dimension of input parameter tensor.")
parser.add_argument(
'--optimizer', type=str, default='Adam', help='Optimizer that is either Adam or Adagrad.')
parser.add_argument(
'--learning_rate', type=float, default=0.01, help='Learning rate.')
parser.add_argument(
'--trainfile', type=str, default='allprop.csv', help='Datafile for the training set.')
parser.add_argument(
'--testfile', type=str, default='allprop.csv', help='Datafile for the test set.')
parser.add_argument(
'--output_latent', type=str, default='latent.dat', help='Write the latent space variable into a text file.')
parser.add_argument(
'--output_loss', type=str, default='loss.dat', help='Write the loss into a text file.')
parser.add_argument(
'--output_weights', type=str, default='weights.h5', help='Write the weights.')
arg = parser.parse_args()
def prepare_dist_data(trainname):
    """Load a CSV dataset and split it into features, labels and mutant ids.

    Layout assumed: all columns except the last two are numeric features,
    the second-to-last is the numeric label, and the last is the mutant name
    (read as text via a second unicode pass).

    Returns:
        (features ndarray, label column ndarray, mutant-name column ndarray)
    """
    label_encoder = LabelEncoder()  # NOTE(review): created but never used here
    train_data = genfromtxt(trainname, delimiter=',')
    train_features = train_data[:,range(train_data[0].size-2)]
    train_label = train_data[:,range(train_data[0].size-2,train_data[0].size-1)]
    train_datamut = genfromtxt(trainname, delimiter=',', dtype='unicode')
    train_mutants = train_datamut[:,range(train_datamut[0].size-1,train_datamut[0].size)]
    return train_features,train_label,train_mutants
def sampling(args):
    """Reparameterization trick by sampling from an isotropic unit Gaussian.

    # Arguments
        args (tensor): mean and log of variance of Q(z|X)

    # Returns
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # epsilon ~ N(0, I); z = mu + sigma * epsilon keeps sampling differentiable.
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
def write_results(models,
                  data,
                  batch_size=10,
                  model_name="vae"):
    """Encode the dataset, rescale the 2-D latent space to [-5, 5] per axis,
    and write one '<mutant> z0 <v> z1 <v> label <v>' line per sample to the
    globally opened outputfile_latent."""
    encoder, decoder = models
    x_test, y_test = data
    z_mean, _, _ = encoder.predict(x_test,
                                   batch_size=batch_size)
    z0 = []
    z1 = []
    # Linearly map each latent axis onto [-5, 5] (center then scale by range).
    for i in range(samplenumber):
        z0.append((z_mean[:, 0][i]-(min(z_mean[:, 0])+(max(z_mean[:, 0])-min(z_mean[:, 0]))/2))*10/(max(z_mean[:, 0])-min(z_mean[:, 0])))
        z1.append((z_mean[:, 1][i]-(min(z_mean[:, 1])+(max(z_mean[:, 1])-min(z_mean[:, 1]))/2))*10/(max(z_mean[:, 1])-min(z_mean[:, 1])))
    for s in range(samplenumber):
        print(x_mutants[s][0],'z0',z0[s],'z1',z1[s],'label',y_test[s][0],file=outputfile_latent)
    outputfile_latent.close()
# Load the training data and build the VAE: a dense encoder with an
# attention layer producing a 2-D latent (z_mean, z_log_var, z), and a
# mirrored dense decoder reconstructing the input.
x_train,y_train,x_mutants = prepare_dist_data(arg.trainfile)
samplenumber=arg.sample_number
original_dim = arg.input_dim
input_shape = (original_dim,)
batch_size = arg.batch_size
epochs = arg.train_epochs
latent_dim = 2
# VAE model = encoder + decoder
# build encoder model
inputs = Input(shape=input_shape, name='encoder_input')
x = Dense(128, activation='relu')(inputs)
x = Dense(64, activation='relu')(x)
x = Dense(32, activation='relu')(x)
x = Attention()([x,x])
z_mean = Dense(latent_dim, name='z_mean')(x)
z_log_var = Dense(latent_dim, name='z_log_var')(x)
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
# instantiate encoder model
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
encoder.summary()
plot_model(encoder, to_file='%s/vae_att_encoder.png'%arg.model_dir, show_shapes=True)
# build decoder model
latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
x = Dense(32, activation='relu')(latent_inputs)
x = Attention()([x,x])
x = Dense(62, activation='relu')(x)
x = Dense(128, activation='relu')(x)
outputs = Dense(original_dim, dtype = tf.float32)(x)
# instantiate decoder model
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='%s/vae_att_decoder.png'%arg.model_dir, show_shapes=True)
# End-to-end VAE: decode the sampled z (third encoder output).
outputs = decoder(encoder(inputs)[2])
vae = Model(inputs, outputs, name='vae_mlp')
os.makedirs(arg.model_dir, exist_ok=True)
outputfile_latent = open('%s/%s'%(arg.model_dir,arg.output_latent),"w")
vae.summary()
plot_model(vae,to_file='%s/vae_att_model.png'%arg.model_dir,show_shapes=True)
if __name__ == '__main__':
    # Train the VAE with the standard loss (reconstruction MSE + KL
    # divergence), save weights/loss history, and write the latent space.
    data = (x_train, y_train)
    models = (encoder, decoder)
    reconstruction_loss = mse(inputs, outputs[0])
    reconstruction_loss *= original_dim
    # KL(q(z|x) || N(0, I)) in closed form for a diagonal Gaussian.
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean((reconstruction_loss + kl_loss)/1.0)
    vae.add_loss(vae_loss)
    if arg.optimizer=="Adagrad":
        opt = Adagrad(lr=arg.learning_rate)
    else:
        opt = Adam(lr=arg.learning_rate)
    vae.compile(optimizer=opt)
    vae.summary()
    modeltl = vae.fit(x_train,
            epochs=epochs,
            batch_size=batch_size,
            validation_data=(x_train, None))
    vae.save_weights('%s/%s'%(arg.model_dir,arg.output_weights))
    loss_history = modeltl.history["loss"]
    np_loss_history = np.array(loss_history)
    np.savetxt('%s/%s'%(arg.model_dir,arg.output_loss), np_loss_history, delimiter=",", fmt='%f')
    write_results(models,
                data,
                batch_size=batch_size,
                model_name=arg.model_dir)
| bercemd/PolII-mutants | ml_md_vae_training.py | ml_md_vae_training.py | py | 6,884 | python | en | code | 0 | github-code | 90 |
13118002639 | """This module contains all of the endpoints for the api."""
from http import HTTPStatus
from flask import Blueprint, jsonify, request
from flask_restful import Api
from flask_jwt_extended import verify_jwt_in_request
from app.api.resources import (
ArtistAPI,
ArtistListAPI,
ArtistByNameAPI,
VenueAPI,
VenueByNameAPI,
VenueListAPI,
ArtistImageListAPI,
PerformanceAPI,
PerformanceListAPI,
ArtistPerformanceListAPI,
UserAPI,
UserListAPI,
CrawlTaskAPI,
CrawlTaskStatusAPI,
CrawlGroupAPI
)
from app.api.schemas import (
ArtistSchema,
VenueSchema,
ImageSchema,
PerformanceSchema,
UserSchema
)
#Instantiate Blueprint and Api objects
api_blueprint = Blueprint("api", __name__, url_prefix="/api/v1")
api = Api(api_blueprint)
@api_blueprint.before_request
def before_request():
    """Reject any API request that does not carry a valid JWT.

    Returns a 401 JSON response when verification fails; returning None
    lets Flask continue on to the requested resource.
    """
    try:
        verify_jwt_in_request()
    except Exception:
        # The original bare `except:` also swallowed SystemExit /
        # KeyboardInterrupt; Exception still covers every JWT error here.
        return jsonify({"message": "Access token is invalid or expired."}), HTTPStatus.UNAUTHORIZED
#artist resources
api.add_resource(
ArtistListAPI,
"/artists",
resource_class_kwargs={"schema": ArtistSchema()},
endpoint="artists"
)
api.add_resource(
ArtistAPI,
"/artists/<int:artist_id>",
resource_class_kwargs={"schema": ArtistSchema()},
endpoint="artist"
)
api.add_resource(
ArtistByNameAPI,
"/artists/<name>",
resource_class_kwargs={"schema": ArtistSchema()},
endpoint="artist_by_name"
)
#image resources
api.add_resource(
ArtistImageListAPI,
"/artists/<int:artist_id>/images",
resource_class_kwargs={"schema": ImageSchema()},
endpoint="images"
)
#performance resources
api.add_resource(
PerformanceListAPI,
"/performances",
resource_class_kwargs={"schema": PerformanceSchema()},
endpoint="performances"
)
api.add_resource(
PerformanceAPI,
"/performances/<int:performance_id>",
resource_class_kwargs={"schema": PerformanceSchema()},
endpoint="performance"
)
api.add_resource(
ArtistPerformanceListAPI,
"/artists/<int:artist_id>/performances",
resource_class_kwargs={"schema": PerformanceSchema()},
endpoint="artist_performances"
)
#user resources
api.add_resource(
UserListAPI,
"/users",
resource_class_kwargs={"schema": UserSchema()},
endpoint="users"
)
api.add_resource(
UserAPI,
"/users/<int:user_id>",
resource_class_kwargs={"schema": UserSchema()},
endpoint="user"
)
#venue resources
api.add_resource(
VenueListAPI,
"/venues",
resource_class_kwargs={"schema": VenueSchema()},
endpoint="venues"
)
api.add_resource(
VenueAPI,
"/venues/<int:venue_id>",
resource_class_kwargs={"schema": VenueSchema()},
endpoint="venue"
)
api.add_resource(
VenueByNameAPI,
"/venues/<name>",
resource_class_kwargs={"schema": VenueSchema()},
endpoint="venue_by_name"
)
#Scrapy crawls executed by Celery
api.add_resource(
CrawlTaskAPI,
"/crawl",
endpoint="crawl"
)
api.add_resource(
CrawlTaskStatusAPI,
"/crawl_status/<task_id>",
endpoint="crawl_status"
)
api.add_resource(
CrawlGroupAPI,
"/group_crawl",
endpoint="group_crawl"
)
| EricMontague/MailChimp-Newsletter-Project | server/app/api/views.py | views.py | py | 3,215 | python | en | code | 0 | github-code | 90 |
19272389075 |
# JOB: open file, get sum of squares of numbers in file
# ----------------------------------------------------------------------------
def isInteger(val):
    """ Returns true if string val is an integer.
    Note: having floats in strings throws value error.
    Arguments:
    val = value to check if integer
    """
    try:
        int(val.strip())
    except (AttributeError, TypeError, ValueError):
        # AttributeError: val is not a string (no .strip); ValueError:
        # not an integer literal.  The original let non-strings crash.
        return False
    return True
# ----------------------------------------------------------------------------
def convertToInteger(val):
    """Convert string val (assumed to pass isInteger) to an int.
    Arguments:
    val = value to be converted
    """
    stripped = val.strip()
    return int(stripped)
# ----------------------------------------------------------------------------
def getFileNumbers(fileName):
    """Opens a file, reads the contents, and returns the lines as ints in a list.

    Non-integer lines are silently skipped.
    Arguments:
    fileName = name of file to open
    """
    numbers = []
    # `with` guarantees the file is closed even if reading raises
    # (the original only closed it after a successful loop)
    with open(fileName) as inputFile:
        for line in inputFile:
            if isInteger(line):
                numbers.append(convertToInteger(line))
    return numbers
# ----------------------------------------------------------------------------
def sumOfSquares(numbers):
    """Return the sum of the squares of the values in *numbers*."""
    return sum(value ** 2 for value in numbers)
if __name__ == "__main__":
    # Script entry point: expects a "numbers.txt" file in the working directory.
    listOfNumbers = getFileNumbers("numbers.txt")
    print(sumOfSquares(listOfNumbers))
"""Drop output file type from database.
Revision ID: cd0fd4a20457
Revises: 41608b05c0b1
Create Date: 2022-04-06 21:51:27.162113
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "cd0fd4a20457"
down_revision = "41608b05c0b1"
branch_labels = None
depends_on = None
def upgrade():
    """Drop the per-task output method column and its backing Postgres enum."""
    op.drop_column("tasks", "output_method")
    # the enum type survives the column drop, so remove it explicitly
    op.execute("DROP TYPE IF EXISTS task_output_method")
def downgrade():
    """Recreate tasks.output_method as a non-nullable 'file'/'video' enum column."""
    op.add_column(
        "tasks",
        sa.Column(
            "output_method",
            # creating the ENUM column also recreates the task_output_method type
            postgresql.ENUM("file", "video", name="task_output_method"),
            autoincrement=False,
            nullable=False,
        ),
    )
| opendatalabcz/traffic-surveys-automation | backend/migrations/versions/2022-04-06-21-51-cd0fd4a20457_drop_output_file_.py | 2022-04-06-21-51-cd0fd4a20457_drop_output_file_.py | py | 735 | python | en | code | 3 | github-code | 90 |
import csv
def lecture(text):
    '''
    Return a table (list of rows) built from the ';'-separated csv file,
    with the header row removed.
    param : text : csv file path
    return : list

    >>> lecture('pokemon.csv')[0]
    ['Clic', '60', '80', '95', '50', 'Acier']
    '''
    table = []
    # `with` guarantees the handle is closed: the original called
    # `file.close` without parentheses, so the file was never closed
    with open(text, 'r') as file:
        for ligne in file:
            table.append(ligne.rstrip().split(';'))
    del table[0]  # drop the header row
    return table
def distance(p1, p2):
    '''
    Return the Euclidean distance between two pokemons p1 and p2,
    computed over the four numeric stat columns (indices 1..4).
    param : p1 : list
    param : p2 : list
    return : float

    >>> distance(['Clic', '60', '80', '95', '50', 'Acier'],['Tic', '40', '55', '70', '30', 'Acier'])
    45.27692569068709
    '''
    ecarts = (int(a) - int(b) for a, b in zip(p1[1:5], p2[1:5]))
    return sum(e * e for e in ecarts) ** 0.5
def critere(donnee):
    """
    Return the second value of the pair (used as a sort key).
    param : donnee : tuple
    return : int

    >>> critere((42,15))
    15
    """
    _indice, valeur = donnee
    return valeur
def K_plus_proches_voisins(text, p, k):
    '''
    Return the list of the k nearest neighbours of pokemon p.
    param : text : file
    param : p : list
    param : k : int
    return : list of tuples (index, distance)

    >>> K_plus_proches_voisins('pokemon.csv',['Tic', '40', '55', '70', '30', 'Acier'],3)
    [(42, 16.09347693943108), (274, 17.635192088548397), (44, 18.303005217723125)]
    '''
    # read the csv ONCE: the original called lecture(text) inside the
    # comprehension, re-reading the whole file for every single row
    table = lecture(text)
    liste = [(i, distance(table[i], p)) for i in range(len(table))]
    liste_triee = sorted(liste, key=critere)
    resultat = [liste_triee[i] for i in range(k + 1)]
    # drop the nearest match, assumed to be p itself (distance 0)
    del resultat[0]
    return resultat
def renvoie_type(text, p, k):
    '''
    Return the majority type among the k nearest neighbours of p.
    param : text : file
    param : p : list
    param : k : int
    return : str

    >>> renvoie_type('pokemon.csv',['Krabby','30','105','90','50','Eau'],20)
    'Eau'
    '''
    types = ['Acier','Combat','Dragon','Eau','Electrik','Fée','Feu','Glace','Insecte','Normal','Plante','Poison','Psy','Roche','Sol','Spectre','Ténèbres','Vol']
    comptes = [0 for i in range(len(types))]
    # hoist the expensive calls: the original re-read the csv and
    # recomputed the neighbour list inside the nested loops
    table = lecture(text)
    voisins = K_plus_proches_voisins(text, p, k)
    i = 0
    for type_pokemon in types:
        for valeur in voisins:
            if table[valeur[0]][5] == type_pokemon:
                comptes[i] += 1
        i += 1
    return types[comptes.index(max(comptes))]
def creation_dictionnaire(text):
    """
    Return a dictionary keyed by pokemon name, mapping to the tuple of
    its four stats and its type.
    param: text : file
    return : dict

    >>> creation_dictionnaire('pokemon.csv')['Clic']
    ('60', '80', '95', '50', 'Acier')
    """
    dictionnaire = {}
    for ligne in lecture(text):
        dictionnaire[ligne[0]] = (ligne[1], ligne[2], ligne[3], ligne[4], ligne[5])
    return dictionnaire
if __name__ == '__main__':
    import doctest
    # verbose run of every doctest embedded in the docstrings above
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS, verbose=True)
#!/usr/bin/env python3
import os
import sys
import re
import requests
import shutil
import subprocess
from time import sleep
URL_WHITELIST = "https://static.fclaude.net/whitelist-master.txt"
URL_SUBDOM = "https://static.fclaude.net/whitelist-subdoms.txt"
URL_IPADDR = "https://static.fclaude.net/whitelist-ipaddrs.txt"
TRACKER_IPADDR = '/var/www/static/whitelist-ipaddrs.txt'
TRACKER_SUBDOM = '/var/www/static/whitelist-subdoms.txt'
BIN_FINDOMAIN = '/home/fclaude/.local/bin/findomain'
BIN_SUBFINDER = '/home/fclaude/.local/bin/subfinder'
BIN_AMASS = '/home/fclaude/.local/bin/amass'
SLEEP_RSLV = 0.10
SLEEP_ENUM = 10.00
URL_REGEX = re.compile(r"[^\n]+")
IP_REGEX = re.compile(r"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)")
def check_env():
    """Exit with status 1 unless findomain, amass and subfinder are all on PATH."""
    required = ("Findomain", "Amass", "Subfinder")
    for tool in required:
        if shutil.which(tool.lower()) is None:
            print("ERROR :: " + tool + " not found in path")
            sys.exit(1)
def dedupe_sort(input_list):
    """Return a sorted copy of *input_list* with duplicates removed and the
    loopback/null addresses ('127.0.0.1', '0.0.0.0') filtered out.

    Uses a set for O(1) membership; the original scanned a growing list,
    making deduplication O(n^2).
    """
    excluded = {'127.0.0.1', '0.0.0.0'}
    unique = {element for element in input_list if element not in excluded}
    return sorted(unique)
def resolve(subdomains):
    """Resolve every subdomain against several public DNS resolvers and write
    the deduplicated IPv4 result set to TRACKER_IPADDR.

    Side effects: clears *subdomains* in place and overwrites TRACKER_IPADDR.
    """
    # (label used in the progress message, resolver address passed to dig);
    # the original repeated this block five times verbatim
    resolvers = (
        ("pihole", "10.114.27.1"),
        ("quad9", "9.9.9.9"),
        ("opendns", "208.67.222.222"),
        ("cloudflare", "1.1.1.1"),
        ("google", "8.8.8.8"),
    )
    new_ips = []
    for subdomain in subdomains:
        for label, server in resolvers:
            print("resolving subdomain with " + label + ": " + subdomain)
            # fixed: the original passed `timeout=3` without the leading '+',
            # which dig treats as an extra query name rather than an option
            dig = os.popen('dig @' + server + ' +short +tries=1 +timeout=3 ' + subdomain)
            new_ips.extend([new.group() for new in re.finditer(IP_REGEX, dig.read())])
            sleep(SLEEP_RSLV)
    # dedupe list
    ips = dedupe_sort(new_ips)
    new_ips.clear()
    subdomains.clear()
    # write new masterlist to file; `with` closes the handle on any error
    with open(TRACKER_IPADDR, "w") as ip_masterlist:
        for ip in ips:
            ip_masterlist.write(ip + '\n')
def scan(tlds):
    """Enumerate subdomains of every TLD with findomain/amass/subfinder,
    merge them with the published masterlist, write TRACKER_SUBDOM and
    hand the result to resolve().

    Side effects: clears *tlds* in place; resolve() later clears the
    subdomain list it receives.
    """
    subdomains = []
    new_subdomains = []
    # import existing url masterlist into array
    response = requests.get(URL_SUBDOM)
    if str(response.status_code).startswith("2"):
        new_subdomains.extend([new.group() for new in re.finditer(URL_REGEX, response.text)])
    # enumerate tld subdomains (each tool capped at 300s wall time)
    for tld in tlds:
        # findomain
        print("enumerating subdomains using findomain: " + tld)
        findomain_cmd = [ BIN_FINDOMAIN, "--rate-limit", "1", "--tcp-connect-threads", "1", "--resolver-timeout", "60", "--quiet", "--target", tld ]
        new_subdomains.extend([new.group() for new in re.finditer(URL_REGEX, subprocess.check_output(findomain_cmd, timeout=300, encoding='utf8'))])
        sleep(SLEEP_ENUM)
        # amass (stderr suppressed: amass logs progress there)
        print("enumerating subdomains using amass: " + tld)
        amass_cmd = [ BIN_AMASS, "enum", "-nocolor", "-passive", "-dns-qps", "1", "-timeout", "10", "-d", tld ]
        new_subdomains.extend([new.group() for new in re.finditer(URL_REGEX, subprocess.check_output(amass_cmd, stderr=subprocess.DEVNULL, timeout=300, encoding='utf8'))])
        sleep(SLEEP_ENUM)
        # subfinder
        print("enumerating subdomains using subfinder: " + tld)
        subfinder_cmd = [ BIN_SUBFINDER, "-no-color", "-silent", "-rate-limit", "1", "-max-time", "30", "-timeout", "120", "-exclude-ip", "-all", "-t", "1", "-d", tld ]
        new_subdomains.extend([new.group() for new in re.finditer(URL_REGEX, subprocess.check_output(subfinder_cmd, timeout=300, encoding='utf8'))])
        sleep(SLEEP_ENUM)
    # dedupe list
    subdomains = dedupe_sort(new_subdomains)
    new_subdomains.clear()
    tlds.clear()
    # write new masterlist to file
    subdom_masterlist = open(TRACKER_SUBDOM, "w")
    for subdomain in subdomains:
        subdom_masterlist.writelines(subdomain + '\n')
    subdom_masterlist.close()
    # pass subdomains to resolver
    resolve(subdomains)
def pull_tlds():
    """Download the TLD whitelist and feed the parsed entries to scan()."""
    tlds = []
    response = requests.get(URL_WHITELIST)
    if str(response.status_code).startswith("2"):
        tlds = [url.group() for url in re.finditer(URL_REGEX, response.text)]
    scan(tlds)
def main():
    """Entry point: the PATH check is currently disabled, so the script goes
    straight to whitelist download and enumeration."""
    #check_env()
    pull_tlds()
if __name__ == "__main__":
    main()
| francois-claude/tracker_ip_finder | resolv.py | resolv.py | py | 5,442 | python | en | code | 0 | github-code | 90 |
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path("login", views.login_view, name="login"),
path("logout", views.logout_view, name="logout"),
path("register", views.register, name="register"),
path('gallery', views.gallery, name='gallery'),
path('generate', views.generate, name='generate'),
path('generate_prompt', views.generate_prompt, name='generate_prompt'),
path('random_prompt', views.random_prompt, name='random_prompt'),
path('fetch_post', views.fetch_post, name='fetch_post'),
path('prompts', views.prompts, name='prompts'),
path("load_prompts", views.load_prompts, name="load_prompts"),
path("profile/<str:username>", views.profile, name="profile"),
path("edit_profile", views.edit_profile, name="edit_profile"),
]
| xuche123/capstone | imagine/urls.py | urls.py | py | 836 | python | en | code | 0 | github-code | 90 |
import cv2
import numpy as np
import torch
#---CLAHE transform---
def clahe(img, clip_limit=2.0, tile_grid_size=(8, 8)):
    """Apply Contrast Limited Adaptive Histogram Equalization to a uint8 image.

    Grayscale images are equalized directly; RGB images are converted to LAB
    and only the lightness channel is equalized, preserving color.
    """
    if img.dtype != np.uint8:
        raise TypeError("clahe supports only uint8 inputs")
    clahe_mat = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid_size)
    # 2-D array or single-channel 3-D array -> grayscale path
    if len(img.shape) == 2 or img.shape[2] == 1:
        img = clahe_mat.apply(img)
    else:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
        img[:, :, 0] = clahe_mat.apply(img[:, :, 0])
        img = cv2.cvtColor(img, cv2.COLOR_LAB2RGB)
    return img
#---blend image---
def mix_pixel(pix_1, pix_2, perc):
    """Weighted blend of two pixel values: perc==255 gives pix_1, perc==0 gives pix_2."""
    w1 = perc / 255
    w2 = (255 - perc) / 255
    return (w1 * pix_1) + (w2 * pix_2)
def blend_images_using_mask(img_orig, img_for_overlay, img_mask):
    """Blend two images per pixel, using *img_mask* (0-255) as the weight map."""
    mask = img_mask
    # expand a single-channel mask to 3 channels so it broadcasts per pixel
    if len(mask.shape) != 3:
        mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    blended = mix_pixel(img_orig, img_for_overlay, mask)
    return blended.astype(np.uint8)
def blending_images(x1: np.ndarray, x2: np.ndarray, alpha: np.float32):
    """Classic alpha blend of two arrays, returned as uint8."""
    mixed = (alpha * x1) + ((1. - alpha) * x2)
    return np.uint8(mixed)
def blending_images_cv2(src1, src2, alpha):
    """Same alpha blend as blending_images, delegated to OpenCV's addWeighted."""
    beta = 1. - alpha
    return cv2.addWeighted(src1, alpha, src2, beta, 0.0)
#---ZCA image whitening (prototype)---
class ZCATransformation(object):
    """Applies a precomputed ZCA whitening transform to batched image tensors."""
    def __init__(self, transformation_matrix, transformation_mean):
        # the whitening matrix must be square: (D, D) with D = C*H*W
        if transformation_matrix.size(0) != transformation_matrix.size(1):
            raise ValueError("transformation_matrix should be square. Got " +
                             "[{} x {}] rectangular matrix.".format(*transformation_matrix.size()))
        self.transformation_matrix = transformation_matrix
        self.transformation_mean = transformation_mean
    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (N, C, H, W) to be whitened.
        Returns:
            Tensor: Transformed image.
        """
        if tensor.size(1) * tensor.size(2) * tensor.size(3) != self.transformation_matrix.size(0):
            raise ValueError("tensor and transformation matrix have incompatible shape." +
                             "[{} x {} x {}] != ".format(*tensor[0].size()) +
                             "{}".format(self.transformation_matrix.size(0)))
        batch = tensor.size(0)
        # flatten each sample, center with the stored mean, whiten, restore shape
        flat_tensor = tensor.view(batch, -1)
        transformed_tensor = torch.mm(flat_tensor - self.transformation_mean, self.transformation_matrix)
        tensor = transformed_tensor.view(tensor.size())
        return tensor
    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        format_string += (str(self.transformation_matrix.numpy().tolist()) + ')')
        return format_string
#---zca simplified---
#batchrun
def zca_batch(x):
    """Compute the ZCA whitening matrix for a batch of images.

    Args:
        x: tensor of shape (B, C, H, W).

    Returns:
        W: (C*H*W, C*H*W) whitening matrix; whiten with flat_x @ W.
        (The original docstring incorrectly claimed a mean matrix was
        also returned.)
    """
    [B, C, H, W] = list(x.size())
    x = x.reshape((B, C*H*W))  # flatten each sample
    x = x - torch.mean(x, dim=0, keepdim=True)  # center per feature
    # NOTE(review): unnormalised scatter matrix (no division by B), so eps
    # is effectively scaled by batch size -- kept as in the original
    covariance = x.t() @ x
    U, S, V = torch.linalg.svd(covariance)
    eps = 1e-1
    # W = U diag(1/sqrt(S+eps)) U^T; invariant to the sign ambiguity of U
    W = torch.matmul(torch.matmul(U, torch.diag(1. / torch.sqrt(S + eps))), U.T)
    return W
#single image --- Unable to bear on overload processing, may crash the memory usage---
def zca_image(x):
    """ZCA-whiten a single image.

    Args:
        x: array of shape (C, H, W).  The input is NOT modified.

    Returns:
        The whitened, flattened image of shape (C*H*W,).
    """
    C, H, W = x.shape
    x = x.reshape(C*H*W)  # flatten the data (this may be a view of the input)
    mean = np.mean(x, axis=0, keepdims=True)
    # out-of-place subtraction: the original `x -= mean` wrote through the
    # reshape view and silently mutated the caller's array
    x = x - mean
    covariance = x.reshape(-1, 1) @ x.reshape(1, -1)
    U, S, V = np.linalg.svd(covariance)
    eps = 1e-3
    W = np.matmul(np.matmul(U, np.diag(1. / np.sqrt(S + eps))), U.T)
    x_whiten = W @ x
    return x_whiten
#---exemplary application---
#image-level
#W1= transform_img.zca_batch(X1)
#W2= transform_img.zca_batch(X2)
#X1, X2 = torch.matmul(X1[0:opt['mini_batch_size_g_h']].reshape((31, 3*224*224)), W1),\
# torch.matmul(X2[0:opt['mini_batch_size_g_h']].reshape((31, 3*224*224)), W2)
#X1, X2 = X1.reshape((31, 3,224,224)), X2.reshape((31, 3,224,224))
#feature-level
#W1 = transform_img.zca_batch(encoder_X1)
#W2 = transform_img.zca_batch(encoder_X2)
#X1 = torch.matmul(encoder_X1, W1)
#X2 = torch.matmul(encoder_X2, W2) | pjirayu/STOS | utils/transform_img.py | transform_img.py | py | 4,708 | python | en | code | 1 | github-code | 90 |
25494840664 |
import json
import requests
from elasticsearch import Elasticsearch
# Ad-hoc integration test: POST a search request to the local CMDB API and
# print the hits.  The Elasticsearch client below is created but unused here.
es = Elasticsearch(hosts=["http://127.0.0.1:9200"])
# NOTE(review): hardcoded JWT credential checked into source -- rotate/remove
headers = {
    "Content-Type": "application/json",
    "Authorization": "JWT eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJlbWFpbCI6IiIsImV4cCI6MTUxNzU4ODQ4OCwidXNlcl9pZCI6MSwidXNlcm5hbWUiOiJsaXNpIn0.OEpoYSuprzlFREuv7W7BHBIKsTakZy2eZr-rtowAfMU"
}
# search request body: indices to hit, page size, and the sort order
data = {
    "indices": ["test_22", "test_12"],
    "size": 20,
    "sort": {
        "age": "desc",
        "last_login": "desc"
    }
}
res = requests.post("http://127.0.0.1:8000/api/v1/search/data", headers=headers, data=json.dumps(data))
res_data = res.json()
print(res_data)
# print the sorted fields of every returned document
for i in res_data["hits"]:
    print(i["_source"]["age"], i["_source"]["last_login"])
import requests
from apiclient.discovery import build
from apiclient.errors import HttpError
class LelKekBot:
    """Minimal long-polling Telegram bot wrapper around the Bot API."""
    def __init__(self,token):
        self.token = token
        self.apiUrl = "https://api.telegram.org/bot{}/".format(token)
    def getUpdates(self,offset=None, timeout = 30):
        """Long-poll getUpdates; returns the raw 'result' list of updates."""
        method = 'getUpdates'
        params = {'timeout':timeout,'offset':offset}
        resp = requests.get(self.apiUrl + method,params)
        resultJson = resp.json()['result']
        return resultJson
    def getLastUpdate(self):
        """Return the most recent update, or 0 when there are none."""
        getResult = self.getUpdates()
        if len(getResult)>0:
            lastUpdate = getResult[-1]
        else:
            lastUpdate = 0
        return lastUpdate
    def sendMess(self, chatId, text):
        """Send *text* to the chat identified by *chatId* (response ignored)."""
        params = {"chat_id":chatId, "text":text}
        method = 'sendMessage'
        response = requests.post(self.apiUrl+method,params)
    def getVideo(self, query, where):
        """Search YouTube (*where* is an API client) and return the first
        video id, or a fallback string when nothing matches."""
        searchResponse = where.search().list(
            q = query,
            part='snippet',
            maxResults=5
        ).execute()
        for searchRes in searchResponse.get('items',[]):
            if searchRes['id']['kind']=='youtube#video':
                return searchRes['id']['videoId']
        return 'cocиписос'
token = "700076471:AAG9HAUfDiPKH4QWaD995F68Pga2cE2q5KA"
bot = LelKekBot(token)
DEVELOPER_KEY = 'AIzaSyD3q2zJLDca75xklszgqsMsZIONdR9nUwA'
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
def main():
    """Poll Telegram forever; answer '!vid <query>' messages with the first
    matching YouTube link."""
    newOffset = None
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
        developerKey=DEVELOPER_KEY)
    while True:
        # NOTE(review): this result is discarded and getLastUpdate() polls
        # again -- each loop iteration issues two getUpdates requests
        bot.getUpdates(newOffset)
        lastUpdate = bot.getLastUpdate()
        if lastUpdate ==0:continue
        lastUpdateId = lastUpdate['update_id']
        lastChatText = lastUpdate['message']['text']
        lastChatId = lastUpdate['message']['chat']['id']
        #lastChatName = lastUpdate['message']['chat']['first_name']
        # split into command + the rest of the message (the search query)
        lastChatTextSplit = lastChatText.split(maxsplit=1)
        if lastChatTextSplit[0].lower() == '!vid' and len(lastChatTextSplit)>1:
            h = bot.getVideo(lastChatTextSplit[1], youtube)
            bot.sendMess(lastChatId, "http://youtube.com/watch?v="+h)
        # acknowledge the processed update so it is not delivered again
        newOffset = lastUpdateId +1
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        exit()
| Kilotary/lel | lolkekbot.py | lolkekbot.py | py | 2,444 | python | en | code | 0 | github-code | 90 |
# 3'. Задайте список из вещественных чисел.
# Напишите программу, которая найдёт разницу между максимальным и минимальным значением дробной части элементов.
# *Пример:*
# - [1.1, 1.2, 3.1, 5, 10.01] => 0.19
from os import system
from random import randint
import random
system("cls")
n = int(input(" сколько в саписке элементов? "))
testList = []
def CreateListFloat(n, testList):
    """Append n random floats (integer part 0-2 plus a two-decimal fraction)
    to testList, print it, and return it.

    Returning the list is new but backward compatible: the original returned
    None, which made the chained call F(CreateListFloat(...)) pass None.
    """
    for i in range(0, n):
        testList.append(randint(0, 2) + round(random.random(), 2))
    print(testList)
    return testList
def F(list=None):
    """Print (and return) the difference between the largest and smallest
    two-decimal fractional part of the values in *list*.

    Falls back to the global testList when *list* is None, preserving the
    original behaviour (the old version always ignored its parameter and
    used the global; its default was also a shared mutable object).
    """
    values = testList if list is None else list
    # fractional parts, rounded to 2 decimals, sorted descending
    testlist = sorted((round(v % 1, 2) for v in values), reverse=True)
    print(testlist)
    mi_n = testlist[len(testlist) - 1]
    ma_x = testlist[0]
    res = ma_x - mi_n
    print(f"разница {ma_x} и {mi_n} = {round(res,2)}")
    return round(res, 2)
F(CreateListFloat(n,testList))
| AH1N/PythonHomeWork | HW_3_28.11.2022.py | HW_3_28.11.2022.py | py | 1,025 | python | ru | code | 0 | github-code | 90 |
import json
from twisted.internet import reactor
from binance.websocket.binance_socket_manager import BinanceSocketManager
class BinanceWebsocketClient(BinanceSocketManager):
    """Thin wrapper over BinanceSocketManager adding subscribe helpers and a
    stop() that also shuts down the Twisted reactor."""
    def __init__(self, stream_url):
        super().__init__(stream_url)
    def stop(self):
        # always stop the reactor, even if closing the socket raises
        try:
            self.close()
        finally:
            reactor.stop()
    def _single_stream(self, stream):
        """True for a single stream name (str), False for a combined list."""
        if isinstance(stream, str):
            return True
        elif isinstance(stream, list):
            return False
        else:
            raise ValueError("Invalid stream name, expect string or array")
    def live_subscribe(self, stream, id, callback, **kwargs):
        """live subscribe websocket
        Connect to the server
        - SPOT: wss://stream.binance.com:9443/ws
        - SPOT testnet : wss://testnet.binance.vision/ws
        and sending the subscribe message, e.g.
        {"method": "SUBSCRIBE","params":["btcusdt@miniTicker"],"id": 100}
        """
        combined = False
        # a single name is wrapped in a list; a list means a combined stream
        if self._single_stream(stream):
            stream = [stream]
        else:
            combined = True
        data = {"method": "SUBSCRIBE", "params": stream, "id": id}
        data.update(**kwargs)
        payload = json.dumps(data, ensure_ascii=False).encode("utf8")
        # socket key is the joined stream names
        stream_name = "-".join(stream)
        return self._start_socket(
            stream_name, payload, callback, is_combined=combined, is_live=True
        )
    def instant_subscribe(self, stream, callback, **kwargs):
        """Instant subscribe, e.g.
        wss://stream.binance.com:9443/ws/btcusdt@bookTicker
        wss://stream.binance.com:9443/stream?streams=btcusdt@bookTicker/bnbusdt@bookTicker
        """
        combined = False
        if not self._single_stream(stream):
            combined = True
            stream = "/".join(stream)
        data = {"method": "SUBSCRIBE", "params": stream}
        data.update(**kwargs)
        payload = json.dumps(data, ensure_ascii=False).encode("utf8")
        stream_name = "-".join(stream)
        return self._start_socket(
            stream_name, payload, callback, is_combined=combined, is_live=False
        )
| June911/WithdrawFromBinance | binance/websocket/websocket_client.py | websocket_client.py | py | 2,166 | python | en | code | 5 | github-code | 90 |
from itertools import accumulate
N, K = map(int, input().split())
S = input()
# run-length encode S: q[i] is the length of the i-th run of equal characters
q = []
pre = S[0]
ans = cnt = 0
for s in S:
    if pre != s:
        q.append(cnt)
        cnt = 1
        pre = s
    else:
        cnt += 1
q.append(cnt)
# with at most K flips, a window of J = 2K+1 consecutive runs can be merged
J = 2*K+1
# prefix sums of run lengths, with the total duplicated as a sentinel
acc = list(accumulate(q))
acc += [acc[-1]]
# presumably maximizes a score over all J-run windows (merged window minus a
# per-leftover-run penalty) -- TODO confirm against the problem statement
if len(q) <= J:
    ans = acc[-1]-1
else:
    ans = acc[J-1]-1 + acc[-1]-acc[J]-(len(q)-J)
for i, a in enumerate(acc):
    if i+J < len(q):
        tot = acc[i+J]-acc[i]-1
        tot += acc[i]+acc[-1]-acc[i+J+1]-(len(q)-J)
        if tot > ans:
            ans = tot
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p02918/s453647133.py | s453647133.py | py | 593 | python | en | code | 0 | github-code | 90 |
'''
Description: https://blog.csdn.net/weixin_44128857/article/details/117445420
Author: HCQ
Company(School): UCAS
Email: 1756260160@qq.com
Date: 2021-08-03 12:41:16
LastEditTime: 2021-10-17 20:29:10
FilePath: /PCDet/pcdet/datasets/huituo/robosense/robosense_dataset.py
'''
import numpy as np
import copy
import pickle
import os
import json
import numpy as np
import pcl
import pandas
import sys
import random
from skimage import io
from ...dataset import DatasetTemplate
from ....ops.roiaware_pool3d import roiaware_pool3d_utils
from ....utils import box_utils, common_utils
from pathlib import Path
class RobosenseDataset(DatasetTemplate):
    def __init__(self,dataset_cfg,class_names,training= True, root_path=None,logger = None):
        """
        Args:
            dataset_cfg: dataset config node; class_names: classes to detect;
            training: train/eval flag; root_path: dataset root (resolved by the
            parent when None); logger: optional log sink.
        """
        super().__init__(
            dataset_cfg = dataset_cfg,class_names=class_names,
            training = training, root_path = root_path,logger = logger
        )
        self.robosense_infos =[]
        # lists holding pcd/label file paths, plus their train/val splits
        self.files_list_pcd = []
        self.files_list_label = []
        self.files_list_label_train = []
        self.files_list_label_val = []
        self.files_list_pcd_train = []
        self.files_list_pcd_val = []
        self.train_ratio_of_all_labels=self.dataset_cfg.TRAIN_RATIO_OF_ALL_LABELS
        self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
        # eagerly load the pickled info files for the current mode
        self.include_robosense_data(self.mode)
    def include_robosense_data(self,mode):
        """Load every pickled info file configured for *mode* and append the
        contents to self.robosense_infos; missing files are skipped."""
        if self.logger is not None:
            self.logger.info('Loading robosense dataset')
        robosense_infos =[]
        '''
        INFO_PATH:{
            'train':[robosense_infos_train.pkl],
            'test':[robosense_infos_val.pkl],}
        '''
        for info_path in self.dataset_cfg.INFO_PATH[mode]:
            # paths are joined by string concatenation, not pathlib
            info_path = str(self.root_path)+'/'+ info_path
            if not Path(info_path).exists():
                continue
            with open(info_path,'rb') as f:
                infos = pickle.load(f)
                robosense_infos.extend(infos)
        self.robosense_infos.extend(robosense_infos)
        if self.logger is not None:
            self.logger.info('Total samples for robosense dataset: %d'%(len(robosense_infos)))
#根据数据地址的路径,获取路径下 文件夹的名字列表
def get_folder_list(self,root_path):
folder_list = []
root_path =root_path
#读取该目录下所有文件夹的名字,并组成一个列表
folder_list = os.listdir(root_path)
return folder_list
#根据文件夹的列表,返回包含所有文件名的列表 files_list_pcd 和files_list_label
    def get_files_name_list(self):
        """Walk every road folder under self.root_path and collect the full
        paths of all .pcd and .json label files.

        Expects each road folder to contain 'pcd' and 'label' subfolders.
        NOTE(review): assumes self.root_path is a str ending with '/'; a road
        folder missing 'pcd' or 'label' would leave pcd_path/label_path
        unbound -- confirm the dataset layout guarantees both exist.
        """
        folder_list = []
        folder_list = self.get_folder_list(self.root_path)
        files_list_pcd = []
        files_list_label = []
        for per_folder in folder_list:
            # path of one road's folder
            one_road_path = str(self.root_path+per_folder+'/')
            # its subfolders, expected to be ['label', 'pcd']
            one_road_list = self.get_folder_list(one_road_path)
            for one_folder in one_road_list:
                if one_folder == 'pcd':
                    pcd_path = str(one_road_path+one_folder)
                if one_folder == 'label':
                    label_path = str(one_road_path+one_folder)
            # collect full paths of the .pcd files
            pcd_files = self.get_folder_list(pcd_path)
            for thisfile in pcd_files:
                if thisfile.endswith(".pcd"):
                    files_list_pcd.append(str(pcd_path+'/'+thisfile))
            # collect full paths of the .json label files
            label_files = self.get_folder_list(label_path)
            for thisfile in label_files:
                if thisfile.endswith(".json"):
                    files_list_label.append(str(label_path +'/'+ thisfile))
        # all pcd and label file paths across every road folder
        return files_list_pcd,files_list_label
#根据label的路径,得到对应的pcd路径
def from_label_path_to_pcd_path(self,single_label_path):
#根据label的路径,推出来pcd相应的路径,两者在倒数第二个文件夹不同
single_pcd_path = ''
strl1 = 'label'
strl2 = '.json'
if strl1 in single_label_path:
single_pcd_path = single_label_path.replace(strl1,'pcd')
if strl2 in single_pcd_path:
single_pcd_path = single_pcd_path.replace(strl2,'.pcd')
#由此得到了label对应的pcd文件的路径 :single_pcd_path
return single_pcd_path
# 根据label文件路径列表,返回所有标签的数据
    def get_all_labels(self,num_workers = 4,files_list_label=None):
        """Parse every label json in *files_list_label* with a thread pool and
        return a list of per-frame info dicts."""
        import concurrent.futures as futures
        # NOTE(review): the global counter `i` below is only a progress
        # indicator and is incremented from worker threads without a lock
        global i
        i =0
        def get_single_label_info(single_label_path):
            """Read one label json and build the per-frame info dict
            (paths, names, boxes, tracker ids, assembled gt_boxes)."""
            global i
            i=i+1
            single_label_path = single_label_path
            with open(single_label_path,encoding = 'utf-8') as f:
                labels = json.load(f)
            # per-frame dict holding all objects' annotation fields
            single_objects_label_info = {}
            single_objects_label_info['single_label_path'] = single_label_path
            single_objects_label_info['single_pcd_path'] = self.from_label_path_to_pcd_path(single_label_path)
            single_objects_label_info['name'] = np.array([label['type'] for label in labels['labels']])
            single_objects_label_info['box_center'] = np.array([[label['center']['x'], label['center']['y'],label['center']['z']] for label in labels['labels']])
            # NOTE(review): size reads ['x'],['z'],['z'] -- the y extent is
            # never read and z is duplicated; looks like a bug, confirm
            single_objects_label_info['box_size'] = np.array([[label['size']['x'],label['size']['z'],label['size']['z']] for label in labels['labels']])
            single_objects_label_info['box_rotation'] = np.array([[label['rotation']['roll'],label['rotation']['pitch'],label['rotation']['yaw']] for label in labels['labels']])
            single_objects_label_info['tracker_id'] = np.array([ label['tracker_id'] for label in labels['labels']])
            box_center = single_objects_label_info['box_center']
            box_size = single_objects_label_info['box_size']
            box_rotation = single_objects_label_info['box_rotation']
            # gt_boxes layout: [x, y, z, dx, dy, dz, yaw]
            rotation_yaw = box_rotation[:,2].reshape(-1,1)
            gt_boxes = np.concatenate([box_center,box_size,rotation_yaw],axis=1).astype(np.float32)
            single_objects_label_info['gt_boxes'] = gt_boxes
            print("The current processing progress is %d / %d "%(i,len(files_list_label)))
            return single_objects_label_info
        files_list_label = files_list_label
        with futures.ThreadPoolExecutor(num_workers) as executor:
            infos = executor.map(get_single_label_info,files_list_label)
        infos = list(infos)
        print("*****************************Done!***********************")
        print("type of infos :",type(infos))
        print("len of infos :",len(infos))
        # infos: list of dicts, one per frame
        return infos
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.robosense_infos) * self.total_epochs
return len(self.robosense_infos)
#去掉一帧里面无效的点云数据
def remove_nan_data(self,data_numpy):
data_numpy = data_numpy
data_pandas = pandas.DataFrame(data_numpy)
#删除任何包含nan的所在行 (实际有三分之一的数据无效,是[nan, nan, nan, 0.0])
data_pandas = data_pandas.dropna(axis=0,how='any')
data_numpy = np.array(data_pandas)
return data_numpy
#根据每一帧的pcd文件名和路径single_pcd_path,
# 得到这一帧中的点云数据,返回点云的numpy格式(M,4)==============================================================
# !!!!对比参考:pcdet/datasets/kitti/kitti_dataset.py def process_single_scene(sample_idx):
    def get_single_pcd_info(self,single_pcd_path):
        """Load one frame's pcd file and return its valid points as a numpy
        array of shape (M, 4) (x, y, z, intensity)."""
        single_pcd_path = single_pcd_path
        single_pcd_points = pcl.load_XYZI(single_pcd_path)
        # convert the point cloud to numpy
        single_pcd_points_np = single_pcd_points.to_array()
        # drop invalid points (rows containing NaN)
        single_pcd_points_np = self.remove_nan_data(single_pcd_points_np)
        return single_pcd_points_np
# 根据名字,去掉相应的信息,主要针对single_objects_label_info
# single_objects_label_info 里关于‘unknown’的数据信息
def drop_info_with_name(self,info,name):
ret_info = {}
info = info
keep_indices =[ i for i,x in enumerate(info['name']) if x != name]
for key in info.keys():
if key == 'single_label_path' or key == 'single_pcd_path':
ret_info[key] = info[key]
continue
ret_info[key] = info[key][keep_indices]
return ret_info
#根据训练列表label的数据,得到对应的pcd的路径列表list
def from_labels_path_list_to_pcd_path_list(self,labels_path_list):
pcd_path_list = []
for m in labels_path_list:
pcd_path_list.append(self.from_label_path_to_pcd_path(m))
return pcd_path_list
#实现列表相减的操作,从被减数list_minute中去掉减数list_minus的内容
def list_subtraction(self,list_minute,list_minus):
list_difference = []
for m in list_minute:
if m not in list_minus:
list_difference.append(m)
return list_difference
    def __getitem__(self,index):
        """Return one training sample: the frame's points plus its ground-truth boxes.

        Args:
            index: dataset index; wrapped modulo the info list when several
                epochs are merged into one.

        Returns:
            The data_dict produced by ``self.prepare_data`` (points, gt_boxes, ...).
        """
        if self._merge_all_iters_to_one_epoch:
            index = index % len(self.robosense_infos)
        single_objects_label_info = copy.deepcopy(self.robosense_infos[index])
        single_label_path = single_objects_label_info['single_label_path']
        single_pcd_path = self.from_label_path_to_pcd_path(single_label_path)
        # Point cloud of this frame with NaN points removed -> (M, 4) numpy array
        points = self.get_single_pcd_info(single_pcd_path)
        # Input dict consumed by prepare_data(): raw points plus frame identifiers
        input_dict = {
            'points': points,   # point cloud, (M, 4)
            'frame_id': single_pcd_path,
            'single_pcd_path':single_pcd_path,
        }
        # Drop every 'unknown'-class object from this frame's labels
        single_objects_label_info = self.drop_info_with_name(info=single_objects_label_info,name='unknown')
        name =single_objects_label_info['name']                    # (N,)
        box_center = single_objects_label_info['box_center']       # (N, 3)
        box_size = single_objects_label_info['box_size']           # (N, 3)
        box_rotation = single_objects_label_info['box_rotation']   # (N, 3)
        tracker_id = single_objects_label_info['tracker_id']       # (N,)
        # Assemble boxes in the unified format expected downstream:
        # gt_boxes: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center.
        # Only the z component of box_rotation is used as the heading (yaw).
        rotation_yaw = box_rotation[:,2].reshape(-1,1)
        gt_boxes = np.concatenate([box_center,box_size,rotation_yaw],axis=1).astype(np.float32)
        input_dict.update({
            'gt_names':name,
            'gt_boxes':gt_boxes,
            'tracker_id':tracker_id
        })
        # Hand off to the dataset base class (augmentation / voxelization / etc.)
        data_dict = self.prepare_data(data_dict=input_dict)
        return data_dict
#由文件的完整路径得到文件的名字(去掉多余的信息)
def from_filepath_get_filename(self,filepath):
filename = ''
filepath = filepath
#得到一个元祖tuple,(目录,文件名)
filepath_and_filename = os.path.split(filepath)
filename = filepath_and_filename[1]
#得到文件名+后缀,得到一个元祖tuple,(文件名,后缀)
filename_and_extension = os.path.splitext(filename)
filename = filename_and_extension[0]
return filename
    # Build the per-object ground-truth database used for "gt sampling" augmentation.
    def create_groundtruth_database(self,info_path = None,used_classes =None,split = 'train'):
        """Extract each annotated object's points into its own .bin file and
        write an index (robosense_dbinfos_<split>.pkl) describing every object.

        Args:
            info_path: path to the pickled frame-info list produced by
                ``create_robosense_infos``.
            used_classes: optional whitelist of class names; ``None`` keeps all.
            split: split name; only 'train' writes into the plain ``gt_database`` dir.
        """
        import torch
        database_save_path = Path(self.root_path)/('gt_database' if split =='train' else ('gt_database_%s'%split))
        db_info_save_path = Path(self.root_path)/('robosense_dbinfos_%s.pkl'%split)
        database_save_path.mkdir(parents=True,exist_ok=True)
        all_db_infos = {}
        with open(info_path,'rb') as f:
            infos = pickle.load(f)
        for k in range(len(infos)):
            print('gt_database sample:%d/%d'%(k+1,len(infos)))
            info = infos[k]
            # Drop all 'unknown'-class annotations from this frame
            info = self.drop_info_with_name(info=info,name='unknown')
            single_label_path = info['single_label_path']
            single_pcd_path = info['single_pcd_path']
            points = self.get_single_pcd_info(single_pcd_path)
            # Bare file name (no directory/extension), used to name the per-object .bin files
            single_filename = self.from_filepath_get_filename(single_label_path)
            name = info['name']
            box_center = info['box_center']
            box_size = info['box_size']
            box_rotation = info['box_rotation']
            tracker_id = info['tracker_id']
            gt_boxes = info['gt_boxes']
            # Number of valid (non-unknown) objects in this frame
            num_obj = len(name)
            # point_indices: (num_boxes, num_points) indicator matrix; an entry > 0
            # means the point lies inside that box
            point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
                torch.from_numpy(points[:,0:3]),torch.from_numpy(gt_boxes)
            ).numpy()     # (nboxes, npoints)
            for i in range(num_obj):
                filename = '%s_%s_%d.bin'%(single_filename,name[i],i)
                filepath = database_save_path / filename
                # Boolean mask over the M points selects those inside box i
                gt_points = points[point_indices[i]>0]
                # Shift the object's points so they are relative to the box center
                gt_points[:, :3] -= gt_boxes[i, :3]
                # Dump this object's points to its own binary file
                with open(filepath,'w') as f:
                    gt_points.tofile(f)
                if (used_classes is None) or name[i] in used_classes:
                    db_path = str(filepath.relative_to(self.root_path))  # gt_database/xxxxx.bin
                    # Per-object record for the augmentation sampler.
                    # NOTE(review): box_center/box_size/box_rotation/tracker_id store the
                    # whole frame's arrays here, not object i's row -- confirm intended.
                    db_info = {
                        'name':name[i],'path':db_path,'image_idx':single_filename,
                        'gt_idx':i,'box3d_lidar':gt_boxes[i],'num_points_in_gt':gt_points.shape[0],
                        'box_center':box_center,'box_size':box_size,'box_rotation':box_rotation,'tracker_id':tracker_id
                    }
                    if name[i] in all_db_infos:
                        all_db_infos[name[i]].append(db_info)
                    else:
                        all_db_infos[name[i]] = [db_info]
        for k,v in all_db_infos.items():
            print('Database %s: %d'%(k,len(v)))
        with open(db_info_save_path,'wb') as f:
            pickle.dump(all_db_infos,f)
#在 self.generate_prediction_dicts()中接收模型预测的
# 在统一坐标系下表示的3D检测框,并转回自己所需格式即可。
@staticmethod
def generate_prediction_dicts(batch_dict,pred_dicts,class_names,output_path = None):
'''
To support a custom dataset, implement this function to receive the predicted results from the model, and then
transform the unified normative coordinate to your required coordinate, and optionally save them to disk.
要支持自定义数据集,请实现此功能以接收来自模型的预测结果,
然后将统一的标准坐标转换为所需的坐标,然后选择将其保存到磁盘。
Args:
batch_dict: dict of original data from the dataloader
pred_dicts: dict of predicted results from the model
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path: if it is not None, save the results to this path
Returns:
'''
#获取预测后的模板字典 ret_dict,全部定义为全零的向量
#参数num_samples 是这一帧里面的物体个数
def get_template_prediction(num_samples):
ret_dict = {
'name':np.zeros(num_samples),
'box_center':np.zeros([num_samples,3]),
'box_size':np.zeros([num_samples,3]),
'box_rotation':np.zeros([num_samples,3]),
'tracker_id':np.zeros(num_samples),
'scores':np.zeros(num_samples),
'pred_labels':np.zeros(num_samples),
'pred_lidar':np.zeros([num_samples,7])
}
return ret_dict
def generate_single_sample_dict(box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
#定义一个帧的空字典,用来存放来自预测的信息
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
#如果这一帧的预测结果中,没有物体object,则返回空字典
return pred_dict
pred_dict['name'] = np.array(class_names)[pred_labels -1]
pred_dict['scores'] = pred_scores
pred_dict['pred_labels'] = pred_labels
pred_dict['pred_lidar'] = pred_boxes
pred_dict['box_center'] = pred_boxes[:,0:3]
pred_dict['box_size'] = pred_boxes[:,3:6]
pred_dict['box_rotation'][:,-1] = pred_boxes[:,6]
return pred_dict
#由文件的完整路径得到文件的名字(去掉多余的信息)
def from_filepath_get_filename2(filepath):
filename = ''
filepath = filepath
#得到一个元祖tuple,(目录,文件名)
filepath_and_filename = os.path.split(filepath)
filename = filepath_and_filename[1]
#得到文件名+后缀,得到一个元祖tuple,(文件名,后缀)
filename_and_extension = os.path.splitext(filename)
filename = filename_and_extension[0]
return filename
annos = []
for index,box_dict in enumerate(pred_dicts):
single_pred_dict = generate_single_sample_dict(box_dict)
#frame_id是当前帧的文件路径+文件名
frame_id = batch_dict['frame_id'][index]
single_pred_dict['frame_id'] = frame_id
annos.append(single_pred_dict)
#如果输出路径存在,则将预测的结果写入文件中
if output_path is not None:
filename = from_filepath_get_filename2(frame_id)
cur_det_file = Path(output_path)/('%s.txt'%filename)
with open(cur_det_file,'w') as f:
name =single_pred_dict['name']
box_center = single_pred_dict['box_center']
box_size = single_pred_dict['box_size']
box_rotation = single_pred_dict['box_rotation']
for idx in range(len(single_pred_dict['name'])):
print('%s,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,'
%(name[idx],
box_center[idx][0],box_center[idx][1],box_center[idx][2],
box_size[idx][0],box_size[idx][1],box_size[idx][2],
box_rotation[idx][0],box_rotation[idx][1],box_rotation[idx][1]),
file=f)
return annos
    def evaluation(self,det_annos,class_names,**kwargs):
        """Evaluate detection results against the loaded ground truth using the
        KITTI-style metrics.

        Args:
            det_annos: list of per-frame prediction dicts produced by
                ``generate_prediction_dicts`` (keys: name, box_center, box_size,
                box_rotation, scores, pred_labels, pred_lidar, frame_id, ...).
            class_names: class names to evaluate.

        Returns:
            (ap_result_str, ap_dict) from the evaluator, or (None, {}) when no
            ground-truth info is available.
        """
        if 'name' not in self.robosense_infos[0].keys():
            # No annotations loaded -> nothing to evaluate
            return None,{}
        from ...kitti.kitti_object_eval_python import eval3 as kitti_eval
        # Deep-copy so the evaluator cannot mutate the cached predictions / GT
        eval_det_info = copy.deepcopy(det_annos)
        # Ground truth for this split, i.e. a copy of self.robosense_infos
        eval_gt_infos = [copy.deepcopy(info) for info in self.robosense_infos]
        # Compute the AP values with the official KITTI evaluation
        ap_result_str,ap_dict = kitti_eval.get_official_eval_result(eval_gt_infos,eval_det_info,class_names)
        return ap_result_str,ap_dict
def create_robosense_infos(dataset_cfg,class_names,data_path,save_path,workers=4):
    """Generate the pickled info files (train/val/trainval/test) and the
    ground-truth database for the RoboSense dataset.

    Args:
        dataset_cfg: dataset configuration (EasyDict).
        class_names: list of class names used by the dataset.
        data_path: root directory holding the raw pcd/label files.
        save_path: directory the .pkl info files are written to.
        workers: unused here; kept for signature symmetry with other datasets.
    """
    dataset = RobosenseDataset(dataset_cfg=dataset_cfg,class_names=class_names,root_path=data_path,training=False)
    train_split,val_split = 'train','val'
    # Fraction of all labelled frames that go into the train split
    TRAIN_RATIO_OF_ALL_LABELS = dataset.train_ratio_of_all_labels
    # Output file names
    train_filename = save_path + '/' + ('robosense_infos_%s.pkl'%train_split)
    val_filename = save_path + '/' +('robosense_infos_%s.pkl'%val_split)
    trainval_filename = save_path + '/' + 'robosense_infos_trainval.pkl'
    test_filename = save_path + '/' +'robosense_infos_test.pkl'
    files_list_pcd,files_list_label =dataset.get_files_name_list()
    # Randomly sample TRAIN_RATIO_OF_ALL_LABELS of the labels as train and use the
    # rest as val, then derive the matching pcd path lists
    files_list_label_train = random.sample(files_list_label,int(TRAIN_RATIO_OF_ALL_LABELS*len(files_list_label)))
    files_list_label_val = dataset.list_subtraction(files_list_label,files_list_label_train)
    files_list_pcd_train = dataset.from_labels_path_list_to_pcd_path_list(files_list_label_train)
    files_list_pcd_val = dataset.from_labels_path_list_to_pcd_path_list(files_list_label_val)
    # Push the split lists back onto the dataset instance
    dataset.files_list_pcd =files_list_pcd
    dataset.files_list_label =files_list_label
    dataset.files_list_label_train =files_list_label_train
    dataset.files_list_label_val =files_list_label_val
    dataset.files_list_pcd_train = files_list_pcd_train
    dataset.files_list_pcd_val = files_list_pcd_val
    print('------------------------Start to generate data infos-----------------------')
    robosense_infos_train = dataset.get_all_labels(files_list_label=files_list_label_train)
    with open(train_filename,'wb') as f:
        pickle.dump(robosense_infos_train,f)
    print('robosense info train file is saved to %s'%train_filename)
    robosense_infos_val = dataset.get_all_labels(files_list_label=files_list_label_val)
    with open(val_filename,'wb') as f:
        pickle.dump(robosense_infos_val,f)
    print('robosense info val file is saved to %s'%val_filename)
    with open(trainval_filename,'wb') as f:
        pickle.dump(robosense_infos_train + robosense_infos_val,f)
    print('robosense info trainval file is saved to %s'%trainval_filename)
    robosense_infos_test = dataset.get_all_labels(files_list_label=files_list_label)
    with open (test_filename,'wb') as f:
        pickle.dump(robosense_infos_test,f)
    print('robosense info test file is saved to %s'%test_filename)
    print('---------------------Strat create groundtruth database for data augmentation ----------------')
    # Build the per-object GT database used for data augmentation
    dataset.create_groundtruth_database(info_path=train_filename,split=train_split)
    print('---------------------Congratulation ! Data preparation Done !!!!!!---------------------------')
    pass
if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1 and sys.argv[1] == 'create_robosense_infos':
        import yaml
        from pathlib import Path
        from easydict import EasyDict
        # yaml.load() without an explicit Loader is unsafe and rejected by
        # PyYAML >= 6; safe_load is correct for a plain config file. The config
        # file handle is now also closed deterministically.
        with open(sys.argv[2]) as cfg_file:
            dataset_cfg = EasyDict(yaml.safe_load(cfg_file))
        # Project root, e.g. /root/dataset/OpenPCDet (currently unused below)
        ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
        # Dataset classes; the 'pedestrain' spelling is kept on purpose -- it must
        # match the class names used in the label files.
        class_names= ['cone', 'pedestrain','bicycle','vehicle','big_vehicle']
        create_robosense_infos(
            dataset_cfg=dataset_cfg,
            class_names= class_names,
            data_path='/root/dataset/RoboSense_Dataset/RS_datasets/datasets/',
            save_path='/root/dataset/RoboSense_Dataset/RS_datasets/datasets/'
        )
| HuangCongQing/pcdet-note | pcdet/datasets/huituo/robosense/robosense_dataset.py | robosense_dataset.py | py | 28,527 | python | en | code | 45 | github-code | 90 |
19131016125 | import os
# readFile and WriteFile from
# http://www.cs.cmu.edu/~112/notes/notes-strings.html
def readFile(path):
    """Return the full text content of the file at *path*."""
    with open(path, "rt") as source:
        contents = source.read()
    return contents
def writeFile(path, contents):
    """Overwrite the file at *path* with *contents* (text mode)."""
    with open(path, "wt") as destination:
        destination.write(contents)
def findConfig():
    """Return the expected path of Euro Truck Simulator 2's config.cfg.

    The game keeps it under the user's Documents folder:
    ~/Documents/Euro Truck Simulator 2/config.cfg
    """
    home = os.path.expanduser('~')
    documents = os.path.join(home, 'Documents')
    euro = os.path.join(documents, 'Euro Truck Simulator 2')
    # os.path.join replaces the previous `euro + '\config.cfg'` concatenation:
    # '\c' is an invalid escape sequence (a SyntaxWarning on modern Python) and
    # it hard-coded the Windows path separator.
    return os.path.join(euro, 'config.cfg')
def checkConfig():
    """Return True if the ETS2 config already holds the 800x600 windowed
    settings Jalopy needs, False as soon as any of them deviates."""
    expected = {
        'r_mode_height': '600',
        'r_mode_width': '800',
        'r_fullscreen': '0',
    }
    config = readFile(findConfig())
    for line in config.splitlines():
        for setting, value in expected.items():
            # Both the 'user ' and 'uset ' prefixes are accepted, matching the
            # variants seen in real config files.
            matches = (line.startswith('user ' + setting) or
                       line.startswith('uset ' + setting))
            if matches and value not in line:
                # Any mismatch means the whole config needs rewriting
                return False
    return True
def modifyConfig():
    """Rewrite the ETS2 config so the game runs windowed at 800x600."""
    replacements = {
        'r_mode_height': 'uset r_mode_height "600"\n',
        'r_mode_width': 'uset r_mode_width "800"\n',
        'r_fullscreen': 'uset r_fullscreen "0"\n',
    }
    path = findConfig()
    rewritten = ""
    for line in readFile(path).splitlines():
        for setting, replacement in replacements.items():
            # Accept both the 'user ' and 'uset ' prefixes, as checkConfig does
            if (line.startswith('user ' + setting) or
                    line.startswith('uset ' + setting)):
                rewritten += replacement
                break
        else:
            # Untouched parameter: keep the original line
            rewritten += line + '\n'
    writeFile(path, rewritten)
def main():
    """Entry point: patch the game config only when it does not already match."""
    config_ok = checkConfig()
    if not config_ok:
        modifyConfig()

if __name__ == '__main__':
    main()
| eh8/jalopy | jalopy/changeSettings.py | changeSettings.py | py | 2,466 | python | en | code | 2 | github-code | 90 |
18592717619 | #HarshadNumber
# Harshad (Niven) number check: a number divisible by the sum of its digits.
def hrsh(x):
    """Return the decimal digit sum of the non-negative integer x."""
    s = 0
    # BUGFIX: the loop condition was `x > 10`, which stopped one step early when
    # an intermediate value hit exactly 10 and then added 10 (instead of 1+0) to
    # the sum -- e.g. 105 yielded 15 instead of 6, flipping the Yes/No answer.
    while x >= 10:
        s += x % 10
        x = x // 10
    return s + x

if __name__ == '__main__':
    n = int(input())
    if n % hrsh(n) == 0:
        print('Yes')
    else:
        print('No')
37762251953 | from django.urls import path
from . import views
app_name = "api"

urlpatterns = [
    # BUGFIX: Django URL patterns must not start with a leading slash -- the URL
    # dispatcher strips the initial "/" from the request path before matching,
    # so "/home" could only ever match "//home". The route names are unchanged,
    # so reverse()/{% url %} callers keep working.
    path('home', views.home, name='home'),
    path('search_links', views.get_links, name='links'),
    path('search_images', views.get_images, name='images'),
    path('search_pdfs', views.get_pdfs, name='pdfs'),
]
72211279338 | N = int(input())
arr = list(map(int, input().split()))
# Coordinate compression: replace each value with its rank among the distinct
# values (0-based, ascending).
rank_of = {value: rank for rank, value in enumerate(sorted(set(arr)))}
print(*[rank_of[value] for value in arr])
41423339336 | from flask import Flask, render_template, Response
import numpy as np
import cv2
import os
# Silence TensorFlow's C++ logging before the import ("3" suppresses INFO,
# WARNING and ERROR messages); the variable must be set before tensorflow loads.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf

app = Flask(__name__)
# Landing page; its template embeds the /video_feed MJPEG stream.
@app.route("/")
def index():
    """Video streaming home page."""
    return render_template("index.html")
# Load the pre-trained Keras model once at startup; gen() runs it on every frame.
net = tf.keras.models.load_model("./model_file/model-icdar-1.4")
def gen(video):
    """Yield an MJPEG multipart byte stream: each webcam frame is run through
    the model, the four predicted corner points are drawn on it, and the frame
    is JPEG-encoded for the browser.

    Args:
        video: an opened cv2.VideoCapture to read frames from.
    """
    while True:
        # BUGFIX: the success flag of video.read() was ignored; a failed read
        # returns (False, None) and the code then crashed on None. Stop cleanly.
        grabbed, image = video.read()
        if not grabbed:
            break
        img_show = np.copy(image)
        x, y, _ = img_show.shape  # x = height (rows), y = width (cols)
        # Model input: 224x224, float32, scaled to [0, 1]
        img = cv2.resize(image, (224, 224))
        img = img.astype(np.float32)
        img2 = img / 255.0
        result = net([img2], training=False).numpy()[0]
        # First 8 outputs are the 4 corner coordinates, normalized to [0, 1].
        # NOTE(review): x-coordinates are scaled by the image *height* and
        # y-coordinates by the *width* -- confirm the model's output convention;
        # this looks swapped for non-square frames.
        coord = result[0:8]
        coord[0::2] *= x
        coord[1::2] *= y
        coord = coord.astype(int)
        # Draw the four corners, one color each
        corner_colors = ((0, 0, 255), (0, 255, 255), (255, 0, 0), (0, 255, 0))
        for idx, color in enumerate(corner_colors):
            cv2.circle(img_show, (coord[2 * idx], coord[2 * idx + 1]), 3, color, -1)
        # Upscale 2x for display and mirror horizontally (webcam view)
        img_show = cv2.resize(img_show, (2 * y, 2 * x))
        img_show = cv2.flip(img_show, 1)
        ok, jpeg = cv2.imencode(".jpg", img_show)
        if not ok:
            # Skip a frame that failed to encode rather than streaming garbage
            continue
        frame = jpeg.tobytes()
        yield (b"--frame\r\n" b"Content-Type: image/jpeg\r\n\r\n" + frame + b"\r\n\r\n")
@app.route("/video_feed")
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    # multipart/x-mixed-replace makes the browser replace the <img> content with
    # each successive JPEG part produced by gen().
    return Response(gen(video), mimetype="multipart/x-mixed-replace; boundary=frame")
if __name__ == "__main__":
    # Capture device 0 (default webcam); read by gen() via the /video_feed route
    video = cv2.VideoCapture(0)
    app.run(host="0.0.0.0", debug=False, threaded=True)
| sasuke-ss1/LDR_NET | app.py | app.py | py | 1,648 | python | en | code | 2 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.