seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
12258827784 | import pprint
# 9x9 Sudoku board; 0 marks an empty cell for the solver to fill in place.
grid = [
    [1, 0, 0, 0, 4, 0, 0, 0, 0],
    [0, 9, 2, 6, 0, 0, 3, 0, 0],
    [3, 0, 0, 0, 0, 5, 1, 0, 0],
    [0, 7, 0, 1, 0, 0, 0, 0, 4],
    [0, 0, 4, 0, 5, 0, 6, 0, 0],
    [2, 0, 0, 0, 0, 4, 0, 8, 0],
    [0, 0, 9, 4, 0, 0, 0, 0, 1],
    [0, 0, 8, 0, 0, 6, 5, 2, 0],
    [0, 0, 0, 0, 1, 0, 0, 0, 6]
]
def find_next_empty_el(smth):
    """Return the (row, col) of the first empty cell (value 0) in the 9x9
    grid, scanning left-to-right and top-to-bottom.

    Returns None implicitly when the grid has no empty cells left.
    """
    for r in range(9):
        for c in range(9):
            if smth[r][c] == 0:
                return r, c
def valid(row, col, gr, value):
    """Return True when `value` may be placed at (row, col) in grid `gr`:
    it must not already appear in the same row, column, or 3x3 box."""
    # Row check.
    if value in gr[row]:
        return False
    # Column check.
    if any(gr[r][col] == value for r in range(9)):
        return False
    # 3x3 box check: top-left corner of the box containing (row, col).
    box_r = row - row % 3
    box_c = col - col % 3
    for r in range(box_r, box_r + 3):
        for c in range(box_c, box_c + 3):
            if gr[r][c] == value:
                return False
    return True
def solve(gr):
    """Solve the Sudoku grid in place using recursive backtracking.

    Returns True when the grid has been completely filled with a valid
    solution, False when the current partial assignment is unsolvable.
    """
    # Fix: the original called find_next_empty_el() twice (once to test,
    # once to unpack); reuse the single lookup instead of rescanning.
    find = find_next_empty_el(gr)
    if find is None:
        # No empty cell remains: the puzzle is solved.
        return True
    row, col = find
    for value in range(1, 10):
        if valid(row, col, gr, value):
            gr[row][col] = value
            if solve(gr):
                return True
            gr[row][col] = 0  # backtrack
    return False
# Solve the puzzle in place, then pretty-print the completed grid.
solve(grid)
pprint.pprint(grid)
| MilezNoles/sudokuSolver | main.py | main.py | py | 1,197 | python | en | code | 0 | github-code | 13 |
2250752729 | """Wrapper to record rendered video frames from an environment."""
import pathlib
from typing import Any, Dict, Optional, SupportsFloat, Tuple
import gymnasium as gym
from gymnasium.core import WrapperActType, WrapperObsType
from gymnasium.wrappers.monitoring import video_recorder
class VideoWrapper(gym.Wrapper):
    """Records video of the wrapped environment by rendering every timestep.

    Attributes:
        episode_id: number of episodes started so far.
        video_recorder: the active recorder, or None before the first reset.
        single_video: whether all episodes are written to a single file.
        directory: output directory for video files.
    """

    episode_id: int
    video_recorder: Optional[video_recorder.VideoRecorder]
    single_video: bool
    directory: pathlib.Path

    def __init__(
        self,
        env: gym.Env,
        directory: pathlib.Path,
        single_video: bool = True,
    ):
        """Builds a VideoWrapper.

        Args:
            env: the wrapped environment.
            directory: the output directory.
            single_video: if True, all episodes are concatenated into a single
                video file; if False, each episode gets its own file. One file
                is usually what you want, but per-episode files make it easier
                to locate an interesting episode via its metadata.
        """
        super().__init__(env)
        self.episode_id = 0
        self.video_recorder = None
        self.single_video = single_video
        self.directory = directory
        self.directory.mkdir(parents=True, exist_ok=True)

    def _reset_video_recorder(self) -> None:
        """Ensures a video recorder exists for the upcoming episode.

        Called at the start of each episode (by `reset`). When a recorder is
        already running and per-episode files were requested, the current
        recorder is closed and a fresh one is created.
        """
        if self.video_recorder is not None and not self.single_video:
            # Per-episode videos: finish the current file before starting anew.
            self.video_recorder.close()
            self.video_recorder = None
        if self.video_recorder is None:
            # Start a recorder named after the episode that is about to begin.
            self.video_recorder = video_recorder.VideoRecorder(
                env=self.env,
                base_path=str(self.directory / f"video.{self.episode_id:06}"),
                metadata={"episode_id": self.episode_id},
            )

    def reset(
        self,
        *,
        seed: Optional[int] = None,
        options: Optional[Dict[str, Any]] = None,
    ) -> Tuple[WrapperObsType, Dict[str, Any]]:
        self._reset_video_recorder()
        self.episode_id += 1
        return super().reset(seed=seed, options=options)

    def step(
        self,
        action: WrapperActType,
    ) -> Tuple[WrapperObsType, SupportsFloat, bool, bool, Dict[str, Any]]:
        outcome = super().step(action)
        assert self.video_recorder is not None
        self.video_recorder.capture_frame()
        return outcome

    def close(self) -> None:
        # Flush and release the recorder before closing the environment.
        recorder, self.video_recorder = self.video_recorder, None
        if recorder is not None:
            recorder.close()
        super().close()
| HumanCompatibleAI/imitation | src/imitation/util/video_wrapper.py | video_wrapper.py | py | 3,138 | python | en | code | 1,004 | github-code | 13 |
31692283560 | budget = int(input())
season = input()
amount_fisherman = int(input())
price = 0
# Base boat price depends on the season of the trip.
if season == "Spring":
    price = 3000
if season == "Summer" or season == "Autumn":
    price = 4200
if season == "Winter":
    price = 2600
# Group-size discount: up to 6 people -> 10%, 7-11 -> 15%, 12 or more -> 25%.
if amount_fisherman <= 6:
    price = price * 0.9
elif 7 <= amount_fisherman <= 11:
    price = price * 0.85
elif amount_fisherman >= 12:  # bug fix: was "> 12", so a group of exactly 12 got no discount
    price = price * 0.75
# Even-sized groups get a further 5% off, except in autumn.
if amount_fisherman % 2 == 0:
    if season != "Autumn":
        price = price * 0.95
# Absolute gap between the budget and the final price: leftover money when
# the budget covers the trip, shortfall otherwise.
diff = abs(budget - price)
if budget >= price:
    print(f"Yes! You have {diff:.2f} leva left.")
if budget < price:
print(f"Not enough money! You need {diff:.2f} leva.") | DPrandzhev/Python-SoftUni | Programming_Basics-SoftUni-Python/ConditionalStatements - Advanced/fishing_boat.py | fishing_boat.py | py | 651 | python | en | code | 0 | github-code | 13 |
27241396001 |
# Import Statements
import math
from math import radians, cos, sin, atan, sqrt
#Functions
def header():
    """Print the program's welcome banner."""
    print(" Welcome to my Geo Calculator.")
def get_location():
    """Prompt the user for a latitude/longitude pair in decimal degrees and
    return it as a (lat, lon) tuple of floats."""
    latitude = float(input('Please enter a latitude in decimal degrees: '))
    longitude = float(input('Please enter a longitude in decimal degrees: '))
    return (latitude, longitude)
def distance(origin, destination):
    """Return the great-circle distance in miles between two (lat, lon)
    points given in decimal degrees, using the haversine formula.

    Adapted from https://gist.github.com/rochacbruno/2883505
    """
    lat1, lon1 = map(math.radians, origin)
    lat2, lon2 = map(math.radians, destination)
    r = 3956  # Earth radius in miles
    dlat = lat2 - lat1
    dlon = lon2 - lon1
    a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
    # Bug fix: the haversine formula requires atan2(sqrt(a), sqrt(1 - a))
    # (equivalently asin(sqrt(a))); the original used atan(sqrt(a)), which
    # understates every distance, badly so for far-apart points.
    c = 2 * math.atan2(sqrt(a), sqrt(1 - a))
    d = c * r
    return d
# Main program: repeatedly read two coordinate pairs and report the distance.
header()
doanother = 'y'
while doanother == 'y':
    origin = get_location()
    destination = get_location()
    # NOTE(review): this call's result is discarded; the distance is
    # recomputed inside the print below.
    distance(origin, destination)
    print('The distance between', origin, 'and' ,\
          destination, 'is', round(distance(origin, destination),2), 'miles.')
    doanother = input('Do another (y/n) ?')
    # Any answer other than 'y' ends the loop; only 'n' prints the farewell.
    if doanother == 'n':
        print("Thanks for trying my GEO Calculator. Goodbye!")
        break
| kyrstid/P6-Two-Geographic-Points | P6-Two-Geographic-Points.py | P6-Two-Geographic-Points.py | py | 1,208 | python | en | code | 0 | github-code | 13 |
15743612492 | from itertools import combinations
def solution(m, weights):
    """Count the non-empty subsets of `weights` whose total is exactly `m`."""
    return sum(
        1
        for size in range(1, len(weights) + 1)
        for subset in combinations(weights, size)
        if sum(subset) == m
    )
| ssooynn/algorithm_python | 프로그래머스/사탕담기.py | 사탕담기.py | py | 307 | python | en | code | 0 | github-code | 13 |
33163587213 | #Lesson 74: Listbox
# https://www.youtube.com/watch?v=xiUTqnI6xk8
#listbox = a listing of slectable text items within its own container
from tkinter import *
window = Tk()
def submit():
    """Print every currently-selected listbox entry as the order."""
    chosen = [listbox.get(i) for i in listbox.curselection()]
    print("You have ordered: ")
    for item in chosen:
        print(item)
def add():
    """Append the Entry widget's text to the listbox, then resize to fit."""
    listbox.insert(END, EntryBox.get())
    listbox.config(height=listbox.size())
def delete():
    """Remove every selected entry, then shrink the listbox to fit."""
    # Delete from the bottom up so earlier indices stay valid.
    for i in sorted(listbox.curselection(), reverse=True):
        listbox.delete(i)
    listbox.config(height=listbox.size())
# Menu listbox; selectmode=MULTIPLE lets several dishes be selected at once.
listbox = Listbox(window,
                  bg='#f7ffde',
                  font=("Constantia", 35),
                  width=12,
                  height=10,
                  selectmode=MULTIPLE)
listbox.pack()
# Seed the menu with some starter items.
listbox.insert(1, 'pizza')
listbox.insert(2, 'pasta')
listbox.insert(3, 'garlic bread')
listbox.insert(4, 'soup')
listbox.insert(5, 'salad')
# Resize the widget so it exactly fits its contents.
listbox.config(height=listbox.size())
# Text entry plus buttons wired to the submit/add/delete handlers above.
EntryBox = Entry(window)
EntryBox.pack()
SubmitButton = Button(window, text='submit', command=submit)
SubmitButton.pack()
AddButton = Button(window, text='add', command=add)
AddButton.pack()
DeleteButton = Button(window, text='delete', command=delete)
DeleteButton.pack()
window.mainloop() | Bill-Corkery/BroCode-PythonFullCourse | 74-Listbox.py | 74-Listbox.py | py | 1,343 | python | en | code | 1 | github-code | 13 |
39777328729 | import decimal
import json
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.core.exceptions import ValidationError
from django.db.models import Count, Prefetch, Q, Sum
from django.db.models.functions import Coalesce, TruncDate
from django.http import JsonResponse
from django.urls import reverse_lazy
from django.utils import timezone
from django.utils.translation import gettext as _
from django.views.generic import CreateView, DeleteView, DetailView, FormView, ListView, TemplateView, UpdateView
from packman.calendars.models import PackYear
from packman.dens.models import Den, Membership
from packman.membership.models import Scout
from .forms import CustomerForm, OrderForm, OrderItemFormSet, PrizeSelectionForm
from .mixins import UserIsSellerFamilyTest
from .models import Campaign, Order, OrderItem, Prize, PrizePoint, PrizeSelection, Product, Quota
from .utils import email_receipt
class OrderListView(LoginRequiredMixin, ListView):
    """Lists the logged-in family's orders for a campaign."""

    model = Order
    template_name = "campaigns/order_list.html"

    def get_queryset(self):
        queryset = super().get_queryset()
        # Optional ?filter= querystring narrows to delivered/undelivered orders.
        if self.request.GET.get("filter") == "delivered":
            queryset = queryset.delivered()
        elif self.request.GET.get("filter") == "undelivered":
            queryset = queryset.undelivered()
        # Use the campaign named in the URL, falling back to the current one.
        campaign = (
            Campaign.objects.get(year=PackYear.get_pack_year(self.kwargs["campaign"])["end_date"].year)
            if "campaign" in self.kwargs
            else Campaign.objects.current()
        )
        # Separated families only see orders they recorded themselves.
        if self.request.user.family.is_seperated:
            queryset = queryset.filter(recorded_by=self.request.user)
        return (
            queryset.prefetch_related("seller", "customer", "recorded_by")
            .calculate_total()
            .filter(seller__family=self.request.user.family, campaign=campaign)
            .order_by("-seller__date_of_birth", "date_added")
        )

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        # Campaigns the family participated in (plus the current pack year),
        # the current campaign, and the one being viewed.
        context["campaigns"] = {
            "available": Campaign.objects.filter(
                Q(orders__seller__family=self.request.user.family) | Q(year=PackYear.objects.current())
            )
            .distinct()
            .order_by("-ordering_opens"),
            "current": Campaign.objects.current(),
            "viewing": (
                Campaign.objects.get(year=PackYear.get_pack_year(self.kwargs["campaign"])["end_date"].year)
                if "campaign" in self.kwargs
                else Campaign.objects.current()
            ),
        }
        return context
class OrderReportView(PermissionRequiredMixin, TemplateView):
    """Aggregate order statistics (count, grand total, per-day totals) for a campaign."""

    permission_required = "campaigns.generate_order_report"
    template_name = "campaigns/order_report.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["campaigns"] = {
            "available": Campaign.objects.all(),
            "current": Campaign.objects.current(),
            # Use the campaign named in the URL, falling back to the latest.
            "viewing": (
                Campaign.objects.get(year=PackYear.get_pack_year(int(self.kwargs["campaign"]))["end_date"].year)
                if "campaign" in self.kwargs
                else Campaign.objects.latest()
            ),
        }
        orders = Order.objects.calculate_total().filter(campaign=context["campaigns"]["viewing"])
        context["report"] = {
            "count": orders.count(),
            "total": orders.totaled()["totaled"],
            # One row per calendar day: order count and summed order total.
            "days": orders.annotate(date=TruncDate("date_added"))
            .order_by("date")
            .values("date")
            .annotate(count=Count("date"), order_total=Coalesce(Sum("total"), decimal.Decimal(0.00)))
            .values("date", "count", "order_total"),
        }
        return context
class OrderCreateView(LoginRequiredMixin, SuccessMessageMixin, CreateView):
    """Records a new order together with its customer and line items."""

    model = Order
    form_class = OrderForm
    success_message = _("Your order was successful.")
    template_name = "campaigns/order_form.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["product_list"] = Product.objects.current()
        # Bind the nested customer form and item formset to POST data when
        # present; otherwise render them empty.
        if self.request.POST:
            context["customer_form"] = CustomerForm(self.request.POST)
            context["items_formset"] = OrderItemFormSet(self.request.POST)
        else:
            context["customer_form"] = CustomerForm()
            context["items_formset"] = OrderItemFormSet()
        return context

    def get_form_kwargs(self):
        # Pass the request through to OrderForm.
        kwargs = super().get_form_kwargs()
        kwargs["request"] = self.request
        return kwargs

    def get_initial(self):
        # ?cub=<pk> in the querystring pre-selects the seller.
        initial = super().get_initial()
        if "cub" in self.request.GET:
            initial["seller"] = self.request.GET.get("cub")
        return initial

    def form_valid(self, form):
        # All three forms (order, customer, line items) must validate together.
        context = self.get_context_data(form=form)
        customer_form = context["customer_form"]
        items_formset = context["items_formset"]
        if customer_form.is_valid() and items_formset.is_valid():
            form.instance.customer = customer_form.save()
            form.instance.recorded_by = self.request.user
            self.object = form.save()
            items_formset.instance = self.object
            items_formset.save()
            # Email a receipt only when the customer supplied an address.
            if self.object.customer.email:
                email_receipt(self.object)
            return super().form_valid(form)
        return super().form_invalid(form)
class OrderUpdateView(UserIsSellerFamilyTest, SuccessMessageMixin, UpdateView):
    """Edits an existing order; only the seller's family may access it."""

    model = Order
    form_class = OrderForm
    # Fix: user-facing typo "You order" -> "Your order".
    success_message = _("Your order was updated successfully.")
    template_name = "campaigns/order_form.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Products for the latest campaign, with this order's items prefetched.
        context["product_list"] = Product.objects.filter(campaign=Campaign.get_latest()).prefetch_related(
            Prefetch("orders", queryset=OrderItem.objects.filter(order=self.object))
        )
        # Bind nested forms to POST data when present; otherwise pre-fill them
        # from the existing order.
        if self.request.POST:
            context["customer_form"] = CustomerForm(self.request.POST, instance=self.object.customer)
            context["items_formset"] = OrderItemFormSet(self.request.POST, instance=self.object)
        else:
            context["customer_form"] = CustomerForm(instance=self.object.customer)
            context["items_formset"] = OrderItemFormSet(instance=self.object)
        return context

    def get_form_kwargs(self):
        # Pass the request through to OrderForm.
        kwargs = super().get_form_kwargs()
        kwargs["request"] = self.request
        return kwargs

    def form_valid(self, form):
        context = self.get_context_data(form=form)
        customer_form = context["customer_form"]
        items_formset = context["items_formset"]
        if customer_form.is_valid() and items_formset.is_valid():
            form.instance.customer = customer_form.save()
            self.object = form.save()
            items_formset.instance = self.object
            items_formset.save()
            # An order must contain at least one item or a donation.
            if self.object.items.exists() or self.object.donation:
                return super().form_valid(form)
            form.add_error(None, ValidationError(_("You haven't ordered anything."), code="incomplete"))
            return super().form_invalid(form)
        return super().form_invalid(form)
class OrderDeleteView(UserIsSellerFamilyTest, DeleteView):
    """Confirms and performs deletion of an order, then returns to the order list."""

    model = Order
    template_name = "campaigns/order_confirm_delete.html"
    success_url = reverse_lazy("campaigns:order_list")

    def form_valid(self, form):
        # Fix: the original interpolated `% {"page": self.object}` into a
        # message containing no placeholders -- a no-op leftover; dropped.
        message = _("The order has been successfully deleted.")
        messages.success(self.request, message, "danger")
        return super().form_valid(form)
class OrderDetailView(DetailView):
    """Read-only detail page for a single order."""

    model = Order
    template_name = "campaigns/order_detail.html"
class PrizeListView(LoginRequiredMixin, ListView):
    """Catalogue of prizes offered in the latest campaign."""

    model = Prize
    template_name = "campaigns/prize_list.html"

    def get_queryset(self):
        # Limit to prizes belonging to the latest campaign.
        return super().get_queryset().filter(campaign=Campaign.objects.latest())
class PrizeSelectionView(LoginRequiredMixin, FormView):
    """Lets a family spend prize points earned by each of its active cubs."""

    form_class = PrizeSelectionForm
    # Fix: user-facing typo "You prize selections" -> "Your prize selections".
    # NOTE(review): FormView does not include SuccessMessageMixin, so this
    # attribute currently has no effect -- confirm whether the mixin should
    # be added to the bases.
    success_message = _("Your prize selections were updated successfully.")
    template_name = "campaigns/prize_form.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        cubs = self.request.user.family.children.active()
        orders = (
            Order.objects.prefetch_related("seller")
            .calculate_total()
            .filter(seller__in=cubs, campaign=Campaign.objects.latest())
        )
        cub_list = []
        for cub in cubs:
            # Den-specific sales target the cub must reach to earn points.
            quota = Quota.objects.get(den=cub.current_den, campaign=Campaign.objects.latest()).target
            total = orders.filter(seller=cub).totaled()["totaled"]
            if total < quota:
                # Below quota: no prize points earned.
                points_earned = 0
            elif total <= 2000:
                # Highest PrizePoint tier whose threshold the total reached.
                points_earned = PrizePoint.objects.filter(earned_at__lte=total).order_by("-earned_at").first().value
            else:
                # Past the top tier: one extra point per 100 sold above it.
                points_earned = PrizePoint.objects.order_by("earned_at").last().value + int(
                    (total - PrizePoint.objects.order_by("earned_at").last().earned_at) / 100
                )
            points_spent = PrizeSelection.objects.filter(
                campaign=Campaign.objects.latest(), cub=cub
            ).calculate_total_points_spent()["spent"]
            cub_list.append(
                {
                    "name": cub.short_name,
                    "pk": cub.pk,
                    "quota": quota,
                    "total": total,
                    "points": {
                        "earned": points_earned,
                        "spent": points_spent,
                        "remaining": points_earned - points_spent,
                    },
                }
            )
        context["prize_list"] = Prize.objects.filter(campaign=Campaign.objects.latest())
        context["cub_list"] = cub_list
        context["total"] = orders.totaled()["totaled"]
        return context
class ProductListView(ListView):
    """Public catalogue of products offered in the current campaign."""

    model = Product
    template_name = "campaigns/product_list.html"

    def get_queryset(self):
        # Limit to products belonging to the current campaign.
        return super().get_queryset().filter(campaign=Campaign.objects.current())
@login_required
def update_order(request):
    """AJAX endpoint toggling an order's paid/delivered timestamps.

    Expects a JSON body with "action" and "orderId"; echoes the action and
    order pk back as JSON.
    """
    payload = json.loads(request.body)
    action = payload["action"]
    order = Order.objects.get(pk=payload["orderId"])
    # Map each recognised action to the field it sets and the value to store.
    stamped = {
        "mark_paid": ("date_paid", timezone.now()),
        "mark_unpaid": ("date_paid", None),
        "mark_delivered": ("date_delivered", timezone.now()),
        "mark_undelivered": ("date_delivered", None),
    }
    if action in stamped:
        field, value = stamped[action]
        setattr(order, field, value)
    order.save()
    return JsonResponse({"action": action, "order": order.pk})
@login_required
def update_prize_selection(request):
    """AJAX endpoint adding or removing one unit of a prize for a cub.

    Expects a JSON body with "action" ("add" or "remove") and the "prize"
    and "cub" primary keys; responds with the resulting quantity.
    """
    data = json.loads(request.body)
    action = data["action"]
    prize = Prize.objects.get(pk=data["prize"])
    cub = Scout.objects.get(pk=data["cub"])
    # Fix: track the resulting quantity explicitly. The original referenced
    # `selection` unconditionally in the response, raising NameError for an
    # unrecognised action, and reported the stale pre-delete quantity after
    # removing the last unit (its `if selection else 0` shows 0 was intended).
    quantity = 0
    if action == "add":
        selection, created = PrizeSelection.objects.get_or_create(
            prize=prize,
            cub=cub,
        )
        if not created:
            selection.quantity += 1
            selection.save()
        quantity = selection.quantity
    elif action == "remove":
        selection = PrizeSelection.objects.get(
            prize=prize,
            cub=cub,
        )
        if selection.quantity <= 1:
            # Removing the last unit deletes the selection row entirely.
            selection.delete()
        else:
            selection.quantity -= 1
            selection.save()
            quantity = selection.quantity
    response = {"action": action, "prize": prize.pk, "cub": cub.pk, "quantity": quantity}
    return JsonResponse(response)
class PlaceMarkerTemplateView(PermissionRequiredMixin, TemplateView):
    """Printable place markers for every active cub in the current pack year."""

    permission_required = "campaigns.generate_order_report"
    template_name = "campaigns/reports/place_markers.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Active scouts' den memberships for the current year, ordered by den.
        context["cub_list"] = (
            Membership.objects.filter(year_assigned=PackYear.objects.current(), scout__status=Scout.ACTIVE)
            .select_related("den", "scout")
            .order_by("den", "scout")
        )
        return context
class PullSheetTemplateView(PermissionRequiredMixin, TemplateView):
    """Pull sheets grouped by den for the current pack year."""

    permission_required = "campaigns.generate_order_report"
    template_name = "campaigns/reports/pull_sheets.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Dens that have scouts assigned in the current pack year.
        context["den_list"] = (
            Den.objects.prefetch_related("campaigns")
            .filter(scouts__year_assigned=PackYear.objects.current())
            .distinct()
        )
        return context
class PrizeSelectionReportView(PermissionRequiredMixin, TemplateView):
    """All prize selections plus per-prize quantities for the current campaign."""

    permission_required = "campaigns.generate_order_report"
    template_name = "campaigns/reports/prize_selections.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        current_campaign = Campaign.objects.current()
        context["prize_selections"] = PrizeSelection.objects.filter(campaign=current_campaign).order_by("cub")
        context["prizes"] = Prize.objects.filter(campaign=current_campaign).calculate_quantity()
        return context
class OrderSlipView(PermissionRequiredMixin, TemplateView):
    """Printable order slips for every non-empty order in the latest campaign."""

    permission_required = "campaigns.generate_order_report"
    template_name = "campaigns/reports/order_slips.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        campaign = Campaign.objects.latest()
        # Only orders that actually contain items; related rows are
        # selected/prefetched to keep the template render cheap.
        context["order_list"] = (
            Order.objects.filter(campaign=campaign, item__isnull=False)
            .distinct()
            .calculate_total()
            .select_related("seller", "customer")
            .prefetch_related("items", "items__product")
            .order_by("seller")
        )
        return context
| Pack144/packman | packman/campaigns/views.py | views.py | py | 14,422 | python | en | code | 1 | github-code | 13 |
72840760977 | from __future__ import print_function
import sys
import requests
from requests.exceptions import Timeout
from lxml import html
# python 2 compabilaty
# Python 2 compatibility: alias input() to raw_input() and force UTF-8 as
# the default string encoding.
if sys.version_info.major == 2:
    input = raw_input
    reload(sys)
    sys.setdefaultencoding('utf8')

# HTTP timeout in seconds and the BVG mobile departure-board endpoint.
TIMEOUT_TIME = 5
BVG_URL = 'http://mobil.bvg.de/Fahrinfo/bin/stboard.bin/dox?'
def get_argument(argument_name, default=''):
    """Return the value following '--<argument_name>' in sys.argv, or
    `default` when the flag is absent or has no value after it."""
    flag = '--' + argument_name
    if flag in sys.argv:
        position = sys.argv.index(flag)
        if len(sys.argv) >= position + 2:
            return sys.argv[position + 1]
    return default
def create_products_filter(select='', ignore=''):
    """Return a bit-mask string selecting or ignoring transport types.

    Types may be comma-separated ('U,S') or run together ('US'). When at
    least one type is selected, all unspecified types are ignored; otherwise
    unspecified types stay selected. A type in both lists counts as selected.
    Mask positions (left to right): S-Bahn, U-Bahn, Tram, Bus, long-distance
    (I), regional (R), and two unused slots.

    ::

        >>> create_products_filter()
        '11111111'
        >>> create_products_filter(select='U,S,R')
        '11000100'
        >>> create_products_filter(ignore='U,S')
        '00111111'
        >>> create_products_filter(select='US', ignore='SBT')
        '11000000'
    """
    def keep(transport):
        # Explicit selection wins over an ignore for the same type.
        if transport in select:
            return '1'
        if transport in ignore:
            return '0'
        # Unspecified: ignored when a select list exists, kept otherwise.
        return '0' if select else '1'
    return ''.join(keep(t) for t in 'SUTBIR__')
def request_station_ids(station_name):
    '''Look up BVG station ids matching *station_name*.

    Returns a tuple ``(data, ok)``. Depending on how distinctive the name
    is, ``data`` is either a list of candidate ``(name, id)`` pairs or a
    one-element tuple with the single match. ``ok`` is False on network
    problems (timeout or non-200 response), in which case ``data`` is None.
    '''
    try:
        r = requests.get(BVG_URL, data={'input': station_name}, timeout=TIMEOUT_TIME)
    except Timeout:
        return None, False
    # Network-level failure: anything but HTTP 200.
    if r.status_code != 200:
        return None, False
    if '--verbose' in sys.argv:
        print('info: response for', r.request.url)
    tree = html.fromstring(r.content)
    data = []
    # Ambiguous name: the page lists candidate stations to choose from.
    if tree.cssselect('span.error'):
        for station in tree.cssselect('span.select a'):
            station_name = station.text.strip()
            # The id is the value of the second '&'-separated href parameter.
            # TODO: clean up direct list access
            station_id = station.get("href").split('&')[1].split('=')[1]
            data.append((station_name, station_id))
        return data, True
    # Unique name: extract the single station's name and id.
    # TODO: clean up direct list access
    station_name = tree.cssselect('span.desc strong')[0].text
    station_id = tree.cssselect('p.links a')[0].get('href').split('&')[1].split('=')[1]
    return ((station_name, station_id),), True
def request_departures(station_id, limit, products_filter=''):
    '''Request the departure board for *station_id*.

    ``limit`` caps the number of journeys; ``products_filter`` is the
    bit-mask produced by create_products_filter(). Returns a tuple
    ``(data, ok)``: ``data`` is a list of row tuples (time, line,
    destination); ``ok`` is False on network problems (timeout or non-200
    response), in which case ``data`` is None.
    '''
    payload = {'input': station_id, 'maxJourneys': limit, 'start': 'yes'}
    if products_filter:
        payload['productsFilter'] = products_filter
    try:
        r = requests.get(BVG_URL, params=payload, timeout=TIMEOUT_TIME)
    except Timeout:
        return None, False
    # Network-level failure: anything but HTTP 200.
    if r.status_code != 200:
        return None, False
    if '--verbose' in sys.argv:
        print('info: response for', r.request.url)
    tree = html.fromstring(r.content)
    data = []
    # Each table row is one departure; drop empty strings left by the split.
    for row in tree.cssselect('tbody tr'):
        cells = tuple(e for e in row.text_content().split('\n') if e)
        data.append(cells)
    return data, True
def show_usage():
    """Print the command-line usage/help text to stdout."""
    usage_text = (
        'usage: bvg_cli.py --station NAME [--limit N]\n\n'
        'A command line tool for the public transport of Berlin.\n\n'
        'arguments:\n'
        '--station NAME name of your departure station\n\n'
        'optional arguments:\n'
        '--limit N limit the number of responses (default 10)\n\n'
        '--select types select types of transport (e.g. U,T)\n'
        '--ignore types ignore types of transport (e.g. R,I,B)\n'
        ' types: U - underground (U-Bahn)\n'
        ' S - suburban railway (S-Bahn)\n'
        ' T - tram\n'
        ' B - bus\n'
        ' R - regional railway\n'
        ' I - long-distance railway\n\n'
        '--verbose print info messages (debug)'
    )
    print(usage_text)
if __name__ == '__main__':
    ''' Rudimentary CLI capabilities ...'''
    # TODO: investigate cli packages
    # --station is mandatory and must be the first argument.
    if len(sys.argv) < 3 or sys.argv[1] != '--station':
        show_usage()
        sys.exit(1)
    # Optional arguments and their defaults.
    limit_arg = get_argument('limit', '10')
    select_arg = get_argument('select')
    ignore_arg = get_argument('ignore')
    if '--verbose' in sys.argv:
        print('info: limit_arg', limit_arg, 'select_arg', select_arg,
              'ignore_arg', ignore_arg)
    # Resolve the (possibly ambiguous) station name to candidate ids.
    stations, ok = request_station_ids(sys.argv[2])
    if not ok:
        print('Check your network. BVG website migth also be down.', file=sys.stderr)
        sys.exit(1)
    station_id = 0
    # Several candidates: let the user pick one interactively.
    if len(stations) > 1:
        for i, (name, _) in enumerate(stations, start=1):
            print('[{}] {}'.format(i, name))
        while 'do-loop':
            user_response = input('\nWhich station [1-{}] did you mean? '.format(
                len(stations)))
            if user_response.isdigit() and 0 < int(user_response) <= len(stations):
                station_id = int(user_response) - 1
                break
    station_name, station_id = stations[station_id]
    products_filter = create_products_filter(select_arg, ignore_arg)
    departures, ok = request_departures(station_id, limit_arg, products_filter)
    if not ok:
        print('Check your network. BVG website migth also be down.', file=sys.stderr)
        sys.exit(1)
    # Render a simple fixed-width departures table.
    print('\n# Next departures at', station_name)
    print('{:8}{:10}{}'.format('Time', 'Line', 'Destination'))
    print('-' * 50)
    for info in departures:
        print('{:8}{:10}{}'.format(*info))
| behrtam/bvg-cli | bvg_cli.py | bvg_cli.py | py | 6,643 | python | en | code | 7 | github-code | 13 |
17154372327 | ## Import necessary packages
import numpy as np
import pandas as pd
from statistics import mean
from statistics import pstdev
from scipy import stats
import csv
import re
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
### This section of code extracts and normalizes mutability scores from ###
### the dataset. ###
# Extract mutability scores sheet from dataset and save as a pandas
# DataFrame. Excel file contains multiple sheets of which the mutability
# scores is contains two columns: amino acid position and mutability score
mutability_scores = pd.read_excel(io=
'Spencer_et_al_2017_Cas9_mutagenesis.xlsx',
sheet_name='Mutability Scores')
# Convert DataFrame to list and calculate mean and
# standard deviation (population) of mutability scores
mutability_scores_list = mutability_scores['Mutability Score'].tolist()
mean_mut_score = mean(mutability_scores_list)
stdev_mut_score = pstdev(mutability_scores_list)
# Normalize mutability scores using mean and stdev
for index, row in mutability_scores.iterrows():
normalized_mutability_score = (row[1] -
mean_mut_score)/stdev_mut_score
mutability_scores = mutability_scores.replace(row[1],
normalized_mutability_score)
### This section of code extracts the domains of the amino acids, which was ###
### not provided in the mutability scores sheet used above. ###
### "All Count Data" contains many columns of data, of which only domains ###
### and their respective amino acids are needed. ###
# Extract all the data from the dataset and save to a DataFrame
df1 = pd.read_excel(io='Spencer_et_al_2017_Cas9_mutagenesis.xlsx',
sheet_name='All Count Data')
# Drop duplicates ("All Count Data" contains all mutations made at each
# amino acid position which is used to calculate the final mutability scores)
df1.drop_duplicates(subset=['AA Position'], inplace=True)
# Create series of amino acid positions and their domains
s1 = pd.Series(df1['AA Position'], name='AA Position')
s2 = pd.Series(df1['Domain'], name='Domain')
# Concatenate series into a DataFrame
aa_domains = pd.concat([s1,s2], axis=1)
# Merge amino acids, domains, and mutability scores into a new DataFrame
aa_domains_mutability = pd.merge(aa_domains, mutability_scores,
on=['AA Position'])
# Drop any rows generated for which there is no data
aa_domains_mutability = aa_domains_mutability.dropna()
# Save DataFrame into an excel sheet
aa_domains_mutability.to_excel('AminoAcids_Domains_Mutability.xlsx')
### This section of code extracts the network scores generated by the ###
### structural network conversion of Cas9. ###
# Open excel sheet of network scores (change this file based on what network
# representation you wish to use) and save amino acids and network scores
# into DataFrame
with open('FinalSum_Cas9RNP_NoNodeEdgeBetweenness') as csv_file:
csv_reader = csv.reader(csv_file, delimiter='\t')
aa_network_scores = pd.DataFrame(csv_reader,
columns=['AA Position','Network Score'])
# Drop network scores generated for the nucleic acid ligands since mutational
# analysis is done on amino acids alone
aa_network_scores = aa_network_scores[aa_network_scores['Network Score']
!= 'NA']
# Convert network scores to float and sort DataFrame ascending order
aa_network_scores['Network Score'] = aa_network_scores[
'Network Score'].astype(float)
aa_network_scores = aa_network_scores.sort_values(by=['Network Score'])
# Network representation outputs amino acids as both position and
# name, so loop through and remove letters from the amino acids,
# i.e. remove the names
for aa in aa_network_scores['AA Position']:
aa_new = int(re.sub('\D', '', aa))
aa_network_scores = aa_network_scores.replace(to_replace=aa, value=aa_new)
# Sort DataFrame in ascending order of amino acid position
aa_network_scores = aa_network_scores.sort_values(by=['AA Position'])
# Save DataFrame as an excel sheet
aa_network_scores.to_excel('AminoAcids_NetworkScores.xlsx')
# Merge the two dataframes and rearrange the columns in order of amino acid
# position, domain, network score, and mutability score
aa_domains_network_mutability = pd.merge(aa_network_scores,
aa_domains_mutability,
on=['AA Position'])
columns_swap = ['AA Position','Domain','Network Score','Mutability Score']
aa_domains_network_mutability = aa_domains_network_mutability.reindex(
columns=columns_swap)
# Save compiled DataFrame to an excel sheet
aa_domains_network_mutability.to_excel('Mutability_NetworkScores.xlsx')
### This section of code generates the linear regression between ###
### the network scores and mutational tolerances and calculates ###
### the Spearman's rank correlation coefficient and significance of ###
### the regression. Then, a scatter plot of the data is generated ###
### including the linear regression line and statistical analysis results. ###
# Generate figure
fig = plt.figure(figsize = (7,7))
# Get all network scores and mutability scores and plot them
x_vals = aa_domains_network_mutability.loc[:,'Network Score']
y_vals = aa_domains_network_mutability.loc[:,'Mutability Score']
plt.scatter(x_vals, y_vals, s=10, color='dodgerblue')
# Generate the slope and y-intercept of the line of best fit and plot it
m, b = np.polyfit(x_vals, y_vals, 1)
plt.plot(x_vals, m*x_vals + b, color='orange')
# Add title, x- and y-axis labels, and adjust tick mark sizes
plt.xlabel('Network Score', fontsize = 15)
plt.ylabel('Mutational Tolerance', fontsize = 15)
plt.title('Structural Network Analysis of Cas9 Mutational Tolerance',
fontsize = 15)
plt.xticks(fontsize = 15)
plt.yticks(fontsize = 15)
# Calculate Spearman's rank correlation coefficient and p-value, then add it
# to the figure
model_x = np.array(x_vals).reshape((-1,1))
model_y = np.array(y_vals)
correlation, pvalue = stats.spearmanr(model_x, model_y)
fig_text = 'Correlation = {} \n p-value = {}'.format(correlation,pvalue)
plt.text(-4.5,5.9, fig_text, fontsize = 12)
plt.show()
# Save figure to jpg
fig.savefig('Structural Network Analysis.jpg', bbox_inches='tight')
# Domain_scorer defines a function that takes a DataFrame and a domain name
# and returns only the network scores and mutability scores.
# Inputs:
# df, a DataFrame
# domain_name, the name of the domain for which you want network scores
# and mutability scores
# Assumptions:
# DataFrame has columns named 'Domain', Network Score', and 'Mutability Score'
# domain_name is a domain that exists in the DataFrame
def domain_scorer(df, domain_name):
    """Return the network and mutability scores for one Cas9 domain.

    Selects the rows whose 'Domain' column mentions *domain_name* and
    returns their 'Network Score' and 'Mutability Score' columns as two
    pandas Series (in that order).
    """
    domain_rows = df[df['Domain'].str.contains(domain_name)]
    network_scores = domain_rows['Network Score']
    mutability_scores = domain_rows['Mutability Score']
    return network_scores, mutability_scores
### This section of code produces, for each Cas9 domain, the same scatter ###
### plot, regression line and Spearman statistics as the whole-protein ###
### figure above, arranged on a 3x2 grid of subplots. The six previously ###
### copy-pasted plotting sections are collapsed into one data-driven loop. ###
# (domain name, subplot position) pairs, in display order.
domain_axes = [('RuvC', (0, 0)), ('BH', (0, 1)),
               ('Rec1', (1, 0)), ('Rec2', (1, 1)),
               ('HNH', (2, 0)), ('PI', (2, 1))]
# Generate a figure with six subplots
fig, axs = plt.subplots(3, 2, figsize=(14, 20))
for domain_name, (row, col) in domain_axes:
    ax = axs[row, col]
    # All residues in grey as background; the domain's residues highlighted.
    dom_x, dom_y = domain_scorer(aa_domains_network_mutability, domain_name)
    ax.scatter(x_vals, y_vals, s=10, color='grey')
    ax.scatter(dom_x, dom_y, s=10, color='tomato')
    # Least-squares regression line fitted to the domain alone.
    m, b = np.polyfit(dom_x, dom_y, 1)
    ax.plot(dom_x, m * dom_x + b, color='dodgerblue')
    ax.set_xlabel('Network Score', fontsize=15)
    ax.set_ylabel('Mutational Tolerance', fontsize=15)
    ax.set_title(domain_name, fontsize=15)
    # Spearman's rank correlation coefficient and p-value for the domain.
    model_x = np.array(dom_x).reshape((-1, 1))
    model_y = np.array(dom_y)
    correlation, pvalue = stats.spearmanr(model_x, model_y)
    # To generate legends on subplots, mpatches is used since plt.text
    # was not appropriate for subplots.
    spearman_patch = mpatches.Patch(facecolor='None',
                                    label='Correlation = {} \n p-value = {}'.
                                    format(correlation, pvalue))
    ax.legend(loc=[-0.08, 0.9], handles=[spearman_patch], frameon=False,
              fontsize=10)
plt.show()
# Save plots to a jpg
fig.savefig('Structural Network Analysis by Domain.jpg', bbox_inches='tight')
| sayoeweje/Cas9-StructuralNetworkAnalysis | src/Mutational_Network_Analysis.py | Mutational_Network_Analysis.py | py | 13,068 | python | en | code | 0 | github-code | 13 |
19599883602 | """
Aula 19
Metaclasses
EM PYTHON TUDO É OBJETO: incluindo classes
Metaclasses são as "classes" que criam classes.
type é um metaclesse
"""
class Meta(type):
    """Metaclass that enforces a ``b_fala`` method on every class except ``A``.

    It warns when ``b_fala`` is missing or is not callable, and removes the
    ``attr_classe`` class attribute from every class it creates other than
    ``A`` itself.
    """

    def __new__(mcs, name, bases, namespace):
        # The base class 'A' is exempt from all checks and keeps attr_classe.
        if name == 'A':
            return type.__new__(mcs, name, bases, namespace)

        if 'b_fala' not in namespace:
            print(f'Voce precisa criar o metodo b_fala {name}')
        elif not callable(namespace['b_fala']):
            # elif guards the lookup: the original looked up
            # namespace['b_fala'] unconditionally and raised KeyError for
            # classes that did not define it at all.
            print(f'b_fala precisa ser metodo e não atributo em {name}')

        if 'attr_classe' in namespace:
            del namespace['attr_classe']

        return type.__new__(mcs, name, bases, namespace)
class A(metaclass=Meta):
    # Kept on this class only: Meta.__new__ returns early for the name 'A'
    # and strips attr_classe from every other class it creates.
    attr_classe = 'valor A'
    def fala(self):
        # Delegates to b_fala, which Meta expects subclasses to provide.
        self.b_fala()
class B(A):
    # def b_fala(self):
    #     print('Oi')
    # Removed again by Meta: only the class named 'A' keeps attr_classe.
    attr_classe = 'valor B'
    # b_fala is a string, not a method, so Meta prints a warning at class
    # creation and calling B().fala() would raise TypeError.
    b_fala = 'WoW'
    def oi(self):
        print('oi')
b = B()
# b.fala()  # would raise TypeError: b_fala is a string, not callable
# attr_classe was deleted from B by Meta, so attribute lookup falls back to A.
print(b.attr_classe)
| joaoo-vittor/estudo-python | OrientacaoObjeto/aula19.py | aula19.py | py | 947 | python | pt | code | 0 | github-code | 13 |
5477179080 | import requests, pandas, boto3, os, configparser, datetime, logging
from io import BytesIO, StringIO
from zipfile import ZipFile
from airflow.contrib.hooks.aws_hook import AwsHook
def get_aws_config(conn_id):
    """Return AWS credentials for an Airflow connection id.

    NOTE(review): callers index the result as [0]/[1] (access key, secret
    key) -- confirm the hook's credentials object supports that.
    """
    aws_hook = AwsHook(conn_id)
    credentials = aws_hook.get_credentials()
    return credentials
def get_aws_config_deprecated(profile, key):
    """Deprecated: read one value from the on-disk AWS credentials file.

    The file location is taken from the AWS_CREDS environment variable.
    """
    parser = configparser.ConfigParser()
    credentials_path = f'{os.environ["AWS_CREDS"]}/credentials'
    parser.read(credentials_path)
    return parser[profile][key]
def download_extract(url):
    """Download a zip archive from *url* and return it as an open ZipFile."""
    separator = '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
    print(f'Downloading dataset from {url}')
    print(separator)
    response = requests.get(url, stream=True)
    print('Download Complete')
    print(separator)
    print('Unzipping response to byte stream')
    print(separator)
    # Wrap the raw bytes in an in-memory buffer so ZipFile can seek.
    archive = ZipFile(BytesIO(response.content))
    print('Unzipping response Complete')
    print(separator)
    return archive
def dataframe_to_s3(s3_client, input_datafame, bucket_name, file_info):
    """Serialise a DataFrame to CSV (no index) and upload it to S3.

    :param s3_client: boto3 S3 client used for the upload
    :param input_datafame: pandas DataFrame to serialise
    :param bucket_name: destination S3 bucket name
    :param file_info: (filename, filepath) tuple; filepath becomes the S3 key
    """
    csv_buffer = StringIO()
    input_datafame.to_csv(csv_buffer, index=False)
    filename, filepath = file_info
    s3_client.put_object(Bucket=bucket_name, Key=filepath, Body=csv_buffer.getvalue())
    # Report the uploaded file by name (filename was previously unused and the
    # log line contained a broken placeholder).
    print(f'{filename} successfully loaded to s3')
def get_file_info(start_date):
    """Build the (filename, filepath) pair for a partition timestamp.

    The filepath partitions objects by year/month/day/hour and ends with the
    timestamped CSV filename (the trailing path component was previously a
    broken placeholder).

    :param start_date: datetime marking the start of the interval
    :return: (filename, filepath) tuple
    """
    year = start_date.strftime("%Y")
    month = start_date.strftime("%m")
    day = start_date.strftime("%d")
    hour = start_date.strftime("%H")
    filename = start_date.strftime("%Y%m%d_%H-%M-%S") + '.csv'
    filepath = f'{year}/{month}/{day}/{hour}/{filename}'
    return filename, filepath
def set_logging():
    """Configure root logging at INFO level with a timestamped format."""
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    logging.basicConfig(
        format='%(asctime)s %(levelname)-s %(message)s',
        level=logging.INFO,
        datefmt='%Y-%m-%d %H:%M:%S',
    )
def load_historical(**kwargs):
    """Download a historical zip archive and upload its CSV members to S3.

    Expected kwargs: ``url`` (zip archive URL), ``bucket_name`` (target S3
    bucket) and ``load`` ('trips' or 'stations') selecting which members of
    the archive are uploaded. Any other ``load`` value silently does nothing.
    """
    url = kwargs['url']
    bucket_name = kwargs['bucket_name']
    load = kwargs['load']
    s3_client = boto3.client('s3', aws_access_key_id=get_aws_config('aws_credentials')[0], aws_secret_access_key=get_aws_config('aws_credentials')[1])
    z = download_extract(url)
    text_files = z.infolist()
    if load=='trips':
        for text_file in text_files:
            if 'od' in text_file.filename.lower():
                print(f'starting load for {text_file.filename}')
                # NOTE(review): assumes names like OD_YYYY-MM.csv -- confirm.
                start_date = datetime.datetime.strptime(text_file.filename.split('_')[1].split('.')[0], "%Y-%m")
                df = pandas.read_csv(z.open(text_file.filename))
                year = start_date.strftime("%Y")
                month = start_date.strftime("%m")
                filepath = f'trips/{year}/{month}/{text_file.filename}'
                file_info = (text_file.filename, filepath)
                dataframe_to_s3(s3_client, df, bucket_name, file_info)
    elif load=='stations':
        for text_file in text_files:
            if 'station' in text_file.filename.lower():
                print(f'starting load for {text_file.filename}')
                # NOTE(review): assumes names like Stations_YYYY.csv -- confirm.
                start_date = datetime.datetime.strptime(text_file.filename.split('_')[1].split('.')[0], "%Y")
                df = pandas.read_csv(z.open(text_file.filename))
                year = start_date.strftime("%Y")
                filepath = f'station/{year}/{text_file.filename}'
                file_info = (text_file.filename, filepath)
                dataframe_to_s3(s3_client, df, bucket_name, file_info)
def dataframe_to_s3_loader():
    # NOTE(review): this function references text_files, z, s3_client and
    # datetime-driven df slicing without defining them or taking parameters;
    # it only runs if those names happen to exist at module scope. It looks
    # like leftover scratch code for splitting July-2019 trips into 5-minute
    # buckets -- confirm before using.
    for text_file in text_files:
        #logging.info(text_file.filename)
        if text_file.filename == 'OD_2019-07.csv':
            print(f'starting load for {text_file.filename}')
            df = pandas.read_csv(z.open(text_file.filename))
            # start_date = datetime.datetime.strptime(df.get_value(0,'start_date'), "%Y-%m-%d %H:%M:%S")
            # for i in df.index:
            #     end_date = start_date + datetime.timedelta(0, 300)
            #     if end_date > datetime.datetime.strptime(df.get_value(i,'start_date'), "%Y-%m-%d %H:%M:%S"):
            #         pass
            #     else:
            #         mask = (df['start_date'] > start_date.strftime("%Y-%m-%d %H:%M:%S")) & (df['start_date'] < end_date.strftime("%Y-%m-%d %H:%M:%S"))
            #         df_interval = df.loc[mask]
            #         get_filepath(start_date)
            #         dataframe_to_s3(s3_client, df_interval, 'bixi.qc.raw', filepath)
            #         start_date = end_date
            # start_date = df['start_date'][0]
            # for i in df.index:
            #     end_date = datetime.datetime.strptime(start_date, "%Y-%m-%d %H:%M:%S") + datetime.timedelta(0, 300)
            #     if end_date > datetime.datetime.strptime(df['start_date'][i], "%Y-%m-%d %H:%M:%S"):
            #         pass
            #     else:
            #         mask = (df['start_date'] < end_date.strftime("%Y-%m-%d %H:%M:%S"))
            #         df_interval = df.loc[mask]
            #         file_info = get_file_info(datetime.datetime.strptime(start_date, "%Y-%m-%d %H:%M:%S"))
            #         dataframe_to_s3(s3_client, df_interval, 'bixi.qc.raw', file_info)
            #         start_date = end_date.strftime("%Y-%m-%d %H:%M:%S")
            start_date = datetime.datetime(2019, 7, 1)
            i = 0
            ## Use this code if you want to bifurcate the trips in 5 minute interval period
            # 8928 = 31 days x 288 five-minute intervals per day.
            for i in range(0, 8928):
                end_date = start_date + datetime.timedelta(0, 300)
                mask = (df['start_date'] < end_date.strftime("%Y-%m-%d %H:%M:%S"))
                df_interval = df.loc[mask]
                file_info = get_file_info(start_date)
                dataframe_to_s3(s3_client, df_interval, 'bixi.qc.raw', file_info)
                start_date = end_date
10087198856 | # Given a binary tree, flatten it to a linked list in-place.
# For example,
# Given
# 1
# / \
# 2 5
# / \ \
# 3 4 6
# The flattened tree should look like:
# 1
# \
# 2
# \
# 3
# \
# 4
# \
# 5
# \
# 6
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """Flatten a binary tree into a right-skewed 'linked list' in place,
    following preorder (root, left subtree, right subtree)."""

    # @param root, a tree node
    # @return nothing, do it in place
    def flatten(self, root):
        if root == None:
            return
        self.convert(root)

    def convert(self, root):
        """Flatten the subtree rooted at *root* in place; return its head."""
        if root.left == None and root.right == None:
            return root
        # Flatten the right subtree first and remember its head.
        rHead = None
        if root.right != None:
            rHead = self.convert(root.right)
        if root.left != None:
            # Splice the flattened left subtree between root and the
            # flattened right subtree.
            lHead = self.convert(root.left)
            root.right = lHead
            lHead.left = None
            root.left = None
            # Walk to the tail of the spliced left chain and re-attach the
            # flattened right subtree.
            p = root
            while p.right != None:
                p = p.right
            if rHead != None:
                p.right = rHead
                rHead.left = None
        # When there is no left child, the flattened right subtree is already
        # hanging off root.right. (The original code also walked to its tail
        # and re-attached rHead there, creating a cycle for right-only nodes.)
        return root
33454599295 | from distutils.core import setup
import os
import glob
import re
##First, get version from Ungribwrapper/_version.py. Don't import here
#as doing this in the setup.py can be problematic
VERSION_FILE='./mp3tools/_version.py'
# Extract the __version__ = "x.y.z" assignment without importing the module.
matched = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                    open(VERSION_FILE, "rt").read(), re.M)
if matched:
    version_str = matched.group(1)
else:
    raise RuntimeError("Unable to find version string in %s." %(VERSION_FILE))
def main():
    """Run the distutils setup for the mp3tools package."""
    metadata = dict(
        name='mp3tools',
        version=version_str,
        description='Turns CDs into mp3s',
        author='Cory Davis',
        author_email='corzneffect@gmail.com',
        package_dir={"mp3tools": "mp3tools"},
        packages=["mp3tools"],
        scripts=glob.glob('scripts/*.py'),
        data_files=[],
    )
    setup(**metadata)
if __name__=='__main__':
main()
| corzneffect/mp3-tools | setup.py | setup.py | py | 896 | python | en | code | 0 | github-code | 13 |
13775183478 | from peewee import *
from settings import *
# Connect to MySQL; omit the password argument when the sentinel 'NO-PASS' is configured.
db = MySQLDatabase(DB_NAME, user=DB_USER) if DB_PASS == 'NO-PASS' else MySQLDatabase(DB_NAME, user=DB_USER, passwd=DB_PASS)
class BaseModel(Model):
    """Common peewee base model binding all tables to the MySQL database."""
    def to_dict(self):
        # Expose the raw field values peewee stores on the instance.
        return self.__data__
    class Meta:
        database = db
        auto_increment = True
class Readings(BaseModel):
    """One sensor reading; dbsync/syncref track replication to a remote store."""
    id = AutoField(primary_key=True)
    sensor = CharField()
    timestamp = CharField()
    type = IntegerField()
    value = DoubleField()
    dbsync = BooleanField(default=False)
    syncref = IntegerField(null = True)
class Logs(BaseModel):
    """Application log entries attributed to a user id."""
    id = AutoField(primary_key=True)
    user = IntegerField()
    message = CharField()
    date = DateTimeField()
# Create the tables at import time if they do not already exist.
db.create_tables([Readings, Logs], safe=True)
2448830777 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    """Reverse the sub-list between 1-based positions *left* and *right*."""

    def reverseBetween(self, head: "Optional[ListNode]", left: int, right: int) -> "Optional[ListNode]":
        # Walk to the node just before position `left`; prefix_tail stays
        # None when the reversal starts at the head.
        prefix_tail = None
        curr = head
        pos = 1
        while pos < left:
            prefix_tail = curr
            curr = curr.next
            pos += 1
        # Standard in-place reversal of nodes at positions left..right.
        prev = None
        while curr and pos <= right:
            nxt = curr.next
            curr.next = prev
            prev = curr
            curr = nxt
            pos += 1
        # Reconnect the reversed segment. The original did `head = start`
        # unconditionally, which dropped the prefix whenever left >= 3;
        # the head only changes when the reversal starts at position 1.
        if prefix_tail:
            prefix_tail.next = prev
        else:
            head = prev
        # Walk to the tail of the reversed segment and attach the suffix.
        while prev and prev.next:
            prev = prev.next
        if prev:
            prev.next = curr
        return head
12135496563 | '''
边沿检测
'''
import cv2
img = cv2.imread('../data/lily.png',0)
cv2.imshow('img',img)
#Sobel
sobel = cv2.Sobel(img,
cv2.CV_64F,#图像的深度
dx=1,dy=1,#水平和垂直方向的滤波计算
ksize=5)#滤波器大小
cv2.imshow('sobel',sobel)
#Laplacain
lap = cv2.Laplacian(img,cv2.CV_64F,ksize=5)
cv2.imshow('lap',lap)
#Canny
canny = cv2.Canny(img,
70,
300)
cv2.imshow('canny',canny)
cv2.waitKey()
cv2.destroyAllWindows()
| 15149295552/Code | Month08/day14/09_edge.py | 09_edge.py | py | 529 | python | en | code | 1 | github-code | 13 |
9582175458 | import discord
from tools.constants import Constants
class Arcade():
    """Static builders for the TamoBot arcade Discord embeds."""
    @staticmethod
    def show_arcade_options():
        """Return an orange discord.Embed listing the available arcade games."""
        embed = discord.Embed(title="TamoBot Arcade", color=0xffa500)
        embed.set_thumbnail(url='https://raw.githubusercontent.com/TamoStudy/TamoBot/main/README%20Assets/TamoBot.png')
        embed.add_field(name=':joystick: Single Player', value='**Trivia:** answer fun and simple trivia questions\n→ `/trivia` (100 Tamo tokens)', inline=False)
        embed.add_field(name=':signal_strength: Multi Player', value='**Coming Soon**', inline=False)
        # Zero-width-space field name renders a footer-only row.
        embed.add_field(name='\u200b', value=Constants.get_footer_string(), inline=False)
        return embed
| TamoStudy/TamoBot | apps/arcade/arcade.py | arcade.py | py | 686 | python | en | code | 11 | github-code | 13 |
6858945996 | """
train embedding with CenterLoss
Author: LucasX
"""
import copy
import os
import sys
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import confusion_matrix
from torch.optim import lr_scheduler
from torchvision import models
sys.path.append('../')
from research.cbir import data_loader
from research.cbir.cfg import cfg
from research.cbir.file_utils import mkdir_if_not_exist
class CenterLoss(nn.Module):
    """
    Center loss: mean squared distance between each sample's feature vector
    and the learnable center of its class.

    Reference:
    Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.

    Args:
        num_classes (int): number of classes.
        feat_dim (int): feature dimension.
    """

    def __init__(self, num_classes, feat_dim=1024):
        super(CenterLoss, self).__init__()
        self.num_classes = num_classes
        self.feat_dim = feat_dim
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        # Learnable class centers, one row per class.
        if torch.cuda.is_available():
            self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).to(self.device))
        else:
            self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))

    def forward(self, x, labels):
        """
        Args:
            x: feature matrix with shape (batch_size, feat_dim).
            labels: ground truth labels with shape (batch_size).
        Returns:
            Scalar tensor: mean squared distance of samples to their class centers.
        """
        batch_size = x.size(0)
        # ||x||^2 + ||c||^2 terms of the pairwise squared distances.
        distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
                  torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
        # -2 * x @ centers^T term. Keyword beta/alpha replaces the deprecated
        # positional addmm_(beta, alpha, mat1, mat2) form, which recent
        # PyTorch releases reject.
        distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)

        classes = torch.arange(self.num_classes).long()
        classes = classes.to(self.device)
        labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
        mask = labels.eq(classes.expand(batch_size, self.num_classes))

        # Keep only each sample's distance to its own class center.
        dist = []
        for i in range(batch_size):
            value = distmat[i][mask[i]]
            value = value.clamp(min=1e-12, max=1e+12)  # for numerical stability
            dist.append(value)
        dist = torch.cat(dist)
        loss = dist.mean()
        return loss
def train_model(model, dataloaders, criterion_xent, criterion_cent, optimizer_model, optimizer_centloss, scheduler,
                num_epochs, inference=False):
    """
    Train a (features, logits) model with cross-entropy plus center loss, or
    evaluate a previously saved checkpoint when inference=True.

    :param optimizer_centloss: optimizer updating the center-loss centers
    :param optimizer_model: optimizer updating the model parameters
    :param criterion_cent: CenterLoss applied to the feature vectors
    :param criterion_xent: cross-entropy loss applied to the logits
    :param model: network whose forward() returns (features, logits)
    :param dataloaders: dict of 'train'/'val'/'test' loaders yielding dicts
        with 'image', 'type' and (val/test only) 'filename' entries
    :param scheduler: LR scheduler stepped once per training epoch
    :param num_epochs: number of training epochs
    :param inference: if True, load './model/<name>.pth' and only evaluate
    :return: None; checkpoints and a prediction CSV are written to disk
    """
    model_name = model.__class__.__name__
    model = model.float()
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    model = model.to(device)
    # NOTE(review): assumes every batch is full; the last partial batch makes
    # these sizes (and the accuracies below) slightly off -- confirm.
    dataset_sizes = {x: dataloaders[x].__len__() * cfg['batch_size'] for x in ['train', 'val', 'test']}
    for k, v in dataset_sizes.items():
        print('Dataset size of {0} is {1}...'.format(k, v))
    if not inference:
        print('Start training %s...' % model_name)
        since = time.time()
        best_model_wts = copy.deepcopy(model.state_dict())
        best_acc = 0.0
        for epoch in range(num_epochs):
            print('-' * 100)
            print('Epoch {}/{}'.format(epoch, num_epochs - 1))
            # Each epoch has a training and validation phase
            for phase in ['train', 'val']:
                if phase == 'train':
                    scheduler.step()
                    model.train()  # Set model to training mode
                else:
                    model.eval()  # Set model to evaluate mode
                running_loss = 0.0
                running_corrects = 0
                # Iterate over data.
                # for data in dataloaders[phase]:
                for i, data in enumerate(dataloaders[phase], 0):
                    inputs, labels = data['image'], data['type']
                    inputs = inputs.to(device)
                    labels = labels.to(device)
                    # zero the parameter gradients
                    optimizer_model.zero_grad()
                    optimizer_centloss.zero_grad()
                    # forward
                    # track history if only in train
                    with torch.set_grad_enabled(phase == 'train'):
                        feats, outputs = model(inputs)
                        _, preds = torch.max(outputs, 1)
                        xent_loss = criterion_xent(outputs, labels)
                        # center loss is down-weighted by 0.001 vs cross-entropy
                        loss = criterion_cent(feats, labels) * 0.001 + xent_loss
                        # backward + optimize only if in training phase
                        if phase == 'train':
                            loss.backward()
                            optimizer_model.step()
                            # multiple (1./alpha) in order to remove the effect of alpha on updating centers
                            for param in criterion_cent.parameters():
                                param.grad.data *= (1. / 1.)
                            optimizer_centloss.step()
                    # statistics
                    running_loss += loss.item() * inputs.size(0)
                    running_corrects += torch.sum(preds == labels.data)
                epoch_loss = running_loss / dataset_sizes[phase]
                epoch_acc = running_corrects.double() / dataset_sizes[phase]
                print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
                # deep copy the model
                if phase == 'val' and epoch_acc > best_acc:
                    # Re-run the val set to report metrics for the new best model.
                    tmp_correct = 0
                    tmp_total = 0
                    tmp_y_pred = []
                    tmp_y_true = []
                    tmp_filenames = []
                    for data in dataloaders['val']:
                        images, labels, filename = data['image'], data['type'], data['filename']
                        images = images.to(device)
                        labels = labels.to(device)
                        feats, outputs = model(images)
                        _, predicted = torch.max(outputs.data, 1)
                        tmp_total += labels.size(0)
                        tmp_correct += (predicted == labels).sum().item()
                        tmp_y_pred += predicted.to("cpu").detach().numpy().tolist()
                        tmp_y_true += labels.to("cpu").detach().numpy().tolist()
                        tmp_filenames += filename
                    tmp_acc = tmp_correct / tmp_total
                    print('Confusion Matrix of {0} on val set: '.format(model_name))
                    cm = confusion_matrix(tmp_y_true, tmp_y_pred)
                    print(cm)
                    cm = np.array(cm)
                    print('Accuracy = {0}'.format(tmp_acc))
                    # Per-class precision/recall from confusion-matrix columns/rows.
                    precisions = []
                    recalls = []
                    for i in range(len(cm)):
                        precisions.append(cm[i][i] / sum(cm[:, i].tolist()))
                        recalls.append(cm[i][i] / sum(cm[i, :].tolist()))
                    print("Precision of {0} on val set = {1}".format(model_name, sum(precisions) / len(precisions)))
                    print("Recall of {0} on val set = {1}".format(model_name, sum(recalls) / len(recalls)))
                    best_acc = epoch_acc
                    best_model_wts = copy.deepcopy(model.state_dict())
                    model.load_state_dict(best_model_wts)
                    model_path_dir = './model'
                    mkdir_if_not_exist(model_path_dir)
                    torch.save(model.state_dict(),
                               './model/{0}_best_epoch-{1}.pth'.format(model_name, epoch))
        time_elapsed = time.time() - since
        print('Training complete in {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print('Best val Acc: {:4f}'.format(best_acc))
        # load best model weights
        model.load_state_dict(best_model_wts)
        model_path_dir = './model'
        mkdir_if_not_exist(model_path_dir)
        torch.save(model.state_dict(), './model/%s.pth' % model_name)
    else:
        print('Start testing %s...' % model.__class__.__name__)
        model.load_state_dict(torch.load(os.path.join('./model/%s.pth' % model_name)))
        model.eval()
        correct = 0
        total = 0
        y_pred = []
        y_true = []
        filenames = []
        probs = []
        with torch.no_grad():
            for data in dataloaders['test']:
                images, labels, filename = data['image'], data['type'], data['filename']
                images = images.to(device)
                labels = labels.to(device)
                feats, outputs = model(images)
                outputs = F.softmax(outputs)
                # get TOP-K output labels and corresponding probabilities
                topK_prob, topK_label = torch.topk(outputs, 2)
                probs += topK_prob.to("cpu").detach().numpy().tolist()
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
                y_pred += predicted.to("cpu").detach().numpy().tolist()
                y_true += labels.to("cpu").detach().numpy().tolist()
                filenames += filename
        print('Accuracy of {0} on test set: {1}% '.format(model_name, 100 * correct / total))
        print(
            'Confusion Matrix of {0} on test set: '.format(model_name))
        cm = confusion_matrix(y_true, y_pred)
        print(cm)
        cm = np.array(cm)
        precisions = []
        recalls = []
        for i in range(len(cm)):
            precisions.append(cm[i][i] / sum(cm[:, i].tolist()))
            recalls.append(cm[i][i] / sum(cm[i, :].tolist()))
        print('Precision List: ')
        print(precisions)
        print('Recall List: ')
        print(recalls)
        print("Precision of {0} on val set = {1}".format(model_name,
                                                         sum(precisions) / len(precisions)))
        print(
            "Recall of {0} on val set = {1}".format(model_name, sum(recalls) / len(recalls)))
        print('Output CSV...')
        col = ['filename', 'gt', 'pred', 'prob']
        df = pd.DataFrame([[filenames[i], y_true[i], y_pred[i], probs[i][0]] for i in range(len(filenames))],
                          columns=col)
        df.to_csv("./%s.csv" % model_name, index=False)
        print('CSV has been generated...')
print('CSV has been generated...')
def main_with_centerloss(model, epoch, data_name):
    """
    Train *model* with cross-entropy plus center loss on the named dataset.

    :param model: network whose forward() returns (features, logits)
    :param epoch: number of training epochs
    :param data_name: 'TissuePhysiology' or 'LightClothing'
    :raises ValueError: if data_name does not name a known dataset
    :return: None
    """
    criterion_xent = nn.CrossEntropyLoss()
    # feat_dim=1024 must match the feature dimension the model's forward() emits.
    criterion_cent = CenterLoss(num_classes=cfg['out_num'], feat_dim=1024)

    optimizer_model = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
    optimizer_centloss = optim.SGD(criterion_cent.parameters(), lr=0.5)

    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_model, step_size=60, gamma=0.1)

    if data_name == 'TissuePhysiology':
        print('start loading TissuePhysiology dataset...')
        trainloader, valloader, testloader = data_loader.load_tissuephysiology_data()
    elif data_name == 'LightClothing':
        print('start loading LightClothing dataset...')
        trainloader, valloader, testloader = data_loader.load_lightclothing_data()
    else:
        # Previously this branch only printed a message and then crashed with
        # a NameError on the undefined loaders below; fail fast instead.
        raise ValueError('Invalid data name. It can only be TissuePhysiology or LightClothing...')

    dataloaders = {
        'train': trainloader,
        'val': valloader,
        'test': testloader
    }

    train_model(model=model, dataloaders=dataloaders, criterion_xent=criterion_xent, criterion_cent=criterion_cent,
                optimizer_model=optimizer_model, optimizer_centloss=optimizer_centloss, scheduler=exp_lr_scheduler,
                num_epochs=epoch, inference=False)
class DenseNet121(nn.Module):
    """
    DenseNet with features, constructed for CenterLoss.

    Wraps torchvision's densenet121 so forward() returns (features, logits):
    the pooled feature vector feeds CenterLoss and the classifier output
    feeds the cross-entropy loss.
    """
    def __init__(self, num_cls=198):
        super(DenseNet121, self).__init__()
        self.__class__.__name__ = 'DenseNet121'
        densenet121 = models.densenet121(pretrained=True)
        num_ftrs = densenet121.classifier.in_features
        # Replace the ImageNet head with a num_cls-way classifier.
        densenet121.classifier = nn.Linear(num_ftrs, num_cls)
        self.model = densenet121
    def forward(self, x):
        for name, module in self.model.named_children():
            if name == 'features':
                feats = module(x)
                feats = F.relu(feats, inplace=True)
                # NOTE(review): kernel_size=7 assumes a 7x7 final feature map
                # (i.e. 224x224 inputs) -- confirm the dataloader's image size.
                feats = F.avg_pool2d(feats, kernel_size=7, stride=1).view(feats.size(0), -1)
            elif name == 'classifier':
                out = module(feats)
        return feats, out
    def num_flat_features(self, x):
        # Number of features per sample when flattening x (batch dim excluded).
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
if __name__ == '__main__':
    # densenet121 = models.densenet121(pretrained=True)
    # num_ftrs = densenet121.classifier.in_features
    # densenet121.classifier = nn.Linear(num_ftrs, 198)
    # densenet121 = DenseNet(num_classes=198)
    # Train the CenterLoss-aware DenseNet on the TissuePhysiology dataset.
    densenet121 = DenseNet121(num_cls=cfg['out_num'])
    main_with_centerloss(model=densenet121, epoch=cfg['epoch'], data_name='TissuePhysiology')
| stevenliu1375/XCloud | research/cbir/train_with_centerloss.py | train_with_centerloss.py | py | 13,478 | python | en | code | null | github-code | 13 |
27979123309 | # Daniel Perez A.
# CSCI Big Data: Project One
import re
from typing import Dict
import pandas as pd
import hetnetpy.hetnet
from neo4j import GraphDatabase
from pprint import pprint
import json
import networkx as nx
import matplotlib.pyplot as plt
from neo4j import GraphDatabase
import sys
from nltk.tokenize import word_tokenize
# Using Neo4j For Graph Storage
from pyvis.network import Network
from pyvis import network as net
import numpy as np
# Authentication
URI = "bolt://localhost:7689"
AUTH = ("neo4j", "spiky1234567")
# Neo4j Database Class
class Neo4j:
# Initialzing Neo4j driver
def __init__(self, URI, user, password):
self.driver = GraphDatabase.driver(URI, auth=(user,password))
# Closing Server
def close(self):
self.driver.close()
def run_query(self, query, **params):
with self.driver.session() as session:
result = session.run(query,**params)
return result
def r_query(self, query, **params):
with self.driver.session() as session:
record = {}
# Tokenizing Query for Keyword Word
word_query = word_tokenize(query.lower())
# Running Query
try:
result = session.run(query, **params)
except:
print ("Query Not Possible")
return 1
else:
if 'load' in word_query:
print('Neo4j Loaded Data Succesfully')
return 1
for x in result:
record = dict(x)
print(record['n']['name'], "Name: ", record['n']['dataName'].title())
if not record :
print("No Matches Found")
return 1 #
# Main Program for Querying and Visualizing Data
def main(argv):
    """Run the Cypher query in argv[1]; with no argument, draw a default graph."""
    # Creating Database Instance
    database_db = Neo4j('bolt://localhost:7689', 'neo4j', 'spiky1234567')
    try:
        # Argument From Command Line
        command_line_query = argv[1]
    except:
        print("Query is not Defined. Here's a Visual")
        # Visualizing Query
        # %%
        # Local imports shadow the module-level ones; kept from the notebook cell.
        import networkx as nx
        import matplotlib.pyplot as plt
        data_graph = nx.Graph()
        with database_db.driver.session() as session:
            result = session.run("""MATCH n=(a:Data)-[:CpD]->\
                                 (b:Data where b.id='Disease::DOID:7148') RETURN a,b""")
            for record in result:
                # Extracting the nodes from both nodes
                node_a = record["a"]
                node_b = record["b"]
                # Adding nodes to the network graph
                data_graph.add_node(node_a["id"])
                data_graph.add_node(node_b["id"])
                # Adding edge to graph
                data_graph.add_edge(node_a["id"], node_b["id"])
        pos = nx.spring_layout(data_graph)
        edge_labels = {edge: 'CpD' for edge in data_graph.edges()}
        nx.draw_networkx_edge_labels(data_graph, pos, edge_labels=edge_labels)
        nx.draw_networkx(data_graph, pos,with_labels=True, node_size=100)
        plt.show()
    else:
        # Running Query From Termnial
        database_db.r_query(command_line_query)
    # Closing Data Base
    database_db.close()
    return 0
if __name__ == "__main__":
main(sys.argv)
# %%
| halaway/graph-net-project | projectBD.py | projectBD.py | py | 3,471 | python | en | code | 1 | github-code | 13 |
41019124082 | # create a template file of setting file
import json
def create_setting_file():
    """Write setting.json containing the default connection settings template."""
    defaults = {
        "ip": "192.168.124.1",
        "port": "22",
        "username": "admin",
        "password": "asus#1234",
        "file_path": "/tmp/syslog.log",
    }
    with open("setting.json", "w") as handle:
        json.dump(defaults, handle, indent=4)
def create_pattern_file():
    """Write pattern.json: a usage reference plus two user-pattern templates."""
    # Reference section: lists every supported search method and the
    # parameters it needs.
    template = {
        "usable pattern_type": {
            "keyword in time": {
                "#Triggering conditions": "The keyword appears more than the threshold in a time range",
                "pattern_name": "pattern name",
                "pattern_type": "keyword in time",
                "keyword": r"a regex string",
                "appeared_threshold": "appears times, in interger",
                "time_range": "second, in interger",
            }
        }
    }
    # Two example user-defined patterns sharing the same body.
    for idx in range(2):
        label = "pattern " + str(idx)
        template[label] = {
            "pattern_name": label,
            "pattern_type": "keyword in time",
            "keyword": "regex string",
            "appeared_threshold": "2",
            "time_range": "100",
            "mail": {
                "mail_to": "mail1@gmail.com;mail2@gmail.com",
                "mail_subject": "!pattern_name detected",
                "mail_content": "!pattern_name detected at !time, !log",
            },
            "teams": {
                "teams_webhook": "",
                "teams_subject": "!pattern_name detected",
                "teams_content": "!pattern_name detected at !time, !log",
            },
        }
    with open("pattern.json", "w") as handle:
        json.dump(template, handle, indent=4)
if __name__ == "__main__":
create_pattern_file()
create_setting_file()
| louischouasus/LogDetector | create_setting.py | create_setting.py | py | 2,035 | python | en | code | 0 | github-code | 13 |
class rv_decoder:
    """Decode the fixed fields and immediates of a 32-bit RISC-V instruction word."""
    def __init__(self, inst:int):
        # Fixed bit fields shared by the RV32 instruction formats.
        self.opcode = inst & 0x7f
        self.funct3 = (inst >> 12) & 0x07
        self.funct7 = (inst >> 25) & 0x7f
        self.rd = (inst >> 7) & 0x1f
        self.rs1 = (inst >> 15) & 0x1f
        self.rs2 = (inst >> 20) & 0x1f
        # U-type immediate: upper 20 bits of the instruction, low 12 bits zero.
        self.u_imm = inst & 0xfffff000
        # J/B/S/I immediates, sign-extended to 32 bits via the `funct7 & 0x40`
        # test (instruction bit 31). NOTE(review): the bit shuffles below
        # assume the standard RV32 encodings -- verify against the ISA manual.
        self.j_imm = (0xfff00000 if self.funct7 & 0x40 else 0x00) + (self.rs1 << 15) + (self.funct3 << 12) + ((self.rs2 & 0x1) << 11) + ((self.funct7 & 0x3f) << 5) + (self.rs2 & 0x1e)
        self.b_imm = (0xfffff000 if self.funct7 & 0x40 else 0x00) + ((self.funct7 & 0x3f) << 5) + (self.rd & 0x1e) + ((self.rd & 0x1) << 11)
        self.s_imm = (0xfffff800 if self.funct7 & 0x40 else 0x00) + ((self.funct7 & 0x3f) << 5) + (self.rd)
        self.i_imm = (0xfffff800 if self.funct7 & 0x40 else 0x00) + ((self.funct7 & 0x3f) << 5) + (self.rs2)
        # Single dispatch key: opcode, plus funct3 except for LUI/AUIPC/JAL
        # (0x37/0x17/0x6f), plus funct7 for OP (0x33) and for OP-IMM shifts
        # (opcode 0x13 with funct3 == 0x05).
        self.id = self.opcode + (self.funct3 << 7 if self.opcode not in [0x37, 0x17, 0x6f] else 0x00) + (self.funct7 << 10 if self.opcode == 0x33 or self.opcode == 0x13 and self.funct3 == 0x05 else 0x00)
class Coche:
    """Simple car model demonstrating encapsulation via name-mangled attributes."""

    #Constructor
    def __init__(self, marca, kilometraje, color):
        self.__marca = marca              # private: brand
        self.__kilometraje = kilometraje  # private: mileage
        self.color = color                # public: colour

    def arrancar(self, arrancamos):
        """Record whether the car is started and describe its state."""
        self.arrancamos = arrancamos
        if(self.arrancamos):
            return 'El Coche esta en movimiento'
        else:
            return 'EL coche esta detenido'

    def __str__(self):
        # Use self rather than the module-level objMiCoche global the original
        # referenced, which broke printing any other Coche instance.
        return 'El auto de marca {} con Kilometraje {} es de color {} y esta {}'.format(
            self.__marca, self.__kilometraje, self.color, self.arrancar(True))
objMiCoche = Coche('Mazda', 45000, 'Rojo')
objMiCoche.color = 'verde'
print(str(objMiCoche))
| andresdino/usco2023 | Prog2/POO/Encapculamiento.py | Encapculamiento.py | py | 678 | python | es | code | 1 | github-code | 13 |
776681234 | #!/usr/bin/env python
# coding: utf-8
# # On-the-fly statistics
#
# [This note-book is in oceantracker/tutorials_how_to/]
#
# Scaling up particle numbers to millions will create large volumes of
# particle track data. Storing and analyzing these tracks is slow and
# rapidly becomes overwhelming. For example, building a heat map from a
# terabyte of particle tracks after a run has completed. Ocean tracker can
# build some particle statistics on the fly, without recording any
# particle tracks. This results in more manageable data volumes and
# analysis.
#
# On-the-fly statistics record particle counts separately for each release group. It is also possible to subset the counts, ie only count particles which are stranded by the
# tide by designating a range of particle status values to count. Or, only
# count particles in a given vertical “z” range. Users can add multiple
# statistics, all calculated in from the same particles during the run.
# Eg. could add a particle statistic for each status type, for different
# depth ranges.
#
# Statistics can be read, plotted or animated with OceanTrackers
# post-processing code, see below
#
# The available “particle_statistics” classes with their individual
# settings are at …. add link
#
# Currently there are two main classes of 2D particle statistics "gridded" which counts particles inside cells of a regular grid, and "polygon" which counts particles in a given list of polygons.
#
# The user can add many particle statistics classes, all based on the same particles. For both types it is possible to only count a subset of these particles, by setting a min. and/or max status to count, or setting a min. and/or max. "z", the vertical location. So could add several statistics classes, each counting particles in different layers, or classes to separately count those moving and those on the bottom hoping to be re-suspended.
#
# ## Gridded statistics
#
# These are heat maps of counts binned into cells of a regular grid. Along
# with heat maps of particle counts, users can optionally build a heat
# maps of named particle properties, eg. the value decaying particle
# property. To ensure the heat map grids are not too large or too coarse,
# by default grids are centred on each release group, thus there are
# different grid locations for each release group.
#
# ## Polygon statistics
#
# These particle counts can be used to calculate the connectivity between
# each release group and a user given list of “statistics” polygons. Also,
# used to estimate the influence of each release group on a particle
# property with each given statistics polygon. Polygon statistics count the particles from each point or polygon release within each statistics polygons. The statistics polygons are
# are completely independent of the polygons that might be used in any polygon release (they can be the same if the user gives both the same point coordinates). A special case of a polygon
# statistic, is the “residence_time” class, which can be used to calculate
# the fraction of particles from each release group remaining within each
# statistics polygon at each ‘update_interval’ as one way to estimate
# particle residence time for each release group.
#
# ## Particle property statistics
#
# Both types of statistics can also record sums of user designated
# particle properties within each given grid cell or statistics polygon,
# which originate from each release group. These sums enabling mean values
# of designated particle properties within each grid cell or polygon to be
# calculated. They can also be used to estimate the relative influence of
# each release group on the value of a particle property within each given
# grid cell or polygon.
#
# A future version with allow estimating the variance of the designated
# property values and particle counts in each grid cell or
# polygon, for each release group.
#
#
#
#
# ## Gridded/Heat map example
#
# The below uses the helper class method to extends the minimal_example to add
#
# * Decaying particle property, eg. breakdown of a pollutant
# * Gridded time series of particle statistics as heat maps, which also builds a heat map of the pollutant
# * Plot the particle counts and pollutant as animated heatmap.
#
# In[1]:
# Gridded Statistics example.py using class helper method
#------------------------------------------------
from oceantracker.main import OceanTracker
# make instance of oceantracker to use to set parameters using code, then run
ot = OceanTracker()
# ot.settings method use to set basic settings
ot.settings(output_file_base='heat_map_example', # name used as base for output files
root_output_dir='output', # output is put in dir 'root_output_dir'\\'output_file_base'
time_step= 600., # 10 min time step as seconds
write_tracks = False # particle tracks not needed for on fly
)
# ot.set_class, sets parameters for a named class
ot.add_class('reader',input_dir= '../demos/demo_hindcast', # folder to search for hindcast files, sub-dirs will, by default, also be searched
file_mask= 'demoHindcastSchism*.nc') # hindcast file mask
# add one release locations
ot.add_class('release_groups', name='my_release_point', # user must provide a name for group first
points= [ [1599000, 5486200]], # ust be 1 by N list pairs of release locations
release_interval= 900, # seconds between releasing particles
pulse_size= 1000, # number of particles released each release_interval
)
# add a decaying particle property
# add and Age decay particle property, with exponential decay based on age, with time scale 1 hour
ot.add_class('particle_properties', # add a new property to particle_properties role
name ='a_pollutant', # must have a user given name
class_name='oceantracker.particle_properties.age_decay.AgeDecay', # class_role is resuspension
initial_value= 1000,
decay_time_scale = 3600.) # time scale of age decay ie decays initial_value* exp(-age/decay_time_scale)
# add a gridded particle statistic
ot.add_class('particle_statistics',
name = 'my_heatmap',
class_name= 'oceantracker.particle_statistics.gridded_statistics.GriddedStats2D_timeBased',
# the below settings are optional
update_interval = 900, # time interval in sec, between doing particle statists counts
particle_property_list = ['a_pollutant'], # request a heat map for the decaying part. prop. added above
status_min ='moving', # only count the particles which are moving
z_min =-2., # only count particles at locations above z=-2m
grid_size= [120, 121] # number of east and north cells in the heat map
)
# run oceantracker
case_info_file_name = ot.run()
# ### Read and plot heat maps
#
# The statistics output from the above run is in file
# output\heat_map_example\heat_map_example_stats_gridded_time_my_heatmap.nc
#
# This netcdf file can be read and organized as a python dictionary by
# directly with read_ncdf_output_files.read_stats_file.
#
# To plot use, load_output_files.load_stats_data, which also loads grid etc for plotting
# In[2]:
# read stats files
from oceantracker.post_processing.read_output_files import read_ncdf_output_files, load_output_files
from oceantracker.post_processing.plotting import plot_statistics
from IPython.display import HTML
# basic read of net cdf
raw_stats = read_ncdf_output_files.read_stats_file('output/heat_map_example/heat_map_example_stats_gridded_time_my_heatmap.nc')
print('raw_stats', raw_stats.keys())
# better, load netcdf plus grid and other data useful in plotting
# uses case_info name returned from run above
stats_data = load_output_files.load_stats_data(case_info_file_name,'my_heatmap')
print('stats',stats_data.keys())
# use stats_data variable to plot heat map at last time step, by default plots var= "count"
ax= [1591000, 1601500, 5478500, 5491000]
anim= plot_statistics.animate_heat_map(stats_data, release_group='my_release_point', axis_lims=ax,
heading='Particle count heatmap built on the fly, no tracks recorded', fps=1)
HTML(anim.to_html5_video())# this is slow to build!
# animate the pollutant
anim= plot_statistics.animate_heat_map(stats_data, var='a_pollutant',release_group= 'my_release_point', axis_lims=ax,
heading='Decaying particle property , a_pollutant built on the fly, no tracks recorded', fps=1)
HTML(anim.to_html5_video())# this is slow to build!
# static heat map
plot_statistics.plot_heat_map(stats_data, var='a_pollutant',release_group= 'my_release_point', axis_lims=ax, heading='a_pollutant at last time step depth built on the fly, no tracks recorded')
# ## Polygon example
#
#
# # add polygon stats example with plotting
# In[3]:
# Polygon Statistics example.py run using dictionary of parameters
#------------------------------------------------
from oceantracker import main
params = main.param_template() # start with template
params['output_file_base']='polygon_connectivity_map_example' # name used as base for output files
params['root_output_dir']='output' # output is put in dir 'root_output_dir'\\'output_file_base'
params['time_step']= 600. # 10 min time step as seconds
params['write_tracks'] = False # particle tracks not needed for on fly
# ot.set_class, sets parameters for a named class
params['reader']= { 'input_dir': '../demos/demo_hindcast', # folder to search for hindcast files, sub-dirs will, by default, also be searched
'file_mask': 'demoHindcastSchism*.nc'} # hindcast file mask
# add one release locations
params['release_groups']['my_release_point']={ # user must provide a name for group first
'points': [ [1599000, 5486200]], # ust be 1 by N list pairs of release locations
'release_interval': 900, # seconds between releasing particles
'pulse_size': 1000, # number of particles released each release_interval
}
# add a gridded particle statistic
params['particle_statistics']['my_polygon']= {
'class_name': 'oceantracker.particle_statistics.polygon_statistics.PolygonStats2D_timeBased',
'polygon_list': [{'points': [ [1597682.1237, 5489972.7479],# list of one or more polygons
[1598604.1667, 5490275.5488],
[1598886.4247, 5489464.0424],
[1597917.3387, 5489000],
[1597300, 5489000], [1597682.1237, 5489972.7479]
]
}],
# the below settings are optional
'update_interval': 900, # time interval in sec, between doing particle statists counts
'status_min':'moving', # only count the particles which are moving
}
# run oceantracker
poly_case_info_file_name = main.run(params)
# ## Read polygon/connectivity statistics
#
#
# In[4]:
#Read polygon stats and calculate connectivity matrix
from oceantracker.post_processing.read_output_files import load_output_files
poly_stats_data = load_output_files.load_stats_data(poly_case_info_file_name,'my_polygon')
print('stats',poly_stats_data.keys())
import matplotlib.pyplot as plt
plt.plot(poly_stats_data['date'], poly_stats_data['connectivity_matrix'][:,0,0])
plt.title('Connectivity time series between release point and polygon')
#print(poly_stats_data['date'])
# ## Time verses Age statistics
#
# Both gridded and polygon statistics come in two types, “time” and “age”.
#
# * “time” statistics are time series, or snapshots, of particle numbers
# and particle properties at a time interval given by “calculation_interval” parameter. Eg. gridded stats showing how the heat map of a source’s plume evolves over time.
#
# * “age” statistics are particle counts and properties binned by particle age. The result are age based histograms of counts or particle proprieties. This is useful to give numbers in each age band arriving at a given grid cell or polygon, from each release group. Eg. counting how many larvae are old enough to settle in a polygon or grid cell from each potential source location.
| oceantracker/oceantracker | tutorials_how_to/G_onthefly_statistics.py | G_onthefly_statistics.py | py | 12,763 | python | en | code | 10 | github-code | 13 |
4791183238 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import math
def demo2(a, b, strs):
# 排列
for index, ele in enumerate(strs):
b += ele
if index == a - 1:
strs = strs[index + 1:]
return b, strs
def demo(strs: str, numRows: int) -> str:
strs = list(strs.upper())
if numRows == 1:
return strs
"""
不用numpy的矩阵思维,使用普通函数实现:
每一行元素数量相同,数量不足的行用空格补足
"""
a = math.ceil(len(strs) / numRows)
b = ""
for i in range(numRows):
# 换行
if i != 0:
b += "\n"
# 最后一行缺失问题
if len(strs) < a:
space = a - len(strs)
for s in range(space):
strs += " "
b, strs = demo2(a, b, strs)
print(b)
if __name__ == '__main__':
print(len("sldkajlkewrcxuass"))
demo(strs="sldkajlkewrcxuass", numRows=4)
| LeroyK111/BasicAlgorithmSet | 代码实现算法/ZigzagConversion.py | ZigzagConversion.py | py | 1,003 | python | en | code | 1 | github-code | 13 |
15825364375 | import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
def candle_gen():
df = pd.read_csv('symbols.csv')
fig = go.Figure(data=go.Candlestick(
x=df['date'], open=df['open'], high=df['high'], low=df['low'], close=df['close']))
return fig
def ohlc_gen():
df = pd.read_csv('symbols.csv')
fig = go.Figure(data=go.Ohlc(
x=df['date'], open=df['open'], high=df['high'], low=df['low'], close=df['close']))
# fig.show()
return fig
def colored_bar():
df = pd.read_csv('symbols.csv')
fig = px.bar(df, x=df['date'], y=[df['open'],
df['low'], df['high'], df['close']])
# fig.show()
return fig
def vertex_line():
df = pd.read_csv('symbols.csv')
fig = go.Figure(go.Scatter(x=df['date'], y=df['close']))
fig.update_layout(plot_bgcolor='rgb(230, 230,230)', showlegend=True)
return fig
def hollow_gen():
df = pd.read_csv('symbols.csv')
fig = go.Figure(data=go.Candlestick(x=df['date'], open=df['open'], high=df['high'], low=df['low'],
close=df['close'], increasing_line_color='blue', decreasing_line_color='white'))
return fig
| AmeyaKulkarni2001/OHCL-Engine | graph_gen.py | graph_gen.py | py | 1,172 | python | en | code | 1 | github-code | 13 |
22869053962 | import io
import aiofiles
import pytest
from aresponses import ResponsesMockServer
from aiogram import Bot
from aiogram.api.client.session.aiohttp import AiohttpSession
from aiogram.api.methods import GetFile, GetMe
from aiogram.api.types import File, PhotoSize
from tests.mocked_bot import MockedBot
try:
from asynctest import CoroutineMock, patch
except ImportError:
from unittest.mock import AsyncMock as CoroutineMock, patch # type: ignore
class TestBot:
def test_init(self):
bot = Bot("42:TEST")
assert isinstance(bot.session, AiohttpSession)
assert bot.id == 42
def test_hashable(self):
bot = Bot("42:TEST")
assert hash(bot) == hash("42:TEST")
def test_equals(self):
bot = Bot("42:TEST")
assert bot == Bot("42:TEST")
assert bot != "42:TEST"
@pytest.mark.asyncio
async def test_emit(self):
bot = Bot("42:TEST")
method = GetMe()
with patch(
"aiogram.api.client.session.aiohttp.AiohttpSession.make_request",
new_callable=CoroutineMock,
) as mocked_make_request:
await bot(method)
mocked_make_request.assert_awaited_with(bot, method, timeout=None)
@pytest.mark.asyncio
async def test_close(self):
session = AiohttpSession()
bot = Bot("42:TEST", session=session)
await session.create_session()
with patch(
"aiogram.api.client.session.aiohttp.AiohttpSession.close", new_callable=CoroutineMock
) as mocked_close:
await bot.close()
mocked_close.assert_awaited()
@pytest.mark.asyncio
@pytest.mark.parametrize("close", [True, False])
async def test_context_manager(self, close: bool):
with patch(
"aiogram.api.client.session.aiohttp.AiohttpSession.close", new_callable=CoroutineMock
) as mocked_close:
async with Bot("42:TEST", session=AiohttpSession()).context(auto_close=close) as bot:
assert isinstance(bot, Bot)
if close:
mocked_close.assert_awaited()
else:
mocked_close.assert_not_awaited()
@pytest.mark.asyncio
async def test_download_file(self, aresponses: ResponsesMockServer):
aresponses.add(
aresponses.ANY, aresponses.ANY, "get", aresponses.Response(status=200, body=b"\f" * 10)
)
# https://github.com/Tinche/aiofiles#writing-tests-for-aiofiles
aiofiles.threadpool.wrap.register(CoroutineMock)(
lambda *args, **kwargs: aiofiles.threadpool.AsyncBufferedIOBase(*args, **kwargs)
)
mock_file = CoroutineMock()
bot = Bot("42:TEST")
with patch("aiofiles.threadpool.sync_open", return_value=mock_file):
await bot.download_file("TEST", "file.png")
mock_file.write.assert_called_once_with(b"\f" * 10)
@pytest.mark.asyncio
async def test_download_file_default_destination(self, aresponses: ResponsesMockServer):
bot = Bot("42:TEST")
aresponses.add(
aresponses.ANY, aresponses.ANY, "get", aresponses.Response(status=200, body=b"\f" * 10)
)
result = await bot.download_file("TEST")
assert isinstance(result, io.BytesIO)
assert result.read() == b"\f" * 10
@pytest.mark.asyncio
async def test_download_file_custom_destination(self, aresponses: ResponsesMockServer):
bot = Bot("42:TEST")
aresponses.add(
aresponses.ANY, aresponses.ANY, "get", aresponses.Response(status=200, body=b"\f" * 10)
)
custom = io.BytesIO()
result = await bot.download_file("TEST", custom)
assert isinstance(result, io.BytesIO)
assert result is custom
assert result.read() == b"\f" * 10
@pytest.mark.asyncio
async def test_download(self, bot: MockedBot, aresponses: ResponsesMockServer):
bot.add_result_for(
GetFile, ok=True, result=File(file_id="file id", file_unique_id="file id")
)
bot.add_result_for(
GetFile, ok=True, result=File(file_id="file id", file_unique_id="file id")
)
assert await bot.download(File(file_id="file id", file_unique_id="file id"))
assert await bot.download("file id")
with pytest.raises(TypeError):
await bot.download(
[PhotoSize(file_id="file id", file_unique_id="file id", width=123, height=123)]
)
| Abdo-Asil/abogram | tests/test_api/test_client/test_bot.py | test_bot.py | py | 4,506 | python | en | code | 0 | github-code | 13 |
39761879952 | import socket
from threading import Thread
from typing import Dict
from . import logger
msgFromServer = "Hello UDP Client"
bytesToSend = str.encode(msgFromServer)
class UdpServerException(Exception):
pass
class UdpServer(Thread):
def __init__(self, config: Dict, receiver):
super(UdpServer, self).__init__()
self.config = config
self.receiver = receiver
# run in the background
self.setDaemon(True)
# Create a datagram socket
self.server = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 10000000)
# Bind to address and ip
try:
self.server.bind((config.get("host"), config.get("port")))
except OSError as e:
logger.error(f"Could not bind UDP server to {config.get('host')}:{config.get('port')}")
logger.debug("", exc_info=True)
raise UdpServerException(e)
logger.debug("UDP server created")
self.go = True
def stop(self):
self.go = False
self.server.close()
logger.debug("Terminating UDP server")
def run(self):
if self.server.getsockname()[1] == 0:
logger.debug("Terminating UDP server as not bound")
else:
logger.debug("UDP server listening...")
# Listen for incoming datagrams
while self.go:
bytes_address_pair = self.server.recvfrom(self.config.get("buffer_size"))
message_str = bytes_address_pair[0].decode()
address = bytes_address_pair[1]
logger.debug(f"Message received from {address[0]}:{address[1]}, content: {message_str}")
# send message to the receiver
self.receiver.process_message(message_str)
| ecmwf/aviso | aviso-server/monitoring/aviso_monitoring/udp_server.py | udp_server.py | py | 1,916 | python | en | code | 9 | github-code | 13 |
2534340493 | """
Loop Detection: Given a circular linked list, implement an algorithm that returns the node at the
beginning of the loop.
DEFINITION
Circular linked list: A (corrupt) linked list in which a node's next pointer points to an earlier node, so
as to make a loop in the linked list.
EXAMPLE
Input:A - > B - > C - > D - > E - > C [the same C as earlier]
Output:C
"""
from linkedlist.RemoveDups import *
def loopdetect(xlist):
h = set()
curr = xlist
count = 0
while curr is not None and count <=10:
if curr in h:
return curr.data
else:
h.add(curr)
curr = curr.next
count += 1
a = Node(1)
b = Node(2)
c = Node(3)
d = Node(4)
e = Node(5)
a.next = b
b.next = c
c.next = d
d.next = e
e.next = c
def printNodelist(node):
curr = node
count = 0
while count < 10:
print(curr.data)
curr = curr.next
count += 1
# printNodelist(a)
print(loopdetect(a)) | fizzywonda/CodingInterview | linkedlist/Loopdetection.py | Loopdetection.py | py | 988 | python | en | code | 0 | github-code | 13 |
16980697769 | import requests
import csv
from bs4 import BeautifulSoup
import dateutil.parser as parser
## Funktion til at skrabe hvert enkelt objekt i itemlist
def item_scraper(item):
title = item.find(class_="title").get_text().strip()
subtitle = item.find(class_="dek").get_text().strip()
date = item.find(class_="date").get_text().strip()
link = item.find("a")["href"].strip()
return {"title": title,
"subtitle": subtitle,
"date": parser.parse(date, ignoretz=True),
"link": "https://www.cbsnews.com"+link}
## Funktion til at skrabe én URL
def URL_scraper(URL):
page = requests.get(URL)
soup = BeautifulSoup(page.content, "html.parser")
results = soup.find(class_="media-list content-list result-list")
itemlist = results.find_all("li")
scraped_itemlist = []
for item in itemlist:
scraped_itemlist.append(item_scraper(item))
return scraped_itemlist
## Funktion til eksport som CSV
def CSV_writer(scraped_itemlist):
with open("temp.csv", "a+") as outfile:
writer = csv.DictWriter(
outfile,
fieldnames=["date","title", "subtitle", "link"])
for item in scraped_itemlist:
writer.writerow(item)
outfile.close()
## Sammensat funktion. Tager args "keyword" som er emnet, du ønsker at søge på samt "start" og "end" som hhv. angiver første og sidste side af resultater, der skal medtages i skrabet
def CBS_scraper(keyword, start, end):
for i in range(start, end):
URL = "https://www.cbsnews.com/search/?q={keyword}&p={pagenumber}".format(keyword = keyword, pagenumber = i)
CSV_writer(URL_scraper(URL))
CBS_scraper("Iraq", 3600, 4100)
| Oeyaas/smaating | as.py | as.py | py | 1,700 | python | en | code | 0 | github-code | 13 |
2200164677 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def coef(p,q):
tmp = len([val for val in p if val in q])
return 2.0*tmp/(len(p)+len(q))
def simatt(q,Pu,alpha = 0.5):
cc = []
for p in Pu:
cc.append(coef(p,q))
cc = sorted(cc)
for i in range(1,len(cc)):
cc[i] = alpha * cc[i] + (1-alpha) * cc[i-1]
return cc[-1]
def sis(q,Pu):
cc = set()
for p in Pu:
cc = cc.union([val for val in p if val in q])
return cc
| aaeviru/pythonlib | attack.py | attack.py | py | 479 | python | en | code | 0 | github-code | 13 |
25321524274 | def toLower(s):
lowers = ""
for c in s:
c = c.lower()
lowers += c
return lowers
# print(toLower("GFgggftYBXjiIIOlh"))
def isPalindrome(s):
if len(s) <= 1:
return True
else:
return s[0] == s[-1] and isPalindrome(s[1:-1])
print(isPalindrome("qwerytrewq")) | fedpanoz/Python_Guttag | isPalindrome.py | isPalindrome.py | py | 310 | python | en | code | 0 | github-code | 13 |
6953410626 | with open('input', 'r') as f:
lines = []
for line in f:
lines.append(line)
currPos = 0
count = 0
a = True
while a:
currMove = lines[currPos]
lines[currPos] = 1 + int(lines[currPos])
currPos = currPos + int(currMove)
count = count + 1
if(currPos > (len(lines) - 1) or currPos < 0):
a = False
print(count)
| schneiderl/problems-solved | AOC2017/day5/day_5_puzzle_1.py | day_5_puzzle_1.py | py | 389 | python | en | code | 0 | github-code | 13 |
5960935316 | import shutil
import time
from typing import Optional
import zipfile
import torch
from cog import BasePredictor, ConcatenateIterator, Input, Path
from config import DEFAULT_MODEL_NAME, load_tokenizer, load_tensorizer, pull_gcp_file
from subclass import YieldingLlama
from peft import PeftModel
import os
class Predictor(BasePredictor):
def setup(self, weights: Optional[Path] = None):
self.device = "cuda" if torch.cuda.is_available() else "cpu"
if weights is not None and weights.name == "weights":
# bugfix
weights = None
if weights is None:
self.model = load_tensorizer(weights=DEFAULT_MODEL_NAME, plaid_mode=True, cls=YieldingLlama)
else:
weights = str(weights)
if '.zip' in weights:
self.model = self.load_peft(weights)
elif "tensors" in weights:
self.model = load_tensorizer(weights, plaid_mode=True, cls=YieldingLlama)
else:
self.model = self.load_huggingface_model(weights=weights)
self.tokenizer = load_tokenizer()
def load_peft(self, weights):
st = time.time()
if 'tensors' in DEFAULT_MODEL_NAME:
model = load_tensorizer(DEFAULT_MODEL_NAME, plaid_mode=False, cls=YieldingLlama)
else:
model = self.load_huggingface_model(DEFAULT_MODEL_NAME)
if 'https' in weights: # weights are in the cloud
local_weights = 'local_weights.zip'
pull_gcp_file(weights, local_weights)
weights = local_weights
out = '/src/peft_dir'
if os.path.exists(out):
shutil.rmtree(out)
with zipfile.ZipFile(weights, 'r') as zip_ref:
zip_ref.extractall(out)
model = PeftModel.from_pretrained(model, out)
print(f"peft model loaded in {time.time() - st}")
return model.to('cuda')
def load_huggingface_model(self, weights=None):
st = time.time()
print(f"loading weights from {weights} w/o tensorizer")
model = YieldingLlama.from_pretrained(
weights, cache_dir="pretrained_weights", torch_dtype=torch.float16
)
model.to(self.device)
print(f"weights loaded in {time.time() - st}")
return model
def predict(
self,
prompt: str = Input(description=f"Prompt to send to Llama."),
max_length: int = Input(
description="Maximum number of tokens to generate. A word is generally 2-3 tokens",
ge=1,
default=500,
),
temperature: float = Input(
description="Adjusts randomness of outputs, greater than 1 is random and 0 is deterministic, 0.75 is a good starting value.",
ge=0.01,
le=5,
default=0.75,
),
top_p: float = Input(
description="When decoding text, samples from the top p percentage of most likely tokens; lower to ignore less likely tokens",
ge=0.01,
le=1.0,
default=1.0,
),
repetition_penalty: float = Input(
description="Penalty for repeated words in generated text; 1 is no penalty, values greater than 1 discourage repetition, less than 1 encourage it.",
ge=0.01,
le=5,
default=1,
),
debug: bool = Input(
description="provide debugging output in logs", default=False
),
) -> ConcatenateIterator[str]:
input = self.tokenizer(prompt, return_tensors="pt").input_ids.to(self.device)
with torch.inference_mode() and torch.autocast("cuda"):
first_token_yielded = False
prev_ids = []
for output in self.model.generate(
input_ids=input,
max_length=max_length,
do_sample=True,
temperature=temperature,
top_p=top_p,
repetition_penalty=repetition_penalty,
):
cur_id = output.item()
# in order to properly handle spaces, we need to do our own tokenizing. Fun!
# we're building up a buffer of sub-word / punctuation tokens until we hit a space, and then yielding whole words + punctuation.
cur_token = self.tokenizer.convert_ids_to_tokens(cur_id)
# skip initial newline, which this almost always yields. hack - newline id = 13.
if not first_token_yielded and not prev_ids and cur_id == 13:
continue
# underscore means a space, means we yield previous tokens
if cur_token.startswith("▁"): # this is not a standard underscore.
# first token
if not prev_ids:
prev_ids = [cur_id]
continue
# there are tokens to yield
else:
token = self.tokenizer.decode(prev_ids)
prev_ids = [cur_id]
if not first_token_yielded:
# no leading space for first token
token = token.strip()
first_token_yielded = True
yield token
else:
prev_ids.append(cur_id)
continue
# remove any special tokens such as </s>
token = self.tokenizer.decode(prev_ids, skip_special_tokens=True)
if not first_token_yielded:
# no leading space for first token
token = token.strip()
first_token_yielded = True
yield token
if debug:
print(f"cur memory: {torch.cuda.memory_allocated()}")
print(f"max allocated: {torch.cuda.max_memory_allocated()}")
print(f"peak memory: {torch.cuda.max_memory_reserved()}")
class EightBitPredictor(Predictor):
"""subclass s.t. we can configure whether a model is loaded in 8bit mode from cog.yaml"""
def setup(self, weights: Optional[Path] = None):
if weights is not None and weights.name == "weights":
# bugfix
weights = None
# TODO: fine-tuned 8bit weights.
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.model = YieldingLlama.from_pretrained(
DEFAULT_MODEL_NAME, load_in_8bit=True, device_map="auto"
)
self.tokenizer = load_tokenizer()
| replicate/cog-llama | predict.py | predict.py | py | 6,540 | python | en | code | 58 | github-code | 13 |
4928416383 | from torchvision.transforms import RandomApply, Compose, ColorJitter, RandomGrayscale, RandomRotation, \
RandomResizedCrop, ToTensor
from utils.base_dataset import BaseDatasetHDF
from PIL.Image import fromarray
import torch
import h5py
class KatherHDF(BaseDatasetHDF):
def __init__(self, hdf5_filepath, phase, batch_size, use_cache, cache_size=30):
"""
Initializes the class Kather19HDF relying on a .hdf5 file that contains the complete data for all phases
(train, test, validation). It contains the input data as well as the target data to reduce the amount of
computation done in fly. The samples are first split w.r.t. the phase (train, test, validation) and then w.r.t.
the status (input, target). A pair of (input, target) samples is accessed with the same index. For a given
phase a pair is accessed as: (hdf[phase]['input'][index], hdf[phase]['target'][index]).
The .hdf5 file is stored on disk and only the queried samples are loaded in RAM. To increase retrieval speed
a small cache in RAM is implemented. When using the cache, one should note the following observations:
- The speed will only improve if the data is not shuffled.
- The cache size must be adapted to the computer used.
- The number of workers of the data loader must be adapted to the computer used and the cache size.
- The cache size must be a multiple of the chunk size that was used when filling the .hdf5 file.
:param hdf5_filepath: location of the .hdf5 file.
:param phase: phase in which the dataset is used ('train'/'valid'/'test').
:param batch_size: size of a single batch.
:param use_cache: boolean indicating if the cache should be used or not.
:param cache_size: size of the cache in number of batches.
"""
super(KatherHDF, self).__init__(hdf5_filepath, phase, batch_size, use_cache, cache_size)
# Get the image transformer
self.transformer = self.get_image_transformer()
def get_image_transformer(self):
transformations = [
ColorJitter(brightness=0.3, contrast=0.3, saturation=0.1, hue=0.1),
RandomGrayscale(p=0.5),
RandomRotation(degrees=[0., 45.]),
RandomResizedCrop(size=[224, 224], scale=(0.3, 1.0))]
return Compose([RandomApply(transformations, p=0.7), ToTensor()])
def transform(self, x):
if self.phase == 'train':
x = self.transformer(fromarray(x))
else:
x = ToTensor()(x)
return x
def __getitem__(self, index):
"""
Loads a single pair (input, target) data.
:param index: index of the sample to load.
:return: queried pair of (input, target) data.
"""
if self.use_cache:
if not self.is_in_cache(index):
self.load_chunk_to_cache(index)
x_input = self.cache['input'][index - self.cache_min_index]
x_target = self.cache['target'][index - self.cache_min_index][None]
else:
with h5py.File(self.hdf5_filepath, 'r') as hdf:
x_input = hdf[self.phase]['input'][index]
x_target = hdf[self.phase]['target'][index][None]
return self.transform(x_input), torch.from_numpy(x_target)
| stegmuel/DANN_py3 | utils/kather_dataset.py | kather_dataset.py | py | 3,348 | python | en | code | null | github-code | 13 |
23476246043 | """init db
Revision ID: 65d821058a5e
Revises: None
Create Date: 2016-08-26 14:16:32.166923
"""
# revision identifiers, used by Alembic.
revision = '65d821058a5e'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``category`` and ``post`` tables (initial schema)."""
    op.create_table(
        'category',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=50), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    )
    # ``post`` references ``category`` and therefore must be created second.
    op.create_table(
        'post',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('title', sa.String(length=80), nullable=True),
        sa.Column('body', sa.Text(), nullable=True),
        sa.Column('pub_date', sa.DateTime(), nullable=True),
        sa.Column('category_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['category_id'], ['category.id']),
        sa.PrimaryKeyConstraint('id'),
    )
def downgrade():
    """Drop the tables created by :func:`upgrade`.

    ``post`` holds the foreign key into ``category``, so it is dropped first.
    """
    op.drop_table('post')
    op.drop_table('category')
| Moxikai/Salary_info | migrations/versions/65d821058a5e_init_db.py | 65d821058a5e_init_db.py | py | 1,113 | python | en | code | 0 | github-code | 13 |
3988920436 | import json
import traceback
from datetime import datetime
from typing import Callable, Dict, List
import websockets
from .homeplus.models import Item, Store
from .state import State, state
class Client:
    """Websocket client that mirrors store/item data into the shared state.

    The client connects to ``host``, parses incoming JSON messages of type
    "stores" or "items" into model objects, updates the global ``state``,
    and fires the matching listeners ("update_stores", "update_items",
    "open", "close", "error").
    """

    host: str
    # Per-instance mapping of event name -> list of async callbacks.
    listeners: Dict[str, List[Callable]]

    def __init__(self, host: str) -> None:
        self.host = host
        # Bug fix: this used to be a class-level ``{}`` default, which was
        # shared by every Client instance and leaked listeners between them.
        # Each instance now owns its own registry.
        self.listeners = {}

    def _add_listener(self, event_name: str, func):
        """Register ``func`` as a listener for ``event_name``."""
        self.listeners.setdefault(event_name, []).append(func)

    async def _invoke_listeners(self, event_name: str, *args, **kwargs):
        """Await every listener of ``event_name`` in registration order."""
        for listener in self.listeners.get(event_name, []):
            await listener(*args, **kwargs)

    def on_update_stores(self, func):
        self._add_listener("update_stores", func)

    def on_update_items(self, func):
        self._add_listener("update_items", func)

    def on_error(self, func):
        self._add_listener("error", func)

    def on_close(self, func):
        self._add_listener("close", func)

    def on_open(self, func):
        self._add_listener("open", func)

    async def _on_receive_stores_data(self, data):
        """Replace the store list in the shared state from a "stores" payload."""
        state.update(
            State(
                stores=[
                    Store(id=_store["id"], name=_store["name"], region=_store["region"])
                    for _store in data["stores"]
                ],
            )
        )
        await self._invoke_listeners("update_stores")

    async def _on_receive_items_data(self, data):
        """Rebuild the per-store item matrix from an "items" payload.

        ``data["stock_quantities"]`` is indexed [store][item] in the same
        order as ``state.stores`` and ``data["items"]``; ``data["time"]``
        is a millisecond epoch timestamp.
        """
        _items = data["items"]
        state.update(
            State(
                items=[
                    [
                        Item(
                            no=_item["no"],
                            name=_item["name"],
                            store_id=store.id,
                            stock_quantity=data["stock_quantities"][i][j],
                        )
                        for j, _item in enumerate(_items)
                    ]
                    for i, store in enumerate(state.stores)
                ],
                last_updated=datetime.fromtimestamp(data["time"] / 1000.0),
            )
        )
        await self._invoke_listeners("update_items")

    async def run(self):
        """Connect to the websocket host and dispatch messages forever.

        Using ``websockets.connect`` as an async iterator reconnects
        automatically; a closed connection fires the "close" listeners and
        the loop continues with the next (re)connection attempt.
        """

        async def _on_message(message):
            response = json.loads(message)
            # Route the payload by its declared message type.
            if response["type"] == "stores":
                await self._on_receive_stores_data(response["data"])
            elif response["type"] == "items":
                await self._on_receive_items_data(response["data"])

        async def _on_error(error):
            print("websocket client: error occured")
            print(error)
            print(traceback.format_exc())
            await self._invoke_listeners("error", error)

        async def _on_close():
            print("websocket client: connection closed")
            await self._invoke_listeners("close")

        async def _on_open():
            print("websocket client: connection opened")
            await self._invoke_listeners("open")

        try:
            async for websocket in websockets.connect(self.host):
                await _on_open()
                try:
                    while True:
                        message = await websocket.recv()
                        await _on_message(message)
                except websockets.ConnectionClosed:
                    await _on_close()
                    continue
        except Exception as e:
            await _on_error(e)
| solo5star/dangdangrun | dangdangrun/client.py | client.py | py | 3,538 | python | en | code | 2 | github-code | 13 |
27802952519 | """This module is a thing"""
import json
from pprint import pprint
import logging
import urllib.parse
from collections import defaultdict
from io import BytesIO
import time
from orm import *
import datetime
import twitterpost
import facebookutils
import pycurlutil
import os.path
import configparser
import sys
page_access_token = ""
fb_client_secret = ""
fb_client_id = ''
rep_page = ""
target_page = ""
api_endpoint = 'https://graph.facebook.com/v2.11/'
facebook_url = 'https://www.facebook.com/'
dryrun = True
def getAccessTokens():
    """Fetch an app access token from the Facebook OAuth endpoint.

    Uses the module-level ``fb_client_id``/``fb_client_secret`` credentials
    with the client-credentials grant and returns the token string.
    """
    # NOTE(review): this logs the client secret in plaintext — confirm that
    # is intentional before shipping.
    logging.info(fb_client_secret)
    request_params = [
        ('client_id', fb_client_id),
        ('client_secret', fb_client_secret),
        ('grant_type', 'client_credentials'),
    ]
    response = pycurlutil.pycurlget('https://graph.facebook.com/oauth/access_token', request_params)
    return response['access_token']
def main():
    """Poll a representative's Facebook page and mirror deleted comments.

    Every 5 minutes: fetch all posts (with comments) from ``rep_page``,
    persist new posts/comments to the local database, and for any stored
    comment that has disappeared from the live page, repost it to
    ``target_page`` and tweet a link (unless running in dry-run mode).
    """
    # Peewee tables; True means "create only if they do not exist yet".
    Post.create_table(True)
    Comment.create_table(True)
    config = configparser.ConfigParser()
    config.read('config.ini')
    dbname = config.get('General','dbfile')
    #FACEBOOK CONFIG
    global page_access_token
    global fb_client_id
    global fb_client_secret
    global rep_page
    global target_page
    global dryrun
    dryrun = config.get('General', 'dryrun')
    page_access_token = config.get('Facebook', 'fb_page_access_token')
    fb_client_id = config.get('Facebook','fb_client_id')
    fb_client_secret = config.get('Facebook','fb_client_secret')
    rep_page = config.get('Facebook','rep_fb_page')
    target_page = config.get('Facebook','target_fb_page')
    #TWITTERCONFIG
    twitterpost.config(
        twitter_consumer_key = config.get('Twitter','twitter_consumer_key'),
        twitter_access_key = config.get('Twitter','twitter_access_key'),
        twitter_consumer_secret = config.get('Twitter','twitter_consumer_secret'),
        twitter_access_secret= config.get('Twitter','twitter_access_secret')
    )
    while True:
        logging.info("Running Page Filter detection @ " + str(datetime.datetime.now()))
        # App access token is refreshed on every polling cycle.
        access_token = getAccessTokens()
        params = [('fields','posts{comments,id,caption,created_time,message}'), ('access_token', access_token )]
        url = api_endpoint + rep_page
        check_json = pycurlutil.pycurlget(url, params)
        for comments in check_json['posts']['data']:
            # #get all posts
            # First, check if this is a new post by getting ID from Posts table
            query = Post.select().where(Post.post_id == comments['id'])
            # If so, capture message, ID and time/date and save to database
            if not query.exists():
                logging.info("creating post...")
                message = "N/A"
                if 'message' in comments:
                    message = comments['message']
                query = Post.create(post_id = comments['id'],created_date=comments['created_time'], message = message )
            posted_comments = []
            if 'comments' in comments:
                # Check existing comments vs new comments
                # if new comments are not present(check ID against dictionary/list), save them to database
                # if existing comments ARE NOT FOUND, add them to suspect removed list
                comment_block = comments['comments']
                done_paging = False
                comment_count = 0
                # Walk every page of the comments connection.
                while not done_paging:
                    for comment_data in comment_block['data']:
                        comment_count += 1
                        comment_query = Comment.select().where(Comment.comment_id == comment_data['id'])
                        if not comment_query.exists():
                            logging.info("Creating comment....")
                            # Empty-message comments (e.g. pure attachments) are not stored.
                            if comment_data['message'].strip() != "":
                                Comment.create(post = query, comment_id = comment_data['id'], created_date=comment_data['created_time'], message = comment_data['message'] )
                    # Accumulate every comment still visible on Facebook.
                    posted_comments = posted_comments + comment_block ['data']
                    if 'next' in comment_block['paging']:
                        comment_block = pycurlutil.pycurlgetURL(comment_block['paging']['next'])
                    else:
                        done_paging = True
                # Any stored comment missing from the live page is treated as
                # deleted/filtered by the page owner.
                for stored_comments in Comment.select().where(Comment.post == query):
                    found = False
                    for existing_comments in posted_comments:
                        if existing_comments['id'] == stored_comments.comment_id:
                            found = True
                    if not found and stored_comments.message.strip() != "" and not stored_comments.has_been_posted:
                        logging.info("FILTERED!")
                        logging.info(stored_comments.message)
                        logging.info("On post:" + comments['message'] )
                        # NOTE(review): dryrun comes from config as a *string*,
                        # so only the literal value "False" enables posting.
                        if "False" == dryrun:
                            stored_comments.has_been_deleted = True
                            stored_comments.save()
                            id = facebookutils.postPage( "REMOVED: " +stored_comments.message, page_access_token, api_endpoint, target_page)
                            post_url = "https://www.facebook.com/" + target_page + "/posts/" + id
                            twitterpost.testTweet(stored_comments.message, post_url)
                            stored_comments.has_been_posted = True
                            stored_comments.has_been_deleted = True
                            stored_comments.save()
        time.sleep(300)
if __name__ == "__main__":
    # Append INFO-level logs to FacebookTracker.log, then start the
    # infinite polling loop.
    logging.basicConfig(level=logging.INFO, filename="FacebookTracker.log", filemode="a+",
        format="%(asctime)-15s %(levelname)-8s %(message)s")
    main()
| MattLud/FacebookFilterTracker | FacebookTracker.py | FacebookTracker.py | py | 5,835 | python | en | code | 3 | github-code | 13 |
41858925944 | """
Time formatter utility
"""
from datetime import timedelta
from settings import langs
def format_time(
        lang_code: str,
        time: timedelta,
        depth: int = 1) -> str:
    """Format a timedelta as a short localized string.

    At most ``depth`` units are emitted, starting from the largest
    applicable unit (days) and working down to seconds.

    Example
    ----------
    >>> format_time('en', timedelta(days=2, hours=9, minutes=40, seconds=33), depth=2)
    '2d. 9h.'
    """
    labels = langs[lang_code]
    parts = []

    # Days.
    if time >= timedelta(days=1):
        parts.append(str(time.days) + ' ' + labels['text.time.short.days'])
    # Hours.
    if len(parts) < depth and time >= timedelta(hours=1):
        parts.append(str(time.seconds // 3600) + ' ' + labels['text.time.short.hours'])
    # Minutes.
    if len(parts) < depth and time >= timedelta(minutes=1):
        parts.append(str(time.seconds // 60 % 60) + ' ' + labels['text.time.short.minutes'])
    # Seconds (always emitted when there is room left).
    if len(parts) < depth:
        parts.append(str(time.seconds % 60) + ' ' + labels['text.time.short.seconds'])

    return ' '.join(parts)
| cubicbyte/dteubot | bot/utils/timeformatter.py | timeformatter.py | py | 1,336 | python | en | code | 2 | github-code | 13 |
20411445844 | """This module defines all the routes for the Daily AI assistant server."""
import json
import sys
import traceback
from os.path import join, dirname, abspath
from quart.cli import load_dotenv
from quart_cors import cors
from quart import Quart, jsonify, Response, request
from server.call.errors import DailyPermissionException, SessionNotFoundException
from server.config import Config
from server.call.operator import Operator
dotenv_path = join(dirname(dirname(abspath(__file__))), '.env')
load_dotenv(dotenv_path)
app = Quart(__name__)
print("Running AI assistant server")
# Note that this is not a secure CORS configuration for production.
cors(app, allow_origin="*", allow_headers=["content-type"])
config = Config()
config.ensure_dirs()
operator = Operator(config)
@app.after_serving
async def shutdown():
    """Stop all background tasks and cancel Futures."""
    # Shut the operator down first so no new work gets scheduled.
    operator.shutdown()
    # Iterate over a snapshot in case cancellation mutates the set.
    for pending in list(app.background_tasks):
        pending.cancel()
@app.route('/', methods=['GET'])
async def index():
    """Health-check style root endpoint: responds 200 with an empty body."""
    return {}, 200
@app.route('/session', methods=['POST'])
async def create_session():
    """Creates a session, which includes creating a Daily room
    and returning its URL to the caller.

    The optional JSON body may contain ``room_duration_mins`` and/or a
    pre-existing ``room_url``.
    """
    err_msg = "Failed to create session"
    raw = await request.get_data()
    try:
        data = json.loads(raw or 'null')
    except json.JSONDecodeError as e:
        # A malformed body is a client error; report 400 for consistency
        # with the /query endpoint instead of falling into the generic 500.
        return process_error(
            "Confirm that request body is in valid JSON format", 400, e)
    try:
        room_duration_mins = None
        room_url = None
        if data:
            requested_duration_mins = data.get("room_duration_mins")
            if requested_duration_mins:
                room_duration_mins = int(requested_duration_mins)
            provided_room_url = data.get("room_url")
            if provided_room_url:
                room_url = provided_room_url
        room_url = operator.create_session(room_duration_mins, room_url)
        return jsonify({
            "room_url": room_url
        }), 200
    except DailyPermissionException as e:
        return process_error(err_msg, 401, e)
    except Exception as e:
        return process_error(err_msg, 500, e)
@app.route('/summary', methods=['GET'])
async def summary():
    """Creates and returns a summary of the meeting at the provided room URL."""
    room_url = request.args.get("room_url")
    if not room_url:
        return process_error('room_url query parameter must be provided', 400)
    try:
        meeting_summary = await operator.query_assistant(room_url)
        return jsonify({"summary": meeting_summary}), 200
    except SessionNotFoundException as e:
        return process_error(
            'Requested session not found. Has it been destroyed?', 400, e)
    except Exception as e:
        return process_error('failed to generate meeting summary', 500, e)
@app.route('/query', methods=['POST'])
async def query():
    """Runs a query against the session using the provided query string."""
    raw = await request.get_data()
    try:
        data = json.loads(raw or 'null')
    except Exception as e:
        return process_error(
            "Confirm that request body is in valid JSON format", 400, e)
    room_url = data.get("room_url") if data else None
    requested_query = data.get("query") if data else None
    # Both room_url and query are required for this endpoint.
    if not room_url or not requested_query:
        return process_error(
            "Request body must contain a 'room_url' and 'query'", 400)
    try:
        answer = await operator.query_assistant(room_url, requested_query)
        return jsonify({"response": answer}), 200
    except SessionNotFoundException as e:
        return process_error(
            'Requested session not found. Has it been destroyed?', 400, e)
    except Exception as e:
        return process_error('Failed to query session', 500, e)
def process_error(msg: str, code=500, error: Exception = None,
                  ) -> tuple[Response, int]:
    """Log an error to stderr and build a JSON error response.

    :param msg: human-readable error message returned to the client.
    :param code: HTTP status code for the response.
    :param error: optional exception whose traceback is printed.
    """
    if error:
        # Full stack trace goes to stderr for debugging.
        traceback.print_exc()
    print(msg, error, file=sys.stderr)
    return jsonify({'error': msg}), code
if __name__ == '__main__':
    # Quart development server; debug=True enables reloader and tracebacks.
    app.run(debug=True)
| daily-demos/ai-meeting-assistant | server/main.py | main.py | py | 4,242 | python | en | code | 0 | github-code | 13 |
37630235372 | import time
import logging
import datetime
from status_level import STATUS_CODE_TO_STR, OK
from jira_utils import (post_issue, PROJECT_ID_P_S3, ISSUE_TYPE_ID_BUG)
from sb_param_utils import get_license_plate_number
def generate_crash_description(exc_str, module="Fail-safe"):
    """Build a crash-report body with timestamp, plate number and traceback.

    :param exc_str: formatted exception/traceback text to embed.
    :param module: name of the crashed component.
    :return: multi-line unicode description for the Jira issue body.
    """
    dt = datetime.datetime.fromtimestamp(time.time())
    plate = get_license_plate_number()
    return u"""
    {} crashed at timestamp: {}
    License plate: {}
    {}
    """.format(module, dt, plate, exc_str)
def generate_issue_description(status_code, status_str, timestamp):
    """Build the Jira issue body for a non-OK status.

    :param status_code: numeric status; OK produces no description.
    :param status_str: human-readable status detail.
    :param timestamp: epoch seconds (10 digits) or milliseconds (13 digits).
    :return: multi-line unicode description, or "" when status is OK.
    """
    if status_code == OK:
        logging.warn("Do not generate description for status OK")
        return ""
    # timestamp is expected to be of length 10
    ts_len = len(str(int(timestamp)))
    if ts_len == 13:
        # South bridge format: milliseconds -> seconds.
        timestamp = timestamp / 1000
    dt = datetime.datetime.fromtimestamp(timestamp)
    plate = get_license_plate_number()
    # NOTE(review): start_dt is not zero-padded while end_dt is ({:02d}) —
    # confirm the playback service accepts single-digit hour/minute values.
    start_dt = "{}-{}-{} {}:{}".format(
        dt.year, dt.month, dt.day, dt.hour, dt.minute)
    # The playback window covers one minute starting at the event time.
    minute_after_dt = dt + datetime.timedelta(minutes=1)
    end_dt = "{}-{}-{} {:02d}:{:02d}".format(
        minute_after_dt.year, minute_after_dt.month, minute_after_dt.day,
        minute_after_dt.hour, minute_after_dt.minute)
    url = (u"https://service.itriadv.co:8743/ADV/EventPlayback?plate={}&"
           u"startDt={}&endDt={}").format(plate, start_dt, end_dt)
    return u"""
    status code: {}
    status str: {}
    issue is reported at timestamp: {}
    Please use the url
    {}
    to retrieve related bag files.
    - User name: u200
    - User password: please ask your colleague
    """.format(STATUS_CODE_TO_STR[status_code], status_str, dt, url)
class IssueReporter():
    """Posts issues to Jira while rate-limiting repeated reports."""

    def __init__(self):
        self.lastest_issue_post_time = 0
        self.min_post_time_interval = 60  # Don't post same issue within 60s
        self.project_id = PROJECT_ID_P_S3
        self.issue_type_id = ISSUE_TYPE_ID_BUG

    def set_project_id(self, project_id):
        """set project id"""
        self.project_id = project_id

    def set_issue_type_id(self, issue_type_id):
        """set issue type"""
        self.issue_type_id = issue_type_id

    def _is_repeated_issue(self):
        """Return True when called again within the minimum interval.

        Also records the current time as the latest post attempt.
        """
        now = time.time()
        elapsed = now - self.lastest_issue_post_time
        self.lastest_issue_post_time = now
        return bool(elapsed <= self.min_post_time_interval)

    def post_issue(self, summary, description, dry_run=False):
        """
        Returns 1 if actually post an issue. 0 otherwise.
        """
        if self._is_repeated_issue():
            logging.warn("%s: Does not post repeated issue", summary)
            return 0
        if dry_run:
            logging.warn("%s: Dry run mode. Do not post issue to jira", summary)
        else:
            logging.warn("%s: Post issue to jira", summary)
            # Delegates to the module-level jira_utils.post_issue helper.
            post_issue(self.project_id, summary, description, self.issue_type_id)
        return 1
| wasn-lab/Taillight_Recognition_with_VGG16-WaveNet | src/utilities/fail_safe/src/issue_reporter.py | issue_reporter.py | py | 2,928 | python | en | code | 2 | github-code | 13 |
2950900443 | import tensorflow as tf
class SemanticSegmentationModelFactory:
    """Builds a TF1 U-Net-style segmentation graph and its loss tensor.

    All methods construct TensorFlow 1.x graph ops; nothing is executed here.
    """

    def initialize_cost(self, load_existing_model, model, labels_one_hot, graph):
        """Create (or fetch) the loss tensor.

        :param load_existing_model: when True, look up the loss tensor by
            name in ``graph`` instead of building a new one.
        :param model: network output; it is clipped before the logs, so it
            is assumed to hold per-class values in [0, 1].
        :param labels_one_hot: integer one-hot labels; an all-zero vector
            marks an unlabeled pixel.
        :param graph: tf.Graph used to resolve "fcn_loss:0" when loading.
        :return: scalar loss tensor.

        NOTE(review): the freshly built loss below is NOT named "fcn_loss"
        (only the commented-out softmax variant was), so the load-by-name
        branch only works for checkpoints saved with that older loss —
        confirm before relying on load_existing_model=True.
        """
        tensor_cost = None
        if load_existing_model == False:
            labels_one_hot_float = tf.to_float(labels_one_hot)
            #tensor_cost = tf.reduce_mean(tf.multiply(-1.0, tf.add(tf.multiply(labels_one_hot_float, tf.clip_by_value(model, 1e-10, 1.0)), tf.multiply(tf.subtract(1.0, labels_one_hot_float), tf.log(tf.subtract(1.0, tf.clip_by_value(model, 1e-10, 1.0)))))))
            #tensor_cost = tf.reduce_mean(tf.multiply(labels_one_hot_float, tf.log(model)))
            #tensor_cost = tf.log(model)
            #tensor_cost = tf.reduce_mean(tf.square(tf.subtract(tf.to_int32(model), labels_one_hot)))
            #cross_entropy = -tf.reduce_sum(labels_one_hot_float * tf.log(tf.clip_by_value(model, 1e-10, 1.0)))
            #tensor_cost = cross_entropy
            # Per-class binary cross-entropy; clipping keeps the logs finite.
            cost = tf.multiply(labels_one_hot_float, tf.log(tf.clip_by_value(model, 0.000001, 1.0)))
            cost = tf.add(cost, tf.multiply(tf.subtract(1.0, labels_one_hot_float), tf.log(tf.subtract(1.0, tf.clip_by_value(model, 0.0, 0.999999)))))
            cost_per_logit = tf.multiply(-1.0, cost)
            # Sum over the class axis to get one cost per pixel.
            cost_per_pixel = tf.reduce_sum(cost_per_logit, -1)
            # Non-zero only for pixels that actually carry a label.
            pixel_has_label = tf.reduce_mean(labels_one_hot_float, axis=-1)
            # Remove any cost if no label is set for this pixel (will lead to random label approximation for this pixel and hopefully reduce prediction complexity)
            cost_per_pixel = tf.multiply(cost_per_pixel, pixel_has_label)
            tensor_cost = tf.reduce_mean(cost_per_pixel)
            #tensor_cost = cost
            # Calculate distance from actual labels using cross entropy
            #cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=labels_one_hot, name="cross_entropy")
            # Take mean for total loss
            #tensor_cost = tf.reduce_mean(cross_entropy, name="fcn_loss")
        if load_existing_model == True:
            tensor_cost = graph.get_tensor_by_name("fcn_loss:0")
        return tensor_cost

    def initialize_model_U_net(self, label_size, tf_ph_x, tf_ph_droput_keep_prob):
        """Build a two-level U-Net encoder/decoder with skip connections.

        :param label_size: number of output classes per pixel.
        :param tf_ph_x: input image placeholder.
        :param tf_ph_droput_keep_prob: dropout keep-probability placeholder.
        :return: logits reshaped to (batch, H*W, label_size), named "fcn_logits".
        """
        # --- Encoder level 1 ---
        model = tf.nn.dropout(tf_ph_x, keep_prob=tf_ph_droput_keep_prob)
        model = self.add_convolution(model=model, filter_size=64)
        model = self.add_convolution(model=model, filter_multiplier=1)
        output_layer1 = model  # saved for the level-1 skip connection
        model = self.add_max_pooling(model=model, pool_size=[4, 4], strides=4)
        # --- Encoder level 2 ---
        model = tf.nn.dropout(model, keep_prob=tf_ph_droput_keep_prob)
        model = self.add_convolution(model=model, filter_multiplier=2)
        model = self.add_convolution(model=model, filter_multiplier=1)
        output_layer2 = model  # saved for the level-2 skip connection
        model = self.add_max_pooling(model=model, pool_size=[4, 4], strides=4)
        # --- Bottleneck ---
        model = tf.nn.dropout(model, keep_prob=tf_ph_droput_keep_prob)
        model = self.add_convolution(model=model, filter_multiplier=2)
        model = self.add_convolution(model=model, filter_multiplier=1)
        ###########Deconvolution############
        # Upsample x4 and merge with the level-2 features along channels.
        model = self.add_deconvolution(model=model, size_multiplier=4)
        model = self.add_convolution(model=model, filter_multiplier=0.5)
        model = tf.concat([model, output_layer2], axis=3)
        model = self.add_convolution(model=model, filter_multiplier=1)
        model = self.add_convolution(model=model, filter_multiplier=0.5)
        # Upsample x4 and merge with the level-1 features along channels.
        model = self.add_deconvolution(model=model, size_multiplier=4)
        model = self.add_convolution(model=model, filter_multiplier=0.5)
        model = tf.concat([model, output_layer1], axis=3)
        model = self.add_convolution(model=model, filter_multiplier=1)
        model = self.add_convolution(model=model, filter_multiplier=1)
        model = self.add_convolution(model=model, filter_multiplier=0.25)
        model = self.add_convolution(model=model, filter_multiplier=0.25)
        # Final 1-filter-per-class projection.
        model = self.add_convolution(model=model, filter_size=label_size)
        return tf.reshape(model, (-1, model.shape[1] * model.shape[2], label_size), name="fcn_logits")

    def add_convolution(self, model, filter_multiplier=0, filter_size=0, kernel_size=[3, 3], strides=1):
        """Append a same-padded ReLU conv layer.

        Output channel count is either ``filter_size`` (absolute) or the
        input channel count scaled by ``filter_multiplier`` when that is
        non-zero (the multiplier takes precedence).
        """
        model_filter_size = model.shape[3].value
        new_filter_size = filter_size
        if filter_multiplier != 0:
            new_filter_size = int(model_filter_size * filter_multiplier)
        model = tf.layers.conv2d(
            inputs=model,
            filters=new_filter_size,
            kernel_size=kernel_size,
            strides=strides,
            padding="same",
            activation=tf.nn.relu
        )
        return model

    def add_max_pooling(self, model, pool_size=[2, 2], strides=2):
        """Append a max-pooling layer with the given window and stride."""
        model = tf.layers.max_pooling2d(
            inputs=model,
            pool_size=pool_size,
            strides=strides
        )
        return model

    def add_deconvolution(self, model, size_multiplier):
        """Append a transposed conv that upsamples H/W by ``size_multiplier``
        while keeping the channel count unchanged."""
        model = tf.layers.conv2d_transpose(
            model,
            filters=model.shape[3],
            strides=size_multiplier,
            kernel_size=(size_multiplier, size_multiplier),
            padding='same')
        return model
5001460540 | from tkinter import Tk, Button
import tkinter.messagebox as msg
if __name__ == '__main__':
    tk: Tk = Tk()
    tk.title("Tic-Tac-Toe")
    xTurn: bool = True   # True -> X places the next mark, False -> O
    moves: int = 0       # marks placed so far in the current round

    def save(result_text):
        """Append one finished-game result line to results.txt."""
        with open("results.txt", "a") as f:
            f.write(result_text + "\n")

    def reset():
        """Clear the board for a new round."""
        global xTurn, moves
        # Bug fix: a fresh round starts with X again, matching the initial
        # state (previously reset handed the first move of every later
        # round to O).
        xTurn = True
        moves = 0
        for btn in buttons:
            btn["text"] = " "

    # All eight winning lines as indexes into `buttons`.
    # buttons[0..2] fill grid column 1, [3..5] column 2, [6..8] column 3,
    # matching the original button1..button9 layout.
    WIN_LINES = ((0, 1, 2), (0, 3, 6), (3, 4, 5), (6, 7, 8),
                 (1, 4, 7), (2, 5, 8), (0, 4, 8), (2, 4, 6))

    def has_won(player):
        """Return True when `player` owns a complete line."""
        return any(all(buttons[i]["text"] == player for i in line)
                   for line in WIN_LINES)

    def checkWin():
        """Declare a win or a draw if the round just ended.

        Bug fixes vs. the original:
        - the win check used to recurse checkWin("O") unconditionally,
          recursing forever whenever neither player had won yet;
        - a winning 9th move used to be reported as "Draw" because the
          draw branch fired on moves == 9 before any win check.
        """
        if moves >= 5:  # nobody can win before the fifth mark
            for player in ("X", "O"):
                if has_won(player):
                    msg.showinfo("Result", f"Player {player} won")
                    save(f"Player {player} won")
                    reset()
                    return
        if moves == 9:
            msg.showinfo("Result", "Draw")
            save("Draw")
            reset()

    def btnClick(button):
        """Place the current player's mark on `button`."""
        global moves, xTurn
        if button["text"] != " ":
            msg.showerror("Error", "Can not click twice")
            return
        button["text"] = "X" if xTurn else "O"
        xTurn = not xTurn
        moves += 1
        checkWin()

    # Build the 3x3 board; column-major to mirror the original layout.
    buttons = []
    for idx in range(9):
        btn = Button(tk, text=" ", bg="gray", fg="red", height="4", width="8")
        btn["command"] = lambda b=btn: btnClick(b)
        btn.grid(row=idx % 3 + 1, column=idx // 3 + 1)
        buttons.append(btn)

    tk.mainloop()  # run the game
    print("Thank you for trying")
else:
    print("Contents of this file can not be accessed by outer classes")
22943023484 | import env_examples # Modifies path, DO NOT REMOVE
from sympy import Symbol, Pow, Add, atan, Mul
import numpy as np
from src import Circuit, CoordinateSystem, VoltageSource, Wire, World
if __name__ == "__main__":
    WORLD_SHAPE = (101, 101)
    BATTERY_VOLTAGE = 1.0
    HIGH_WIRE_RESISTANCE = 1.0
    LOW_WIRE_RESISTANCE = 0.01

    # Symbolic cartesian coordinates shared by every wire equation.
    x, y = Symbol("x"), Symbol("y")
    cartesian_variables = (x, y)

    # Parametric wire shapes: vertical/horizontal are degenerate lines,
    # diagonal is the identity mapping.
    vertical_eqs = (0 * x, y)
    horizontal_eqs = (x, 0 * y)
    diagonal_eqs = (x, y)

    # Trace a polygonal approximation of a circle of radius 30, starting
    # from (75, 42), one short segment per 0.1 rad of arc.
    prev_x, prev_y = 75, 42
    sides = 64
    radius = 30
    prev_theta = 0
    theta_start = 0
    step = 0.1
    wires = []
    for _ in range(1, sides):
        theta = prev_theta + step
        # Segments near theta ~= theta_start + pi get the high resistance;
        # every other segment is a good conductor.
        if theta_start + np.pi - 0.1 < theta < theta_start + np.pi + 0.1:
            resistance = HIGH_WIRE_RESISTANCE
        else:
            resistance = LOW_WIRE_RESISTANCE
        next_x = ((np.cos(theta) - np.cos(prev_theta)) * radius) + prev_x
        next_y = ((np.sin(theta) - np.sin(prev_theta)) * radius) + prev_y
        wires.append(Wire((prev_x, prev_y), (next_x, next_y), diagonal_eqs,
                          cartesian_variables, resistance))
        prev_x, prev_y, prev_theta = next_x, next_y, theta

    # Close the loop back through the battery terminals.
    wires.append(Wire((prev_x, prev_y), (77, 42), diagonal_eqs,
                      cartesian_variables, LOW_WIRE_RESISTANCE))
    wires.append(VoltageSource((77, 42), (75, 42), diagonal_eqs,
                               cartesian_variables, BATTERY_VOLTAGE))

    ground_position = (77, 42)
    circuit = Circuit(wires, ground_position)
    world = World(circuit=circuit, coordinate_system=CoordinateSystem.CARTESIAN, shape=WORLD_SHAPE)
    world.show_circuit()
    world.compute()
    world.show_all()
| AlexandreBeliveau/Devoir-electromag | examples/circuitC.py | circuitC.py | py | 2,013 | python | en | code | 0 | github-code | 13 |
24616628689 | from scipy.optimize import root
import numpy as np
import cmath
def convert_to_wavevector(H, x, t_inc, omega, kp, bool=True, shift=0):
    """Modulate an envelope time series onto a carrier wave.

    :param H: complex envelope samples, one per time step.
    :param x: spatial position [m].
    :param t_inc: time increment between samples [s].
    :param omega: angular carrier frequency [rad/s].
    :param kp: carrier wavenumber [1/m].
    :param bool: True -> +i phase convention, False -> -i.
        (Name kept for backward compatibility although it shadows the
        builtin; original semantics compare ``bool == False``.)
    :param shift: constant phase offset [rad].
    :return: complex array A with A[t] = H[t] * exp(s*i*(kp*x - omega*t*t_inc + shift)).
    """
    # Pick the phase sign once instead of computing the +i result and then
    # overwriting it with the -i result; also drop the stray debug print
    # of len(A) that the original left in.
    sign = -1j if bool == False else +1j
    A = np.zeros(len(H), dtype=complex)
    for t in range(len(H)):
        A[t] = H[t] * cmath.exp(sign * (kp * x - omega * t * t_inc + shift))
    return A
def wavevector_to_envelope(A, x_grid, t_grid, omega, kp):
    """Demodulate a space-time wave field back to its complex envelope.

    H[i, j] = A[i, j] * exp(-i * (kp * x_grid[j] - omega * t_grid[i]))

    :param A: complex field samples, shape (time, space).
    :param x_grid: spatial positions matching axis 1 of A.
    :param t_grid: time instants matching axis 0 of A.
    :param omega: angular carrier frequency [rad/s].
    :param kp: carrier wavenumber [1/m].
    :return: complex envelope array, same shape as A.
    """
    H = np.zeros(A.shape, dtype=complex)
    n_time, n_space = A.shape
    for i in range(n_time):
        for j in range(n_space):
            H[i, j] = A[i, j] * cmath.exp(-1j * (kp * x_grid[j] - omega * t_grid[i]))
    return H
def dispersion(k, *args):
    """Residual of the finite-depth dispersion relation.

    f(k) = omega^2 - k * g * tanh(k * d); a root f(k) = 0 yields the
    wavenumber associated with the angular frequency in args[0].

    :param k: wavenumber [1/m]
    :param args: single dict with keys 'omega' [rad/s], 'd' [m], 'g' [m/s^2]
    :return: residual f(k)
    """
    parameters = args[0]
    omega = parameters['omega']
    g = parameters['g']
    d = parameters['d']
    return np.square(omega) - k * g * np.tanh(k * d)
def wavenumber(omega, d, g=9.81):
    """Solve the finite-depth dispersion relation for the wavenumber.

    Starts from the deep-water guess k = omega^2 / g and refines it with a
    root finder on the rearranged dispersion relation.

    :param omega: angular frequency [rad/s]
    :param d: water depth [m]
    :param g: gravitational acceleration constant [m/s^2]
    :return: wavenumber k [1/m]
    :raises RuntimeError: if the root finder does not converge (previously
        this case silently returned None, which surfaced later as obscure
        TypeErrors in the callers).
    """
    # Initial guess for k from the deep-water relation omega^2 = k * g.
    k_guess = np.divide(np.square(omega), g)
    k_return = root(dispersion, x0=k_guess, args={'omega': omega, 'd': d, 'g': g})
    if not k_return.success:
        raise RuntimeError(
            "wavenumber: root finding failed for omega={}, d={}".format(omega, d))
    return k_return.x[0]
def NLSE_coefficients_marco(omega_p, d, g=9.81):
    """Finite-depth NLSE coefficients for a carrier wave.

    :param omega_p: peak angular frequency [rad/s]
    :param d: water depth [m]
    :param g: gravitational acceleration constant [m/s^2]
    :return: (k_p, C_g, alpha_, beta_) — peak wavenumber, group velocity,
        dispersion coefficient and nonlinearity coefficient.
    """
    # Carrier wavenumber from the finite-depth dispersion relation.
    k_p = wavenumber(omega=omega_p, d=d)
    nu = 1 + np.divide(2 * k_p * d, np.sinh(2 * k_p * d))
    C_g = np.divide(omega_p, 2 * k_p) * nu
    # Dispersion coefficient.
    alpha = - np.square(nu) + 2 + 8 * np.square(k_p * d) * np.divide(
        np.cosh(2 * k_p * d), np.square(np.sinh(2 * k_p * d)))
    alpha_ = np.divide(omega_p * alpha, 8 * np.square(k_p) * np.power(C_g, 3))
    # Nonlinearity coefficient.
    beta = np.divide(
        np.cosh(4 * k_p * d) + 8 - 2 * np.square(np.tanh(k_p * d)),
        8 * np.power(np.sinh(k_p * d), 4)
    ) - np.divide(
        np.square(2 * np.square(np.cosh(k_p * d)) + 0.5 * nu),
        np.square(np.sinh(2 * k_p * d)) * (np.divide(k_p * d, np.tanh(k_p * d)) - np.divide(np.square(nu), 4))
    )
    beta_ = np.divide(omega_p * np.square(k_p) * beta, 2 * C_g)
    return k_p, C_g, alpha_, beta_
def NLSE_coefficients_chabchoub(omega_p, d, g=9.81):
    """NLSE O(eps^3) coefficients per Chabchoub 2016.

    Covers both the space-like form (lamb, mu) and the time-like form
    (delta, nu); see https://www.mdpi.com/2311-5521/1/3/23.

    :param omega_p: angular frequency [rad/s]
    :param d: water depth [m]
    :param g: gravitational acceleration constant [m/s^2]
    :return: (k_p, C_g, lamb, mu, delta, nu)
    """
    # Peak wavenumber from the finite-depth dispersion relation.
    k_p = wavenumber(omega=omega_p, d=d)
    C_g = np.divide(omega_p, 2 * k_p)
    # Space-like form coefficients.
    lamb = -np.divide(omega_p, 8 * np.square(k_p))
    mu = -np.divide(omega_p * np.square(k_p), 2)
    # Time-like form coefficients.
    delta = -1 / g
    nu = -k_p ** 3
    return k_p, C_g, lamb, mu, delta, nu
if __name__ == "__main__":
    # Quick manual sanity check; previously this ran (and printed) on
    # every import of this library module.
    print(wavenumber(omega=3, d=1))
28441800433 | # -*- coding: utf-8 -*-
"""
@Time : 2022/7/25 15:51
@Auth : 罗忠建
"""
import os
import sys
import xlrd
from xlutils.copy import copy
import xlwt
class ExcelApp:
    """Thin helper around xlrd/xlwt/xlutils for reading and writing .xls files.

    Creates the workbook (with the given sheets) on first use if it does not
    exist yet. Note that reads return xlrd objects while writes go through
    xlwt/xlutils.copy objects — the two kinds are not interchangeable.
    """

    # Class-level defaults; overwritten per instance in __init__.
    dataDir = ""
    fileName = ""
    sheetsName = ["Sheet1"]

    def __init__(self, fileName, sheetsName):
        """Remember the target file and create it with the given sheets if missing."""
        self.fileName = fileName
        self.sheetsName = sheetsName
        baseDir = sys.path[0]
        dataDir = ""
        if os.path.isdir(baseDir):
            dataDir = baseDir
        elif os.path.isfile(baseDir):
            dataDir = os.path.dirname(baseDir)
        # NOTE(review): dataDir is computed but unused — the commented-out
        # join below used to prepend it to fileName.
        if not os.path.isfile(fileName):
            reportBook = xlwt.Workbook()
            for sht in self.sheetsName:
                reportBook.add_sheet(sht)
            # self.fileName = "".join([dataDir.replace("\\", "/"), "/", self.fileName])
            print(self.fileName)
            reportBook.save(self.fileName)

    # Open the workbook: 'w' returns a writable xlutils copy, 'r' an xlrd book.
    def openExcel(self, mode):
        if mode == 'w':
            return copy(xlrd.open_workbook(self.fileName))
        elif mode == 'r':
            return xlrd.open_workbook(self.fileName)

    # Get a writable sheet by index (books opened with mode 'w').
    def getSheetByIndex(self, book, sheetIndex):
        sheet = book.get_sheet(sheetIndex)
        return sheet

    # Get a read-only sheet by name (books opened with mode 'r').
    def getSheetByName(self, book, sheetName):
        sheet = book.sheet_by_name(sheetName)
        return sheet

    # Save the (writable) workbook back to self.fileName.
    def saveExcel(self, book):
        book.save(self.fileName)
        # book.close()

    # Return the names of all sheets in the workbook.
    def getSheetsName(self):
        sheetlist = []
        book = xlrd.open_workbook(self.fileName)
        for sheet in book.sheets():
            sheetlist.append(sheet.name)
        return sheetlist

    # Write a cell, optionally with fill color, font color/boldness and border.
    def setCellData(self, sheetObj, row, col, cellValue, highlight = False, backcolor= "white", fontcolor = "black", bold = "off", border = 0, font_name = 'HP Simplified'):
        borders = xlwt.Borders()
        borders.top = border
        borders.bottom = border
        borders.left = border
        borders.right = border
        if highlight == False:
            sheetObj.write(row, col, cellValue)
        else:
            style = xlwt.easyxf('pattern: pattern solid, fore-color ' + backcolor+'; font:color-index '+ fontcolor + ',bold ' + bold+',name ' + font_name)
            style.borders = borders
            sheetObj.write(row, col, cellValue, style)

    # Set the width of a column on a writable sheet.
    def setColWidth(self,sheetObj, colNo, widthSize):
        sheetObj.col(colNo).width = widthSize

    # Get the values of a whole row.
    # Mode: read
    def getRowData(self, sheetObj, row):
        return sheetObj.row_values(row)

    # Get the values of a whole column.
    # Mode: read
    def getColData(self, sheetObj, col):
        return sheetObj.col_values(col)

    # Get a single cell's value; returns "" when the cell cannot be read.
    # NOTE(review): the `return` inside `finally` suppresses any exception
    # raised by cell() — confirm that silent fallback is intended.
    def getCellData(self, sheetObj, row, col):
        cellData = ""
        try:
            #data = xlrd.open_workbook(self.fileName)
            cellData = sheetObj.cell(row, col).value
        finally:
            return cellData

    # Number of rows in the named sheet (the `col` argument is unused).
    def getRowCount(self,sheetName, col):
        return xlrd.open_workbook(self.fileName).sheet_by_name(sheetName).nrows

    # Number of columns in the named sheet (the `row` argument is unused).
    def getColCount(self, sheetName, row):
        return xlrd.open_workbook(self.fileName).sheet_by_name(sheetName).ncols

    # Merge the rectangular cell range on a writable sheet.
    def mergeCells(self, sheetObj, iTopRowNo, iBottomRowNo, iLeftColNo, iRightColNo):
        sheetObj.merge(iTopRowNo, iBottomRowNo, iLeftColNo, iRightColNo)
# Ad-hoc smoke test: open the workbook, style a cell, merge a range, save.
if __name__=="__main__":
    excel = ExcelApp(r"C:\Users\admin\Desktop\122.xls", ["Sheet1"])
    book = excel.openExcel("w")
    sheet = excel.getSheetByIndex(book, 0)
    excel.setCellData(sheet, 0, 0, "Test",True, "blue")
    excel.mergeCells(sheet, 0, 0, 0, 2)
    excel.saveExcel(book)
# book = excel2.openExcel('r')
# print excel2.getCellData(excel2.getSheetByName(book,"Sheet 1"), 0, 0)
# excel = ExcelApp("C:\Users\admin\Desktop\122.xls",["Compare Result"])
# aBook = excel.openExcel('w')
# aSheet = excel.getSheetByIndex(aBook, 0)
# print "***"+excel.getCellData(aSheet, 0, 0)
# excel.setCellData(aSheet,2, 4, "sssss")
# excel.saveExcel(aBook)
# excel.setCellData(0, 1, 1, "test", True)
| luozhongjian/UIAutoTest | SaaS_Auto_Test/com/HT/SaaS/SaaSmgmt/library/excelstand.py | excelstand.py | py | 4,328 | python | en | code | 0 | github-code | 13 |
72346172178 | from rhino_io import RhinoIO
from mesh import FEMMesh
import os
def main():
    """Build a polygonal FEM mesh, refine it, convert to Rhino and dump it to stdout."""
    # presumably radius 1, 5 sides -- TODO confirm against FEMMesh.polygon's signature
    mesh = FEMMesh.polygon(1, 5)
    # Two rounds of face subdivision, then compact the internal buffers.
    mesh.subdivide_faces(2)
    mesh.shrink_buffers()
    mesh = RhinoIO.convert_to_rhino(mesh)
    print(mesh.Encode())
    # output_path = os.path.realpath(".\\tests\\test_output\\debug_output.3dm")
    # RhinoIO.write_to_file(mesh, filename=output_path, debug=True)
if __name__ == "__main__":
main()
| DerLando/FEMMeshPy | src/debug.py | debug.py | py | 424 | python | en | code | 0 | github-code | 13 |
35028265540 | # 최대공약수와 최소공배수
import math

# Read the two numbers from a single input line.
n1, n2 = map(int, input().split())
# Greatest common divisor via Euclid's algorithm; replaces the original
# trial-division loop, which tested every i up to min(n1, n2).
gcd = math.gcd(n1, n2)
print(gcd)
# lcm(a, b) = a * b // gcd(a, b); equals the original (n1//g)*(n2//g)*g.
print(n1 * n2 // gcd)
13158627492 | '''
Created on 11/7/2015
@author: ksi
'''
import threading
import sys
from socket import *
from PyQt4.QtGui import *
from PyQt4 import QtCore
from PyQt4.QtCore import *
from Tkinter import Widget
from PyQt4.QtCore import QObject, pyqtSignal, pyqtSlot
import quopri
class QWid(QWidget):
    """Chat-client main window.

    Left pane (mensa): outgoing message text; right pane (re): last received
    message; direccion: server address; buttons connect, send and close.
    Socket traffic runs on a servidor_1 worker thread, bridged back to the
    GUI thread through the comunicate signal object.
    """
    def __init__(self):
        super(QWid, self).__init__()
        self.setWindowTitle("Clientes")
        self.direccion=QLineEdit()
        #QTextEdit()
        self.boton= QPushButton ("mandar mesaje", self)
        self.re=QTextEdit()
        self.mensa=QTextEdit()
        self.boton_conectar=QPushButton("conectarse",self)
        self.boton_cerrar= QPushButton ("cerrar", self)
        self.reci=None
        grilla=QGridLayout(self)
        grilla.addWidget(self.mensa,0,0)
        grilla.addWidget(self.re,0,1)
        grilla.addWidget(self.boton,0,2)
        grilla.addWidget(self.direccion)
        grilla.addWidget(self.boton_conectar)
        grilla.addWidget(self.boton_cerrar)
        #layout_horizontal=QHBoxLayout(self)
        #layout_horizontal.addWidget(self.mensa)
        #layout_horizontal.addWidget(self.re)
        #layout_horizontal.addWidget(self.boton)
        #layout_vertical=QVBoxLayout(self)
        #layout_vertical.addWidget(self.direccion)
        #layout_vertical.addWidget(self.boton_conectar)
        #layout_horizontal.addWidget(self.direccion)
        #layout_vertical.addWidget(self.boton_conectar)
        self.resize(800,600)
        #self.direccion.setGeometry(0,0,50,50)
        #self.boton_conectar.move(500,0)
        #f=threading.Thread(target=self.conectar_a,())
        #f.daemon=True
        #z=threading.Thread(target=self.miboton)
        #z.daemon=True
        self.boton.clicked.connect(self.miboton)
        self.boton_cerrar.clicked.connect(self.close)
        self.boton_conectar.clicked.connect(self.conectar_a)
        #f=threading.Thread(target=self.run)
        #f.start()
    def conectar_a(self):
        """Connect a TCP socket to the typed address (port 1685) and start the worker."""
        self.addrl=self.direccion.text()
        #self.addrl=str(self.direccion.toPlainText())
        #self.addrl=self.addrl.rstrip("\n")
        self.s=socket(AF_INET,SOCK_STREAM)
        self.s.connect((self.addrl, 1685))
        self.boton_conectar.setEnabled(False)
        self.c=comunicate()
        # Wire the worker's signals back to GUI-thread slots.
        self.c.comuni.connect(self.set_message)
        self.c.comu.connect(self.get_message)
        self.cliente=servidor_1(self.s,self.c)
        self.cliente.start()
    def miboton(self):
        # Send the typed message, then block the send button until a reply arrives.
        # NOTE(review): recv() runs on the GUI thread here and will freeze the UI
        # until data arrives -- confirm whether this is intended.
        self.cliente.send()
        self.boton.setEnabled(False)
        self.cliente.recv()
        self.boton.setEnabled(True)
    def get_message(self,so):
        # Slot for the 'comu' signal: pull the outgoing text and write it to the socket.
        data=str(self.mensa.toPlainText())
        so.socketclient.send(data)
        self.mensa.setPlainText("")
    def set_message(self,data):
        # Slot for the 'comuni' signal: display the received text.
        self.re.setPlainText(str(data))
class servidor_1(threading.Thread):
    """Worker thread owning the client socket; bridges socket I/O to Qt signals."""
    def __init__(self,s,c):
        threading.Thread.__init__(self)
        self.socketclient = s   # connected TCP socket
        self.c=c                # comunicate signal hub shared with the GUI
        #self.socketclient.settimeout(1)
    def send(self):
        # Ask the GUI thread (via 'comu') to read the message box and transmit it.
        self.c.comu.emit(self,("comu(PyQt_PyObject)"))
    def recv(self):
        """Block on the socket for up to 1024 bytes and forward them to the GUI."""
        self.data=""
        #while self.data is None:
        self.data=str(self.socketclient.recv(1024))
        self.c.comuni.emit(self.data,("comuni(PyQt_PyObject)"))
class comunicate(QtCore.QObject):
    """Signal hub for cross-thread communication between the socket worker and GUI."""
    # comuni: received text -> GUI display slot (set_message)
    comuni=QtCore.pyqtSignal(object,type(""))
    # comu: worker thread object -> GUI send slot (get_message)
    comu=QtCore.pyqtSignal(object,object)
    def __init__(self, parent = None):
        super(comunicate, self).__init__(parent)
    def run(self):
        # No-op; this object only carries signals.
        pass
# Standard Qt bootstrap: create the application, show the window, run the event loop.
if __name__=='__main__':
    app= QApplication(sys.argv)
    w= QWid()
    w.show()
    sys.exit(app.exec_())
5441499002 | """
Simple CNN model for the CIFAR-10 Dataset
@author: Adam Santos
"""
import numpy
from keras.constraints import maxnorm
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout, Convolution2D
import tensorflow as tf
from keras.utils import np_utils
from tensorflow.keras.datasets import cifar10
# physical_devices = tf.config.list_physical_devices('GPU')
# try:
# tf.config.experimental.set_memory_growth(physical_devices[0], True)
# except:
# # Invalid device or cannot modify virtual devices once initialized.
# pass
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.models import load_model
def train(save_best=True):
    """Train a small CNN on MNIST and return [model, history].

    When *save_best* is True, the weights with the best validation accuracy
    are checkpointed to ``best_mnist_cnn_weights.hdf5``.
    """
    import tensorflow as tf
    # Allow GPU memory to grow on demand instead of pre-allocating it all.
    physical_devices = tf.config.list_physical_devices('GPU')
    try:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    except:
        # Invalid device or cannot modify virtual devices once initialized.
        pass
    print("Training MNIST CNN classifier...")
    # fix random seed for reproducibility
    seed = 7
    numpy.random.seed(seed)
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    # reshape to be [samples][width][height][channels]
    X_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32')
    X_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32')
    # normalize inputs from 0-255 to 0-1
    train_images = X_train / 255
    test_images = X_test / 255
    # one hot encode outputs
    train_labels = np_utils.to_categorical(y_train)
    test_labels = np_utils.to_categorical(y_test)
    # Create the model: two conv/pool stages, then two wide dense layers with
    # heavy dropout, ending in a 10-way softmax.
    model = Sequential()
    model.add(Convolution2D(32, kernel_size=3, padding='valid', input_shape=(28, 28, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(64, kernel_size=5, padding='valid', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    callbacks_list = []
    if save_best:
        filepath = "best_mnist_cnn_weights.hdf5"
        # filepath = "weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5"
        checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
        callbacks_list.append(checkpoint)
    # NOTE(review): the MNIST test split doubles as validation data here.
    history = model.fit(train_images, train_labels, batch_size=64, epochs=500,
                        validation_data=(test_images, test_labels), callbacks=callbacks_list)
    return [model, history]
def load_weights():
    """Load the checkpointed model from disk, recompile it, and return it."""
    # load YAML and create model
    # yaml_file = open('model.yaml', 'r')
    # loaded_model_yaml = yaml_file.read()
    # yaml_file.close()
    # loaded_model = model_from_yaml(loaded_model_yaml)
    # load weights into new model
    loaded_model = load_model("best_mnist_cnn_weights.hdf5")
    print("Loaded model from disk")
    # NOTE(review): train() compiles with categorical_crossentropy on a softmax
    # output, but this recompiles with SparseCategoricalCrossentropy(from_logits=True)
    # -- both the sparse/one-hot label format and the logits assumption differ.
    # Confirm which is intended before evaluating with this model.
    loaded_model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    return loaded_model
def eval(model):
    """Evaluate *model* on the MNIST test split and print its accuracy.

    Bug fix: this previously loaded CIFAR-10 (32x32x3 images, integer labels)
    even though the model is built for MNIST (28x28x1, one-hot labels), so
    evaluation could not match the training setup.  Preprocessing now mirrors
    train(): add the channel axis, scale to [0, 1], one-hot encode labels.
    """
    (_, _), (X_test, y_test) = mnist.load_data()
    test_images = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32') / 255
    test_labels = np_utils.to_categorical(y_test)
    score = model.evaluate(test_images, test_labels, verbose=1)
    print("%s: %.2f%%" % (model.metrics_names[1], score[1] * 100))
| Addrick/DL4ARP | Models/mnist_modelfn.py | mnist_modelfn.py | py | 3,836 | python | en | code | 1 | github-code | 13 |
12402818003 | from flask import Flask, request, Response, abort
import os
import requests
import logging
import json
import dotdictify
from time import sleep
import base64
import cherrypy
app = Flask(__name__)
logger = None
format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logger = logging.getLogger('cvpartner-rest-service')
# Log to stdout
stdout_handler = logging.StreamHandler()
stdout_handler.setFormatter(logging.Formatter(format_string))
logger.addHandler(stdout_handler)
logger.setLevel(logging.getLevelName(os.environ.get('log_level', 'INFO')))
headers = {}
if os.environ.get('headers') is not None:
headers = json.loads(os.environ.get('headers').replace("'", "\""))
def encode(v):
    """Recursively replace every non-dict leaf of dict *v*, IN PLACE, with the
    base64-encoded body downloaded from that leaf's value (assumed to be a URL),
    and return the same dict.

    NOTE(review): every leaf triggers a blocking HTTP GET; non-URL leaves will
    make requests.get fail -- callers only pass image-URL dicts.
    """
    for key, value in v.items():
        if isinstance(value,dict):
            encode(value)
        else:
            v[key] = base64.b64encode(requests.get(value).content).decode("utf-8")
    return v
def str_to_bool(string_input):
    """Return True iff *string_input*, stringified and lower-cased, equals "true"."""
    normalized = str(string_input).lower()
    return normalized == "true"
def transform(obj):
    """Return a copy of dict *obj* keeping only JSON-serializable values.

    For the "image" key, when the dict has a ``large.url``, encode() replaces
    the URLs with base64 content.  Note encode() mutates the value in place,
    so the later ``res[k] = v`` stores the already-encoded dict -- the earlier
    assignment is effectively redundant but harmless.
    """
    res = {}
    for k, v in obj.items():
        if k == "image":
            if dotdictify.dotdictify(v).large.url is not None:
                res[k] = encode(v)
            else:
                # No large image URL: leave the value for the generic path below.
                pass
        try:
            # Keep only values that survive JSON serialization.
            _ = json.dumps(v)
        except Exception:
            pass
        else:
            res[k] = v
    return res
class DataAccess:
    """Client for the CVPartner REST API.

    The private ``__get_*`` generators stream entities (users, CVs,
    references, categories) from the paged upstream API -- either 100-item
    offset pages or explicit next-page links.  The ``post_or_put_*`` methods
    push entities back.  Every non-200 response goes through check_error,
    which retries 429s after the Retry-After delay and raises otherwise.
    """
    def __get_all_users(self, path):
        # Page through users 100 at a time; a short page signals the end.
        logger.info("Fetching data from url: %s", path)
        offset = 0
        clean = "start"
        while clean == "start" or len(clean) == 100:
            url = os.environ.get("base_url") + path + "?offset=" + str(offset)
            logger.debug("url :" + url)
            req = requests.get(url, headers=headers)
            if req.status_code != 200:
                req = self.check_error(req, url, headers, 'get')
            clean = json.loads(req.text)
            offset += len(clean)
            for entity in clean:
                yield entity
    def __post_user(self, url, entity):
        # Create a user; returns the HTTP status code as a string.
        logger.debug("url: " + url)
        logger.debug('entity["payload"]:')
        logger.debug(entity["payload"])
        req = requests.post(url, headers=headers, json=entity["payload"])
        if req.status_code != 200:
            req = self.check_error(req, url, headers, 'post', 'json', entity["payload"])
        return str(req.status_code)
    def __put_user(self, url, entity):
        # Update an existing user at <url>/<id>; returns the status code as a string.
        url = url + "/" + entity["id"]
        logger.debug("url: " + url)
        logger.debug('entity["payload"]:')
        logger.debug(entity["payload"])
        req = requests.put(url, headers=headers, json=entity["payload"])
        if req.status_code != 200:
            req = self.check_error(req, url, headers, 'put', 'json', entity["payload"])
        return str(req.status_code)
    def __get_all_cvs(self, path):
        # For each user page, fetch that user's default CV and yield it transformed.
        logger.info("Fetching data from url: %s", path)
        offset = 0
        clean = "start"
        while clean == "start" or len(clean) == 100:
            url = os.environ.get("base_url") + path + "?offset=" + str(offset)
            logger.debug("url :" + url)
            req = requests.get(url, headers=headers)
            if req.status_code != 200:
                req = self.check_error(req, url, headers, 'get')
            clean = json.loads(req.text)
            offset += len(clean)
            cv_url = os.environ.get("base_url") + "v3/cvs/"
            for entity in clean:
                if entity.get('deactivated'):
                    # NOTE(review): this 'pass' does nothing -- it looks like
                    # 'continue' was intended to skip deactivated users.
                    # Confirm before changing; downstream may rely on seeing them.
                    pass
                for k, v in entity.items():
                    if k == "id":
                        cv_url += v + "/"
                for k, v in entity.items():
                    if k == "default_cv_id":
                        cv_url += v
                req = requests.get(cv_url, headers=headers)
                if req.status_code != 200:
                    req = self.check_error(req, cv_url, headers, 'get')
                cv = json.loads(req.text)
                if str_to_bool(os.environ.get('delete_company_images', "False")) == True:
                    for i in range(len(cv["project_experiences"])):
                        del cv["project_experiences"][i]["images"]
                yield transform(cv)
                cv_url = os.environ.get("base_url") + "v3/cvs/"
    def __get_all_paged_entities(self, path):
        # Follow explicit next-page links until exhausted.
        logger.info("Fetching data from paged url: %s", path)
        url = os.environ.get("base_url") + path
        next_page = url
        page_counter = 1
        while next_page is not None:
            logger.info("Fetching data from url: %s", next_page)
            req = requests.get(next_page, headers=headers)
            if req.status_code != 200:
                req = self.check_error(req, next_page, headers, 'get')
            # Renamed from 'dict', which shadowed the builtin.
            page = dotdictify.dotdictify(json.loads(req.text))
            for entity in page.get(os.environ.get("entities_path")):
                yield transform(entity)
            if page.get(os.environ.get('next_page')) is not None:
                page_counter += 1
                next_page = page.get(os.environ.get('next_page'))
            else:
                next_page = None
        logger.info('Returning entities from %i pages', page_counter)
    def __get_all_references(self, path):
        # References use POST-based paging with an offset in the request body.
        logger.info('Fetching data from paged url: %s', path)
        url = os.environ.get("base_url") + path
        # Local headers deliberately shadow the module-level dict: this
        # endpoint needs JSON content negotiation plus the auth token.
        headers = {
            "Accept": "application/json",
            "Content-Type": "application/json",
            "Authorization": os.environ.get('token')
        }
        reference_data = json.loads(os.environ.get('reference_post').replace("'","\""))
        total_amount = json.loads(requests.post(url, data=json.dumps(reference_data), headers=headers).text)["total"]
        counter = 0
        size = 10
        while counter < total_amount:
            req = requests.post(url, data=json.dumps(reference_data), headers=headers)
            if req.status_code != 200:
                req = self.check_error(req, url, headers, 'post', 'data', json.dumps(reference_data))
            res = dotdictify.dotdictify(json.loads(req.text))
            counter += size
            reference_data["offset"] = counter
            entities = res.get(os.environ.get("references_path"))
            for entity in entities:
                yield entity.get("reference")
        logger.info("returned from all pages")
    def __get_all_categories(self, path):
        # Categories are a single, unpaged list.
        logger.info("Fetching data from url: %s", path)
        url = os.environ.get("base_url") + path
        req = requests.get(url, headers=headers)
        if req.status_code != 200:
            req = self.check_error(req, url, headers, 'get')
        clean = json.loads(req.text)
        for entity in clean:
            yield entity
    def __post_custom_tag(self, url, entity):
        logger.debug("url: " + url)
        logger.debug('entity["payload"]:')
        logger.debug(entity["payload"])
        req = requests.post(url, headers=headers, json=entity["payload"])
        if req.status_code != 200:
            req = self.check_error(req, url, headers, 'post', 'json', entity["payload"])
        return str(req.status_code)
    def __put_custom_tag(self, url, entity):
        url = url + "/" + entity["id"]
        logger.debug("url: " + url)
        logger.debug('entity["payload"]:')
        logger.debug(entity["payload"])
        req = requests.put(url, headers=headers, json=entity["payload"])
        if req.status_code != 200:
            # Fixed NameError: previously referenced undefined 'cv_url'.
            req = self.check_error(req, url, headers, 'put', 'json', entity["payload"])
        return str(req.status_code)
    def check_error(self, req, url, headers, method, data_type = None, data = None):
        """Handle a non-200 response: retry 429s after Retry-After, raise otherwise.

        *data_type* names the requests kwarg ('json' or 'data') that carried
        the original payload *data*, so the retry resends it the same way.
        (Call sites always passed the kwarg *name* as the fifth argument; the
        old ``json=``/``data=`` parameters misinterpreted it as the payload.)
        """
        if req.status_code == 429:
            return self.recursive_request(url, headers, req.headers.get('Retry-After'), method, data_type, data)
        else:
            logger.error("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
            raise AssertionError("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
    def recursive_request(self, url, headers, retry_after, method, data_type = None, data = None):
        """Sleep for *retry_after* seconds and reissue the request, recursing on 429."""
        logger.info("Sleeping for %.2f seconds" % float(retry_after))
        sleep(float(retry_after))
        if method in ('get', 'post', 'put'):
            kwargs = {}
            if data_type is not None:
                # Resend the payload under the same kwarg it was originally sent with.
                kwargs[data_type] = data
            req = requests.request(method=method, url=url, headers=headers, **kwargs)
        else:
            logger.error("Unexpected request method: request method = %s" % method)
            raise AssertionError("Unexpected request method: request method = %s" % method)
        if req.status_code != 200:
            if req.status_code == 429:
                # Fixed NameError: this call previously referenced undefined 'data_type'.
                req = self.recursive_request(url, headers, req.headers.get('Retry-After'), method, data_type, data)
            else:
                logger.error("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
                raise AssertionError("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
        return req
    def get_paged_entities(self, path):
        """Stream transformed entities from the next-page-link API at *path*."""
        logger.info("getting all paged")
        return self.__get_all_paged_entities(path)
    def get_users(self, path):
        """Stream raw user dicts from the offset-paged API at *path*."""
        logger.info('getting all users')
        return self.__get_all_users(path)
    def post_or_put_users(self, path, entities):
        """POST or PUT each entity (per its "operation" field); returns last status."""
        logger.info('posting/putting users')
        url = os.environ.get("base_url") + path
        status = ""
        for entity in entities:
            if entity["operation"] == "post":
                status = self.__post_user(url, entity)
            elif entity["operation"] == "put":
                status = self.__put_user(url, entity)
        return status
    def get_cvs(self, path):
        """Stream each user's default CV, transformed, from the API at *path*."""
        logger.info('getting all cvs')
        return self.__get_all_cvs(path)
    def get_references(self, path):
        """Stream reference dicts from the POST-paged API at *path*."""
        logger.info('getting all references')
        return self.__get_all_references(path)
    def get_custom_tag_categories(self, path):
        """Stream custom-tag category dicts from the API at *path*."""
        logger.info('getting all categories')
        return self.__get_all_categories(path)
    def post_or_put_custom_tags(self, path, entities):
        """POST or PUT each custom tag (per its "operation" field); returns last status."""
        logger.info('posting/putting custom tags')
        url = os.environ.get("base_url") + path
        status = ""
        for entity in entities:
            if entity["operation"] == "post":
                status = self.__post_custom_tag(url, entity)
            elif entity["operation"] == "put":
                status = self.__put_custom_tag(url, entity)
        return status
data_access_layer = DataAccess()
def stream_json(clean):
    """Lazily serialize the iterable *clean* as a JSON array, one element per chunk.

    Yields '[', then each element via json.dumps with ',' separators, then ']',
    so arbitrarily large iterables stream without being materialized.
    (Removed an unused enumerate() index from the loop.)
    """
    first = True
    yield '['
    for row in clean:
        if first:
            first = False
        else:
            yield ','
        yield json.dumps(row)
    yield ']'
# Catch-all: proxy any other path through the generic next-page-link pager.
@app.route("/<path:path>", methods=["GET", "POST"])
def get(path):
    """Stream every entity from the paged upstream endpoint at *path* as JSON."""
    entities = data_access_layer.get_paged_entities(path)
    return Response(
        stream_json(entities),
        mimetype='application/json'
    )
@app.route("/references", methods=["GET"])
def get_references():
    """Stream all references (POST-paged upstream endpoint) as a JSON array."""
    path = os.environ.get("reference_url")
    entities = data_access_layer.get_references(path)
    return Response(
        stream_json(entities),
        mimetype='application/json'
    )
@app.route("/user", methods=["GET"])
def get_user():
    """Stream all users from the offset-paged upstream endpoint as a JSON array."""
    path = os.environ.get("user_url")
    entities = data_access_layer.get_users(path)
    return Response(
        stream_json(entities),
        mimetype='application/json'
    )
@app.route("/user", methods=["POST"])
def post_or_put_user():
    """Create or update users from the JSON array in the request body.

    Responds with the last upstream status code (as the response body/status).
    """
    path = os.environ.get("user_url")
    entities = json.load(request.stream)
    status_code = data_access_layer.post_or_put_users(path, entities)
    return Response(status_code)
@app.route("/cv", methods=["GET"])
def get_cv():
    """Stream each user's default CV (transformed) as a JSON array."""
    path = os.environ.get("user_url")
    entities = data_access_layer.get_cvs(path)
    return Response(
        stream_json(entities),
        mimetype='application/json'
    )
@app.route("/custom_tag_category", methods=["GET"])
def get_custom_tag_category():
    """Stream all custom-tag categories as a JSON array."""
    path = os.environ.get("custom_tag_category_url")
    entities = data_access_layer.get_custom_tag_categories(path)
    return Response(
        stream_json(entities),
        mimetype='application/json'
    )
@app.route("/custom_tag", methods=["POST"])
def post_or_put_custom_tag():
    """Create or update custom tags from the JSON array in the request body."""
    path = os.environ.get("custom_tag_url")
    entities = json.load(request.stream)
    status_code = data_access_layer.post_or_put_custom_tags(path, entities)
    return Response(status_code)
# Serve the Flask app through CherryPy's production WSGI server on 0.0.0.0:5000.
if __name__ == '__main__':
    cherrypy.tree.graft(app, '/')
    # Set the configuration of the web server to production mode
    cherrypy.config.update({
        'environment': 'production',
        'engine.autoreload_on': False,
        'log.screen': True,
        'server.socket_port': 5000,
        'server.socket_host': '0.0.0.0'
    })
    # Start the CherryPy WSGI web server
    cherrypy.engine.start()
    cherrypy.engine.block()
| sesam-community/cvpartner-rest | service/cvpartner.py | cvpartner.py | py | 13,114 | python | en | code | 0 | github-code | 13 |
1969241541 | import logging
import os
import sys
from models import SimpleCNN, calc_accuracy
import torch
from torch.utils.data import DataLoader
from torch import optim
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from transformers import (
HfArgumentParser,
TrainingArguments,
set_seed,
)
from tqdm import tqdm
from utils.DataTrainingArguments import DataTrainingArguments
from utils.ModelArguments import ModelArguments
import wandb
logger = logging.getLogger(__name__)
def full_train():
    """Parse CLI/JSON arguments, set up logging and wandb, then run train()."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    # NOTE(review): any is_use_wandb value from the parsed arguments is
    # overridden to True here -- confirm this hard-coding is intentional.
    data_args.is_use_wandb = True
    if data_args.is_use_wandb:
        wandb.init(project="prjctr-ML-in-Prod", entity="vadyusikh")
    train(model_args, data_args, training_args)
def train(model_args, data_args, training_args):
    """Build the dataset and model from the argument dataclasses, then train/evaluate.

    Raises ValueError for an unsupported dataset or model name.
    """
    # Set seed before initializing model.
    set_seed(training_args.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if data_args.dataset_name == 'fashion_mnist':
        # LOAD DATA
        train_dataset = datasets.FashionMNIST(root="dataset/", train=True, transform=transforms.ToTensor(), download=True)
        train_dataloader = DataLoader(dataset=train_dataset, batch_size=training_args.train_batch_size, shuffle=True)
        test_dataset = datasets.FashionMNIST(root="dataset/", train=False, transform=transforms.ToTensor(), download=True)
        test_loader = DataLoader(dataset=test_dataset, batch_size=training_args.train_batch_size, shuffle=True)
    else:
        raise ValueError(f"Proper dataset not provided! Provided '{data_args.dataset_name}'")
    # INIT NETWORK
    model = None
    if model_args.model_name_or_path.lower() == 'SimpleCNN'.lower():
        model = SimpleCNN(
            conv1channels_num=model_args.conv1channels_num,
            conv2channels_num=model_args.conv2channels_num,
            final_activation=model_args.final_activation
        ).to(device)
    else:
        raise ValueError(f"Proper model not provided! Provided '{model_args.model_name_or_path}'")
    if training_args.do_train:
        logger.info("*** Training ***")
        if data_args.is_use_wandb:
            # Record the hyperparameters for this run.
            wandb.config = {
                "learning_rate": training_args.learning_rate,
                "epochs": training_args.num_train_epochs,
                "batch_size": training_args.train_batch_size,
                "seed": training_args.seed,
                'conv1channels_num': model_args.conv1channels_num,
                'conv2channels_num': model_args.conv2channels_num,
                'final_activation': model_args.final_activation
            }
        train_loop(train_dataloader, model, training_args, device, is_use_wandb=data_args.is_use_wandb)
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        test_acc = calc_accuracy(test_loader, model, device)
        logger.info(f"Test acc is {test_acc:.4f}")
        if data_args.is_use_wandb:
            wandb.log({"val_acc": test_acc})
def train_loop(train_dataloader, model, training_args, device, is_use_wandb, optimizer=None, loss_fn=None):
    """Run the epoch/batch training loop for *model* over *train_dataloader*.

    Defaults to CrossEntropyLoss and Adam when *loss_fn*/*optimizer* are None.
    Returns the loss of the LAST batch only (not an epoch average).
    NOTE(review): the 'checkpoint' dict is built each epoch but never saved
    anywhere, and the final return raises NameError on an empty dataloader.
    """
    # LOSS AND OPTIMIZER
    if loss_fn is None:
        loss_fn = nn.CrossEntropyLoss()
    if optimizer is None:
        optimizer = optim.Adam(model.parameters(), lr=training_args.learning_rate)
    # TRAIN
    # Log the running mean loss roughly 20 times per epoch.
    update_rate = max(1, len(train_dataloader)//20)
    epoch_tqdm = tqdm(range(int(training_args.num_train_epochs)), desc="Epochs", ascii=True)
    for epoch in epoch_tqdm:
        if epoch > 0:
            checkpoint = {"state_dict": model.state_dict(), "optimizer": optimizer.state_dict()}
        train_tqdm = tqdm(enumerate(train_dataloader), desc="Training batches", ascii=True, total=len(train_dataloader),
                          leave=True, miniters=len(train_dataloader)//10)
        loss_vals = list()
        for step, (data, target) in train_tqdm:
            data = data.to(device)
            target = target.to(device)
            #forward
            pred = model(data)
            loss = loss_fn(pred, target)
            #backward
            optimizer.zero_grad()
            loss.backward()
            #optimizer step
            optimizer.step()
            loss_vals += [loss.item()]
            if step % update_rate == 0:
                mean_loss = torch.tensor(loss_vals).mean()
                loss_vals.clear()
                train_tqdm.set_postfix_str(f"Train mean loss is {mean_loss:.4f} (step no. {step})")
                if is_use_wandb:
                    wandb.log({"train_loss": mean_loss}, step=step + epoch*len(train_dataloader))
        train_acc = calc_accuracy(train_dataloader, model, device)
        epoch_tqdm.set_postfix_str(f"Train acc is {train_acc:.4f}")
        if is_use_wandb:
            wandb.log({"train_accuracy": train_acc, 'epoch': epoch})
    return loss.item()
if __name__ == "__main__":
full_train()
| VadyusikhLTD/prjctr-ML-in-Prod | week3/image_classification/image_classification/fashion_mnist.py | fashion_mnist.py | py | 5,878 | python | en | code | 0 | github-code | 13 |
28379594179 | from distutils.core import setup
import os
from os.path import join
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy
from numpy.distutils.system_info import get_info
from numpy.distutils.misc_util import Configuration
def get_blas_info():
    """Locate an optimized BLAS for linking, falling back to plain cblas.

    Returns (cblas_libs, blas_info): the library names to link against and
    the remaining numpy blas_opt info dict (still containing 'include_dirs';
    the module-level code pops that key).
    """
    def atlas_not_found(blas_info_):
        # Fixed: this helper previously ignored its parameter and read the
        # enclosing 'blas_info' by closure; it now inspects its argument.
        def_macros = blas_info_.get('define_macros', [])
        for x in def_macros:
            if x[0] == "NO_ATLAS_INFO":
                # if x[1] != 1 we should have lapack
                # how do we do that now?
                return True
            if x[0] == "ATLAS_INFO":
                if "None" in x[1]:
                    # this one turned up on FreeBSD
                    return True
        return False

    blas_info = get_info('blas_opt', 0)
    if (not blas_info) or atlas_not_found(blas_info):
        # No usable optimized BLAS: link against the generic cblas library.
        cblas_libs = ['cblas']
        blas_info.pop('libraries', None)
    else:
        cblas_libs = blas_info.pop('libraries', [])
    return cblas_libs, blas_info
# Resolve BLAS once, then thread its include dirs and link flags into both
# Cython extensions below.
cblas_libs, blas_info = get_blas_info()
blas_include_dirs = blas_info['include_dirs']
del blas_info['include_dirs']
extensions = [
    Extension("ClusterCNN.custom_kmeans._k_means_elkan",
            sources=["_k_means_elkan.pyx"],
            include_dirs = [numpy.get_include()]
    ),
    Extension("ClusterCNN.custom_kmeans._k_means",
            libraries=cblas_libs,
            sources=["_k_means.pyx"],
            # NOTE(review): hard-coded absolute cblas path -- only builds on
            # the original author's machine; parameterize before reuse.
            include_dirs = [
                '/home/andy/Documents/ClusterCNN/src/cblas',
                numpy.get_include(),
                *blas_include_dirs],
            extra_compile_args=blas_info.pop( 'extra_compile_args', []),
            **blas_info
    ),
]
setup(
    name = "Cython KMeans Build",
    ext_modules = cythonize(extensions),
)
| ASzot/ClusterCNN | custom_kmeans/setup.py | setup.py | py | 1,740 | python | en | code | 10 | github-code | 13 |
73493586257 | # Sorteio de uma ordem
from random import shuffle
nome1 = str(input('Primeiro grupo: '))
nome2 = str(input('Segundo grupo: '))
nome3 = str(input('Terceiro grupo: '))
nome4 = str(input('Quarto grupo: '))
lista = [nome1, nome2, nome3, nome4]
shuffle(lista)
print('A ordem de apresentação das bandas é')
print(lista)
| damiati-a/CURSO-DE-PYTHON | Mundo 1/ex020.py | ex020.py | py | 333 | python | pt | code | 0 | github-code | 13 |
# longest_common_subsequence computes the length of the LCS of two sequences
def longest_common_subsequence(self, s1, s2):
    """Return the length of the longest common subsequence of *s1* and *s2*.

    Works for strings or lists.  Classic O(len(s1)*len(s2)) dynamic
    programming: dp[i][j] is the LCS length of s1[:i] and s2[:j].
    (*self* is unused; the signature is kept for existing callers.
    Fixed: the result was previously read through leaked loop variables
    ``mm[i][j]`` and wrapped in a redundant int().)
    """
    if not s1 or not s2:
        return 0
    m, n = len(s1), len(s2)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if s1[i - 1] == s2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    return dp[m][n]
70707345618 | import scrapy
import re
from itlaoqi.service.CatalogueService import CatalogueService
class ChapterSpider(scrapy.Spider):
    """Scrapes chapter listings for every catalogue and publishes them to RabbitMQ."""
    name = "chapter"
    # Route this spider's items through the Rabbit pipeline only.
    custom_settings = {'ITEM_PIPELINES': {
        'itlaoqi.pipeline.RabbitPipeline.RabbitPipeline': 300,
    }}
    def start_requests(self):
        # One request per catalogue row; the whole catalogue dict rides in meta.
        result = CatalogueService().get_all()
        for catalogue in result:
            print(catalogue)
            yield scrapy.Request(url=catalogue['url'], callback=self.parse, meta=catalogue)
    def parse(self, response):
        """Yield one message per chapter link, preserving on-page order (1-based)."""
        order = 0
        for t in response.css('.col-8 a.list-group-item'):
            url = t.xpath('@href').get().strip()
            name = t.xpath('text()')[1].get().strip()
            ttime = t.css('span:last-child').xpath('text()').get()
            # First number found in the trailing span is taken as the duration;
            # defaults to 0 when no digits are present.
            num = re.findall(r'\d+', ttime)
            if len(num) > 0:
                time = num[0].strip()
            else:
                time = 0
            cid = response.meta['id']
            order = order + 1
            # NOTE(review): "EXCAHNGE_CHAPTER" looks like a typo, but it must
            # match the consumer's exchange name -- confirm before renaming.
            yield {"exchange": "EXCAHNGE_CHAPTER",
                   "name": name,
                   "url": 'https://www.itlaoqi.com' + url,
                   "sort": str(order),
                   "time": str(time),
                   "cid": str(cid)
                   }
| himcs/itlaoqi-spider | itlaoqi/spiders/ChapterSpider.py | ChapterSpider.py | py | 1,267 | python | en | code | 0 | github-code | 13 |
19472573972 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 20:24:49 2020
@author: leokt
"""
#import modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#import data
fileloc = "J:\Depot - dDAVP-time course - Kirby\Analysis\\201110_Total_Protein.xlsx"
df_cntl = pd.read_excel(fileloc, sheet_name="median norm", header=2)
#variables
df_cntl['T1_log_1'] = np.log2(df_cntl['V1_Replicate1']/df_cntl['C1_Replicate1'])
df_cntl['T1_log_2'] = np.log2(df_cntl['V1_Replicate2']/df_cntl['C1_Replicate2'])
df_cntl['T1_log_3'] = np.log2(df_cntl['V1_Replicate3']/df_cntl['C1_Replicate3'])
df_cntl['T2_log_1'] = np.log2(df_cntl['V2_Replicate1']/df_cntl['C2_Replicate1'])
df_cntl['T2_log_2'] = np.log2(df_cntl['V2_Replicate2']/df_cntl['C2_Replicate2'])
df_cntl['T2_log_3'] = np.log2(df_cntl['V2_Replicate3']/df_cntl['C2_Replicate3'])
df_cntl['T3_log_1'] = np.log2(df_cntl['V3_Replicate1']/df_cntl['C3_Replicate1'])
df_cntl['T3_log_2'] = np.log2(df_cntl['V3_Replicate2']/df_cntl['C3_Replicate2'])
df_cntl['T3_log_3'] = np.log2(df_cntl['V3_Replicate3']/df_cntl['C3_Replicate3'])
df_cntl['T4_log_1'] = np.log2(df_cntl['V4_Replicate1']/df_cntl['C4_Replicate1'])
df_cntl['T4_log_2'] = np.log2(df_cntl['V4_Replicate2']/df_cntl['C4_Replicate2'])
df_cntl['T4_log_3'] = np.log2(df_cntl['V4_Replicate3']/df_cntl['C4_Replicate3'])
df_cntl['T1_log'] = df_cntl[['T1_log_1','T1_log_2','T1_log_3']].mean(axis=1)
df_cntl['T2_log'] = df_cntl[['T2_log_1','T2_log_2','T2_log_3']].mean(axis=1)
df_cntl['T3_log'] = df_cntl[['T3_log_1','T3_log_2','T3_log_3']].mean(axis=1)
df_cntl['T4_log'] = df_cntl[['T4_log_1','T4_log_2','T4_log_3']].mean(axis=1)
#abundance of unqiue peptides non-phos data
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1,4,figsize=(20,5))
plt.subplots_adjust(wspace=0, hspace=0)
dist_T1 = sns.distplot(df_cntl['T1_log'], axlabel = "$log_2$(dDAVP/control)", bins = 180, kde = False, color = "black", ax = ax1)
ax1.set_ylabel("Number of Unique Proteins")
ax1.set_title("dDAVP vs. Vehicle 1 min")
dist_T2 = sns.distplot(df_cntl['T2_log'], axlabel = "$log_2$(dDAVP/control)", bins = 180, kde = False, color = "blue", ax = ax2)
ax2.set_yticklabels([])
ax2.set_title("dDAVP vs. Vehicle 2 min")
dist_T3 = sns.distplot(df_cntl['T3_log'], axlabel = "$log_2$(dDAVP/control)", bins = 180, kde = False, color = "purple", ax = ax3)
ax3.set_yticklabels([])
ax3.set_title("dDAVP vs. Vehicle 5 min")
dist_T4 = sns.distplot(df_cntl['T4_log'], axlabel = "$log_2$(dDAVP/control)", bins = 180, kde = False, color = "red", ax = ax4)
ax4.set_yticklabels([])
ax4.set_title("dDAVP vs. Vehicle 15 min")
plt.setp((ax1, ax2, ax3, ax4), ylim = (0,2500))
plt.savefig("abundance_distribution", dpi = 300)
plt.close()
#total heatmap time course
df_hm = pd.DataFrame({'T1':df_cntl['T1_log'], 'T2':df_cntl['T2_log'],
'T3':df_cntl['T3_log'],'T4':df_cntl['T4_log']})
df_hm = pd.DataFrame(df_hm.values,
columns = ['1','2','5','15'],
index = df_cntl['Gene Symbol'])
fig, ax = plt.subplots(figsize=(11, 9))
hm = sns.heatmap(df_hm, cmap="Blues",
cbar_kws={'label': '$log_2$(dDAVP/control)'})
ax.set_yticklabels([])
plt.xlabel('Time (min)')
plt.ylabel('')
plt.title('Time Course Heatmap Total Abundance', loc='left')
plt.savefig("abundance_heatmap", dpi = 1200)
plt.close()
#volcano plot of peptide abundance FDR vs. ratio(dDAVP/vehicle)
#15 min
# Volcano plot: -log10(p) vs log2(dDAVP/control). Points above the p < 0.05
# line (1.303 = -log10(0.05)) and beyond the fold-change cutoff
# (0.054397916*2 — presumably a dispersion-based threshold, TODO confirm)
# are re-drawn in red and annotated with their gene symbols.
df_T4 = pd.read_excel(fileloc, sheet_name="15 min")
fig, ax = plt.subplots(figsize=(12,10))
plt_T4 = sns.scatterplot(data = df_T4, x="OVERALL RATIO", y="minus log p",
                         color = 'lightgrey')
outr = df_T4.query('`minus log p` > 1.303 & `OVERALL RATIO` > 0.054397916*2')
outl = df_T4.query('`minus log p` > 1.303 & `OVERALL RATIO` < -0.054397916*2')
# Reset to positional indices so outr['col'][i] below works after the query.
outr.index = range(len(outr.index))
outl.index = range(len(outl.index))
plt.scatter(outr['OVERALL RATIO'], outr['minus log p'], color = "red")
plt.scatter(outl['OVERALL RATIO'], outl['minus log p'], color = "red")
for i in range(len(outr)):
    ax.annotate(outr['Gene Symbol'][i],
                xy = (outr['OVERALL RATIO'][i], outr['minus log p'][i]),
                fontsize = 10,
                textcoords="offset points")
for i in range(len(outl)):
    ax.annotate(outl['Gene Symbol'][i],
                xy = (outl['OVERALL RATIO'][i], outl['minus log p'][i]),
                fontsize = 10,
                textcoords="offset points")
sns.despine()
plt.ylabel('-$log_{10}$(p-value)', fontsize = 18)
plt.xlabel('$log_2$(dDAVP/control)', fontsize = 18)
plt.title("Volcano Plot (15 min) n = 5444", fontsize = 24)
plt.ylim(0,4.5)
plt.xlim(-1.3,1.3)
ax.axhline(1.303, color = 'blue', ls='--')
plt.text(-1.2,1.35,'1.303')
ax.axvline(0.054397916*2, color = 'blue', ls='--')
plt.text(0.12,4.1,'0.109', rotation=90)
ax.axvline(-0.054397916*2, color = 'blue', ls='--')
plt.text(-0.10,4.1,'-0.109', rotation=90)
plt.savefig("Volcano total 15", dpi = 1200)
#5 min
# Same volcano plot as the 15 min section, with the 5 min sheet and its own
# fold-change cutoff (0.050789416*2).
df_T3 = pd.read_excel(fileloc, sheet_name="5 min")
fig, ax = plt.subplots(figsize=(12,10))
plt_T3 = sns.scatterplot(data = df_T3, x="OVERALL RATIO", y="minus log p",
                         color = 'lightgrey')
outr = df_T3.query('`minus log p` > 1.303 & `OVERALL RATIO` > 0.050789416*2')
outl = df_T3.query('`minus log p` > 1.303 & `OVERALL RATIO` < -0.050789416*2')
outr.index = range(len(outr.index))
outl.index = range(len(outl.index))
plt.scatter(outr['OVERALL RATIO'], outr['minus log p'], color = "red")
plt.scatter(outl['OVERALL RATIO'], outl['minus log p'], color = "red")
for i in range(len(outr)):
    ax.annotate(outr['Gene Symbol'][i],
                xy = (outr['OVERALL RATIO'][i], outr['minus log p'][i]),
                fontsize = 10,
                textcoords="offset points")
for i in range(len(outl)):
    ax.annotate(outl['Gene Symbol'][i],
                xy = (outl['OVERALL RATIO'][i], outl['minus log p'][i]),
                fontsize = 10,
                textcoords="offset points")
sns.despine()
plt.ylabel('-$log_{10}$(p-value)', fontsize = 18)
plt.xlabel('$log_2$(dDAVP/control)', fontsize = 18)
plt.title("Volcano Plot (5 min) n = 5444", fontsize = 24)
plt.ylim(0,4.5)
plt.xlim(-1.3,1.3)
ax.axhline(1.303, color = 'blue', ls='--')
plt.text(-1.2,1.35,'1.303')
ax.axvline(0.050789416*2, color = 'blue', ls='--')
plt.text(0.12,4.1,'0.102', rotation=90)
ax.axvline(-0.050789416*2, color = 'blue', ls='--')
plt.text(-0.10,4.1,'-0.102', rotation=90)
plt.savefig("Volcano total 5", dpi = 1200)
#2 min
# Same volcano plot, 2 min sheet; fold-change cutoff 0.044202462*2.
df_T2 = pd.read_excel(fileloc, sheet_name="2 min")
fig, ax = plt.subplots(figsize=(12,10))
plt_T2 = sns.scatterplot(data = df_T2, x="OVERALL RATIO", y="minus log p",
                         color = 'lightgrey')
outr = df_T2.query('`minus log p` > 1.303 & `OVERALL RATIO` > 0.044202462*2')
outl = df_T2.query('`minus log p` > 1.303 & `OVERALL RATIO` < -0.044202462*2')
outr.index = range(len(outr.index))
outl.index = range(len(outl.index))
plt.scatter(outr['OVERALL RATIO'], outr['minus log p'], color = "red")
plt.scatter(outl['OVERALL RATIO'], outl['minus log p'], color = "red")
for i in range(len(outr)):
    ax.annotate(outr['Gene Symbol'][i],
                xy = (outr['OVERALL RATIO'][i], outr['minus log p'][i]),
                fontsize = 10,
                textcoords="offset points")
for i in range(len(outl)):
    ax.annotate(outl['Gene Symbol'][i],
                xy = (outl['OVERALL RATIO'][i], outl['minus log p'][i]),
                fontsize = 10,
                textcoords="offset points")
sns.despine()
plt.ylabel('-$log_{10}$(p-value)', fontsize = 18)
plt.xlabel('$log_2$(dDAVP/control)', fontsize = 18)
plt.title("Volcano Plot (2 min) n = 5444", fontsize = 24)
plt.ylim(0,4.5)
plt.xlim(-1.3,1.3)
ax.axhline(1.303, color = 'blue', ls='--')
plt.text(-1.2,1.35,'1.303')
ax.axvline(0.044202462*2, color = 'blue', ls='--')
plt.text(0.11,4.1,'0.088', rotation=90)
ax.axvline(-0.044202462*2, color = 'blue', ls='--')
plt.text(-0.09,4.1,'-0.088', rotation=90)
plt.savefig("Volcano total 2", dpi = 1200)
#1 min
# Same volcano plot, 1 min sheet; fold-change cutoff 0.05433744*2.
df_T1 = pd.read_excel(fileloc, sheet_name="1 min")
fig, ax = plt.subplots(figsize=(12,10))
plt_T1 = sns.scatterplot(data = df_T1, x="OVERALL RATIO", y="minus log p",
                         color = 'lightgrey')
outr = df_T1.query('`minus log p` > 1.303 & `OVERALL RATIO` > 0.05433744*2')
outl = df_T1.query('`minus log p` > 1.303 & `OVERALL RATIO` < -0.05433744*2')
outr.index = range(len(outr.index))
outl.index = range(len(outl.index))
plt.scatter(outr['OVERALL RATIO'], outr['minus log p'], color = "red")
plt.scatter(outl['OVERALL RATIO'], outl['minus log p'], color = "red")
for i in range(len(outr)):
    ax.annotate(outr['Gene Symbol'][i],
                xy = (outr['OVERALL RATIO'][i], outr['minus log p'][i]),
                fontsize = 10,
                textcoords="offset points")
for i in range(len(outl)):
    ax.annotate(outl['Gene Symbol'][i],
                xy = (outl['OVERALL RATIO'][i], outl['minus log p'][i]),
                fontsize = 10,
                textcoords="offset points")
sns.despine()
plt.ylabel('-$log_{10}$(p-value)', fontsize = 18)
plt.xlabel('$log_2$(dDAVP/control)', fontsize = 18)
plt.title("Volcano Plot (1 min) n = 5444", fontsize = 24)
plt.ylim(0,4.5)
plt.xlim(-1.3,1.3)
ax.axhline(1.303, color = 'blue', ls='--')
plt.text(-1.2,1.35,'1.303')
ax.axvline(0.05433744*2, color = 'blue', ls='--')
plt.text(0.12,4.1,'0.109', rotation=90)
ax.axvline(-0.05433744*2, color = 'blue', ls='--')
plt.text(-0.10,4.1,'-0.109', rotation=90)
plt.savefig("Volcano total 1", dpi = 1200)
| krbyktl/Kidneys | 110320_dDAVP_MedianNorm.py | 110320_dDAVP_MedianNorm.py | py | 9,792 | python | en | code | 0 | github-code | 13 |
39614247474 | import argparse
import pandas as pd
from sklearn.feature_selection import SelectKBest, chi2, f_classif, mutual_info_classif
import numpy as np
from sklearn.preprocessing import MinMaxScaler
if __name__ == "__main__":
    # CLI: read a labeled CSV, run SelectKBest with one of three scoring
    # functions, and write the selected feature names to the output file.
    parser = argparse.ArgumentParser()
    parser.add_argument("input_filepath", help="CSV input filepath. With features and the label", type=str)
    parser.add_argument("output_filepath", help="CSV containing the features of the training samples", type=str)
    parser.add_argument("n_features", help="K best features", type=int)
    # Each scoring method gets a --X/--no-X flag pair, defaulting to off.
    parser.add_argument('--chi2', dest='chi2', action='store_true')
    parser.add_argument('--no-chi2', dest='chi2', action='store_false')
    parser.set_defaults(chi2=False)
    parser.add_argument('--anova', dest='anova', action='store_true')
    parser.add_argument('--no-anova', dest='anova', action='store_false')
    parser.set_defaults(anova=False)
    parser.add_argument('--mutual', dest='mutual', action='store_true')
    parser.add_argument('--no-mutual', dest='mutual', action='store_false')
    parser.set_defaults(mutual=False)
    args = parser.parse_args()
    # Three feature selection metrics
    # Chi-squared stats of non-negative features for classification tasks.
    # ANOVA F-value between label/feature for classification tasks.
    # Mutual information for a discrete target.
    df = pd.read_csv(args.input_filepath)
    # NOTE(review): y is an (n, 1) DataFrame; the 1-D Series y_ below (and Id_)
    # are computed but never used — confirm which of y/y_ fit() should receive.
    y = df[['Class']].copy()
    y_ = df['Class']
    Id_ = df["Id"]
    X = df.drop(columns=["Id", "Class"])
    #X = X.astype(float)
    #X = X.to_numpy(dtype=np.float64, copy=True)
    #y = y.to_numpy(dtype=np.int64, copy=True)
    #y = np.reshape(y, (X.shape[0],))
    #print(X.shape, type(X), X.dtype, y.shape, type(y), y.dtype)
    #y = np.reshape(y, (X.shape[0],))
    #print(X.shape, type(X), X.dtype)
    #print(y.shape, type(y), y.dtype)
    #print(X[-1], y[-1])
    columns = list(X.columns)
    if args.chi2 is True:
        # MinMaxScale values before SelectKBest
        # (chi2 requires non-negative feature values)
        min_max_scaler = MinMaxScaler()
        X_scaled = X.copy()
        X_scaled[columns] = min_max_scaler.fit_transform(X[columns])
        #print(X_scaled)
        selector = SelectKBest(score_func=chi2, k=args.n_features)
        selector.fit(X_scaled, y)
    elif args.anova is True:
        selector = SelectKBest(score_func=f_classif, k=args.n_features)
        selector.fit(X, y)
    elif args.mutual is True:
        selector = SelectKBest(score_func=mutual_info_classif, k=args.n_features)
        selector.fit(X, y)
    else:
        # NOTE(review): falls through with `selector` undefined, so the
        # get_support() call below raises NameError when no flag is passed.
        print("No feature selection algorithm selected")
    cols = selector.get_support(indices=True)
    print("Columns: {}".format(cols))
    features_df_new = X.iloc[:, cols]
    print(features_df_new.columns)
    with open(args.output_filepath, "w") as output_file:
        output_file.write(",".join(list(features_df_new.columns)))
| danielgibert/fusing_feature_engineering_and_deep_learning_a_case_study_for_malware_classification | src/preprocessing/select_K_best_features.py | select_K_best_features.py | py | 2,864 | python | en | code | 6 | github-code | 13 |
38777611639 | import execute
import queries
import logging
import get_token
from create_xml import createXML
from create_xml_apli import createXMLA
from create_xml_otros import createXMLO
from create_xml_saldo import createXMLS
from create_xml_cacre import createXMLC
from create_xml_mova import createXMLM
def main():
    """Register credit records in the Linq system.

    (Translated from the original Spanish docstring.)
    Instruction: comment/uncomment lines to control which record types
    are uploaded:
    - qalta, qmova, qcacre, ...: queries
    - result_alta, result_mova, ...: results of executing each query
    - createXML, createXMLM, createXMLC, ...: XML creation/upload
    """
    logging.info({
        "process": "main",
        "message": "Getting token"
    })
    token = get_token.getToken()
    # Regular collections upload (payments, balances, refinancings).
    qalta = queries.QueryL.queryD() # DEU - ALTA - new credits
    qmova = queries.QueryL.queryM() # MOVA - FAC - future collections
    # NOTE(review): qcacre reuses queryD() (same query as qalta) — confirm
    # this is intentional and not a missing queryC().
    qcacre = queries.QueryL.queryD() # CACRE - creditors
    qapli = queries.QueryL.queryApli() # APLI - PAG
    qotros = queries.QueryL.queryOtros() # APLI - OTROS
    # qaplir = queries.QueryL.queryApliRe() # APLI - NCR - refinanced
    qsaldo = queries.QueryL.querySaldo() # balances
    result_alta = execute.queryExecute(qalta)
    result_mova = execute.queryExecute(qmova)
    result_cacre = execute.queryExecute(qcacre)
    result_apli = execute.queryExecute(qapli)
    result_otros = execute.queryExecute(qotros)
    # result_aplir = execute.queryExecute(qaplir)
    result_saldo = execute.queryExecute(qsaldo)
    # One createXML* call per record type; each builds and sends its XML.
    createXML(result_alta, token)
    createXMLM(result_mova, token)
    createXMLC(result_cacre, token)
    createXMLA(result_apli, token)
    createXMLO(result_otros, token)
    # createXMLA(result_aplir, token)
    createXMLS(result_saldo, token)
if __name__ == "__main__":
    # Configure file logging before main() so all module loggers inherit it.
    logging.basicConfig(filename='linq_process.log',
                        format='%(asctime)s %(levelname)s:%(message)s',
                        level=logging.INFO)
    logging.info({
        "process": "main",
        "message": "Started Process"
    })
    main()
| mariomtzjr/dsLinqPublic | main.py | main.py | py | 2,222 | python | es | code | 0 | github-code | 13 |
12400733359 | import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
# only applies at low intensity, ie, I << Isat
# Physical constants and Rb87/Rb85 D2-line spectroscopic data.
# Frequencies/splittings in Hz; masses in kg; cross section in m^2.
f87 = (384.230484468-0.002563006+0.000193741)*1e12 # Hz, Rb87 D2 F=2->F'=3
f85 = (384.230406373-0.001264889+0.000100205)*1e12 # Hz, Rb85 D2 F=3->F'=4
isotope_shift = f85-f87 # Hz
h = 6.62607015e-34 # Joule x second
c = 299792458 # m/s
Gamma = 6.0666e6*2*np.pi # Hz, Rb87 D2 linen natural linewidth
wavevec87 = 2*np.pi*f87/c # 1/meter, laser wavevector
wavevec85 = 2*np.pi*f85/c # 1/meter, laser wavevector
mass87 = 0.001/(6.022e23)*87 # kg, Rb87 mass
mass85 = 0.001/(6.022e23)*85 # kg, Rb87 mass
cross_section0 = (780.24e-9)**2/2/np.pi # meter^2, Rb87(85), D2, lambad^2/2/Pi
Kb = 1.38e-23 # m^2*kg/s^2/Kelvin
# Hyperfine quantum numbers and excited-state splittings for each isotope.
Rb87e_F = np.array([3, 2, 1, 0]) # Rb87 excited state F quantum number
Rb87g_F = np.array([2, 1])
Rb87e_split = np.array([0, -266.65e6, (-266.65-156.947)*1e6, (-266.65-156.947-72.2180)*1e6])
Rb85e_F = np.array([4, 3, 2, 1])
Rb85g_F = np.array([3, 2])
Rb85e_split = np.array([0, -120.64e6, (-120.64-63.401)*1e6, (-120.64-63.401-29.372)*1e6])
eta87 = 0.2783 # natural abundance of Rb87
eta85 = 0.7217 # natural abundance of Rb85
def integrand87(v, T, detuning):
    """Maxwell-Boltzmann-weighted Lorentzian integrand for the Rb87 D2 line.

    v: atomic velocity (m/s); T: temperature (K); detuning: laser detuning (Hz).
    """
    boltzmann_weight = np.exp(-mass87 * v ** 2 / (2 * Kb * T))
    lorentzian_denom = 1 + 4 * (2 * np.pi * detuning - wavevec87 * v) ** 2 / Gamma ** 2
    return boltzmann_weight / lorentzian_denom
def integrand85(v, T, detuning):
    """Maxwell-Boltzmann-weighted Lorentzian integrand for the Rb85 D2 line.

    The detuning is referenced to Rb87, so the isotope shift is subtracted.
    """
    boltzmann_weight = np.exp(-mass85 * v ** 2 / (2 * Kb * T))
    effective_detuning = 2 * np.pi * (detuning - isotope_shift) - wavevec85 * v
    return boltzmann_weight / (1 + 4 * effective_detuning ** 2 / Gamma ** 2)
# detuning here refers to laser detuning w.r.t. Rb87 D2 F=2->F'=3 transition
def crossRb87(T, detuning):
    """Doppler-averaged absorption cross section for Rb87 D2 F=2 -> F'=1/2/3.

    T: vapor temperature (K); detuning: laser detuning (Hz) w.r.t. the
    Rb87 D2 F=2->F'=3 transition. Velocity integral runs over +/-2000 m/s.
    """
    # np.arange(len(Rb87e_F)-1) = indices 0..2, i.e. F'=3,2,1 (F'=0 is
    # excluded: F=2 -> F'=0 is dipole-forbidden).
    cs = np.sum([cross_section0*(2*Rb87e_F[i]+1)/(2*Rb87g_F[0]+1)*np.sqrt(mass87/2/np.pi/Kb/T)
                 *integrate.quad(integrand87, -2000, 2000, args=(T, detuning-Rb87e_split[i]))[0]
                 for i in np.arange(len(Rb87e_F)-1)])
    return cs
# returns Rb87 D2 F=2->F'=1/2/3 transition cross section
# presume that Rb population uniformly distributes among ground hyperfine states
def crossRb85(T, detuning):
    """Doppler-averaged absorption cross section for Rb85 D2 F=3 -> F'=2/3/4.

    detuning is still referenced to the Rb87 F=2->F'=3 line; integrand85
    applies the isotope shift internally.
    """
    # Indices 0..2 of Rb85e_F, i.e. F'=4,3,2 (F'=1 excluded).
    cs = np.sum([cross_section0*(2*Rb85e_F[i]+1)/(2*Rb85g_F[0]+1)*np.sqrt(mass85/2/np.pi/Kb/T)
                 *integrate.quad(integrand85, -2000, 2000, args=(T, detuning-Rb85e_split[i]))[0]
                 for i in np.arange(len(Rb85e_F)-1)])
    return cs
# returns Rb85 D2 F=3->F'=2/3/4 transition cross section
# presume that Rb population uniformly distributes among ground hyperfine states
def absorption(T, vaporpres, L, detuning):
    """Beer-Lambert transmission through a Rb vapor cell of length L (m).

    T: temperature (K); vaporpres: Rb vapor pressure (Pa); detuning: laser
    detuning (Hz) w.r.t. the Rb87 D2 F=2->F'=3 line. Returns transmitted
    fraction exp(-L * sum(sigma_i * N_i)).
    """
    N = vaporpres/Kb/T # Rb density based on ideal gas law
    # Scale total density by natural abundance and by the statistical weight
    # of the absorbing ground hyperfine level (uniform population assumed).
    NRb87 = N*eta87*(Rb87g_F[0]*2+1)/np.sum(Rb87g_F*2+1) # F=1 ground state Rb87 density
    NRb85 = N*eta85*(Rb85g_F[0]*2+1)/np.sum(Rb85g_F*2+1) # F=2 ground state Rb85 density
    return np.exp(-L*(crossRb87(T, detuning)*NRb87+crossRb85(T, detuning)*NRb85))
18770744709 | # 수식 최대화
# N : len(expression)
# 시간 복잡도 : O(N) 공간 복잡도 : O(N)
import itertools
# Collection of the operator characters present in the expression.
# NOTE(review): module-level mutable state — init() appends on every call,
# so calling solution() more than once accumulates duplicate entries.
operations = []
# Initialization: record which of *, +, - occur in the expression.
def init(expression: str):
    for op in '*+-':
        if op in expression:
            operations.append(op)
# Apply a single binary arithmetic operator to two integer operands.
def operation(operator1: int, operator2: int, op: str) -> int:
    """Return operator1 <op> operator2 for op in '*', '+', '-' (None otherwise)."""
    dispatch = {
        '*': lambda a, b: a * b,
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
    }
    handler = dispatch.get(op)
    return handler(operator1, operator2) if handler is not None else None
# Evaluate the expression under a fixed operator-priority ordering.
def calculate_expression(expression: str, op_priority: dict):
    """Tokenize `expression` and evaluate it, applying the operators in the
    order given by `op_priority`; returns the absolute value of the result."""
    start = 0
    expression_list = []
    # Split the string into an alternating list of ints and operator chars.
    for end, ex in enumerate(expression):
        if end == len(expression) - 1:
            expression_list.append(int(expression[start:end + 1]))
        if ex in '*+-':
            expression_list.append(int(expression[start:end]))
            expression_list.append(ex)
            start = end + 1
    # Reduce the token list one operator at a time, highest priority first.
    for op in op_priority:
        new_expression = []
        ex_idx = 0
        while ex_idx < len(expression_list):
            if type(expression_list[ex_idx]) == str and expression_list[ex_idx] == op:
                # Combine the previous result with the next operand.
                operator1 = new_expression.pop()
                operator2 = expression_list[ex_idx + 1]
                new_expression.append(operation(operator1, operator2, op))
                ex_idx += 2
                continue
            new_expression.append(expression_list[ex_idx])
            ex_idx += 1
        expression_list = new_expression
    return abs(expression_list[0])
def solution(expression: str) -> int:
    """Return the maximum |value| of `expression` over all operator priorities.

    Tries every permutation of the operators that actually appear in the
    expression as an evaluation order and keeps the largest absolute result.
    """
    # Determine the operators present locally instead of via init(), which
    # appended to the module-level `operations` list on every call and so
    # accumulated duplicates across repeated invocations.
    present_ops = [op for op in '*+-' if op in expression]
    answer = 0
    for priority in itertools.permutations(present_ops, len(present_ops)):
        answer = max(answer, calculate_expression(expression, priority))
    return answer
| galug/2023-algorithm-study | level_2/maximum_expression.py | maximum_expression.py | py | 1,939 | python | en | code | null | github-code | 13 |
42857448558 | import pickle as pkl
import networkx as nx
import numpy as np
import scipy.sparse as sp
import torch
from sklearn.metrics import roc_auc_score, average_precision_score
def sample_mask(idx, l):
    """Create a boolean mask of length `l` that is True at positions `idx`."""
    mask = np.zeros(l)
    mask[idx] = 1
    # np.bool was removed in NumPy 1.24; the builtin bool is the supported dtype.
    return np.array(mask, dtype=bool)
def load_data(dataset):
    """Load a planetoid-style citation dataset from data/ind.<dataset>.*.

    Returns (adj, features, y_test, tx, ty, test_mask, labels) where adj is a
    scipy adjacency matrix, features a dense torch.FloatTensor, and labels the
    argmax over the one-hot label matrix.
    """
    # load the data: x, tx, allx, graph
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        '''
        fix Pickle incompatibility of numpy arrays between Python 2 and 3
        https://stackoverflow.com/questions/11305790/pickle-incompatibility-of-numpy-arrays-between-python-2-and-3
        '''
        with open("data/ind.{}.{}".format(dataset, names[i]), 'rb') as rf:
            u = pkl._Unpickler(rf)
            u.encoding = 'latin1'
            cur_data = u.load()
            objects.append(cur_data)
        # objects.append(
        #     pkl.load(open("data/ind.{}.{}".format(dataset, names[i]), 'rb')))
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file(
        "data/ind.{}.test.index".format(dataset))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(
            min(test_idx_reorder), max(test_idx_reorder) + 1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range - min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range - min(test_idx_range), :] = ty
        ty = ty_extended
    # Reorder the test rows so feature/label row order matches node ids.
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    # features = torch.DoubleTensor(np.array(features.todense()))
    features = torch.FloatTensor(np.array(features.todense()))
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]
    # Train/val/test splits follow the planetoid convention (500 val nodes).
    idx_test = test_idx_range.tolist()
    idx_train = range(len(y))
    idx_val = range(len(y), len(y) + 500)
    train_mask = sample_mask(idx_train, labels.shape[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])
    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]
    return adj, features, y_test, tx, ty, test_mask, np.argmax(labels,1)
def parse_index_file(filename):
    """Parse a planetoid test-index file (one integer per line) into a list."""
    # `with` guarantees the handle is closed; the original `open` leaked it.
    with open(filename) as f:
        return [int(line.strip()) for line in f]
def sparse_to_tuple(sparse_mx):
    """Decompose a scipy sparse matrix into (coords, values, shape).

    coords is an (nnz, 2) array of [row, col] indices; values the nonzero
    entries; shape the matrix shape.
    """
    coo = sparse_mx if sp.isspmatrix_coo(sparse_mx) else sparse_mx.tocoo()
    coords = np.vstack((coo.row, coo.col)).transpose()
    return coords, coo.data, coo.shape
#Updated
def mask_test_edges(adj):
    """Split an adjacency matrix into train/val/test positive edges and
    sampled false (negative) edges for link prediction.

    Test takes 10% of edges, validation 5%; negatives are rejection-sampled
    node pairs that are not edges in the graph.
    """
    # Function to build test set with 10% positive links
    # NOTE: Splits are randomized and results might slightly deviate from reported numbers in the paper.
    # TODO: Clean up.
    # Remove diagonal elements
    adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
    adj.eliminate_zeros()
    # Check that diag is zero:
    assert np.diag(adj.todense()).sum() == 0
    # Use the upper triangle so each undirected edge is listed once.
    adj_triu = sp.triu(adj)
    adj_tuple = sparse_to_tuple(adj_triu)
    edges = adj_tuple[0]
    edges_all = sparse_to_tuple(adj)[0]
    num_test = int(np.floor(edges.shape[0] / 10.))
    num_val = int(np.floor(edges.shape[0] / 20.))
    all_edge_idx = np.arange(edges.shape[0])
    np.random.shuffle(all_edge_idx)
    val_edge_idx = all_edge_idx[:num_val]
    test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]
    test_edges = edges[test_edge_idx]
    val_edges = edges[val_edge_idx]
    train_edges = np.delete(edges, np.hstack([test_edge_idx, val_edge_idx]), axis=0)
    def ismember(a, b, tol=5):
        # True if row `a` appears (within rounding tolerance) among rows of b.
        rows_close = np.all(np.round(a - b[:, None], tol) == 0, axis=-1)
        return np.any(rows_close)
    # Rejection-sample false test edges: random non-self pairs not in the graph.
    test_edges_false = []
    while len(test_edges_false) < len(test_edges):
        idx_i = np.random.randint(0, adj.shape[0])
        idx_j = np.random.randint(0, adj.shape[0])
        if idx_i == idx_j:
            continue
        if ismember([idx_i, idx_j], edges_all):
            continue
        if test_edges_false:
            if ismember([idx_j, idx_i], np.array(test_edges_false)):
                continue
            if ismember([idx_i, idx_j], np.array(test_edges_false)):
                continue
        test_edges_false.append([idx_i, idx_j])
    # Same for validation negatives, additionally avoiding train/val positives.
    # NOTE: `~ismember(...)` relies on numpy's bool scalar, where ~True is
    # False (bitwise not) — it would be wrong on a plain Python bool.
    val_edges_false = []
    while len(val_edges_false) < len(val_edges):
        idx_i = np.random.randint(0, adj.shape[0])
        idx_j = np.random.randint(0, adj.shape[0])
        if idx_i == idx_j:
            continue
        if ismember([idx_i, idx_j], train_edges):
            continue
        if ismember([idx_j, idx_i], train_edges):
            continue
        if ismember([idx_i, idx_j], val_edges):
            continue
        if ismember([idx_j, idx_i], val_edges):
            continue
        if val_edges_false:
            if ismember([idx_j, idx_i], np.array(val_edges_false)):
                continue
            if ismember([idx_i, idx_j], np.array(val_edges_false)):
                continue
        if ~ismember([idx_i,idx_j],edges_all) and ~ismember([idx_j,idx_i],edges_all):
            val_edges_false.append([idx_i, idx_j])
        else:
            # Debug
            print(str(idx_i)+" "+str(idx_j))
        # Original:
        # val_edges_false.append([idx_i, idx_j])
    #TODO: temporary disable for ismember function may require huge memory.
    # assert ~ismember(test_edges_false, edges_all)
    # assert ~ismember(val_edges_false, edges_all)
    # assert ~ismember(val_edges, train_edges)
    # assert ~ismember(test_edges, train_edges)
    # assert ~ismember(val_edges, test_edges)
    data = np.ones(train_edges.shape[0])
    # Re-build adj matrix
    adj_train = sp.csr_matrix((data, (train_edges[:, 0], train_edges[:, 1])), shape=adj.shape)
    adj_train = adj_train + adj_train.T
    # NOTE: these edge lists only contain single direction of edge!
    return adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false
def preprocess_graph(adj):
    """Symmetrically normalize adj with self-loops and return it as a torch
    sparse tensor: D^-1/2 (A + I)^T D^-1/2 (identical to A+I when symmetric)."""
    adj_coo = sp.coo_matrix(adj)
    adj_with_loops = adj_coo + sp.eye(adj_coo.shape[0])
    degrees = np.array(adj_with_loops.sum(1))
    d_inv_sqrt = sp.diags(np.power(degrees, -0.5).flatten())
    normalized = adj_with_loops.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt).tocoo()
    # return sparse_to_tuple(normalized)
    return sparse_mx_to_torch_sparse_tensor(normalized)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a float32 torch sparse COO tensor."""
    # sparse_mx = sparse_mx.tocoo().astype(np.float64)
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    # torch.sparse.FloatTensor construction is deprecated; sparse_coo_tensor
    # is the supported equivalent and yields the same float32 sparse tensor.
    return torch.sparse_coo_tensor(indices, values, shape)
def get_roc_score(emb, adj_orig, edges_pos, edges_neg):
    """Score link prediction from node embeddings.

    emb: (n, d) embedding matrix; adj_orig: ground-truth adjacency;
    edges_pos/edges_neg: positive and sampled negative edge lists.
    Returns (roc_auc, average_precision) over sigmoid(emb @ emb.T) scores.
    """
    def sigmoid(x):
        return 1 / (1 + np.exp(-x))
    # Predict on test set of edges
    adj_rec = np.dot(emb, emb.T)
    preds = []
    pos = []
    for e in edges_pos:
        preds.append(sigmoid(adj_rec[e[0], e[1]]))
        pos.append(adj_orig[e[0], e[1]])
    preds_neg = []
    neg = []
    for e in edges_neg:
        preds_neg.append(sigmoid(adj_rec[e[0], e[1]]))
        neg.append(adj_orig[e[0], e[1]])
    preds_all = np.hstack([preds, preds_neg])
    # Bug fix: the zero-label block must have len(preds_neg) entries; the
    # original used len(preds) for both, which mislabels (or crashes) when
    # the positive and negative edge lists differ in length.
    labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds_neg))])
    roc_score = roc_auc_score(labels_all, preds_all)
    ap_score = average_precision_score(labels_all, preds_all)
    return roc_score, ap_score
| juexinwang/scGNN | gae/utils.py | utils.py | py | 8,312 | python | en | code | 112 | github-code | 13 |
14324551309 | import reportlab
from reportlab.graphics.shapes import Drawing
from book import Book, BookPage
import math
from reportlab.pdfgen import canvas
from reportlab.lib.colors import Color
import reportlab.rl_config
reportlab.rl_config.warnOnMissingFontGlyphs = 0
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.graphics import renderPM
from enum import Enum
class PdfPage(BookPage):
    """A book page drawn by a user-supplied callback function."""
    def __init__(self, template_id, data, func):
        super().__init__(template_id)
        self.data = data  # arbitrary payload forwarded to the callback
        self.func = func  # callable(canvas, page, book, data), or None for a blank page
    def render(self, pdf_canvas: canvas.Canvas):
        """Invoke the drawing callback (if any) on the given canvas."""
        # self.book is presumably assigned when the page is added to a Book
        # (see Book.addPage) — confirm; it is not set in this class.
        if self.func is not None:
            self.func(pdf_canvas, self, self.book, self.data)
class PdfRenderer(object):
    """Renders Book pages and page templates to PDF files via reportlab."""
    class OutputType(Enum):
        # NOTE(review): the trailing commas make PDF == (0,) and TIFF == (1,)
        # (tuples) while PNG == 2; the identity comparisons in renderPage
        # still work, but the values are inconsistent.
        PDF = 0,
        TIFF = 1,
        PNG = 2
    def __init__(self):
        pass
    def renderTemplate(self, book: Book, id):
        """Draw the template's margin/header/footer/content boxes for a
        two-page spread and save it to output/template_<id>.pdf."""
        t = book.getPageTemplate(id)
        if t is None:
            print(f"ERROR: Page template with id '{id}' does not exist.")
            return
        filename = f"output/template_{id}.pdf"
        # Canvas is two page-widths wide (left + right page of the spread).
        c = canvas.Canvas(filename, (book.width_px * 2, book.height_px), 0)
        stroke = Color(0,0,0)
        stroke_width = 2
        c.setStrokeColor(stroke)
        c.setLineWidth(stroke_width)
        c.rect(0, 0, book.width_px * 2, book.height_px)
        c.line(book.width_px, 0, book.width_px, book.height_px)
        # left header
        c.rect(
            t['margins']['outer'],
            t['margins']['top'] - t['header']['height'] - t['header']['offset'],
            book.width_px - t['margins']['outer'] - t['margins']['inner'],
            t['header']['height'])
        # left footer
        c.rect(
            t['margins']['outer'],
            book.height_px - t['margins']['bottom'] + t['footer']['offset'],
            book.width_px - t['margins']['outer'] - t['margins']['inner'],
            t['footer']['height'])
        # left content
        c.rect(
            t['margins']['outer'],
            t['margins']['top'],
            book.width_px - t['margins']['outer'] - t['margins']['inner'],
            book.height_px - t['margins']['top'] - t['margins']['bottom'])
        # right header
        c.rect(
            book.width_px + t['margins']['inner'],
            t['margins']['top'] - t['header']['height'] - t['header']['offset'],
            book.width_px - t['margins']['inner'] - t['margins']['outer'],
            t['header']['height'])
        # right footer
        c.rect(
            book.width_px + t['margins']['inner'],
            book.height_px - t['margins']['bottom'] + t['footer']['offset'],
            book.width_px - t['margins']['inner'] - t['margins']['outer'],
            t['footer']['height'])
        # right content
        c.rect(
            book.width_px + t['margins']['inner'],
            t['margins']['top'],
            book.width_px - t['margins']['inner'] - t['margins']['outer'],
            book.height_px - t['margins']['top'] - t['margins']['bottom'])
        c.showPage()
        try:
            # NOTE(review): this f-string has no placeholder — the filename was
            # presumably meant to be interpolated here; confirm against history.
            print(f"Save template rendering to (unknown)")
            c.save()
        except IOError as ioe:
            print(ioe)
    def addFont(self, name, file):
        """Register a TrueType font under `name` for later canvas use."""
        pdfmetrics.registerFont(TTFont(name, file))
    def renderPage(self, page: PdfPage, output_path, outputType: OutputType, render_meta = False):
        """Render a single page to <output_path>/<page.number>.pdf; with
        render_meta=True, overlay the content box and a dashed layout grid."""
        book = page.book
        filename = f"{output_path}/{page.number:04}.pdf"
        c = canvas.Canvas(filename, (book.width_px, book.height_px), 0)
        # https://github.com/source-foundry/font-line
        if render_meta:
            c.setStrokeColor(Color(0,0,0))
            c.setLineWidth(2)
            c.rect(0, 0, book.width_px, book.height_px)
            metaColor = Color(221,0,0)
            grid_size = book.px(10)
            c.setLineWidth(1)
            c.setStrokeColor(metaColor)
            c.rect(page.content['x'], page.content['y'], page.content['w'], page.content['h'])
            c.setDash(book.px(1), book.px(1))
            # Dashed grid lines every grid_size px inside the content box.
            x = page.content['x'] + grid_size
            y = page.content['y'] + grid_size
            while y < page.content['y'] + page.content['h'] - 1:
                c.line(page.content['x'] + 9, y, page.content['x'] + page.content['w'] - 9, y)
                y += grid_size
            while x < page.content['x'] + page.content['w'] - 1:
                c.line(x, page.content['y'] + 9, x, page.content['y'] + page.content['h'] - 9)
                x += grid_size
        print(f"Try rende page of type: {type(page)}")
        page.render(c)
        try:
            # NOTE(review): f-string below has no placeholder for the path —
            # only {outputType} is interpolated via the text; confirm intent.
            print(f"Save template rendering to (unknown), as {outputType}")
            if outputType is self.OutputType.PDF:
                c.save()
            #elif outputType in [self.OutputType.TIFF, self.OutputType.PNG]:
            #    filename = filename.rstrip("pdf") + outputType.name.lower()
            #    print(f"Save file as '(unknown)'")
            #    renderPM.drawToFile(c, filename, outputType.name)
            else:
                print(f"Unknown output type: {outputType}")
        except IOError as ioe:
            print(ioe)
if __name__ == '__main__':
    # Manual smoke test: build a small book, draw some primitives directly,
    # then exercise renderTemplate and renderPage.
    book = Book(200, 220, 72)
    book.page_count_offset = 0
    book.addPage(BookPage('default'))
    book.addPage(BookPage('default'), number=-1)
    book.addPage(PdfPage('default', {
        'text': "En liten text."
    }, None), number=None)
    book.update()
    print(book)
    c = canvas.Canvas("output/hello.pdf", (book.width_px, book.height_px), 0)
    # https://www.reportlab.com/docs/reportlab-userguide.pdf
    #c.setFont("arial", 12, 1.6)
    c.drawString(100,100,"Hello World")
    # t = c.beginText(150, 150)
    # #t.setFont("Alte Haas Grotesk", 12)
    # t.textLine("En text i olles bok")
    # t.textLine("Två texter i olles bok")
    # c.drawText(t)
    color = Color(0.7,0.3,0.9)
    c.setStrokeColor(color)
    c.rect(0,0,250,200)
    c.setLineWidth(10)
    c.setLineCap(1)
    c.line(10,100,400,400)
    c.setDash(10, 50)
    c.line(10,200,400,500)
    c.drawImage("output/original_frames/frame_0000.jpg", 300, 300, 64*3, 48*3)
    c.showPage()
    c.save()
    # template
    r = PdfRenderer()
    r.renderTemplate(book, 'default')
    # page
    page = book.getPage(3)
    print(type(page))
    # NOTE(review): renderPage(page, output_path, outputType, render_meta) —
    # `True` here binds to `outputType`, not `render_meta`, so this call hits
    # the "Unknown output type" branch; likely intended:
    # r.renderPage(page, 'output', PdfRenderer.OutputType.PDF, True)
    r.renderPage(page, 'output', True)
| cjrosen/at-the-catastrophy-point | src/book/pdf_renderer.py | pdf_renderer.py | py | 6,422 | python | en | code | 0 | github-code | 13 |
39982481664 | # -*- coding: utf-8 -*-
import codecs
import os
import sys
reload(sys)
sys.setdefaultencoding('GBK')
# 1. Read the segmented-text file and collect its whitespace-separated tokens.
def readfile(filepath):
    """Read a UTF-8 file of space-separated words, one or more per line,
    and return all words as a flat list."""
    word_list = []
    # `with` guarantees the handle is closed; the original never closed it.
    with codecs.open(filepath, 'r', "utf-8") as f:
        for line in f:
            # strip the newline, then split on single spaces
            word_list.extend(line.strip().split(" "))
    return word_list
# 2. Build four-character bigrams from adjacent two-character words.
def two_word(wordlist):
    """Concatenate each adjacent pair of 2-character words into one
    4-character token; other pairs are skipped. Overlapping pairs are kept
    (the index always advances by one, as in the original)."""
    twoword = []
    # range() replaces the original manual while loop; the unused
    # `fourletter` accumulator from the original has been removed.
    for i in range(len(wordlist) - 1):
        if len(wordlist[i]) == 2 and len(wordlist[i + 1]) == 2:
            twoword.append(wordlist[i] + wordlist[i + 1])
    return twoword
# #3.清洗格式
# def format(word_original):
# fmt = ',。!?'
# format_wordlist = []
# for word in word_original:
# for char in word :
# #if str(char) not in str(fmt):
# format_wordlist.append(word)
# return format_wordlist
# 4. Count word frequencies and sort in descending order of count.
def count_number(format_list):
    """Return [(word, count), ...] sorted by count, highest first."""
    word_dict = {}
    for word in format_list:
        # dict.get replaces the Python-2-only has_key() of the original,
        # which raises AttributeError on Python 3.
        word_dict[word] = word_dict.get(word, 0) + 1
    # items() replaces the Python-2-only iteritems().
    return sorted(word_dict.items(), key=lambda d: d[1], reverse=True)
def print_to_csv(combined_wordlist, to_file_path):
    """Write the top-10 (word, count) pairs to `to_file_path`, one
    'word,count' line each."""
    # `with` closes the file even if a write fails (original used a bare
    # open/close pair with no error handling).
    with open(to_file_path, 'w+') as nfile:
        for list_tuple in combined_wordlist[0:10]:
            nfile.write("%s,%d\n" % (list_tuple[0], list_tuple[1]))
def main():
    """Pipeline: read segmented words -> build 4-char bigrams -> count
    frequencies -> write the top 10 to test.csv."""
    singleword = readfile('happiness_seg.txt')
    doubleword = two_word(singleword)
    dict_and_frequence = count_number(doubleword)
    print_to_csv(dict_and_frequence,'test.csv')
if __name__ == '__main__':
    main()
| lonelyKSA/Deep-Learning | pre_task/pretask.py | pretask.py | py | 1,801 | python | en | code | 0 | github-code | 13 |
16825931304 | """Tests for the database interface."""
from typing import List
from pytest import mark
from hyrisecockpit.api.app.database.interface import (
AvailableWorkloadTablesInterface,
DatabaseInterface,
DetailedDatabaseInterface,
WorkloadTablesInterface,
)
from hyrisecockpit.api.app.database.model import (
AvailableWorkloadTables,
Database,
DetailedDatabase,
)
class TestDatabaseInterface:
    """Tests for the database namespace interfaces."""
    @mark.parametrize("attribute", ["I am a database", "Whats Up"])
    def test_creates_database_interface(self, attribute: str) -> None:
        """A database model can be created."""
        assert DatabaseInterface(id=attribute)
    @mark.parametrize("attribute", ["I am a database", "Whats Up"])
    def test_creates_database_interface_works(self, attribute: str) -> None:
        """A database model can be created."""
        # The interface dict unpacks cleanly into the Database model.
        interface = DatabaseInterface(id=attribute)
        assert Database(**interface)
    def test_creates_detailed_database_interface(self) -> None:
        """A detailed database model can be created."""
        assert DetailedDatabaseInterface(
            id="hycrash",
            host="linux",
            port="666",
            number_workers=42,
            dbname="post",
            user="Alex",
            password="1234",
        )
    def test_creates_detailed_database_interface_works(self) -> None:
        """A database model can be created."""
        # The interface dict unpacks cleanly into the DetailedDatabase model.
        interface = DetailedDatabaseInterface(
            id="hycrash",
            host="linux",
            port="666",
            number_workers=42,
            dbname="post",
            user="Alex",
            password="1234",
        )
        assert DetailedDatabase(**interface)
    def test_creates_workload_tables_interface(self) -> None:
        """A benchmark tables interface can be created."""
        assert WorkloadTablesInterface(workload_type="tpch", scale_factor=0.1)
    def test_creates_available_workload_tables_interface(self) -> None:
        """A available benchmark tables interface can be created."""
        assert AvailableWorkloadTablesInterface(
            workload_tables=[
                WorkloadTablesInterface(workload_type="tpch", scale_factor=0.1),
                WorkloadTablesInterface(workload_type="tpcds", scale_factor=0.1),
            ]
        )
    @mark.parametrize("tables", [["nations", "product"], ["just one table"]])
    def test_creates_available_workload_tables_interface_works(
        self, tables: List[str]
    ) -> None:
        """A available benchmark tables interface work."""
        # NOTE(review): the parametrized `tables` argument is never used in
        # this test body — the fixture values have no effect; confirm intent.
        interface = AvailableWorkloadTablesInterface(
            workload_tables=[
                WorkloadTablesInterface(workload_type="tpch", scale_factor=0.1),
                WorkloadTablesInterface(workload_type="tpcds", scale_factor=0.1),
            ]
        )
        assert AvailableWorkloadTables(**interface)
| hyrise/Cockpit | tests/api/database/test_interface.py | test_interface.py | py | 2,938 | python | en | code | 14 | github-code | 13 |
24622299214 | from datetime import datetime, timedelta
import logging
from io import BytesIO
from functools import lru_cache
from contextlib import contextmanager
from dateutil.parser import parse
import boto3
from ocs_archive.input.file import DataFile
from ocs_archive.storage.filestore import FileStore, FileStoreConnectionError
from ocs_archive.settings import settings
logger = logging.getLogger('ocs_ingester')
def strip_quotes_from_etag(etag):
    """Return *etag* with surrounding double quotes removed.

    Amazon returns the md5 sum of the uploaded file in the 'ETag' header
    wrapped in quotes; strip them so the value can be compared directly.

    Bug fix: the original implementation had no fallback return, so an
    unquoted ETag silently produced None; now the input is returned
    unchanged in that case.
    """
    if etag.startswith('"') and etag.endswith('"'):
        return etag[1:-1]
    return etag
class S3Store(FileStore):
    """FileStore implementation backed by an Amazon S3 (or S3-compatible) bucket."""

    def __init__(self, bucket: str = settings.BUCKET):
        """Create an S3 file storage manager using the bucket specified."""
        super().__init__()
        self.bucket = bucket

    @classmethod
    @lru_cache(maxsize=1)
    def get_s3_client(cls):
        # Cached (maxsize=1) so every instance shares a single boto3 client.
        config = boto3.session.Config(signature_version=settings.S3_SIGNATURE_VERSION, s3={'addressing_style': 'virtual'})
        return boto3.client('s3', endpoint_url=settings.S3_ENDPOINT_URL, config=config)

    def get_storage_class(self, observation_date):
        """Pick the S3 storage class based on how old the observation is."""
        # if the observation was more than X days ago, this is someone
        # uploading older data, and it can skip straight to STANDARD_IA
        if observation_date < (datetime.utcnow() - timedelta(days=settings.S3_DAYS_TO_IA_STORAGE)):
            return 'STANDARD_IA'
        # everything else goes into the STANDARD storage class, and will
        # be switched to STANDARD_IA by S3 Lifecycle Rules
        return 'STANDARD'

    def store_file(self, data_file: DataFile):
        """Upload *data_file* to the bucket.

        Returns a dict with 'key' (the S3 VersionId), 'md5' (from the ETag)
        and the file extension. Raises FileStoreConnectionError on any
        upload failure.
        """
        storage_class = self.get_storage_class(parse(data_file.get_header_data().get_observation_date()))
        # start_time = datetime.utcnow()
        s3 = boto3.resource('s3', endpoint_url=settings.S3_ENDPOINT_URL)
        key = data_file.get_filestore_path()
        content_disposition = 'attachment; filename={0}{1}'.format(data_file.open_file.basename, data_file.open_file.extension)
        content_type = data_file.get_filestore_content_type()
        try:
            response = s3.Object(self.bucket, key).put(
                Body=data_file.open_file.get_from_start(),
                ContentDisposition=content_disposition,
                ContentType=content_type,
                StorageClass=storage_class,
            )
        except Exception as exc:
            # Wrap any boto3/network error in the archive's own exception type.
            raise FileStoreConnectionError(exc)
        s3_md5 = strip_quotes_from_etag(response['ETag'])
        # NOTE(review): `key` is rebound from the object path to the S3
        # VersionId before being logged and returned -- presumably the
        # archive records versions rather than paths; confirm callers
        # expect a VersionId under the 'key' field.
        key = response['VersionId']
        logger.info('Ingester uploaded file to s3', extra={
            'tags': {
                'filename': '{}{}'.format(data_file.open_file.basename, data_file.open_file.extension),
                'key': key,
                'storage_class': storage_class,
            }
        })
        return {'key': key, 'md5': s3_md5, 'extension': data_file.open_file.extension}

    def delete_file(self, path: str, version_id: str):
        """
        Delete a file from s3.

        :param path: s3 path for file
        :param version_id: S3 VersionId of the object version to delete
        """
        client = S3Store.get_s3_client()
        client.delete_object(
            Bucket=self.bucket,
            Key=path,
            VersionId=version_id
        )

    def get_url(self, path: str, version_id: str, expiration: float):
        """
        Get a downloadable (presigned) url for a file from s3.

        :param path: s3 path for file
        :param version_id: S3 VersionId of the object version
        :param expiration: lifetime of the presigned url, in seconds
        """
        client = S3Store.get_s3_client()
        return client.generate_presigned_url(
            'get_object',
            ExpiresIn=expiration,
            Params={
                'Bucket': self.bucket,
                'Key': path,
                'VersionId': version_id
            }
        )

    @contextmanager
    def get_fileobj(self, path: str):
        """
        Get a file from s3 as a context-managed file-like object.

        The entire object is downloaded into an in-memory buffer, rewound,
        and closed automatically when the `with` block exits.

        :param path: s3 path for file
        :return: File-like object (BytesIO)
        """
        client = S3Store.get_s3_client()
        fileobj = BytesIO()
        client.download_fileobj(Bucket=self.bucket,
                                Key=path,
                                Fileobj=fileobj)
        fileobj.seek(0)
        try:
            yield fileobj
        finally:
            fileobj.close()

    def get_file_size(self, path: str):
        """
        Get the size of a file in s3 without downloading it (HEAD request).

        :param path: s3 path for file
        :return: file size in bytes
        """
        client = S3Store.get_s3_client()
        return client.head_object(Bucket=self.bucket, Key=path)['ContentLength']
| observatorycontrolsystem/ocs_archive | ocs_archive/storage/s3store.py | s3store.py | py | 4,723 | python | en | code | 0 | github-code | 13 |
73134522576 | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
from scipy.spatial import distance_matrix
from sklearn.mixture import GaussianMixture
def removeKNearest(coordinates, K):
    """Merge the K closest pairs of points into their midpoints.

    Repeats K times: find the two mutually closest points, remove both and
    append their midpoint at the end. Returns a new (N-K, D) array; the
    input array is not modified.
    """
    updated_coordinates = np.copy(coordinates)
    for _ in range(K):
        dist_matrix = distance_matrix(updated_coordinates, updated_coordinates)
        # A point is trivially closest to itself; mask the diagonal.
        np.fill_diagonal(dist_matrix, np.inf)
        # Bug fix: the original unpacked np.where(...)[0] into (row, col),
        # which only gave a valid pair by coincidence of the symmetric
        # matrix layout; unravel the true argmin instead.
        row_idx, col_idx = np.unravel_index(np.argmin(dist_matrix), dist_matrix.shape)
        midpoint = 0.5 * (updated_coordinates[row_idx] + updated_coordinates[col_idx])
        # Bug fix: delete the merged pair by index. The previous zero-fill
        # + drop-all-zero-rows trick would also delete a legitimate point
        # located exactly at the origin.
        updated_coordinates = np.delete(updated_coordinates, [row_idx, col_idx], axis=0)
        updated_coordinates = np.append(updated_coordinates, midpoint[None, :], axis=0)
    return updated_coordinates
class FixedCovMixture:
    """The model to estimate gaussian mixture with fixed covariance matrix
    in order to reduce duplication in SMLM experiments"""

    def __init__(self, n_components, var, max_iter=100, random_state=None, tol=1e-10):
        # n_components: number of mixture components to keep
        # var: fixed isotropic variance used for every component
        # max_iter / tol: kept for API symmetry with sklearn estimators
        # NOTE(review): max_iter and tol are stored but never used by fit().
        self.n_components = n_components
        self.var = var
        self.random_state = random_state
        self.max_iter = max_iter
        self.tol=tol

    def fit(self, X):
        """Build a fixed-covariance GaussianMixture seeded from X.

        Unlike sklearn's fit(), this returns a tuple
        (total log-likelihood of X, configured GaussianMixture); no EM
        iterations are run -- the mixture parameters are set directly.
        """
        np.random.seed(self.random_state)
        n_obs, n_features = X.shape
        # Initialize component means by repeatedly merging the closest
        # observations until n_components points remain.
        self.mean_ = removeKNearest(X, n_obs - self.n_components)
        self.init_means = np.copy(self.mean_)
        gmixture = GaussianMixture(n_components=self.n_components, covariance_type='tied')
        gmixture.means_ = self.init_means
        # Fixed isotropic covariance shared by all components.
        gmixture.covariances_ = self.var * np.eye(2)
        precision_matrix_cholesky = np.sqrt(1 / self.var) * np.eye(2)
        gmixture.precisions_cholesky_ = precision_matrix_cholesky
        # Uniform component weights.
        gmixture.weights_ = np.ones((self.n_components,)) / self.n_components
        log_like = gmixture.score_samples(X)
        return np.sum(log_like), gmixture
| cwseitz/miniSMLM | miniSMLM/utils/correct.py | correct.py | py | 2,024 | python | en | code | 0 | github-code | 13 |
35300931898 | from collections import deque, defaultdict
# BFS over pairs of positions: one token walks from vertex 0, the other
# from vertex n-1, moving simultaneously; a step is allowed only if the
# two destination vertices have different colours.
t = int(input())
for _ in range(t):
    n, m = map(int, input().split())
    c = list(map(int, input().split()))  # vertex colours
    g = [[] for _ in range(n)]
    for i in range(m):
        u, v = map(lambda x: x-1, map(int, input().split()))
        g[u].append(v)
        g[v].append(u)
    # If both endpoints already share a colour in the initial state,
    # no valid move sequence exists.
    if c[0]==c[-1]: print(-1); continue
    q = deque()
    q.append((0, n-1))
    dist = {}  # (pos_of_token_A, pos_of_token_B) -> number of steps
    dist[(0, n-1)] = 0
    while q:
        v1, v2 = q.popleft()
        for v3 in g[v1]:
            for v4 in g[v2]:
                if (v3, v4) in dist: continue
                # Both tokens may only land on differently coloured vertices.
                if c[v3]!=c[v4]:
                    dist[(v3, v4)] = dist[(v1, v2)] + 1
                    q.append((v3, v4))
    # Goal: the tokens have swapped ends.
    print(dist[(n-1, 0)]) if (n-1, 0) in dist else print(-1)
70078642579 | from discord import Message, Member, TextChannel, Guild, Embed, Client, User
from datetime import datetime
from database.select import Report
from utilities import util, secret
from system import permission, appearance
from system.moderation import moderation
from database import insert, select
async def report_cmd(message: Message, member: Member, reason: str) -> None:
    """
    Report a member and log the report to the moderation log channel.

    :param message: Message of command execution (deleted after handling)
    :param member: Member to be reported
    :param reason: Reason for report
    :raises Exception: if the moderation log is not configured, or the
        target is the bot itself or a moderator
    """
    # Initialize variables
    channel: TextChannel = message.channel
    guild: Guild = message.guild
    reported_by: Member = message.author
    mod_log = moderation.get_mod_log(guild)
    if not mod_log:
        # Abort if the moderation log is not set up
        raise Exception('Moderation log must be set up')
    # Delete the command message to keep the channel clean
    await util.delete_message(message)
    if member.id == secret.bot_id:
        # The bot cannot be reported
        raise Exception('Cannot report the bot')
    if permission.is_mod(member=member):
        # Moderators of the server cannot be reported
        raise Exception('Cannot report moderators')
    # Persist the report
    insert.report(reporter_id=reported_by.id, user_id=member.id, date=datetime.utcnow(), guild_id=guild.id,
                  reason=reason)
    # Send embed as confirmation in the invoking channel
    await moderation.chat_message(channel, f'Reported {member.mention} for {reason}', appearance.moderation_color,
                                  reported_by)
    # Build and send the audit entry for the moderation log
    log_embed: Embed = Embed(colour=appearance.moderation_color, timestamp=datetime.utcnow())
    # Add fields
    log_embed.set_author(name='Report', icon_url=member.avatar_url)
    log_embed.set_footer(text=reported_by.display_name, icon_url=reported_by.avatar_url)
    log_embed.add_field(name='User', value=member.mention, inline=True)
    log_embed.add_field(name='Reported by', value=reported_by.mention, inline=True)
    log_embed.add_field(name='Channel', value=channel.mention, inline=True)
    log_embed.add_field(name='Reason', value=reason, inline=False)
    await mod_log.send(embed=log_embed)
async def reports_of_member_cmd(client: Client, message: Message, member: Member) -> None:
    """
    Post an embed summarizing the reports of a member on a guild
    (total count plus up to the latest 5 entries).

    :param client: Bot client (used to resolve reporter users)
    :param message: Message of command execution (deleted after handling)
    :param member: Member to get reports of
    :raises Exception: if the moderation log is not configured
    """
    # Initialize variables
    channel: TextChannel = message.channel
    guild: Guild = message.guild
    mod_log = moderation.get_mod_log(guild)
    if not mod_log:
        # Abort if the moderation log is not set up
        raise Exception('Moderation log must be set up')
    # Delete the command message
    await util.delete_message(message)
    # Get total count of reports of the member
    count = select.count_reports(member.id, guild.id)
    # Fetch the most recent reports (at most 5)
    reports: list[Report] = select.reports_of_user(member.id, guild.id, limit=5)
    # Create embed
    desc = f'{member.mention} has **{count} reports** total.'
    if count > 0:
        desc += '\n\u200b'
    if count > 5:
        desc += '\n**Here are the latest 5 reports:**'
    embed = Embed(title=f'Reports - {member.display_name}', description=desc, colour=appearance.moderation_color)
    # Add one field per report, resolving the reporter's user object
    for r in reports:
        reported_by: User = await client.fetch_user(r.reporter_id)
        date: datetime = r.date
        embed.add_field(name=date.strftime("%Y.%m.%d"), value=f'• Moderator: {reported_by.mention}'
                                                              f'\n• Reason: {r.reason}', inline=False)
    await channel.send(embed=embed)
| FynnFromme/fryselBot | system/moderation/report.py | report.py | py | 3,741 | python | en | code | 1 | github-code | 13 |
def solution(board, moves):
    """Simulate the claw-machine game.

    For each 1-based column in *moves*, grab the topmost doll in that
    column (mutating *board*) and push it onto a basket stack; two equal
    dolls in a row pop and count. Returns the number of dolls removed.
    """
    removed = 0
    basket = []
    for move in moves:
        col = move - 1
        for row_cells in board:
            doll = row_cells[col]
            if doll == 0:
                continue  # empty slot, keep scanning downward
            row_cells[col] = 0  # doll is picked up
            if basket and basket[-1] == doll:
                basket.pop()
                removed += 2
            else:
                basket.append(doll)
            break  # only the topmost doll per move
    return removed
return answer
# Example board and move sequence from the problem statement; expected output: 4.
board = [[0,0,0,0,0],[0,0,1,0,3],[0,2,5,0,1],[4,2,4,4,2],[3,5,1,3,1]]
moves = [1,5,3,5,1,2,1,4]
print(solution(board, moves))
28743166510 | import re
import requests
from bs4 import BeautifulSoup
from collections import Counter
# Count the 10 most frequent words either in a pasted text or in a
# Wikipedia article fetched by URL (prompt text is in Russian).
user_input = input("Введите URL статьи или вставьте скопированный текст: ")
if user_input.startswith("http"):
    # Treat the input as a URL and extract the article body.
    # NOTE(review): id="mw-content-text" is MediaWiki-specific -- other
    # sites will raise AttributeError here.
    url = user_input
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    text = soup.find(id="mw-content-text").get_text()
else:
    # Otherwise the input itself is the text to analyze.
    text = user_input
# Case-insensitive word extraction and frequency count.
words = re.findall(r'\w+', text.lower())
word_counts = Counter(words)
top_10 = word_counts.most_common(10)
# Print the top word, then the remaining nine (output text is in Russian).
print("Слово", '"{}"'.format(top_10[0][0]), top_10[0][1], "раз")
for i in range(1, len(top_10)):
    print("Слово", '"{}"'.format(top_10[i][0]), top_10[i][1], "раз")
31189374608 | from lib.action import PyraxBaseAction
from lib.formatters import to_server_dict
__all__ = [
'ListVMImagesAction'
]
class ListVMImagesAction(PyraxBaseAction):
    """StackStorm action that lists available Rackspace cloud server images."""

    def run(self):
        """Return a mapping of image id -> image name for all VM images."""
        cs = self.pyrax.cloudservers
        imgs = cs.images.list()
        # Idiom: dict comprehension replaces the manual accumulation loop.
        return {img.id: img.name for img in imgs}
| gtmanfred/st2contrib | packs/rackspace/actions/list_vm_images.py | list_vm_images.py | py | 360 | python | en | code | null | github-code | 13 |
13741781833 | #
#
# Figure: load duration curve vs. coal generation over one year (8760 h).
# NOTE(review): x_values, ldc and coal are assumed to be defined earlier
# in the tutorial this excerpt belongs to -- this snippet does not run
# standalone.
#
plt.rc('font', family='Helvetica', size=14)
# To later reset to default font settings: plt.rcdefaults()
fig = plt.figure(figsize=(12, 6))
ax = plt.subplot()
plt.plot(
    x_values, ldc,
    lw=1.5,
    label='Load duration curve'
)
plt.plot(
    x_values, coal,
    lw=1.5,
    label='Coal generation'
)
legend = plt.legend()
legend.draw_frame(False)
# Full year of hours on x, normalized load on y.
plt.xlim(0, 8760)
plt.ylim(0, 1)
# De-clutter: drop top/right spines, grey out the remaining ones.
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_color('grey')
ax.spines['left'].set_color('grey')
ax.yaxis.grid(True, which='major', color='#CDCDCD', linestyle='-')
plt.savefig('01_ex01.png', dpi=300, bbox_inches='tight', pad_inches=0.5)
| htw-pv3/weather-data | python/tutorial/vis-tutorial-master/01-ex01-example.py | 01-ex01-example.py | py | 695 | python | en | code | 9 | github-code | 13 |
32834097093 | import requests, json
import argparse,os
def url(link):
    """Submit *link* to the VirusTotal URL scanner and print the report.

    Prints total engines, positive detections, and per-engine results
    when any engine flags the URL.
    """
    # NOTE(review): 'api_key' is a placeholder string -- a real VirusTotal
    # API key must be substituted for these requests to succeed.
    params = {'apikey': 'api_key', 'url': link}
    response = requests.post('https://www.virustotal.com/vtapi/v2/url/scan', data=params)
    results = response.json()
    print("REPORT")
    # Fetch the report for the scan we just queued, by scan_id.
    params2 = {'apikey': 'api_key',
               'resource': results['scan_id']}
    response = requests.get('https://www.virustotal.com/vtapi/v2/url/report', params=params2)
    results2 = json.loads(response.text)
    print("TOTAL : ", results2['total'])
    print("POSİTİVES : ", results2['positives'])
    if results2['positives'] == 0:
        print("The url is clear")
    else:
        # Dump the per-engine verdicts when something was detected.
        print(json.dumps(results2["scans"], indent=4))
def file(file):
    """Upload a local file to the VirusTotal file scanner and print the report.

    :param file: path of the file to upload (parameter intentionally
        shares the function's name in the original code)
    """
    # NOTE(review): 'api_key' is a placeholder -- substitute a real key.
    params = {'apikey': 'api_key'}
    files = {'file': (file, open(file, 'rb'))}
    response = requests.post('https://www.virustotal.com/vtapi/v2/file/scan', files=files, params=params)
    results = response.json()
    print("REPORT")
    # Fetch the report for the queued scan by scan_id.
    params = {'apikey': 'api_key',
              'resource': results['scan_id']}
    response = requests.get('https://www.virustotal.com/vtapi/v2/file/report', params=params)
    results2 = json.loads(response.text)
    print("TOTAL : ", results2['total'])
    print("POSİTİVES : ", results2['positives'])
    if results2['positives'] == 0:
        print("The file is clear")
    else:
        # Dump the per-engine verdicts when something was detected.
        print(json.dumps(results2["scans"], indent=4))
def ip(IP):
    """Print the VirusTotal IP-address report for *IP*.

    Shows network/ownership info, resolved hostnames, and the lists of
    undetected and detected URLs hosted on the address.
    """
    # NOTE(review): 'api_key' is a placeholder -- substitute a real key.
    params = {'apikey': 'api_key', 'ip': IP}
    response = requests.get('https://www.virustotal.com/vtapi/v2/ip-address/report', params=params)
    results= json.loads(response.text)
    print("#"*50,"> IP INFO <",'#'*50)
    print("NETWORK : ",results["network"])
    print("COUNTRY : ",results["country"])
    print("OWNER : ",results["as_owner"])
    print("CONTINENT : ",results["continent"])
    print("#"*50,"> HOSTNAMES (",len(results["resolutions"]),") <","#"*50)
    for i in results["resolutions"]:
        print("HOSTNAME : ",i['hostname'])
    print("#"*50,"> UNDETECTED URLS (",len(results["undetected_urls"]),") <","#"*50)# burayı düzenle
    for i in results["undetected_urls"]:
        print(i,"\n")
        print("-"*100)
    print("#" * 50, "> DETECTED URLS (",len(results["detected_urls"]),") <", "#" * 50)
    for i in results["detected_urls"]:
        print("URL : ",i['url'])
        print("POSITIVES : ",i['positives'])
        print("TOTAL : ",i['total'])
        print("SCAN DATE : ",i['scan_date'])
        print("-"*100)
def domain(dmn):
    """Print the VirusTotal domain report for *dmn*.

    Shows resolved IPs (sorted), sibling subdomains, WHOIS record, and,
    when present, the Alexa rank and Webutation safety score.
    """
    # NOTE(review): 'api_key' is a placeholder -- substitute a real key.
    params = {'apikey': 'api_key', 'domain': dmn}
    response = requests.get('https://www.virustotal.com/vtapi/v2/domain/report', params=params)
    results=json.loads(response.text)
    print("#"*50,"> IP ADDRESS (",len(results["resolutions"]),") <","#"*50)
    # Collect and sort the resolved IP addresses before printing.
    j = []
    for i in results["resolutions"]:
        j.append(i['ip_address'])
    j.sort()
    for k in j:
        print(k)
    print("#"*50,"> SUBDOMAINS (",len(results['domain_siblings']),") <","#"*50)
    for i in results['domain_siblings']:
        print(i)
    print("#"*50,"> WHOIS <","#"*50)
    print(results["whois"])
    print("#"*50,"> ALEXA RANK <","#"*50)
    # These sections are optional in the API response, hence the try/except.
    try:
        print("Alexa Rank: ",json.dumps(results["Alexa rank"]))
    except:
        print("Alexa is not exist")
    print("#"*50,"> SAFETY SCORE <","#"*50)
    try:
        print("Safety Score: ",results["Webutation domain info"]["Safety score"])
    except:
        print("Safety score is not exist")
def main():
    """Parse command-line options and dispatch to the matching scanner.

    Exactly one of --url/--file/--ip/--domain is expected; the first one
    provided (in that order) wins.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--url", "-u", help="Enter an url like http://www.example.com")
    parser.add_argument("--file", "-f", help="Enter a path of file")
    parser.add_argument("--ip", "-p", help="Enter an ip")
    parser.add_argument("--domain", "-d", help="Enter a domain like www.example.com")
    data = parser.parse_args()
    # Dispatch on whichever option was supplied.
    if data.url is not None:
        url(data.url)
    elif data.file is not None:
        file(data.file)
    elif data.ip is not None:
        ip(data.ip)
    elif data.domain is not None:
        domain(data.domain)
if __name__ == '__main__':
    # Clear the terminal before running. os.name is 'nt' on Windows and
    # 'posix' on Linux/macOS.
    # Bug fix: the original compared os.name against 'Linux', a value
    # os.name never takes, so the screen was never cleared on Linux.
    if os.name == 'nt':
        os.system("cls")
    elif os.name == 'posix':
        os.system("clear")
    main()
| fatihh92/virustotaler | virustotal.py | virustotal.py | py | 4,165 | python | en | code | 0 | github-code | 13 |
36296861551 | from django.contrib import admin
from .models import *
# Register your models here.
# Simple lookup models use the default ModelAdmin.
admin.site.register(TransactionType)
admin.site.register(TransactionStatus)


@admin.register(Transaction)
class TransactionAdmin(admin.ModelAdmin):
    """Admin configuration for Transaction: columns shown in the changelist."""
    list_display = ("cs", "bl", "transaction_type", "transaction_status", "amount")
| shoora-tech/dheera | transaction/admin.py | admin.py | py | 319 | python | en | code | 0 | github-code | 13 |
39942354931 | #!/usr/bin/env python
from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxyActionClient
# example import of required action
from o2ac_msgs.msg import FastenAction, FastenGoal
class FastenActionState(EventState):
    '''
    Actionlib for aligning the bearing holes

    -- task_name string Name of the task
    -- object_name string Name of the object to be fasten

    <= success Fasten completed successfully.
    <= error Fasten failed to execute.
    '''

    def __init__(self, task_name, object_name):
        super(FastenActionState, self).__init__(outcomes=['success', 'error'])
        self._topic = 'o2ac_flexbe/fasten'
        # pass required clients as dict (topic: type)
        self._client = ProxyActionClient({self._topic: FastenAction})
        self._task_name = task_name
        self._object_name = object_name
        # Tracks whether the goal was sent (and later completed) successfully.
        self._success = False

    def execute(self, userdata):
        # Called periodically while the state is active.
        # If sending the goal already failed in on_enter, bail out.
        if not self._success:
            return 'error'
        if self._client.has_result(self._topic):
            result = self._client.get_result(self._topic)
            Logger.logwarn('result %s' % str(result))
            if not result.success:
                Logger.logwarn('Fail to complete Fasten')
                self._success = False
                return 'error'
            else:
                Logger.logwarn('Succeed! completed Fasten')
                self._success = True
                return 'success'
        # No result yet: returning None keeps the state active.

    def on_enter(self, userdata):
        # Build and send the goal; optimistically mark success so execute()
        # keeps waiting unless sending the goal raised.
        goal = FastenGoal()
        goal.task_name = self._task_name
        goal.object_name = self._object_name
        self._success = True
        try:
            self._client.send_goal(self._topic, goal)
        except Exception as e:
            Logger.logwarn('Failed to send the Fasten command:\n%s' % str(e))
            self._success = False

    def on_exit(self, userdata):
        # Cancel the goal if it is still running when the state exits.
        if not self._client.has_result(self._topic):
            self._client.cancel(self._topic)
            Logger.loginfo('Cancelled active action goal.')
| o2ac/o2ac-ur | catkin_ws/src/o2ac_flexbe/o2ac_flexbe_states/src/o2ac_flexbe_states/fasten.py | fasten.py | py | 2,079 | python | en | code | 92 | github-code | 13 |
24511632609 | import matplotlib.pyplot as plt
import numpy as np
# Grouped bar chart: gold/silver/bronze medal counts per country,
# three bars side by side for each country.
width = 0.1  # width of one bar; also the offset between grouped bars
country = ["USA" , "India" , "Amarica"]
gold=[20,10,6]
silver=[100,50,30]
Bronze=[200,100,60]
# Base x positions, then shift each series by one bar width.
bar1 = np.arange(len(country))
bar2 = [i+width for i in bar1]
bar3= [i+width for i in bar2]
plt.bar(bar1,gold,width,color="r")
plt.bar(bar2,silver,width,color="y")
plt.bar(bar3,Bronze,width)
plt.xlabel("Country")
plt.title("No of medals earn by a country")
plt.ylabel("Medals")
# Center the country labels under the middle bar of each group.
plt.xticks(bar1+width,country)
plt.show()
| iamvaibhav31/Machine-Learning | 1. some important library of python used in machine learning/Matplotlib/Multiple_bar_chart(MATPLOTLIB).py | Multiple_bar_chart(MATPLOTLIB).py | py | 503 | python | en | code | 1 | github-code | 13 |
5528342770 | from flask import Flask, render_template, jsonify, redirect, request
from flask_pymongo import PyMongo
from pymongo import MongoClient
from time import gmtime, strftime
import pandas as pd
import json
import sys
import os
from urllib.parse import urlsplit
from bson import BSON
from bson import json_util
from dotenv import load_dotenv
import scrape_glassdoor
app = Flask(__name__)
mongo = PyMongo(app)
MONGODB_URI = os.getenv('MONGODB_URI', 'mongodb://localhost:27017/ADA')
def _get_db_connection(self):
    """Open and return an authenticated MongoDB database handle.

    Parses the database name (path) and optional user:password pair out
    of MONGODB_URI.

    NOTE(review): this is a module-level function that takes an unused
    `self` parameter -- it looks like it was lifted from a class; confirm
    whether it should be a plain function or a method.
    """
    parsed = urlsplit(MONGODB_URI)
    db_name = parsed.path[1:]  # path is '/<dbname>'; strip the slash
    db = MongoClient(MONGODB_URI)[db_name]
    # Credentials are present when the URI contains 'user:pass@host'.
    if '@' in MONGODB_URI:
        user, password = parsed.netloc.split('@')[0].split(':')
        db.authenticate(user, password)
    return db
@app.route("/", methods=["GET", "POST"])
def index():
    """Render the landing page, passing through an optional key/value filter.

    Query-string parameters 'key' and 'value' select documents from the
    `glass` collection; the (key, value) pair is handed to the template.
    """
    key = request.args.get('key')
    value = request.args.get('value')
    # Coerce the raw query-string value into the type stored in Mongo.
    if key == 'salaryMED':
        value = int(value)
    elif key == 'location':
        # NOTE(review): `value` is a string here, so this takes its first
        # two characters -- presumably a lat/lng pair was intended;
        # confirm the expected query format with the front end.
        value = [float(value[0]), float(value[1])]
    # Bug fix: the original re-read request.args.get('value') at this
    # point, discarding the type conversions performed above.
    query = {
        "key" : key,
        "value" : value
    }
    glass = mongo.db.glass.find( { key:value } )
    if glass == []:
        # NOTE(review): a pymongo Cursor never compares equal to [], so
        # this branch is currently unreachable; a count check was likely
        # intended. Left as-is to avoid changing observable behavior.
        query = None
    return render_template("index.html", query=query)
@app.route("/api/mongodb", methods=["GET", "POST"])
def mongodb():
    """Load a scraped JSON snapshot into the `glass` collection.

    The optional 'date' query parameter picks the snapshot file; a fixed
    default snapshot is used otherwise. Redirects back to the index.
    """
    date = request.args.get('date')
    if (date != None):
        path = 'Resources/glass{}.json'.format(date)
    else:
        # Fallback: the bundled default snapshot.
        path = 'Resources/glass2018-07-03.json'
    glass = mongo.db.glass
    with open(path, 'r') as infile:
        glassdb = json.load(infile)
    # Insert every job record from the snapshot into Mongo.
    for job in glassdb:
        glass.insert(job)
    return redirect("/", code=302)
@app.route("/api/scrape")
def scraper():
    """Scrape Glassdoor, save today's snapshot, and load it into Mongo.

    Writes the scraped data to Resources/glass<YYYY-MM-DD>.json, then
    redirects to /api/mongodb with that date so the file gets ingested.
    """
    timestamp = strftime("%Y-%m-%d", gmtime())
    path = "Resources/glass{}.json".format(timestamp)
    glass_data = scrape_glassdoor.scrape()
    with open(path, 'w') as outfile:
        json.dump(glass_data, outfile)
    queryURL = "/api/mongodb?date={}".format(timestamp)
    return redirect(queryURL, code=302)
@app.route("/api/query", methods=["GET", "POST"])
def query():
    """Return job records from the `glass` collection as JSON.

    Optional 'key'/'value' query parameters filter the collection; the
    value is coerced to int/float for the numeric fields. With no filter,
    all records are returned.
    """
    key = request.args.get('key')
    value = request.args.get('value')
    # Coerce the raw string value to the type stored in Mongo.
    if key == 'salaryMIN' or key == 'salaryMED' or key == 'salaryMAX':
        value = int(value)
    elif key == 'lat':
        value = float(value)
    elif key == 'lng':
        value = float(value)
    elif key == 'rating':
        value = float(value)
    if key != None and value != None:
        glass = mongo.db.glass.find( { key:value } )
    else:
        glass = mongo.db.glass.find()
    if glass == None:
        # NOTE(review): find() always returns a Cursor, never None, so
        # this fallback is unreachable as written.
        glass = mongo.db.glass.find()
    data =[]
    entry={}
    # Project each Mongo document onto a plain JSON-serializable dict.
    for job in glass:
        entry = {
            'title': job['title'],
            'company': job['company'],
            'city' : job['city'],
            'state': job['state'],
            'salaryMIN': job['salaryMIN'],
            'salaryMED': job['salaryMED'],
            'salaryMAX': job['salaryMAX'],
            'rating': job['rating'],
            'duration': job['duration'],
            'lat': job['lat'],
            'lng': job['lng']
        }
        data.append(entry)
        entry={}
    return jsonify(data)
# Static page routes: each simply renders its template.
@app.route("/process")
def process():
    """Render the process/methodology page."""
    return render_template("process.html")

@app.route("/summary")
def summary():
    """Render the summary page."""
    return render_template("summary.html")

@app.route("/bellchart")
def bellchart():
    """Render the bell-curve chart page."""
    return render_template("bell.html")

@app.route("/bubbleplot")
def bubbleplot():
    """Render the bubble-plot page."""
    return render_template("bubble.html")

@app.route("/geomap")
def geomap():
    """Render the geographic map page."""
    return render_template("map.html")

@app.route("/datatable")
def datatable():
    """Render the raw data table page."""
    return render_template("datatable.html")

if __name__ == "__main__":
    # Run the development server (debug mode; not for production).
    app.run(debug=True)
15822892265 | from data import Log
from users import user
def new_log(username):
    """Prompt for a journal entry and prepend it to log.txt for *username*.

    Entries are stored newest-first, one per line, as
    '<username>$<timestamped entry>'.
    """
    try:
        with open('log.txt', 'r+') as reader:  # file object for reading and writing
            print("Enter New Journal")
            content = reader.read()  # existing entries, kept below the new one
            journal = input()  # journal string is being entered by the user
            L = Log(journal)  # timestamping helper from data.py
            j = L.get_message()  # getting the journal with timestamp
            reader.seek(0, 0)
            # Rewrite from the top: new entry first, then the old content.
            reader.write(username + "$" + j + "\n" + content)
    except FileNotFoundError:
        # Bug fix: mode 'r+' raises FileNotFoundError on the first run,
        # before log.txt exists; create the file and write the first entry.
        with open('log.txt', 'w') as writer:
            print("Enter New Journal")
            journal = input()
            j = Log(journal).get_message()
            writer.write(username + "$" + j + "\n")
    print("New Log Successfully Written")
def view_log(username):
    """Print up to the 10 most recent journal entries for *username*.

    Falls back to offering the creation of a first entry when the log
    file does not exist yet (or a line is malformed).
    """
    try:
        no_of_journals = 0
        with open('log.txt', 'r') as reader:  # file object for reading
            for i in reader:
                # Each line is '<username>$<entry>'.
                s_part = list(i.split("$"))
                if s_part[0] == username:
                    print(s_part[1])
                    no_of_journals += 1
                if no_of_journals == 10:
                    break
    except (FileNotFoundError, IndexError):
        # Bug fix: the original used `except FileNotFoundError or IndexError`,
        # which evaluates to FileNotFoundError alone -- IndexError was never
        # caught. A tuple catches both exception types.
        print("No Journals Yet. Press Y/n to Create New")
        want = input()
        if want == "y" or want == "Y":
            new_log(username)
def main():  # the app runs from here
    """Interactive loop: authenticate, then view or create journal entries."""
    username = user()  # resolve the current user via users.py
    stop = "Y"
    # Keep serving until the user answers 'n' to the continue prompt.
    while stop != "n":
        print("Enter V to View Logs \nEnter N for Create New Log")
        choice = input()  # user choice: view old or create new journals
        if choice == "N" or choice == "n":
            new_log(username)
        elif choice == "V" or choice == "v":
            view_log(username)
        print("Want to continue (Y/n) ?")  # checking if user wants to continue using the app
        stop = input()

main()  # runner code
39324835649 | #To Find Errors
# Demo of try/except/else/finally:
# - FileNotFoundError: create the file instead of failing
# - KeyError: report the missing key (bound via `as error_message`)
# - else: runs only when no exception occurred
# - finally: always closes the file handle
try:
    file = open("a_file.text")
    a_dictionary = {"key": "value"}
    print(a_dictionary["key"])
except FileNotFoundError:
    # First run: the file does not exist yet, so create it.
    file = open("a_file.text", "w")
    file.write("something")
except KeyError as error_message:
    print(f"the key {error_message} dosen't exist")
else:
    content = file.read()
    print(content)
finally:
    file.close()
    #print("File was closed")
24423077620 | import sgtk
from sgtk.platform.qt import QtCore, QtGui
# import the shotgun_fields module from the qtwidgets framework
shotgun_fields = sgtk.platform.import_framework(
"tk-framework-qtwidgets", "shotgun_fields")
# import the shotgun_globals module from the qtwidgets framework
shotgun_globals = sgtk.platform.import_framework(
"tk-framework-shotgunutils", "shotgun_globals")
# import the shotgun_model module from the qtwidgets framework
shotgun_model = sgtk.platform.import_framework(
"tk-framework-shotgunutils", "shotgun_model")
# import the task_manager module from shotgunutils framework
task_manager = sgtk.platform.import_framework(
"tk-framework-shotgunutils", "task_manager")
# import the shotgun_fields module from the qtwidgets framework
views = sgtk.platform.import_framework(
"tk-framework-qtwidgets", "views")
# ensure our icon resources are imported
from .ui import resources_rc
# importing this will register the class with the fields manager
from .favorite_widget import MyProjectFavoritesWidget
class CustomFieldWidgetDemo(QtGui.QWidget):
    """
    Demonstrates how to override one of the default Shotgun field widgets.
    """

    def __init__(self, parent=None):
        """
        Initialize the demo widget.

        :param parent: optional parent QWidget
        """
        # call the base class init
        super(CustomFieldWidgetDemo, self).__init__(parent)

        # create a background task manager for each of our components to use
        self._bg_task_manager = task_manager.BackgroundTaskManager(self)

        # the fields manager is used to query which fields are supported
        # for display. it can also be used to find out which fields are
        # visible to the user and editable by the user. the fields manager
        # needs time to initialize itself. once that's done, the widgets can
        # begin to be populated.
        self._fields_manager = shotgun_fields.ShotgunFieldManager(
            self, bg_task_manager=self._bg_task_manager)
        # UI population is deferred until the manager signals readiness.
        self._fields_manager.initialized.connect(self._populate_ui)
        self._fields_manager.initialize()

    def _populate_ui(self):
        """Populate the ui after the fields manager has been initialized."""
        # create a SG model to retrieve our data
        self._model = shotgun_model.SimpleShotgunModel(
            self, self._bg_task_manager)

        # and a table view to display our SG model
        table_view = views.ShotgunTableView(self._fields_manager, self)
        table_view.horizontalHeader().setStretchLastSection(True)

        # the fields to query
        fields = [
            "image",
            "name",
            "current_user_favorite",
            "sg_description",
        ]

        # load the data into the model; only 'current_user_favorite' is
        # editable, which is where the custom widget kicks in
        self._model.load_data(
            "Project",
            fields=fields,
            limit=10,
            columns=fields,
            editable_columns=["current_user_favorite"]
        )

        # now apply the model to the table view
        table_view.setModel(self._model)
        table_view.hideColumn(0)

        # info label describing the demo (rich text, user-facing)
        info_lbl = QtGui.QLabel(
            "The table below is showing a list of all <strong>Project</strong> "
            "entities for the current SG site with a custom field widget in "
            "the <strong>Favorite</strong> column. The default widget is a "
            "standard <tt>QtGui.QCheckBox</tt>. Here you'll see a subclass of "
            "<tt>QCheckBox</tt> that uses images as the check indicator. This "
            "is a simple example of how you can override a field widget for "
            "a specific field on a specific entity.<br><br>"
            "Double click a cell in the <strong>Favorite</strong> to make the "
            "entry editable. Then click on the icon to toggle the favorite "
            "value. Note, this is not a live demo. SG will not be updated."
        )
        info_lbl.setWordWrap(True)

        # lay out the widgets
        layout = QtGui.QVBoxLayout(self)
        layout.setSpacing(20)
        layout.addWidget(info_lbl)
        layout.addWidget(table_view)

    def destroy(self):
        """
        Clean up the object when deleted.
        """
        # Shut down background work and deregister the task manager so
        # no callbacks fire on a deleted widget.
        self._bg_task_manager.shut_down()
        shotgun_globals.unregister_bg_task_manager(self._bg_task_manager)
| ColinKennedy/tk-config-default2-respawn | bundle_cache/app_store/tk-multi-demo/v1.0.2/python/tk_multi_demo/demos/custom_field_widget/demo.py | demo.py | py | 4,306 | python | en | code | 10 | github-code | 13 |
10992276259 | import time
from sys import argv
# Minimal timing stub for an MPI send benchmark: records a timestamp per
# "message", then appends one CSV row per message as (rank, batch, msg, time).
payload_size = 32
msg_tot = int(float(256))
file_name = 'rank_' + str('TEST') + '.csv'
sent_time = {}  # {msg_tot: {msg_num: send timestamp}}
msg_num = 0
delta = 0.0
rank = 1
if rank == 1:
    data = bytes(payload_size)  # payload placeholder (unused in this stub)
    beg = time.time()
    while True:
        # Record the timestamp of each message under its batch size.
        if msg_tot in sent_time:
            sent_time[msg_tot][msg_num] = time.time()
        else:
            sent_time[msg_tot] = {msg_num: time.time()}
        msg_num += 1
        if msg_tot == msg_num:
            break
    end = time.time()
    delta = end - beg  # total elapsed time for the batch
    with open(file_name, 'a') as csv:
        # sent_time[msg_tot] = {msg_num: time.time()}
        for k1, v1 in sent_time.items():
            # Bug fix: the original nested `for k2 in v1.keys()` inside
            # `for v2 in v1.values()`, writing the cartesian product
            # (N*N rows). items() pairs each message number with its own
            # timestamp, producing exactly one row per message.
            for k2, v2 in v1.items():
                csv.write('%d, %d, %d, %f' % (rank, k1, k2, v2))
                csv.write('\n')
| folkpark/MPI_Benchmarking | exp10/exp10_3n/test.py | test.py | py | 836 | python | en | code | 0 | github-code | 13 |
2623831602 | #Una función lambda son funciones anonimas
# Lambda functions are anonymous functions: small, single-expression.
def sumar(a, b):
    return a + b

# With a lambda the function is anonymous: no parentheses are needed
# around the parameters and no `return` keyword is used, but the body
# must be a single valid expression.
mi_funcion_lambda = lambda a, b: a + b
resultado = mi_funcion_lambda(4, 6)
print(resultado)

# Lambda that takes no arguments; it must still return a valid expression.
mi_funcion_lambda = lambda : 'Función sibn argumentos'
print(f'Llamar función lambda sin argumentos: {mi_funcion_lambda()}')

# Lambda with default parameter values.
mi_funcion_lambda = lambda a = 2, b = 3 : a + b
print(f'Resultado de argumentos por default: {mi_funcion_lambda()}')

# Lambda with variable arguments: *args and **kwargs.
mi_funcion_lambda = lambda *args, **kwargs: len(args) + len(kwargs)
print(f'Resultado de argumentos variables: {mi_funcion_lambda(1, 2, 3, a=5, b=6)}')

# Lambda combining positional args, a default, *args and **kwargs.
mi_funcion_lambda = lambda a, b, c=3, *args, **kwargs: a+b+c+len(args)+len(kwargs)
print(f'Resultado de la funcion lambda: {mi_funcion_lambda(1, 2 ,4, 5, 6, 7, e=5, f=7)}')
| Chrisgmsl22/python_course | funciones_lambda.py | funciones_lambda.py | py | 1,261 | python | es | code | 0 | github-code | 13 |
22756216624 |
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView, Request, Response, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from kmdb.pagination import CustomPageNumberPagination
from .permission import ReviewPermission, ReviewObjectPermission
from reviews.models import Review
from reviews.serializer import ReviewSerializer
from movies.models import Movie
class ReviewsViews(APIView, CustomPageNumberPagination):
    """List (paginated) and create reviews for a given movie."""

    authentication_classes = [TokenAuthentication]
    permission_classes = [ReviewPermission]

    def get(self, request: Request, movie_id: int) -> Response:
        """Return a paginated list of reviews for the movie (404 if absent)."""
        # 404s when the movie does not exist; the object itself is unused.
        movie = get_object_or_404(Movie, id=movie_id)
        reviews = Review.objects.filter(movie_id=movie_id)
        result_page = self.paginate_queryset(reviews, request, view=self)
        serializer = ReviewSerializer(result_page, many=True)
        return self.get_paginated_response(serializer.data)

    def post(self, request: Request, movie_id: int) -> Response:
        """Create a review for the movie; one review per user per movie."""
        movie = get_object_or_404(Movie, id=movie_id)
        reviews = Review.objects.filter(movie_id=movie_id)
        # Reject a second review by the same user for this movie.
        for review in reviews:
            if review.user.id == request.user.id:
                return Response({"detail": "Review already exists."}, status.HTTP_403_FORBIDDEN)
        serializer = ReviewSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save(movie=movie, user=request.user)
        return Response(serializer.data, status.HTTP_201_CREATED)
class ReviewDetailView(APIView):
    """Retrieve or delete a single review belonging to a movie."""

    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticatedOrReadOnly, ReviewObjectPermission]

    def get(self, request: Request, movie_id: int, review_id: int) -> Response:
        """Return the serialized review, or 404 when it does not exist for that movie."""
        instance = get_object_or_404(Review, movie_id=movie_id, id=review_id)
        return Response(ReviewSerializer(instance).data)

    def delete(self, request: Request, movie_id: int, review_id: int) -> Response:
        """Delete the review after the object-level permission check; respond 204."""
        instance = get_object_or_404(Review, movie_id=movie_id, id=review_id)
        self.check_object_permissions(request, instance)
        instance.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
40394054165 | # -*- encoding=utf8 -*-
# Scenario 5 (dine-in): order "caramel milk tea - medium + 1-yuan vanilla" and
# "boba milk tea + price -1"; submit the order; refund one caramel milk tea and
# replace it with "fried chicken leg (weighed) + takeaway box"; after the
# dishes are served, check out with a negotiated round-off, pay cash with
# change, and finally inspect the order report after checkout.
__author__ = "lsd"
from airtest.core.api import *
from poco.drivers.android.uiautomation import AndroidUiautomationPoco
stop_app("com.yhbc.tablet")
start_app("com.yhbc.tablet",activity=None)
touch(Template(r"tpl1550659223584.png", record_pos=(0.305, -0.063), resolution=(1366, 768)))
sleep(10)
poco = AndroidUiautomationPoco(use_airtest_input=True, screenshot_each_action=False)
auto_setup(__file__)
# Place the order
touch(Template(r"tpl1546074092173.png", record_pos=(-0.339, -0.237), resolution=(1366, 768)))
sleep(1)
touch(Template(r"tpl1546934613612.png", record_pos=(-0.444, -0.143), resolution=(1366, 768)))
sleep(1)
touch(Template(r"tpl1546520822277.png", record_pos=(0.122, 0.01), resolution=(1366, 768)))
touch(Template(r"tpl1546591423210.png", record_pos=(0.152, -0.088), resolution=(1366, 768)))
touch(Template(r"tpl1546520930485.png", record_pos=(0.295, -0.106), resolution=(1366, 768)))
sleep(2)
touch(Template(r"tpl1546499359252.png", record_pos=(-0.1, -0.156), resolution=(1366, 768)))
touch(Template(r"tpl1546499377959.png", record_pos=(-0.239, 0.198), resolution=(1366, 768)))
sleep(2)
# Verify the added attribute ("+vanilla") is displayed on the item
prop=poco("com.yhbc.tablet:id/tv_note").get_text()
assert_equal("+香草",prop,"属性显示=+香草")
touch(Template(r"tpl1546521088095.png", record_pos=(-0.329, 0.107), resolution=(1366, 768)))
touch(Template(r"tpl1546521111524.png", record_pos=(0.294, -0.105), resolution=(1366, 768)))
touch(Template(r"tpl1546589072879.png", record_pos=(0.067, 0.156), resolution=(1366, 768)))
touch(Template(r"tpl1546499377959.png", record_pos=(-0.239, 0.198), resolution=(1366, 768)))
sleep(2)
prop2=poco("com.yhbc.tablet:id/tv_note").get_text()
assert_equal("+价格-1",prop2,"属性显示=价格-1")
# Verify the order total shown in the cart
total_price=poco("com.yhbc.tablet:id/tv_total").get_text()
assert_equal("11.0",total_price,"验证订单总金额是否=11.0")
# Submit the order
touch(Template(r"tpl1546521705527.png", record_pos=(0.242, -0.184), resolution=(1366, 768)))
sleep(1.0)
touch(Template(r"tpl1546521744139.png", record_pos=(0.302, -0.185), resolution=(1366, 768)))
touch(Template(r"tpl1546523904395.png", record_pos=(0.247, 0.219), resolution=(1366, 768)))
sleep(2)
poco("com.yhbc.tablet:id/lv_order").child("android.widget.LinearLayout")[0].child("com.yhbc.tablet:id/rl").child("com.yhbc.tablet:id/ll").child("com.yhbc.tablet:id/rl_reduce").child("com.yhbc.tablet:id/tv_reduce").click()
touch(Template(r"tpl1546528990614.png", record_pos=(-0.217, -0.078), resolution=(1366, 768)))
touch(Template(r"tpl1546529053064.png", record_pos=(-0.098, -0.038), resolution=(1366, 768)))
touch(Template(r"tpl1546529062659.png", record_pos=(-0.101, 0.085), resolution=(1366, 768)))
touch(Template(r"tpl1546529073893.png", record_pos=(0.0, 0.005), resolution=(1366, 768)))
touch(Template(r"tpl1546529092862.png", record_pos=(-0.002, 0.137), resolution=(1366, 768)))
sleep(2)
touch(Template(r"tpl1546529131908.png", record_pos=(0.289, -0.141), resolution=(1366, 768)))
sleep(2)
poco("com.yhbc.tablet:id/pack").click()
touch(Template(r"tpl1546499377959.png", record_pos=(-0.239, 0.198), resolution=(1366, 768)))
sleep(2)
assert_exists(Template(r"tpl1546521341641.png", record_pos=(0.351, 0.031), resolution=(1366, 768)), "菜单显示打包")
# Verify the updated cart total
total_price2=poco("com.yhbc.tablet:id/tv_total").get_text()
assert_equal("8.5",total_price2,"验证订单总金额是否=8.5")
# Submit the order
touch(Template(r"tpl1546521705527.png", record_pos=(0.242, -0.184), resolution=(1366, 768)))
sleep(1.0)
touch(Template(r"tpl1546521744139.png", record_pos=(0.302, -0.185), resolution=(1366, 768)))
sleep(1.0)
touch(Template(r"tpl1546589873332.png", record_pos=(0.413, 0.217), resolution=(1366, 768)))
sleep(2.0)
# Checkout counter
poco("com.yhbc.tablet:id/iv_erase").click()
sleep(1.0)
poco(text="抹零小数").click()
# touch(Template(r"tpl1546589944922.png", record_pos=(-0.356, 0.018), resolution=(1366, 768)))
sleep(1.0)
# Read the amount due shown at checkout
yingshou=poco("com.yhbc.tablet:id/tv_realprice").get_text()
assert_equal("8.0",yingshou,"验证收银台应收金额=8.0")
# Read the rounded-off (discount) amount shown at checkout
moling=poco("com.yhbc.tablet:id/et_bonus").get_text()
assert_equal("0.5",moling,"验证收银台抹零金额=0.5")
touch(Template(r"tpl1546590391549.png", record_pos=(0.177, -0.165), resolution=(1366, 768)))
touch(Template(r"tpl1546590409925.png", record_pos=(-0.043, 0.121), resolution=(1366, 768)))
touch(Template(r"tpl1546590420205.png", record_pos=(0.387, 0.031), resolution=(1366, 768)))
# Read the change amount shown at checkout
zhaoling=poco("com.yhbc.tablet:id/tv_back_maney").get_text()
assert_equal("42.0",zhaoling,"验证收银台找零金额=42.0")
touch(Template(r"tpl1546590427751.png", record_pos=(0.316, 0.12), resolution=(1366, 768)))
sleep(3.0)
# Inspect the order list
touch(Template(r"tpl1546072136278.png", record_pos=(0.161, -0.236), resolution=(1366, 768)))
sleep(2.0)
# Verify the total amount of the settled order
order_total_price=poco("android.widget.LinearLayout").offspring("android:id/content").offspring("com.yhbc.tablet:id/viewpager").child("android.widget.LinearLayout").offspring("com.yhbc.tablet:id/order_listview").offspring("com.yhbc.tablet:id/tv_total_price").get_text()
print("order_total_price="+order_total_price)
# Packaging-box / tableware fees do not take part in the discount
assert_equal("8.0",order_total_price," 订单详情-验证金额总价是否=8.0")
# Verify the payment method of the settled order
pay_mode=poco("com.yhbc.tablet:id/order_listview").child("android.widget.LinearLayout")[0].child("com.yhbc.tablet:id/ll_item").child("com.yhbc.tablet:id/ll_click_pay_model").child("com.yhbc.tablet:id/tv_pay_model").get_text()
assert_equal("人工现金",pay_mode," 订单详情-验证支付方式=人工现金")
| gaojk/AirtestCase | AirtestCase/SmartPOS-Old/用例集/场景五-现金-找零.air/场景五-现金-找零.py | 场景五-现金-找零.py | py | 6,190 | python | en | code | 0 | github-code | 13 |
27460840925 | from PIL import Image
from math import ceil, sqrt
class GridCanvasPainter:
    """Lays a sequence of PIL images out as a grid of thumbnails on one canvas."""

    def __init__(self, images, layout=None, grid_shape=None, gap_shape=None, force_list=False, bg_color=None):
        """
        painter for a new canvas with images placed on it as grids
        :param images: iterable, sequence of PIL images
        :param layout: None, int, or tuple (n_cols, n_rows) of thumbnails.
            None computes both automatically; an int is used for both; a tuple
            element of None is computed from the other.  Automatic computation
            materializes `images` in MEMORY, which can be inefficient.
        :param grid_shape: None, int, or tuple — pixel size of each thumbnail.
            None uses the default 64 x 64; an int is used for both width and
            height; a tuple is (width, height).
        :param gap_shape: None, int, or tuple — size of the gaps between
            thumbnails, with the same conventions as grid_shape (None = no gap).
        :param force_list: bool, if True `images` is converted to a list, default=False
        :param bg_color: background color for the new canvas, default 0 (black)
        """
        # Automatic layout needs len(images), which requires a materialized
        # sequence; force_list requests one explicitly.
        needs_list = (
            force_list
            or layout is None
            or (hasattr(layout, "__contains__") and None in layout)
        )
        self.images = list(images) if needs_list else images
        # The property setters below validate and normalize the raw arguments.
        self.layout = layout
        self.grid_shape = grid_shape
        self.margin_shape = gap_shape
        self.bg_color = 0 if bg_color is None else bg_color

    @property
    def layout(self):
        return self._layout

    @layout.setter
    def layout(self, value):
        if value is None:
            # Near-square automatic layout: cols = ceil(sqrt(n)).
            n_cols = ceil(sqrt(len(self.images)))
            n_rows = ceil(len(self.images) / n_cols)
            self._layout = (n_cols, n_rows)
        elif hasattr(value, "__len__"):
            try:
                n_cols = value[0]
                n_rows = value[1]
            except IndexError as e:
                print(e)
                raise IndexError("Illegal layout shape")
            if n_cols is None and n_rows is None:
                # Both unspecified: fall back to fully automatic layout.
                self.layout = None
                return
            if n_cols is None:
                n_cols = ceil(len(self.images) / n_rows)
            if n_rows is None:
                n_rows = ceil(len(self.images) / n_cols)
            self._layout = (n_cols, n_rows)
        else:
            self._layout = (value, value)

    @property
    def margin_shape(self):
        return self._margin_shape

    @margin_shape.setter
    def margin_shape(self, value):
        if value is None:
            self._margin_shape = (0, 0)
        elif hasattr(value, "__len__"):
            try:
                # Index both elements explicitly: the previous tuple(value[:2])
                # silently accepted a length-1 sequence and produced a
                # malformed 1-tuple that crashed much later in update_canvas().
                self._margin_shape = (value[0], value[1])
            except IndexError as e:
                print(e)
                raise IndexError("Illegal margin shape")
        else:
            self._margin_shape = (value, value)

    @property
    def grid_shape(self):
        return self._grid_shape

    @grid_shape.setter
    def grid_shape(self, value):
        if value is None:
            self._grid_shape = (64, 64)  # default thumbnail size
        elif hasattr(value, "__len__"):
            try:
                # Same fix as margin_shape: reject length-1 sequences here.
                self._grid_shape = (value[0], value[1])
            except IndexError as e:
                print(e)
                raise IndexError("Illegal grid shape")
        else:
            self._grid_shape = (value, value)

    def update_canvas(self):
        """(Re)compose the canvas by pasting resized thumbnails row by row."""
        width = self.layout[0] * (self.grid_shape[0] + self.margin_shape[0]) - self.margin_shape[0]
        height = self.layout[1] * (self.grid_shape[1] + self.margin_shape[1]) - self.margin_shape[1]
        image_iterator = iter(self.images)
        self._canvas = Image.new("RGB", (width, height), color=self.bg_color)
        try:
            for j in range(0, height, self.grid_shape[1] + self.margin_shape[1]):
                for i in range(0, width, self.grid_shape[0] + self.margin_shape[0]):
                    self._canvas.paste(next(image_iterator).resize((self.grid_shape[0], self.grid_shape[1])), (i, j))
        except StopIteration:
            # Fewer images than grid cells: remaining cells stay background.
            pass
        # TODO consider force updating

    @property
    def canvas(self):
        """Lazily painted canvas: composed on first access, then cached."""
        if not hasattr(self, "_canvas"):
            self.update_canvas()
        return self._canvas
def paint_grid_canvas(images, layout=None, grid_shape=None, gap_shape=None, force_list=False, bg_color=None):
    """Build a :class:`GridCanvasPainter` and return its painted canvas.

    All parameters are forwarded to ``GridCanvasPainter``; see that class for
    the full conventions (ints are used for both dimensions, ``None`` picks a
    default, tuples are ``(width, height)`` / ``(n_cols, n_rows)``).

    ``force_list`` now defaults to ``False`` — the value the docstring always
    promised — instead of ``None``; behavior is unchanged since both are falsy.

    :return: a new PIL image with the input images placed on it as a grid
    """
    return GridCanvasPainter(
        images,
        layout=layout,
        grid_shape=grid_shape,
        gap_shape=gap_shape,
        force_list=force_list,
        bg_color=bg_color,
    ).canvas
| shuheng-liu/misc-tools-python | image_utils/_grid_canvas.py | _grid_canvas.py | py | 6,698 | python | en | code | 0 | github-code | 13 |
16006202120 | import gym
import os
import torch
import tqdm
import numpy as np
from mani_skill2.utils.wrappers import RecordEpisode
from tools.utils import animate
from mani_skill2.utils.sapien_utils import get_entity_by_name, look_at
from copy import deepcopy
import multiprocessing as mp
import PIL.Image as im
mp.set_start_method("spawn")
def sample_unit_ball_north():
    """Return a random unit vector restricted to the upper (z >= 0) hemisphere."""
    direction = np.random.randn(3)
    if direction[2] < 0:
        direction = -direction  # mirror the whole vector into the upper half
    return direction / np.linalg.norm(direction)
def sample_unit_ball_north_():
    """Return a random unit vector with z >= 0 and x <= 0 (per-component flips)."""
    v = np.random.randn(3)
    v[2] = abs(v[2])    # force the z component non-negative
    v[0] = -abs(v[0])   # force the x component non-positive
    return v / np.linalg.norm(v)
def env_render(env, pose=None):
    """Aim the environment's render camera at the scene.

    If ``pose`` is None, a viewpoint is sampled on a sphere of ``radius``
    around ``scene_center`` (restricted to z >= 0, x <= 0 by
    ``sample_unit_ball_north_``); otherwise the given pose is reused verbatim.

    Returns ``(pose, pose_new)`` where ``pose_new`` is the pose's 4x4
    transformation matrix with the rotation re-expressed in the downstream
    camera axis convention.  (Removed: an unused ``pose_ori`` deep copy and a
    stack of dead commented-out scene/radius experiments.)
    """
    scene_center = np.array([-0.6, 0., 0.3])
    radius = 2.5
    if pose is None:
        pose = look_at(radius * sample_unit_ball_north_() + scene_center, scene_center)
    env.render_camera.set_local_pose(pose)
    pose_new = pose.to_transformation_matrix()
    # Re-axis the rotation for the consumer's camera convention
    # (presumably sapien -> OpenCV/NeRF axes; TODO confirm downstream).
    r = pose_new[:3, :3] @ np.array([[0, -1, 0], [0, 0, 1], [-1, 0, 0]]).T
    pose_new[:3, :3] = r
    return pose, pose_new
def render_traj(env_id, enable_kuafu, env_kwargs, states, poses=None):
    """Replay recorded (qpos, agent_state) pairs in a fresh env and capture renders.

    With poses=None, a random camera pose is sampled per frame and RGB images
    plus the poses are recorded.  With explicit poses, the saved viewpoints
    are reused and depth / actor-segmentation maps are captured instead
    (only when not using the kuafu renderer).
    """
    env = gym.make(env_id, enable_kuafu=enable_kuafu, **env_kwargs)
    output = {'image': [], 'pose': [], 'depth': [], 'actor_seg': [], 'camera_pose': []}
    env.reset()
    env_render(env)
    idx = 0
    for qpos, state in tqdm.tqdm(states, total=len(states)):
        # Step with a throwaway random action; the articulation qpos and the
        # agent state are overwritten immediately below.
        env.step(env.action_space.sample())
        env.cabinet.set_qpos(qpos)
        env.agent.set_state(state)
        if poses is None:
            pose, camera_pose = env_render(env)
            output['pose'].append(pose)
            output['camera_pose'].append(camera_pose)
            output['image'].append(env.render('rgb_array'))
        else:
            env_render(env, poses[idx])
            if not enable_kuafu:
                # NOTE(review): `img` is never used — confirm whether render()
                # is needed here only for its side effect.
                img = env.render('rgb_array')
                env.update_render()
                env.render_camera.take_picture()
                ret = env.unwrapped._get_camera_images(
                    env.render_camera, rgb=False, depth=True, visual_seg=True, actor_seg=True
                )
                output['depth'].append(ret['depth'])
                output['actor_seg'].append(ret['actor_seg'].astype(np.uint8)[..., 0])
        idx += 1
    output['intrinsic'] = env.render_camera.get_intrinsic_matrix()
    return output
def plot(traj, output_path):
    """Write trajectory artifacts to ``output_path``.

    RGB mode (traj has poses): a video, per-frame RGBA PNGs and camera-pose
    text files, plus the pose list and intrinsics (saved once — previously
    they were rewritten identically on every loop iteration).
    Depth mode: per-frame depth .npy files and raw + colorized segmentation PNGs.
    """
    if 'pose' in traj and len(traj['pose']) > 0:
        animate(traj['image'], os.path.join(output_path, 'output.mp4'))
        for idx, img in enumerate(traj['image']):
            # Pixels rendered as RGB (170, 170, 170) are treated as background
            # and made fully transparent in the alpha channel.
            alpha = ((img == 170).sum(axis=2) != 3).astype(np.uint8) * 255
            img = np.concatenate((img, alpha[:, :, None]), axis=2)
            im.fromarray(img).save(os.path.join(output_path, f"0_{idx:03d}.png"))
            np.savetxt(os.path.join(output_path, f'0_{idx:03d}.txt'), traj['camera_pose'][idx])
        # Loop-invariant outputs, hoisted out of the per-frame loop.
        torch.save(traj['pose'], os.path.join(output_path, 'pose.pth'))
        np.savetxt(os.path.join(output_path, '0_intrinsic.txt'), traj['intrinsic'])
    else:
        import matplotlib.pyplot as plt
        # Build the tab20 palette once instead of per frame.
        palette = np.array(plt.get_cmap('tab20').colors)
        for idx in range(len(traj['depth'])):
            np.save(os.path.join(str(output_path), f"0_{idx:03d}_depth.npy"), traj['depth'][idx])
            im.fromarray(traj['actor_seg'][idx]).save(os.path.join(str(output_path), f"0_{idx:03d}_seg.png"))
            colors = palette[traj['actor_seg'][idx] % 20]
            im.fromarray((colors * 255).astype(np.uint8)).save(os.path.join(str(output_path), f"0_{idx:03d}_seg_c.png"))
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env_id', type=str, default='door')
parser.add_argument('--kuafu', action='store_true')
args = parser.parse_args()

# Pick the demo trajectory directory and PartNet model id for the chosen task.
if args.env_id == 'door':
    traj_path = 'demos/rigid_body_envs/OpenCabinetDoor-v1/1018/link_1/'
    env_id = 'OpenCabinetDoor-v1'
    pid = '1018'
else:
    traj_path = 'demos/rigid_body_envs/OpenCabinetDrawer-v1/1004/link_0/'
    pid = '1004'
    env_id = 'OpenCabinetDrawer-v1'
env_kwargs = {'reward_mode': 'sparse', 'obs_mode': 'state', 'model_ids': [pid], 'fixed_target_link_idx': 1}

# Linearly interpolate n_interpolate sub-states between every consecutive pair
# of recorded states for a smoother replay.  robot_root_pose and controller
# state are carried over unchanged (presumably not component-wise lerp-able).
# The per-key debug print inside the inner loop was removed.
old_state = torch.load(traj_path + 'states')
new_state = []
n_interpolate = 5
for i in range(len(old_state) - 1):
    q1, s1 = old_state[i]
    q2, s2 = old_state[i+1]
    for j in range(n_interpolate):
        q = q1 + (q2 - q1) * j / n_interpolate
        s = {k: v + (s2[k] - v) * j / n_interpolate for k, v in s1.items() if k != 'robot_root_pose' and k!= 'controller'}
        s['robot_root_pose'] = s1['robot_root_pose']
        s['controller'] = s1['controller']
        new_state.append([q, s])
new_state.append(old_state[-1])
state = new_state #[:10]

if args.kuafu:
    # Kuafu renderer pass: sample fresh camera poses and record RGB frames.
    traj = render_traj(
        env_id, True, env_kwargs, state,
    )
else:
    # Rasterized pass: reuse previously saved poses to capture depth/segmentation.
    traj = render_traj(
        env_id, False, env_kwargs, state, torch.load(traj_path + 'pose.pth')
    )
plot(traj, traj_path)
1664590867 | import turtle
import random
screen = turtle.Screen()
turtlemain = turtle.Turtle()

# Half-extents of the canvas; the walk stops once the turtle leaves them.
(x_pos, y_pos) = screen.screensize()
xpos = x_pos / 2
ypos = y_pos / 2

turtlemain.penup()
turtlemain.goto(0, 0)


def flipcoin():
    """Return 'Heads' or 'Tails' with equal probability."""
    return random.choice(["Heads", "Tails"])


turtle_xpos, turtle_ypos = turtlemain.pos()
turtlex = abs(turtle_xpos)
turtley = abs(turtle_ypos)
while turtlex <= xpos and turtley <= ypos:
    # One flip per step: Heads turns left, Tails turns right.  The original
    # flipped a fresh coin for the loop body AND each branch test (so some
    # iterations moved nowhere), and never refreshed the position variables,
    # making the while-condition constant and the loop potentially infinite.
    if flipcoin() == "Heads":
        turtlemain.left(90)
    else:
        turtlemain.right(90)
    turtlemain.forward(50)
    turtle_xpos, turtle_ypos = turtlemain.pos()
    turtlex = abs(turtle_xpos)
    turtley = abs(turtle_ypos)
screen.exitonclick()
72124701138 | from django.shortcuts import render, Http404,redirect, get_object_or_404
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.core import serializers
import datetime
from .models import Country
from .forms import NameForm, ContactForm, CountryForm
# Create your views here.
def hello(request):
    """Trivial health-check style view; always responds with a greeting.

    The debug ``print(User.objects.all())`` — which hit the database on every
    request just to print to stdout — was removed.
    """
    return HttpResponse('hey wassup!')
def create_update_country(request, id=None):
    """Render the country form (GET) or create/update a Country (POST).

    With ``id`` the existing country is loaded and edited; without it a new
    one is created.  A valid POST redirects to that country's edit page.
    (Removed a debug ``print(f)`` of the bound form; other HTTP methods now
    raise Http404, matching the sibling ``sendmessage`` view instead of
    silently returning None.)
    """
    if request.method == 'GET':
        if id is not None:
            country = get_object_or_404(Country, pk=id)
            f = CountryForm(instance=country)
        else:
            f = CountryForm()
        return render(request, 'account/createcountry.html', {'form': f, 'id': id})
    elif request.method == 'POST':
        if id is not None:
            country = get_object_or_404(Country, pk=id)
            f = CountryForm(request.POST, instance=country)
        else:
            f = CountryForm(request.POST)
        if f.is_valid():
            country = f.save()
            return redirect('country-edit', id=country.id)
        else:
            return render(request, 'account/createcountry.html', {'form': f, 'id': id})
    raise Http404('Method Not Supported!')
def sendmessage(request):
    """Show the contact form on GET; validate it and redirect home on POST."""
    if request.method == 'GET':
        return render(request, 'account/sendmessage.html', {'nForm': ContactForm()})
    if request.method == 'POST':
        form = ContactForm(request.POST)
        if form.is_valid():
            return redirect('home', id=2)
        # Re-render with validation errors attached to the bound form.
        return render(request, 'account/sendmessage.html', {'nForm': form})
    raise Http404('Method Not Supported!')
def newhome(request, id):
    """Render the home page for user ``id`` with an empty NameForm; 404 if unknown."""
    try:
        user = User.objects.get(pk=id)
    except User.DoesNotExist:
        raise Http404("User doesn't exitst")
    return render(request, 'base/index.html', {'u': user, 'nForm': NameForm()})
def current_datetime(request):
    """Respond with a minimal HTML page showing the server's current local time."""
    now = datetime.datetime.now()
    return HttpResponse("<html><body>It is now %s.</body></html>" % now)
def getallusers(request):
    """Dump every User record as JSON (Django serializer array format)."""
    payload = serializers.serialize('json', User.objects.all())
    return HttpResponse(payload, content_type='application/json')
def follow(request):
    """Make user ``id1`` follow user ``id2`` (both from GET query params),
    creating a Profile for the follower on demand.

    NOTE(review): ``Profile`` and ``City`` are referenced but never imported
    anywhere in this module, so this view raises NameError as written —
    confirm and add the intended import.
    """
    id1 = request.GET['id1']
    id2 = request.GET['id2']
    try:
        follower = Profile.objects.get(pk=id1)
    except Profile.DoesNotExist:  # was a bare `except:` that hid every error
        follower = Profile.objects.create(account=User.objects.get(pk=id1), city=City.objects.get(id=1))
    follower.following.add(User.objects.get(id=id2))
| SahibSethi/mynewinstagram | mynewinstagram/account/views.py | views.py | py | 2,588 | python | en | code | 0 | github-code | 13 |
16470983201 | import json
from django.http import JsonResponse
from .models import FoodData, IntakeData, User, WaterConsumption
from .serializers import FoodDataSerializer, UserSerializer, IntakeDataSerializer, WaterCountSerializer
from django.views.decorators.csrf import csrf_exempt
def food_search(request, query):
    """Case-insensitive substring search over FoodData items, returned as JSON."""
    matches = FoodData.objects.filter(item__icontains=query)
    return JsonResponse({"data": FoodDataSerializer(matches, many=True).data})
@csrf_exempt
def user_signin(request):
    """Idempotently register a user from POST JSON ``{"data": {"email", "name"}}``.

    Uses ``get_or_create`` so the exists-check and the insert happen in one
    atomic call, removing the check-then-save race of the original.
    """
    data = json.loads(request.body)
    email = data['data']['email']
    name = data['data']['name']
    User.objects.get_or_create(email=email, defaults={'name': name})
    return JsonResponse({"message": "user signed in"})
def user_detail(request, email):
    """Return the serialized user for ``email``.

    An unknown email now yields a 404 JSON response instead of the unhandled
    ``User.DoesNotExist`` (HTTP 500) of the original.
    """
    try:
        user = User.objects.get(email=email)
    except User.DoesNotExist:
        return JsonResponse({"user": None}, status=404)
    return JsonResponse({"user": UserSerializer(user).data})
@csrf_exempt
def add_intake(request):
    """Create one IntakeData row from the POST JSON body's "data" object.

    Expected keys: date, time, email, item, quantity, calorie, proteins,
    carbs, fats, fiber (missing keys become None via dict.get).
    """
    data = json.loads(request.body)['data']
    date = data.get('date')
    time = data.get('time')
    email = data.get('email')
    item = data.get('item')
    quantity = data.get('quantity')
    calorie = data.get('calorie')
    protein = data.get('proteins')
    carb = data.get('carbs')
    fat = data.get('fats')
    fiber = data.get('fiber')
    obj = IntakeData(date=date, email=email,time=time , item=item, quantity=quantity, calorie=calorie, proteins=protein, carbs=carb, fats=fat, fiber=fiber)
    obj.save()
    return JsonResponse({"message":"Data Added"})
@csrf_exempt
def update_intake(request, id):
    """Update item/quantity/calorie of the intake row ``id`` from POST JSON.

    NOTE(review): reports "Data Updated" even when no row matches ``id``
    (QuerySet.update on an empty queryset is a no-op) — confirm intended.
    """
    data = json.loads(request.body)
    item = data.get('item')
    quantity = data.get('quantity')
    calorie = data.get('calorie')
    obj = IntakeData.objects.filter(id=id)
    obj.update(item=item, quantity=quantity, calorie=calorie)
    return JsonResponse({"message":"Data Updated"})
def get_data(request):
    """Return intake rows for (email, date) read from a JSON request body.

    NOTE(review): this view parses request.body yet carries no @csrf_exempt,
    unlike its sibling body-reading views — confirm how it is routed/called.
    """
    data = json.loads(request.body)
    date = data.get('date')
    email = data.get('email')
    obj = IntakeData.objects.filter(email=email, date=date)
    sd = IntakeDataSerializer(obj, many=True)
    return JsonResponse({"data":sd.data})
@csrf_exempt
def user_update(request, email):
    """Update profile attributes for ``email`` from POST JSON ``{"data": {...}}``."""
    payload = json.loads(request.body)['data']
    User.objects.filter(email=email).update(
        gender=payload['gender'],
        height=payload['height'],
        weight=payload['weight'],
        age=payload['age'],
        activity=payload['activity'],
    )
    return JsonResponse({"message": "user updated"})
def get_main_data(request, email,date):
    """All intake rows for one user on one date.

    NOTE(review): the payload is returned under the key "message" while the
    sibling endpoints use "data"; clients may depend on this, so it is kept.
    """
    obj = IntakeData.objects.filter(email=email, date=date)
    sd = IntakeDataSerializer(obj, many=True)
    return JsonResponse({"message":sd.data})
def get_time_data(request, email, date, time):
    """Intake rows for one user on one date at one meal time, as JSON."""
    rows = IntakeData.objects.filter(email=email, date=date, time=time)
    return JsonResponse({"data": IntakeDataSerializer(rows, many=True).data})
@csrf_exempt
def delete_item(request, id):
    """Delete the intake row with primary key ``id`` (no-op if absent)."""
    IntakeData.objects.filter(id=id).delete()
    return JsonResponse({"message": "Removed"})
def inc_count(request, email, date):
    """Increment the user's water-glass counter for ``date``, creating it at 1.

    The original round-tripped the row through the serializer just to read an
    int field, and did a separate exists() check; ``get_or_create`` plus a
    direct attribute read replaces both.
    """
    obj, created = WaterConsumption.objects.get_or_create(
        email=email, date=date, defaults={'counts': 1})
    if not created:
        obj.counts += 1
        obj.save()
    return JsonResponse({"message": "updated"})
def dec_count(request, email, date):
    """Decrement the water counter for ``date`` (the row must already exist).

    Reads the field directly instead of the original serializer round-trip.
    NOTE(review): the counter can go negative — confirm whether to clamp at 0.
    """
    obj = WaterConsumption.objects.get(email=email, date=date)
    obj.counts -= 1
    obj.save()
    return JsonResponse({"message": "updated"})
def get_count(request, email, date):
    """Return the water count for (email, date); 0 when no row exists.

    The original used a bare ``except:`` that swallowed every error
    (including programming bugs) as "count = 0"; only the expected
    ``DoesNotExist`` is caught now.
    """
    try:
        count = WaterConsumption.objects.get(email=email, date=date).counts
    except WaterConsumption.DoesNotExist:
        count = 0
    return JsonResponse({"message": count})
23158323008 | import requests as r
import json
import tryagain
import time
import config as cfg
# Telegram Bot API sendmessage endpoint for the configured bot, plus the last
# raw tunnels payload we acted on (module-level cache consulted by checker()).
tg_url = 'https://api.telegram.org/bot' + cfg.tg_token + '/sendmessage'
tunnels_old = ''
def checker():
    """Poll the ngrok API and push the first live tunnel URL to Telegram.

    Acts only when the tunnels payload changed since the last call (cached in
    the module-level ``tunnels_old``).  If no tunnel is up, warns once, waits
    five minutes, and re-checks.
    """
    global tunnels_old
    iteration = 0
    tunnels = (r.get('https://api.ngrok.com/tunnels',
                     headers={'Authorization': 'Bearer ' + cfg.ngrok_token,
                              'Ngrok-Version': '2', 'Cache-Control': 'no-cache'})).text
    if tunnels != tunnels_old:
        if 'public_url' in tunnels:
            tunnels_old = tunnels
            tunnels_json = json.loads(tunnels)['tunnels']
            tunnels_list = [item.get('public_url') for item in tunnels_json]
            # Probe each tunnel ([6:] strips the URL scheme, presumably
            # 'tcp://' — confirm) until one reports as online.
            while iteration < len(tunnels_list):
                resp = r.get('https://mcstatus.snowdev.com.br/api/query/v3/' + tunnels_list[iteration][6::], timeout=15)
                if 'online' in resp.text:
                    url_new = tunnels_list[iteration][6::]
                    r.post(tg_url, data={'chat_id': cfg.chat_id, 'text': url_new}, timeout=10)
                    break
                else:
                    iteration += 1
        else:
            # NOTE(review): tail-recursing after the sleep grows the call
            # stack on long outages — consider converting this to a loop.
            r.post(tg_url, data={'chat_id': cfg.chat_id, 'text': 'It seems like my server is down. Please wait.'},
                   timeout=10)
            time.sleep(300)
            checker()
# Entry point: run one check; on any failure print it, wait five minutes,
# then retry through tryagain.
try:
    checker()
except Exception as e:
    print(e)
    time.sleep(300)
    tryagain.call(checker)
| ZemlyakovDmitry/Ngrok-URL-Notifier-Minecraft | main.py | main.py | py | 1,473 | python | en | code | 0 | github-code | 13 |
43246427942 | import forecastio
from datetime import datetime, timedelta
from flask import current_app
class Forecast:
    """Caching wrapper around the Dark Sky (forecastio) API.

    A forecast is fetched lazily on first access and reused until it is more
    than ``refresh`` seconds old, at which point the next access refetches it.
    """

    def __init__(self, key, lat, lon, units, refresh):
        self.forecast = None   # cached forecastio forecast object, or None
        self.refresh = refresh  # cache lifetime in seconds
        self.key = key          # Dark Sky API key
        self.lat = lat
        self.lon = lon
        self.units = units      # unit system passed through to forecastio

    def get_forecast(self):
        """Return the cached forecast, refetching when stale.

        Uses ``is not None`` identity checks instead of the original
        ``not x == None`` comparison.
        """
        if self.forecast is not None:
            # currently().time is naive; offset() shifts it into local time
            # before comparing with the (also naive) datetime.now().
            time = self.forecast.currently().time + timedelta(hours=self.forecast.offset())
            if time < datetime.now() - timedelta(seconds=self.refresh):
                self.forecast = None
        if self.forecast is None:
            current_app.logger.debug('Updating forecast from DarkSky...')
            self.forecast = forecastio.load_forecast(self.key, self.lat, self.lon,
                                                     units=self.units)
        return self.forecast
| leoscholl/wallberry | wallberry/cache.py | cache.py | py | 842 | python | en | code | 1 | github-code | 13 |
19092503687 | import argparse
import gzip
import random
import sys
# Command-line interface.  Several help strings were copy-paste leftovers
# ("length of sequence" on --count1/--count2/--fixseed) and are corrected.
parser = argparse.ArgumentParser()
parser.add_argument('--file1', required=True, type=str,
	metavar='<path>', help='path to _true_ sequence file')
parser.add_argument('--file2', required=True, type=str,
	metavar='<path>', help='path to _fake_ sequence file')
parser.add_argument('--count1', required=True, type=int,
	metavar='<int>', help='number of true sequences to emit')
parser.add_argument('--count2', required=True, type=int,
	metavar='<int>', help='number of fake sequences to emit')
parser.add_argument('--offset', required=True, type=int,
	metavar='<int>', help='left-hand offset')
parser.add_argument('--length', required=True, type=int,
	metavar='<int>', help='length of sequence')
parser.add_argument('--fixseed', required=False, action='store_true',
	help='fix the random seed for reproducible output')
arg = parser.parse_args()
def readseq(path, o, l):
    """Read a gzipped text file (one sequence per line) and return a shuffled
    list of the length-``l`` character windows starting at offset ``o``.

    Each window is a list of single characters.  Streams the file line by
    line instead of materializing it with readlines(), and drops the stray
    unused ``nt`` assignment the original left in its inner loop.  Lines
    shorter than o+l now truncate instead of raising IndexError.
    """
    seqs = []
    with gzip.open(path, 'rt') as fp:
        for line in fp:
            seqs.append(list(line[o:o + l]))
    random.shuffle(seqs)
    return seqs
def seq2hot(seqs, label, n):
    """One-hot encode the first ``n`` sequences as CSV rows ending in ``label``.

    A, C and G map to their one-hot columns; anything else is treated as T,
    matching the original trailing else.  The original implicitly returned
    None when len(seqs) < n (it only returned from inside the loop); now the
    rows that could be built are returned instead.
    """
    hot = {'A': '1,0,0,0', 'C': '0,1,0,0', 'G': '0,0,1,0'}
    output = []
    for s in seqs[:n]:
        val = [hot.get(nt, '0,0,0,1') for nt in s]
        val.append(label)
        output.append(','.join(val))
    return output
if __name__ == '__main__':
    # Optionally pin the RNG so shuffles are reproducible.
    if arg.fixseed: random.seed(1)
    true = readseq(arg.file1, arg.offset, arg.length)
    fake = readseq(arg.file2, arg.offset, arg.length)
    # Both files must supply at least the requested number of sequences.
    assert(len(true) >= arg.count1)
    assert(len(fake) >= arg.count2)
    # Encode, label ('t'/'f'), interleave randomly, and print as CSV rows.
    t = seq2hot(true, 't', arg.count1)
    f = seq2hot(fake, 'f', arg.count2)
    all = t + f
    random.shuffle(all)
    for line in all:
        print(line)
| KorfLab/genDL | arch/data/seq2csv.py | seq2csv.py | py | 1,791 | python | en | code | 1 | github-code | 13 |
12344248635 | # program for computing the diameter of the tree.
# The diameter is the maximum distance between any two leaves.
# Earlier solutions processed nodes repeatedly — once to compute the height and again to compute the
# diameter at each individual node before taking the max — giving O(N^2) complexity.
# We have also seen a bottom-up approach that runs in O(N).
# Here we apply DP on trees: we store each node's height and reuse it to compute the diameter over the
# left and right subtrees, taking the max without recomputing heights for other subtrees.
# This also serves as the general template for DP on trees.
# TIME : O(N)
class Node:
    """A binary-tree node holding ``data`` with ``left``/``right`` children."""

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
# DP using post-order traversal.
def diameter(root, res):
    """Return the height (node count) of ``root`` and record the best
    diameter seen anywhere in the tree in ``res[0]``.

    At each node the candidate diameter is either the taller single arm
    (1 + max(left, right), node is a path endpoint) or a path bending
    through the node (1 + left + right).
    """
    if root is None:
        return 0
    left = diameter(root.left, res)
    right = diameter(root.right, res)
    through_one_arm = 1 + max(left, right)
    through_root = 1 + left + right
    best_here = max(through_one_arm, through_root)
    # BUG FIX: keep the RUNNING maximum.  The original did
    # res[0] = max(temp, ans), overwriting res[0] at every node, so a large
    # diameter found deep in a subtree was lost whenever an ancestor
    # produced a smaller value.
    res[0] = max(res[0], best_here)
    return through_one_arm
def compute_diameter(root):
    """Run the diameter DP over the whole tree and return the best value."""
    best = [-sys.maxsize - 1]  # one-element list used as an out-parameter
    diameter(root, best)
    return best[0]
import sys
# Demo driver: builds a 5-node tree whose longest leaf-to-leaf path is
# 4-2-1-3 (4 nodes), so this prints 4.
if __name__ == '__main__':
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    root.left.left = Node(4)
    root.left.right = Node(5)
    print(compute_diameter(root))
| souravs17031999/100dayscodingchallenge | dynamic programming/diameter_tree_dp_general_syntax.py | diameter_tree_dp_general_syntax.py | py | 1,748 | python | en | code | 43 | github-code | 13 |
1435857667 | class Solution(object):
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
def mycmp(a, b):
if a[0] == b[0]:
return a[1] - b[1]
else:
return b[0] - a[0]
people.sort(mycmp)
#print people
res = []
for i in people:
res.insert(i[1], i)
return res | hagho/leetcode_py | 406_Queue_Reconstruction_by_Height[M].py | 406_Queue_Reconstruction_by_Height[M].py | py | 457 | python | en | code | 0 | github-code | 13 |
21656835054 | import sys, os
# Split a scheduler log (path in argv[1]) into one timestamp file per process.
os.system("rm -f sched_plot_files/*")
os.system("rm -f sched_plot_files/selected/*")

processLogFiles = {}
with open(sys.argv[1], 'r') as fin:  # was opened and never closed
    for raw in fin:
        fields = raw.strip().split()
        process = fields[0].replace('/', '-')  # '/' is not filename-safe
        # Drop the trailing character of field 3 (presumably a ':' or ',' —
        # TODO confirm the log format).
        timestamp = fields[3][:-1]
        if process not in processLogFiles:
            processLogFiles[process] = open('sched_plot_files/' + process, 'w')
        processLogFiles[process].write(timestamp + '\n')

# Flush and close every per-process output file (previously all leaked).
for handle in processLogFiles.values():
    handle.close()
| yuvraj2987/ResourceAccounting | python_module/graphScripts/backup/sched_1.py | sched_1.py | py | 482 | python | en | code | 2 | github-code | 13 |
32687911732 | import os
import logging
from distutils.util import strtobool
from typing import Dict, Any
from gamekithelpers import ddb
from gamekithelpers.handler_request import get_player_id, get_path_param, get_query_string_param, log_event
from gamekithelpers.handler_response import response_envelope
from gamekithelpers.validation import is_valid_primary_identifier
# String (not bool) because query-string parameters arrive as text and are
# parsed with strtobool.
DEFAULT_CONSISTENT_READ = 'True'
# Shared default returned when no item exists.
# NOTE(review): this is a shared mutable dict — callers must not mutate the
# returned default, or consider returning a fresh {} per call.
EMPTY_RESPONSE = {}

logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event: Dict[str, Any], context: Any):
    """
    Retrieve metadata for the player's specified save slot, or an empty dictionary if no metadata is found.

    Parameters:
        Request Context:
            custom:gk_user_id: str
                The player_id to get the slot metadata for. This value comes from the Cognito Authorizer that validates the
                API Gateway request.

        Path Parameters:
            slot_name: str
                The save slot to get the slot metadata for.
                Limited to 512 characters long, using alphanumeric characters, dashes (-), underscores (_), and periods (.).
                This lambda will return an error if a malformed slot name is provided.

        Query String Parameters:
            consistent_read: bool
                Whether to use "Consistent Read" when querying DynamoDB.
                [Optional, defaults to True (DEFAULT_CONSISTENT_READ).]

    Errors:
        400 Bad Request - Returned when a malformed 'slot_name' path parameter is provided.
        401 Unauthorized - Returned when the 'custom:gk_user_id' parameter is missing from the request context.
    """
    log_event(event)

    # Get player_id from requestContext:
    player_id = get_player_id(event)
    if player_id is None:
        return response_envelope(status_code=401)

    # Get path param inputs (validated before touching DynamoDB):
    slot_name = get_path_param(event, 'slot_name')
    if not is_valid_primary_identifier(slot_name):
        logger.error(f'Malformed slot_name: {slot_name} provided for player_id: {player_id}')
        return response_envelope(status_code=400)

    # Get query param inputs (string 'True'/'False' parsed via strtobool):
    consistent_read = bool(strtobool(get_query_string_param(event, 'consistent_read', DEFAULT_CONSISTENT_READ)))

    # Get metadata from DynamoDB:
    metadata = get_slot_metadata(player_id, slot_name, consistent_read)

    # Construct response object:
    return response_envelope(
        status_code=200,
        response_obj=metadata
    )
def get_slot_metadata(player_id: str, slot_name: str, consistent_read: bool) -> Dict[str, Any]:
    """Fetch the stored metadata item for one save slot.

    Returns EMPTY_RESPONSE when no item exists for (player_id, slot_name).
    """
    # Table name is injected by the deployment via an environment variable.
    table = ddb.get_table(table_name=os.environ.get('GAMESAVES_TABLE_NAME'))
    lookup_key = {
        'player_id': player_id,
        'slot_name': slot_name,
    }
    result = table.get_item(Key=lookup_key, ConsistentRead=consistent_read)
    return result.get('Item', EMPTY_RESPONSE)
| aws/aws-gamekit-unreal | AwsGameKit/Resources/cloudResources/functions/gamesaving/GetSlotMetadata/index.py | index.py | py | 2,921 | python | en | code | 68 | github-code | 13 |
import unittest
import mock
from scotty.config import ScottyConfig
from scotty.core.exceptions import ScottyException
class ScottyConfigTest(unittest.TestCase):
    """Unit tests for ScottyConfig: construction, option values, base-dir lookup.

    Uses the deprecated-alias-free ``assertEqual`` (``assertEquals`` is a
    deprecated alias removed in Python 3.12).
    """

    def test_scotty_config_constructor(self):
        # Construction with the default search path must succeed.
        scotty_config = ScottyConfig()
        self.assertIsNotNone(scotty_config)

    @mock.patch('scotty.config.ScottyConfig._find_base_dir')
    def test_config_log_fields(self, base_dir_mock):
        # Point the loader at the sample config shipped with the tests.
        base_dir_mock.return_value = 'samples/etc/'
        scotty_config = ScottyConfig()
        log_dir = scotty_config.get('logging', 'log_dir')
        self.assertEqual(log_dir, '../log')
        log_file = scotty_config.get('logging', 'log_file')
        self.assertEqual(log_file, 'scotty.log')
        log_format = scotty_config.get('logging', 'log_format')
        self.assertEqual(log_format,
                         '%(asctime)s - %(levelname)s:%(name)s: %(message)s')
        log_level = scotty_config.get('logging', 'log_level')
        self.assertEqual(log_level, 'debug')
        # The [logging] section must define exactly these four options.
        self._assert_num_options(scotty_config, 'logging', 4)

    def test_scotty_config_gerrit_fields(self):
        scotty_config = ScottyConfig()
        host = scotty_config.get('gerrit', 'host')
        self.assertEqual(host, 'https://gerrit')
        self._assert_num_options(scotty_config, 'gerrit', 1)

    def test_config_osmod_fields(self):
        scotty_config = ScottyConfig()
        endpoint = scotty_config.get('osmod', 'endpoint')
        self.assertEqual(endpoint,
                         'https://api.liberty.mikelangelo.gwdg.de:8020')
        username = scotty_config.get('osmod', 'username')
        self.assertEqual(username, 'us3r')
        password = scotty_config.get('osmod', 'password')
        self.assertEqual(password, 'p4ss')
        self._assert_num_options(scotty_config, 'osmod', 3)

    def _assert_num_options(self, scotty_config, section, num_options):
        # Helper: the section defines exactly num_options options.
        options = scotty_config._config.options(section)
        self.assertEqual(len(options), num_options)

    @mock.patch('os.path.isfile')
    def test_find_base_dir(self, isfile_mock):
        # When a config file exists on disk, /etc/scotty/ wins the search.
        isfile_mock.return_value = True
        scotty_config = ScottyConfig()
        base_dir = scotty_config._find_base_dir()
        self.assertEqual(base_dir, '/etc/scotty/')

    @mock.patch('os.path.isfile')
    def test_no_scotty_config_file(self, isfile_mock):
        # Without any config file, construction must fail loudly.
        isfile_mock.return_value = False
        with self.assertRaises(ScottyException):
            ScottyConfig()
| mikelangelo-project/scotty.py | tests/test_config.py | test_config.py | py | 2,479 | python | en | code | 0 | github-code | 13 |
import numpy as np
import time
def backtracking_line_search(func, x_k, p_k, g_x_k, alpha, rho, c1, f_x_k, norm_g_x_k):
    """Backtracking (Armijo) line search with sample-averaged function values.

    The objective is evaluated ``nk`` times per trial step and averaged; ``nk``
    grows as the gradient norm shrinks (variance reduction for noisy objectives).

    Parameters:
        func: object exposing ``evaluate_function(x)`` (scalar objective).
        x_k, p_k, g_x_k: current point, search direction, gradient at x_k.
        alpha: initial trial step length (shrunk by ``rho`` each backtrack).
        rho, c1: backtracking factor and Armijo constant.
        f_x_k: objective value at x_k.
        norm_g_x_k: gradient norm at x_k; must be positive (log is taken of it).

    Returns:
        (alpha, number_function_call_bls, f_x_k): accepted step, total objective
        evaluations performed, averaged objective at the accepted point.

    Fixes over the previous revision:
      * ``nk`` is now always a plain int — the old code called ``.astype`` on a
        Python int (AttributeError when ||g|| >= 1) and passed a numpy float to
        ``range`` (TypeError when ||g|| < 1).
      * The first trial point now honours the caller-supplied ``alpha`` instead
        of hard-coding a unit step (equivalent when alpha == 1).
      * Removed debug ``print``/``time.sleep`` left in the inner loop.
    """
    number_function_call_bls = 0
    # More averaging samples when the gradient is small (1 - floor(ln ||g||)).
    if norm_g_x_k >= 1:
        nk = 1
    else:
        nk = int(1 - np.floor(np.log(norm_g_x_k)))

    def averaged_f(step):
        # Average nk evaluations of f at x_k + step * p_k.
        total = 0.0
        for _ in range(nk):
            total += func.evaluate_function(x_k + step * p_k)
        return total / nk

    f_x_kplus1 = averaged_f(alpha)
    number_function_call_bls += nk

    # Armijo sufficient-decrease condition; shrink alpha until satisfied.
    while f_x_kplus1 > f_x_k + c1 * alpha * np.dot(g_x_k, p_k):
        alpha = rho * alpha
        f_x_kplus1 = averaged_f(alpha)
        number_function_call_bls += nk

    f_x_k = f_x_kplus1
    return alpha, number_function_call_bls, f_x_k
def BFGSVK_1(func, x_k, max_iteration = 5000, abs_tol = 10**(-5), line_search = False, alpha = 1, rho = 0.95, c1 = 0.01):
    """BFGS quasi-Newton minimiser maintaining the inverse-Hessian approximation.

    Parameters:
        func: object exposing ``evaluate_function(x)`` and ``gradient_function(x)``.
        x_k: starting point (numpy array; the x_1/x_2 history below assumes at
            least two design variables — TODO confirm for 1-D problems).
        max_iteration: iteration cap.
        abs_tol: stop when ||grad f(x_k)|| falls below this.
        line_search: if True, use backtracking_line_search for the step size;
            otherwise use a fixed unit step.
        alpha, rho, c1: line-search parameters (alpha is reset to 1 each
            iteration before the line search).

    Returns:
        (x_k, n, norm_g_x_k_matrix, x_1, x_2, number_function_call):
        final point, last iteration index, gradient-norm history, first/second
        coordinate histories, and the objective-evaluation count accumulated
        by the line search.
    """
    # choosing an initial approximat hessian B_0 (identity inverse-Hessian)
    v_k = np.identity(len(x_k))
    s_k = np.ones(len(x_k))
    y_k = np.ones(len(x_k))
    # Coordinate histories for plotting the optimisation path.
    x_1 = []
    x_2 = []
    x_1.append(x_k[0])
    x_2.append(x_k[1])
    number_function_call = 0
    f_x_k = func.evaluate_function(x_k)
    # loop
    norm_g_x_k_matrix = []
    for n in range(max_iteration):
        # computing gradient G(x_k) = grad of f(x_k)
        g_x_k = func.gradient_function(x_k)
        norm_g_x_k = np.linalg.norm(g_x_k)
        norm_g_x_k_matrix = np.append(norm_g_x_k_matrix, norm_g_x_k)
        # stoping criteria mod of G(x_k) < e
        if np.linalg.norm(g_x_k) < abs_tol:
            break
        # computing the search direction p_k from B(x_k) * p_k = - G(x_k)
        # (here directly p_k = -V_k * g since v_k approximates the inverse Hessian)
        p_k = np.matmul(v_k, (-1) * g_x_k)
        # computing the step size alpha
        if line_search:
            #Backtracking line search (also returns call count and new f value)
            alpha = 1
            alpha_ = backtracking_line_search(func, x_k, p_k, g_x_k, alpha, rho, c1, f_x_k, norm_g_x_k)
            alpha = alpha_[0]
            number_function_call += alpha_[1]
            f_x_k = alpha_[2]
        else:
            alpha = 1
        # update the design variables x_k = x_k + alpha * p_k
        x_k = x_k + alpha * p_k
        # updating s_k = alpha * p_k and y_k = G(x_kplus1) - G(x_k)
        s_k = alpha * p_k
        #s_k_t = np.transpose(s_k)
        g_x_kplus1 = func.gradient_function(x_k)
        y_k = g_x_kplus1 - g_x_k
        #y_k_t = np.transpose(y_k)
        # updating the approximate hessian inverse v_kplus1 via the BFGS formula:
        # V+ = (I - s y^T / s^T y) V (I - y s^T / s^T y) + s s^T / s^T y
        sktyk = np.inner(s_k, y_k)
        skykt = np.outer(s_k, y_k)
        first_bracket = np.identity(len(x_k)) - (skykt / sktyk)
        ykskt = np.outer(y_k, s_k)
        second_bracket = np.identity(len(x_k)) - (ykskt / sktyk)
        firstmul = np.matmul(first_bracket, v_k)
        firstterm = np.matmul(firstmul, second_bracket)
        skskt = np.outer(s_k, s_k)
        secondterm = skskt / sktyk
        v_k = firstterm + secondterm
        # Record the best x values at the end of every cycle
        x_1.append(x_k[0])
        x_2.append(x_k[1])
    return x_k, n, norm_g_x_k_matrix, x_1, x_2, number_function_call
# end loop
| cvshah/blackbox_for_optimization | sampling_function.py | sampling_function.py | py | 3,456 | python | en | code | 0 | github-code | 13 |
from flask import Flask, render_template, request, url_for, make_response, jsonify, session, redirect, g, flash
import random, urllib
from dogeify import *
from quotes import *
import dogeconfig
import nltk.data, nltk.tag
# Part-of-speech tagger loaded once at import time and reused by dogeify().
tagger = nltk.data.load("taggers/maxent_treebank_pos_tagger/english.pickle")
app = Flask(__name__)
# app.config.from_object(__name__)
@app.route("/", methods=['GET', 'POST'])
def index():
    """Render the landing page with no dogeified result yet."""
    return render_template("home.html", getMode=False, result=[])
@app.route("/dogeify", methods=['GET', 'POST'])
def dogeifyText():
    """Dogeify the submitted text.

    GET renders the HTML result page; POST answers with JSON. Input longer
    than 2000 characters is rejected with an error result page.
    """
    if request.method == "GET":
        text = request.args.get("userText")
    else:
        text = request.form['userText']

    # Guard against oversized input before running the tagger.
    if len(text) > 2000:
        errormsg = colorify(["such error."])
        return render_template("home.html", getMode=True, result=["such error.", errormsg])

    colored = colorify(dogeify(text, tagger))
    result = [text, colored]
    if request.method == "GET":
        return render_template("home.html", getMode=True, result=result)
    elif request.method == "POST":
        return jsonify(list=result)
@app.route("/lucky")
def lucky():
    """Redirect to /dogeify with a randomly chosen quote as the input text."""
    chosen = random.choice(quotes)
    return redirect("/dogeify?userText=" + urllib.quote(chosen))
if __name__ == '__main__':
    # Run the development server with host/port/debug taken from dogeconfig.
    app.run(host=dogeconfig.host, port=dogeconfig.port, debug=dogeconfig.debug)
| cloudcrypt/dogeify | app.py | app.py | py | 1,409 | python | en | code | 4 | github-code | 13 |
class Solution:
    def translateNum(self, num: int) -> int:
        """Count the distinct letter translations of *num*.

        Each single digit translates to one letter, and any two-digit chunk
        in the range 10..25 also translates to one letter. Rolling DP over
        the decimal string: curr/prev hold f(i) and f(i-1); the leading
        seed values exist only to make the recurrence well-defined and do
        not correspond to real prefixes.
        """
        digits = str(num)
        prev, curr = 1, 1
        for idx in range(2, len(digits) + 1):
            pair = digits[idx - 2: idx]
            # The pair contributes an extra path only when it is 10..25.
            nxt = prev + curr if '10' <= pair <= '25' else curr
            prev, curr = curr, nxt
        return curr
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.