| | import gradio as gr |
| | import threading |
| | import time |
| | import schedule |
| | import requests |
| | import urllib.parse |
| | from huggingface_hub import HfApi, Repository |
| | import os |
| | import math |
| | from datetime import datetime, timedelta |
| | import pandas as pd |
| | from workalendar.europe import Hungary |
| | from pandas.tseries.offsets import CustomBusinessDay |
| | import numpy as np |
| | import gspread |
| | from oauth2client.service_account import ServiceAccountCredentials |
| | import base64 |
| | import json |
| | from datasets import load_dataset, DatasetDict, Dataset |
| | from gradio.themes.base import Base |
| | import matplotlib.pyplot as plt |
| | from io import BytesIO |
| | from PIL import Image |
| | import random |
| | import httpx |
| | from collections import defaultdict |
| |
|
| |
|
| |
|
| | |
| |
|
| |
|
| |
|
def medserv_authenticate():
    """Authenticate against the Medserv API and return a JWT token.

    Reads credentials from the MEDSERV_CREDS environment variable, which
    must hold a JSON object with 'azonosito' (user id) and 'jelszo'
    (password) keys. Returns the JWT string on success, or None on any
    failure (missing/invalid credentials, network error, bad response).
    """
    medserv_creds = os.getenv('MEDSERV_CREDS')

    if not medserv_creds:
        # Bug fix: this message previously said "PAYLOAD", which is not
        # the variable actually read here.
        print("Environment variable MEDSERV_CREDS not set.")
        return None

    try:
        payload = json.loads(medserv_creds)
    except json.JSONDecodeError:
        print("Failed to decode the JSON from the environment variable.")
        return None

    if 'azonosito' not in payload or 'jelszo' not in payload:
        print("Payload must contain both 'azonosito' and 'jelszo'.")
        return None

    login_url = 'https://api.lelet.medserv.hu/auth/login'

    try:
        # Timeout added so the app cannot hang forever on a dead endpoint.
        response = httpx.post(login_url, json=payload, timeout=30)
    except httpx.HTTPError as e:
        print(f"Request to auth endpoint failed: {e}")
        return None

    if response.status_code == 200:
        token = response.json().get('jwt')
        if token:
            print("Authenticated successfully, token received.")
            return token
        print("Token not found in the response.")
        return None

    print(f"Failed to authenticate. Status code: {response.status_code}")
    print(f"Response: {response.text}")
    return None
| |
|
# Authorization headers used by every Medserv API call below. The token is
# fetched once at import time; if authentication fails this becomes
# "Bearer None" and all subsequent requests will be rejected.
# NOTE(review): no re-authentication happens when the JWT expires — confirm
# token lifetime vs. Space restart cadence.
headers = {
    "Authorization": f"Bearer {medserv_authenticate()}",
    "Accept": "application/json"
}
| |
|
| |
|
| |
|
def url_conversion(include_list):
    """Serialize *include_list* to JSON and percent-encode the result so it
    can be embedded as a URL query-parameter value."""
    return urllib.parse.quote_plus(json.dumps(include_list))
| |
|
| |
|
| | |
# --- Google Sheets authentication -----------------------------------------
# GSHEETS_KEY holds a base64-encoded Google service-account JSON key.
service_account_base64 = os.getenv('GSHEETS_KEY')

try:
    service_account_json_str = base64.b64decode(service_account_base64).decode('utf-8')
except Exception as e:
    # Bug fix: previously a decode failure only printed a message and fell
    # through, producing a confusing NameError on json.loads() below.
    # Fail loudly with the real cause instead.
    raise RuntimeError("Error decoding GSHEETS_KEY service account key") from e

service_account_info = json.loads(service_account_json_str)

# Scopes required by gspread for sheet read/write access.
scope = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"]
creds = ServiceAccountCredentials.from_json_keyfile_dict(service_account_info, scope)
client = gspread.authorize(creds)
| |
|
| |
|
| | |
# Load the Smear/Share configuration sheet (peeler roster, capacities and
# tuning weights) into a DataFrame. Runs at import time; requires network
# access and a service account with read permission on the sheet.
SMEARSHARE_URL ='https://docs.google.com/spreadsheets/d/1D7w_ewX37953fAZdGfJAc_pIHpCinlpK2YSsTuT4a3U/edit?gid=0#gid=0'
smearshare_config = client.open_by_url(SMEARSHARE_URL).sheet1
smearshare_config_data = smearshare_config.get_all_records()
smearshare_config_df = pd.DataFrame(smearshare_config_data)
| |
|
| |
|
| |
|
| | |
# Load the vacation ("szabik") sheet: one row per vacation, with columns
# "Név" (name), "Szabi Kezdete" (start) and "Szabi Vége" (end) — see the
# parsing further below.
SZABIK_URL = 'https://docs.google.com/spreadsheets/d/1Dh8wCA5i3w-t7sQWecbtcj2ZlidF3KhS8S-OS2JrMPg/edit?gid=0#gid=0'

szabik_munkalap_1 = client.open_by_url(SZABIK_URL).sheet1

munkalap1_data = szabik_munkalap_1.get_all_records()
szabik_df = pd.DataFrame(munkalap1_data)
| |
|
| | |
| | |
# Reference date for today's allocation. Despite the name, days=0 means this
# is *today*; the offset is presumably a leftover toggle from a
# "distribute for tomorrow" mode — TODO confirm before renaming.
tomorrow_date = datetime.now() + timedelta(days=0)

# Same date as a YYYY-MM-DD string (shown in the UI header).
current_date = tomorrow_date.strftime("%Y-%m-%d")

# Same date as a midnight-normalized Timestamp, used for DataFrame
# comparisons against workdays_df["Workday"].
current_date_dt = pd.to_datetime(current_date)
| |
|
| |
|
| | |
| | repo_id = "drkvcsstvn/smear_share_testing" |
| | api_token = os.getenv("HF_API_TOKEN") |
| | api = HfApi(token=api_token) |
| |
|
| | |
| | DATASET_NAME = "drkvcsstvn/TEST_smearshare_cumulative_distribution_lims" |
| |
|
| | DISTRIBUTION_ACTIVITY_DATASET = "drkvcsstvn/TEST_smearshare_distribution_activity_lims" |
| |
|
| | ALLOCATION_ACTIVITY_DATASET = "drkvcsstvn/TEST_smearshare_allocation_activity_lims" |
| |
|
def restart_space():
    """Restart the Hugging Face Space; returns the API response.

    (Previously the response was bound to an unused local and discarded.)
    """
    return api.restart_space(repo_id=repo_id)
| |
|
def delete_dataset():
    """Delete the cumulative-distribution dataset repo, if it exists.

    Always returns None; failures are logged rather than raised because a
    missing repo is an expected state on first run.
    """
    try:
        api.delete_repo(repo_id=DATASET_NAME, repo_type="dataset")
    except Exception as e:
        # Keep the best-effort semantics but surface the actual reason
        # instead of swallowing it.
        print(f"Dataset not found or already deleted: {e}")
        return None
| |
|
| |
|
def schedule_restart():
    # NOTE(review): despite its name, this no longer schedules a Space
    # restart — it registers a 5-minute push of accumulated activity. The
    # name is kept because start_scheduler() references it.
    schedule.every(5).minutes.do(push_daily_activity)

    # Blocking scheduler loop; intended to run on a daemon thread
    # (see start_scheduler), polling for due jobs once a minute.
    while True:
        schedule.run_pending()
        time.sleep(60)
| |
|
def start_scheduler():
    """Run the scheduler loop on a background daemon thread so it does not
    block the Gradio app (and dies with the main process)."""
    worker = threading.Thread(target=schedule_restart, daemon=True)
    worker.start()
| |
|
| |
|
| | |
def save_cumulative_distribution(cumulative_distribution):
    """Persist {peeler id: total} to the Hub dataset, replacing the
    previous "train" split."""
    df = pd.DataFrame(list(cumulative_distribution.items()), columns=['Peeler', 'Total'])

    dataset = Dataset.from_pandas(df)

    dataset.push_to_hub(DATASET_NAME, split="train", token=api_token)
    print(f"Dataset pushed to {DATASET_NAME}")
| |
|
| | |
# In-memory buffers, flushed to the Hub by push_daily_activity().
daily_distribution_activity = {}
daily_allocation_activity = []


def save_distribution_activity(cumulative_distribution):
    """Fold a {peeler: count} distribution into the in-memory daily buffer
    (no Hub traffic here)."""
    for peeler, total in cumulative_distribution.items():
        previous = daily_distribution_activity.get(peeler, 0)
        daily_distribution_activity[peeler] = previous + total


def save_allocation_activity(onbehalf_of_user, target_selection):
    """Append one allocation record (who asked, which mode, when) to the
    in-memory daily buffer."""
    record = {
        "Timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "User": onbehalf_of_user,
        "Választó": target_selection,
    }
    daily_allocation_activity.append(record)
| |
|
def push_daily_activity():
    """ Pushes accumulated activities to the Hub once per day """
    # NOTE(review): schedule_restart() currently runs this every 5 minutes,
    # not daily — the docstring and the schedule disagree; confirm cadence.
    global daily_distribution_activity, daily_allocation_activity

    # Merge buffered per-peeler counts into the stored activity dataset.
    if daily_distribution_activity:
        df = pd.DataFrame(list(daily_distribution_activity.items()), columns=['Peeler', 'Total'])
        try:
            existing_dataset = load_dataset(DISTRIBUTION_ACTIVITY_DATASET, split="train", token=api_token)
            existing_df = existing_dataset.to_pandas()
            # Outer merge keeps peelers present on only one side; missing
            # counts are treated as 0.
            updated_df = pd.merge(existing_df, df, on='Peeler', how='outer', suffixes=('_existing', '_new'))
            updated_df['Total'] = updated_df['Total_existing'].fillna(0) + updated_df['Total_new'].fillna(0)
            updated_df = updated_df[['Peeler', 'Total']]
        except Exception:
            # First push (or load failure): start from today's buffer only.
            updated_df = df

        Dataset.from_pandas(updated_df).push_to_hub(DISTRIBUTION_ACTIVITY_DATASET, split="train", token=api_token)

    # Append buffered allocation log records to the stored dataset.
    if daily_allocation_activity:
        df = pd.DataFrame(daily_allocation_activity)
        try:
            existing_dataset = load_dataset(ALLOCATION_ACTIVITY_DATASET, split="train", token=api_token)
            existing_df = existing_dataset.to_pandas()
            updated_df = pd.concat([existing_df, df], ignore_index=True)
        except Exception:
            updated_df = df

        Dataset.from_pandas(updated_df).push_to_hub(ALLOCATION_ACTIVITY_DATASET, split="train", token=api_token)

    # Reset both buffers; clear() preserves object identity for any other
    # references to these module-level containers.
    daily_distribution_activity.clear()
    daily_allocation_activity.clear()
    print("Daily activity pushed and cleared.")
| |
|
| | |
| | |
| |
|
def load_cumulative_distribution():
    """Load the persisted {peeler id: total} state from the Hub.

    Falls back to the medserv-derived seed when the dataset exists but is
    empty, and to an all-zero dict on any load failure.
    """
    try:
        dataset = load_dataset(DATASET_NAME, split="train", token=api_token)
        if len(dataset) == 0:
            print("Dataset is empty. Initializing cumulative_distribution from medserv.")
            return cumulative_distribution_from_medserv
        df = dataset.to_pandas()
        cumulative_distribution = dict(zip(df['Peeler'], df['Total']))
        return cumulative_distribution
    except Exception as e:
        # Missing repo / network failure: start everyone at zero.
        print(f"Error loading cumulative distribution: {e}")
        return {peeler: 0 for peeler in peeler_capacities}
| |
|
| |
|
def update_cumulative_distribution(new_distribution):
    """Fold a {display name: count} distribution into the module-level
    cumulative totals, which are keyed by peeler id (e.g. "P3").

    Mutates and returns the shared ``cumulative_distribution`` dict; names
    that cannot be mapped back to a peeler id are silently ignored.
    """
    cumulative_dict = cumulative_distribution

    for name, count in new_distribution.items():
        # Reverse-lookup the peeler id for this display name.
        # (Loop variable renamed from "id" to stop shadowing the builtin.)
        peeler_id = next(
            (pid for pid, peeler_name in peeler_names.items() if peeler_name == name),
            None,
        )
        if peeler_id is not None:
            cumulative_dict[peeler_id] = cumulative_dict.get(peeler_id, 0) + count

    return cumulative_dict
| |
|
| |
|
| | |
# Lookup tables derived from the config sheet:
#   peeler id ("P1") -> display name
peeler_names = dict(zip(smearshare_config_df['peelers'], smearshare_config_df['peeler_names']))
#   peeler id -> daily capacity (weighting input)
peeler_capacities = dict(zip(smearshare_config_df['peelers'], smearshare_config_df['peeler_capacities']))
#   login code (azonosító) -> peeler id — used to exclude the requester
peeler_azo = dict(zip(smearshare_config_df['peeler_azo'], smearshare_config_df['peelers']))
#   display name -> login code
peeler_name_azo = dict(zip(smearshare_config_df['peeler_names'], smearshare_config_df['peeler_azo']))

# Fields requested from the cytology grid endpoint.
cumulative_include_list = [
    'id',
    'lelet_kelte',
    'adatok_taj',
    'lelet_eloszuro_nev',
    'lelet_leletezo_orvos_1_nev'
]

# Query window: reports from roughly the last six months.
half_year_earlier = tomorrow_date - timedelta(days=180)
lelet_kelte_start_date = half_year_earlier.strftime("%Y-%m-%d")

# Grid URL with the sort/filter expressed as URL-escaped JSON.
cumulative_distribution_url = f"https://api.lelet.medserv.hu/cytology/grid?include={url_conversion(cumulative_include_list)}&sort=%5B%7B%22letrehozva%22:-1%7D%5D&where=%7B%22and%22:%5B%7B%22lelet_kelte%22:%7B%22gte%22:%22{lelet_kelte_start_date}%22%7D%7D%5D%7D"
| |
|
def fetch_all_data(url, headers, batch_size=12000):
    """Fetch all paginated data from the specified URL.

    Repeatedly requests pages of ``batch_size`` rows (via skip/limit query
    parameters) until an empty page or an HTTP error is returned. On error
    the rows collected so far are returned (best effort), matching the
    original behavior.
    """
    all_data = []
    skip = 0

    while True:
        paginated_url = f"{url}&skip={skip}&limit={batch_size}"
        # Timeout added so a stalled API cannot hang app startup forever.
        response = requests.get(paginated_url, headers=headers, timeout=60)

        if response.status_code == 200:
            rows = response.json().get('rows', [])
            if not rows:
                break
            all_data.extend(rows)
            skip += batch_size
        else:
            print(f"Error: {response.status_code}")
            print(response.text)
            break

    return all_data
| |
|
| | |
# Pull ~6 months of cytology reports and count them per reporting doctor
# (column 'lelet_leletezo_orvos_1_nev'); the 'id' count becomes the total.
fetched_df_for_cumulative = pd.DataFrame(fetch_all_data(cumulative_distribution_url, headers))
format_fetched_df_for_cumulative = fetched_df_for_cumulative.groupby(['lelet_leletezo_orvos_1_nev']).count()['id'].reset_index()
| |
|
| | |
def format_name(name):
    """Strip "Dr. " prefixes and keep only the first two name tokens.

    Returns an empty string when fewer than two tokens remain.
    """
    tokens = name.replace("Dr. ", "").split()
    if len(tokens) < 2:
        return ''
    return f"{tokens[0]} {tokens[1]}"
| |
|
| | |
# Reduce doctor names to the two-token form used by the config sheet, keep
# only (name, count), and rename the count column to 'Total'.
format_fetched_df_for_cumulative['peeler_names'] = format_fetched_df_for_cumulative['lelet_leletezo_orvos_1_nev'].apply(format_name)
format_fetched_df_for_cumulative = format_fetched_df_for_cumulative[['peeler_names','id']]
format_fetched_df_for_cumulative.rename(columns={'id': 'Total'}, inplace=True)

# Exclude this doctor from the distribution entirely.
format_fetched_df_for_cumulative = format_fetched_df_for_cumulative[format_fetched_df_for_cumulative['peeler_names'] != 'Kiss Zsombor']
| |
|
| |
|
| | |
# Attach peeler ids from the config sheet to the fetched per-doctor totals.
merged_cumulative = pd.merge(format_fetched_df_for_cumulative, smearshare_config_df, on='peeler_names', how='left')[['peelers', 'peeler_names', 'Total']]

# Numeric part of each peeler id (e.g. 13 for "P13").
# Fix: raw string for the regex — '(\d+)' is an invalid escape sequence
# and warns on modern Python.
merged_cumulative['numeric_part'] = merged_cumulative['peelers'].str.extract(r'(\d+)').astype(float)

# Highest existing id number; new peelers continue the sequence from here.
pmax = merged_cumulative['numeric_part'].max()

# Doctors present in the API data but missing from the config sheet get
# freshly generated "P<n>" ids.
na_indices = merged_cumulative[merged_cumulative['peelers'].isna()].index
for i, index in enumerate(na_indices):
    merged_cumulative.at[index, 'peelers'] = f'P{int(pmax) + i + 1}'

merged_cumulative = merged_cumulative.drop(columns='numeric_part')

# Seed state: {peeler id: report count over the query window}.
cumulative_distribution_from_medserv = dict(zip(merged_cumulative['peelers'], merged_cumulative['Total']))
| |
|
# Persist the freshly computed totals to the Hub (overwrites old state).
save_cumulative_distribution(cumulative_distribution_from_medserv)

try:
    # Write any newly discovered peelers back into the config sheet.
    update_smearshare_config_df = pd.merge(merged_cumulative, smearshare_config_df, on=['peelers', 'peeler_names'], how='outer')

    # New rows get 0 in every numeric config column.
    numeric_columns = ['peeler_azo','peeler_capacities', 'capacity_weight', 'vacation_buffer', 'cumulative_impact', 'tomorrow_date']
    update_smearshare_config_df[numeric_columns] = update_smearshare_config_df[numeric_columns].fillna(0)

    # 'Total' is derived data and does not belong in the config sheet.
    update_smearshare_config_df = update_smearshare_config_df.drop('Total', axis=1)

    update_smearshare_config_df = update_smearshare_config_df.sort_values(by=['peelers'])

    # Replace the whole sheet: clear, then write header + all rows.
    smearshare_config.clear()

    data_to_update = [update_smearshare_config_df.columns.values.tolist()] + update_smearshare_config_df.values.tolist()

    response = smearshare_config.update(data_to_update)
    print("Update response:", response)

except Exception as e:
    print("An error occurred:", e)
    # NOTE(review): nothing further is actually printed after this label.
    print("Error details:")
| | |
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| | |
| |
|
| | |
# --- Hungarian business-day calendar --------------------------------------
cal = Hungary()
current_year = datetime.now().year

# Fetch this year's special days (extra holidays / swapped-in workdays)
# from the Medserv API.
special_days_include_list = ['datum','tipus','megnevezes']
special_days_url = f"https://api.lelet.medserv.hu/special-day/grid?include={url_conversion(special_days_include_list)}&sort=%5B%7B%22datum%22:-1%7D%5D&where=%7B%22and%22:%5B%7B%22datum%22:%7B%22contain%22:%22{current_year}%22%7D%7D%5D%7D&skip=0&limit=100"

response = requests.get(special_days_url, headers=headers)
data = response.json()

if isinstance(data, dict) and 'rows' in data and len(data['rows']) == 0:
    print("Problem: No data found.")
else:
    print("Data retrieved successfully.")

# "Munkaszüneti nap" = non-working day; "Munkanap" = working day
# (e.g. swapped working Saturday).
custom_holidays = pd.to_datetime([item['datum'] for item in data['rows'] if item['tipus'] == 'Munkaszüneti nap']).tolist()
additional_workdays = pd.to_datetime([item['datum'] for item in data['rows'] if item['tipus'] == 'Munkanap'])

# Statutory Hungarian holidays for the current year.
hungarian_holidays = cal.holidays(int(current_year))
hungarian_holidays_dates = [pd.Timestamp(date) for date, _ in hungarian_holidays]

# Statutory + API holidays, minus swapped-in workdays.
combined_holidays = sorted(set(hungarian_holidays_dates + custom_holidays) - set(additional_workdays))
custom_business_day = CustomBusinessDay(holidays=combined_holidays)

# All remaining business days from today through the end of next year.
remaining_workdays = pd.date_range(start=current_date_dt, end=f"{str(int(current_year)+1)}-12-31", freq=custom_business_day)
| |
|
| | |
| | |
| |
|
| |
|
| |
|
| | |
# --- Daily roster frame ----------------------------------------------------
# One row per calendar day in a +/- 6 month window around today.
workdays_df_start_date = datetime.now() - pd.DateOffset(months=6)
workdays_df_end_date = datetime.now() + pd.DateOffset(months=6)

workdays_df_all_days = pd.date_range(start=workdays_df_start_date, end=workdays_df_end_date)

workdays_df = pd.DataFrame(workdays_df_all_days, columns=["Workday"])

# Truncate every timestamp to midnight so rows compare equal to date-only
# Timestamps such as current_date_dt. (Replaces the previous
# strftime/to_datetime round-trip with the equivalent vectorized
# normalize().)
workdays_df["Workday"] = workdays_df["Workday"].dt.normalize()
| |
|
| | |
| | |
| |
|
# Full roster of display names from the config sheet.
workers = smearshare_config_df['peeler_names'].to_list()
| |
|
| | |
def shuffle_workers(workers):
    """Return a randomly ordered copy of *workers*; the input list itself
    is left untouched."""
    shuffled_copy = list(workers)
    np.random.shuffle(shuffled_copy)
    return shuffled_copy
| |
|
| | |
# Give each day its own independent random ordering of the full roster
# (the lambda argument is unused on purpose — a fresh shuffle per row).
workdays_df['Active'] = workdays_df['Workday'].apply(lambda x: shuffle_workers(workers))
| | |
| | |
| |
|
| |
|
| |
|
| |
|
| | |
# Parse vacation ranges and precompute the deactivation windows.
szabik_df["Szabi Kezdete"] = pd.to_datetime(szabik_df["Szabi Kezdete"])
szabik_df["Szabi Vége"] = pd.to_datetime(szabik_df["Szabi Vége"])

# One business day plus one calendar day before the vacation start —
# despite the column name this is not always exactly two days.
szabik_df["Two Days Before Start Date"] = szabik_df["Szabi Kezdete"] - custom_business_day - timedelta(days=1)

# Earlier cutoff applied to specific workers in remove_vacationers().
szabik_df["One Week Before Vacation"] = szabik_df["Szabi Kezdete"] - timedelta(weeks=1)
| |
|
| | |
| | |
def remove_vacationers(workday, active_workers):
    """Drop workers from *active_workers* (mutated in place) whose vacation
    window covers *workday*; returns the same list.

    NOTE(review): the end comparison is exclusive (``< Szabi Vége``), so a
    worker is considered active again ON the vacation end date itself —
    confirm that is intended.
    """
    for _, vacation in szabik_df.iterrows():
        # Standard rule: inactive from ~2 days before the vacation start
        # until (exclusive) the end date.
        if vacation["Two Days Before Start Date"] <= workday < vacation["Szabi Vége"]:
            if vacation["Név"] in active_workers:
                active_workers.remove(vacation["Név"])

        # These two workers additionally become inactive a full week before
        # their vacation starts.
        if vacation["Név"] in ["Bori Rita", "Serényi Péter"]:
            if vacation["One Week Before Vacation"] <= workday < vacation["Szabi Vége"]:
                if vacation["Név"] in active_workers:
                    active_workers.remove(vacation["Név"])

    return active_workers
| |
|
| | |
# Strip vacationing workers from each day's active list.
workdays_df['Active'] = workdays_df.apply(
    lambda row: remove_vacationers(row['Workday'], row['Active']), axis=1
)
| |
|
| | |
| | |
| |
|
| |
|
| |
|
| | |
| | |
def get_active_peelers(workdays_df, current_date):
    """Return the 'Active' worker list for *current_date*, or an empty list
    when the date has no row in *workdays_df*."""
    matches = workdays_df.loc[workdays_df['Workday'] == current_date, 'Active']
    if matches.empty:
        return []
    return matches.values[0]
| |
|
| |
|
| |
|
| | |
def dynamic_vacation_buffer(worker_name, workday, szabik_df, custom_business_day, grace_period_days=4, default_vacation_buffer=smearshare_config_df['vacation_buffer'].iloc[0]):
    """Workload dampening factor for a worker returning from vacation.

    Returns ``default_vacation_buffer`` on the first business day back,
    a value shrinking linearly toward 1 over the grace period, and 1 when
    the worker is outside any grace window.

    NOTE(review): the default for ``default_vacation_buffer`` is read from
    the config sheet once, at import time — later sheet edits have no
    effect until restart.
    """
    worker_vacations = szabik_df[szabik_df["Név"] == worker_name]

    for _, vacation in worker_vacations.iterrows():
        # Grace window is measured in *calendar* days here...
        if vacation["Szabi Vége"] < workday <= vacation["Szabi Vége"] + timedelta(days=grace_period_days):
            # Long vacations (> 7 days) extend the grace period by one day.
            vacation_length = (vacation["Szabi Vége"] - vacation["Szabi Kezdete"]).days
            if vacation_length > 7:
                grace_period_days += 1

            # ...but elapsed time is counted in *business* days since the
            # vacation ended (inclusive of today).
            grace_period_start = vacation["Szabi Vége"] + timedelta(days=1)
            valid_workdays = pd.date_range(start=grace_period_start, end=workday, freq=custom_business_day)
            days_since_vacation = len(valid_workdays)

            # First business day back: full buffer.
            if days_since_vacation == 1:
                return default_vacation_buffer

            # Linear decay toward 1 across the grace period (never below 1).
            diminishing_buffer = max(1, default_vacation_buffer - ((days_since_vacation / grace_period_days) * (default_vacation_buffer - 1)))
            return diminishing_buffer

    # Not in any grace window: no dampening.
    return 1
| |
|
| | |
# Load the persisted totals (falls back to the medserv seed or zeros).
cumulative_distribution = load_cumulative_distribution()
| |
|
| | |
def calculate_distribution(num_potatoes, active_peelers, peeler_names, workday, szabik_df):
    """Split *num_potatoes* units of work among *active_peelers*.

    Each peeler is weighted by configured capacity, dampened by their share
    of the historical cumulative total (raised to ``cumulative_impact``)
    and by the post-vacation buffer. Returns {display name: whole count};
    the counts sum to num_potatoes.
    """
    # Only peelers with a configured capacity can receive work.
    valid_active_peelers = [p for p in active_peelers if p in peeler_capacities]

    if not valid_active_peelers:
        return {peeler_names[p]: 0 for p in active_peelers}

    # Tuning knobs from the first row of the config sheet.
    capacity_weight = smearshare_config_df['capacity_weight'].iloc[0]
    cumulative_impact = smearshare_config_df['cumulative_impact'].iloc[0]

    adjusted_weights = {}
    for peeler in valid_active_peelers:
        capacity = peeler_capacities[peeler]

        # Peeler's share of all work done so far ("or 1" avoids div-by-0).
        total_potatoes_processed = sum(cumulative_distribution.values()) or 1
        peeler_proportion = cumulative_distribution.get(peeler, 0) / total_potatoes_processed

        # Non-linear emphasis on historical share via cumulative_impact.
        non_linear_proportion = peeler_proportion ** cumulative_impact

        # > 1 right after a vacation, decaying back to 1 over a few days.
        vacation_buffer = dynamic_vacation_buffer(peeler_names[peeler], workday, szabik_df, custom_business_day)

        # Higher historical share / fresh-from-vacation => lower weight.
        adjusted_weight = (capacity * capacity_weight) / (1 + non_linear_proportion * vacation_buffer)
        adjusted_weights[peeler] = adjusted_weight

    total_weight = sum(adjusted_weights.values())
    if total_weight == 0:
        return {peeler_names[p]: 0 for p in valid_active_peelers}

    # Proportional split, floored to whole units...
    raw_distribution = {peeler: (num_potatoes * adjusted_weights[peeler]) / total_weight for peeler in valid_active_peelers}

    whole_distribution = {peeler: math.floor(potatoes) for peeler, potatoes in raw_distribution.items()}

    # ...then hand out the flooring remainder round-robin.
    total_distributed = sum(whole_distribution.values())
    remaining_potatoes = num_potatoes - total_distributed

    if remaining_potatoes > 0:
        for i in range(remaining_potatoes):
            peeler = valid_active_peelers[i % len(valid_active_peelers)]
            whole_distribution[peeler] += 1

    return {peeler_names[peeler]: whole_distribution[peeler] for peeler in valid_active_peelers}
| |
|
| |
|
# Allocation modes offered in the UI dropdown.
target_list = ['supervisor','orvos','special']
| |
|
def distribute_potatoes(onbehalf_of_user, target, spec_peeler_azo=None):
    """Allocate one smear to a consultant and record the activity.

    onbehalf_of_user: login code (azonosító) of the requesting user; the
        requester is never chosen as the consultant.
    target: 'supervisor' (senior pool P7/P8), 'orvos' (doctor pool) or
        'special' (explicitly chosen consultant).
    spec_peeler_azo: consultant login code, required when target=='special'.

    Returns the chosen consultant's login code, or a Hungarian message
    string when allocation is impossible (None for an unknown target).
    """
    active_peelers = get_active_peelers(workdays_df, current_date_dt)

    if not active_peelers:
        return "A mai napon nem adható fel kenet konzultációra."

    active_peeler_ids = {pid: name for pid, name in peeler_names.items() if name in active_peelers}

    # Candidate pools; the requester's own id is excluded from both.
    red_peelers = [pid for pid in active_peeler_ids if pid in ["P7", "P8"] and pid != peeler_azo.get(onbehalf_of_user)]
    blue_peelers = [pid for pid in active_peeler_ids if pid in ["P1", "P13", "P2", "P3", "P4", "P5", "P6"] and pid != peeler_azo.get(onbehalf_of_user)]

    random.shuffle(red_peelers)
    random.shuffle(blue_peelers)

    def allocate_from_pool(pool):
        # Allocate a single smear within *pool*, persist the updated
        # totals, log the activity, and return the winner's login code.
        # (Deduplicates the previously copy-pasted supervisor/orvos
        # branches.)
        if not pool:
            # Bug fix: an empty pool previously crashed with IndexError
            # on final_name[0].
            return "A mai napon nem adható fel kenet konzultációra."
        final_distribution = calculate_distribution(1, pool, peeler_names, current_date_dt, szabik_df)
        winners = [name for name, count in final_distribution.items() if count > 0]
        if not winners:
            return "A mai napon nem adható fel kenet konzultációra."
        updated = update_cumulative_distribution(final_distribution)
        print(updated)
        save_distribution_activity(final_distribution)
        save_allocation_activity(onbehalf_of_user, "general")
        return peeler_name_azo.get(winners[0])

    if target == 'supervisor':
        return allocate_from_pool(red_peelers)

    elif target == 'orvos':
        return allocate_from_pool(blue_peelers)

    elif target == 'special':
        if spec_peeler_azo is None:
            return "No spec_peeler_azo provided for special distribution."
        if spec_peeler_azo not in get_active_peelers_azo():
            return " A kiválasztott konzulens nem aktív"

        # Map the login code back to a display name.
        azo_to_peeler_name = {v: k for k, v in peeler_name_azo.items()}
        spec_peeler_name = azo_to_peeler_name.get(spec_peeler_azo)

        # The chosen consultant receives the single smear directly.
        final_distribution = {spec_peeler_name: 1}

        updated = update_cumulative_distribution(final_distribution)
        print(updated)
        save_distribution_activity(final_distribution)
        save_allocation_activity(onbehalf_of_user, "special")
        return spec_peeler_azo
| |
|
| |
|
| |
|
| | |
| |
|
| |
|
class Seafoam(Base):
    """Placeholder Gradio theme; currently identical to the Base theme."""
    pass

gradio_theme = Seafoam()
| |
|
def get_active_peelers_azo():
    """Login codes (azonosító) of every consultant active today.

    Fix: the active-worker lookup is hoisted out of the comprehension —
    previously get_active_peelers() was re-evaluated for every dict entry.
    """
    todays_active = get_active_peelers(workdays_df, current_date_dt)
    return [azo for name, azo in peeler_name_azo.items() if name in todays_active]
| |
|
| |
|
| | |
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks(theme=gradio_theme) as demo:
    gr.Markdown(f"<h1 style='text-align: center; font-weight: bold;'>Smear/Share - {current_date}</h1>")

    with gr.Column():
        user_input = gr.Number(label="Onbehalf_of_user")
        target_input = gr.Dropdown(choices=target_list, label="Target_list")
        # NOTE(review): choices are computed once at startup; the dropdown
        # does not refresh if the active roster changes during the day.
        special_input = gr.Dropdown(choices=get_active_peelers_azo(), label="Specifikus konzulens")
        with gr.Row():
            share_button = gr.Button("Szétosztás", variant="primary")
            active_peelers_button = gr.Button("Aktív konzulensek", variant="primary")
        with gr.Column():
            output_box = gr.Textbox(label="Konzulens")
            active_peelers_output = gr.Textbox(label="Aktív dolgozók")

    # Allocate, show the chosen consultant, then clear all three inputs.
    share_button.click(
        fn=lambda user, target, special: (
            distribute_potatoes(user, target, special)
        ),
        inputs=[user_input, target_input, special_input],
        outputs=[output_box]
    ).then(
        fn=lambda: (None, None, None),
        inputs=[],
        outputs=[user_input, target_input, special_input]
    )

    # Show today's active consultant login codes.
    active_peelers_button.click(
        fn= get_active_peelers_azo,
        outputs=[active_peelers_output]
    )
| |
|
| | |
| |
|
| | |
# NOTE(review): start_scheduler() is defined but never called, so the
# 5-minute activity push never runs — confirm whether that is intentional.
demo.launch()
| |
|
| |
|