seq_id
string
text
string
repo_name
string
sub_path
string
file_name
string
file_ext
string
file_size_in_byte
int64
program_lang
string
lang
string
doc_type
string
stars
int64
dataset
string
pt
string
api
list
73505701627
# coding=utf-8 # # /************************************************************************** # *** # *** File Author: Dell, 2018年 09月 18日 星期二 16:28:12 CST # *** # **************************************************************************/ # import os import sys import logging import argparse import model parser = argparse.ArgumentParser(description='Train Image Classificer Model') parser.add_argument( '-root-dir', type=str, default=model.DEFAULT_TRAIN_DATA_ROOT_DIR, help='train data root directory, default: ' + model.DEFAULT_TRAIN_DATA_ROOT_DIR) parser.add_argument( '-epochs', type=int, default=32, help='number of epochs for train, default: 32') parser.add_argument( '-batch-size', type=int, default=64, help='batch size for training, default: 64') parser.add_argument( '-device', type=str, default="cuda:0", help='cuda:0 or cpu, default: cuda:0') def makedirs(): for d in ["logs", "model"]: if not os.path.exists(d): os.mkdir(d) if not os.path.isdir(d): logging.error( "Please create dir 'logs' or 'model' under current directory.") raise Exception("logs or model is not directory.") if __name__ == '__main__': args = parser.parse_args() if (not os.path.exists(args.root_dir)) or (not os.path.isdir( args.root_dir)): logging.error(args.root_dir + ' is not director or not exists.') sys.exit(-1) makedirs() data = model.train_data_loader(args.root_dir, args.batch_size) net = model.load_model(args.device, model.DEFAULT_MODEL) model.train_model(args.device, net, data, args.epochs)
delldu/ImageCNN
train.py
train.py
py
1,707
python
en
code
4
github-code
6
[ { "api_name": "argparse.ArgumentParser", "line_number": 17, "usage_type": "call" }, { "api_name": "model.DEFAULT_TRAIN_DATA_ROOT_DIR", "line_number": 22, "usage_type": "attribute" }, { "api_name": "model.DEFAULT_TRAIN_DATA_ROOT_DIR", "line_number": 24, "usage_type": "attr...
27641421937
# by filtering stock that is in the range of 0.5 to 2 pct difference import os from dotenv import load_dotenv load_dotenv() import os from supabase import create_client import numpy as np import pandas as pd import requests from datetime import datetime from io import StringIO def preprocess_numeric_value(value): if pd.isna(value) or value == 0.0: return np.nan str_value = str(value) if 'T' in str_value.upper(): return float(str_value.upper().replace('T', '')) * 1e12 elif 'B' in str_value.upper(): return float(str_value.upper().replace('B', '')) * 1e9 elif 'M' in str_value.upper(): return float(str_value.upper().replace('M', '')) * 1e6 elif 'K' in str_value.upper(): return float(str_value.upper().replace('K', '')) * 1e3 else: return float(value) def preprocess_percentage_value(value): if pd.isna(value): return np.nan if '%' in str(value): return float(str(value).replace('%', '').replace(',', ''))/100 else: return float(str(value)) def calculate_growth(row, y, type=None): try: year2 = row[y] if type == 'revenue': year1 = row['total_revenue']/row['multiplier'] else: year1 = row['basic_eps'] if pd.isna(year1) or year1 == 0.0: return np.nan else: return (year2 - year1) / year1 except (ValueError, KeyError): return np.nan url = os.environ.get("SUPABASE_URL") key = os.environ.get("SUPABASE_KEY") supabase = create_client(url, key) key_data = supabase.table("idx_company_profile").select("symbol","sub_sector_id").execute() key_df = pd.DataFrame(key_data.data).sort_values(['symbol']) symbols = key_df['symbol'].to_list() headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/113.0'} all_list = { 'revenue_year_ago' : [], 'avg_estimate_earnings_current_year': [], 'avg_estimate_earnings_next_year': [], 'avg_estimate_revenue_current_year': [], 'avg_estimate_revenue_next_year': [], 'estimate_overall_growth_current_year': [], 'estimate_overall_growth_next_year': [], 'estimate_overall_growth_next_five_years': [], } for symbol in symbols: 
try: url = f'https://finance.yahoo.com/quote/{symbol}/analysis?p={symbol}' html_content = requests.get(url, headers=headers).text source_df = pd.read_html(StringIO(html_content)) earnings_df = source_df[0] revenue_df = source_df[1] overall_growth_df = source_df[5] avg_estimate_earnings_current_year = earnings_df.loc[earnings_df['Earnings Estimate'] == 'Avg. Estimate'].iloc[:, 3].values[0] avg_estimate_earnings_next_year = earnings_df.loc[earnings_df['Earnings Estimate'] == 'Avg. Estimate'].iloc[:, 4].values[0] avg_estimate_revenue_current_year = revenue_df.loc[revenue_df['Revenue Estimate'] == 'Avg. Estimate'].iloc[:, 3].values[0] year_ago = revenue_df.loc[revenue_df['Revenue Estimate'] == 'Year Ago Sales'].iloc[:, 3].values[0] avg_estimate_revenue_next_year = revenue_df.loc[revenue_df['Revenue Estimate'] == 'Avg. Estimate'].iloc[:, 4].values[0] estimate_overall_growth_current_year = overall_growth_df.loc[overall_growth_df['Growth Estimates'] == 'Current Year'].iloc[0, :].values[1] estimate_overall_growth_next_year = overall_growth_df.loc[overall_growth_df['Growth Estimates'] == 'Next Year'].iloc[0, :].values[1] estimate_overall_growth_next_five_years = overall_growth_df.loc[overall_growth_df['Growth Estimates'] == 'Next 5 Years (per annum)'].iloc[0, :].values[1] all_list['avg_estimate_earnings_current_year'].append(avg_estimate_earnings_current_year) all_list['avg_estimate_earnings_next_year'].append(avg_estimate_earnings_next_year) all_list['avg_estimate_revenue_current_year'].append(avg_estimate_revenue_current_year) all_list['avg_estimate_revenue_next_year'].append(avg_estimate_revenue_next_year) all_list['revenue_year_ago'].append(year_ago) all_list['estimate_overall_growth_current_year'].append(estimate_overall_growth_current_year) all_list['estimate_overall_growth_next_year'].append(estimate_overall_growth_next_year) all_list['estimate_overall_growth_next_five_years'].append(estimate_overall_growth_next_five_years) print(f"{symbol} data processed") except 
Exception as e: for key in all_list.keys(): all_list[key].append(np.nan) print(f"{symbol} no data") data_dict = { 'symbol': symbols, **all_list, } forecast_df = pd.DataFrame.from_dict(data_dict) current_year = datetime.now().year last_year= f"{current_year-1}-12-31" db_data = supabase.table("idx_financials_annual").select("symbol","total_revenue","basic_eps").eq("date", last_year).execute() db_df = pd.DataFrame(db_data.data).sort_values(['symbol']) df = forecast_df.merge(db_df, on='symbol', how='inner').merge(key_df, on='symbol', how='inner') numeric_columns = ['avg_estimate_earnings_current_year', 'avg_estimate_earnings_next_year', 'avg_estimate_revenue_current_year', 'avg_estimate_revenue_next_year','revenue_year_ago'] for column in numeric_columns: df[column] = df[column].apply(preprocess_numeric_value) percentage_columns = ['estimate_overall_growth_current_year', 'estimate_overall_growth_next_year', 'estimate_overall_growth_next_five_years'] for percentage_column in percentage_columns: df[str(percentage_column)] = df[str(percentage_column)].apply(preprocess_percentage_value) df['multiplier'] = 1 df_1000 = df.copy() df_1000['multiplier'] = 1000 df_1000['revenue_year_ago'] = df_1000['revenue_year_ago'] * df_1000['multiplier'] growth_forecast_df = pd.concat([df, df_1000], axis=0, ignore_index=True) growth_forecast_df = growth_forecast_df.sort_values(by=["symbol", "multiplier"]) growth_forecast_df['ratio_mult'] = growth_forecast_df['total_revenue']/ growth_forecast_df['revenue_year_ago'] growth_forecast_df = growth_forecast_df.query("ratio_mult > 0.5 and ratio_mult < 2") growth_forecast_df.to_csv('idx_company_rev_year_ago_filtered.csv',index = False) numeric_columns = ['avg_estimate_earnings_current_year', 'avg_estimate_earnings_next_year', 'avg_estimate_revenue_current_year', 'avg_estimate_revenue_next_year'] for column in numeric_columns: type = None if 'revenue' in column: type = 'revenue' growth_forecast_df[f'{column[4:12]}_growth_{column[13:]}'] = 
growth_forecast_df.apply(calculate_growth, y=column, type=type, axis=1) final_df = growth_forecast_df[['symbol','sub_sector_id','estimate_overall_growth_current_year','estimate_overall_growth_next_year','estimate_overall_growth_next_five_years','avg_estimate_earnings_current_year','avg_estimate_earnings_next_year','estimate_growth_earnings_current_year','estimate_growth_earnings_next_year','avg_estimate_revenue_current_year','avg_estimate_revenue_next_year','estimate_growth_revenue_current_year','estimate_growth_revenue_next_year']] final_df.columns = ['symbol','sub_sector_id','overall_growth_current_year_f','overall_growth_next_year_f','overall_growth_next_five_years_f','avg_eps_current_year','avg_eps_next_year','eps_growth_current_year_f','eps_growth_next_year_f','avg_revenue_current_year','avg_revenue_next_year','revenue_growth_current_year_f','revenue_growth_next_year_f'] final_df.to_csv('idx_company_growth_forecast.csv',index = False) try: result = supabase.table("idx_company_growth_forecast").upsert(final_df.to_dict(orient='records'), returning='minimal', on_conflict=['symbol']) print("Upsert operation successful.") except Exception as e: print(f"Error during upsert operation: {e}")
supertypeai/sectors_forecast_growth_rate
code/main_v2.py
main_v2.py
py
7,738
python
en
code
0
github-code
6
[ { "api_name": "dotenv.load_dotenv", "line_number": 5, "usage_type": "call" }, { "api_name": "pandas.isna", "line_number": 15, "usage_type": "call" }, { "api_name": "numpy.nan", "line_number": 16, "usage_type": "attribute" }, { "api_name": "pandas.isna", "line_...
45386146936
""" Serialize data to/from JSON """ # Avoid shadowing the standard library json module from __future__ import absolute_import from __future__ import unicode_literals import datetime import decimal import json import sys from theory.core.serializers.base import DeserializationError from theory.core.serializers.python import Serializer as PythonSerializer from theory.core.serializers.python import Deserializer as PythonDeserializer from theory.utils import six from theory.utils.timezone import isAware class Serializer(PythonSerializer): """ Convert a queryset to JSON. """ internalUseOnly = False def startSerialization(self): if json.__version__.split('.') >= ['2', '1', '3']: # Use JS strings to represent Python Decimal instances (ticket #16850) self.options.update({'useDecimal': False}) self._current = None self.jsonKwargs = self.options.copy() self.jsonKwargs.pop('stream', None) self.jsonKwargs.pop('fields', None) if self.options.get('indent'): # Prevent trailing spaces self.jsonKwargs['separators'] = (',', ': ') self.stream.write("[") def endSerialization(self): if self.options.get("indent"): self.stream.write("\n") self.stream.write("]") if self.options.get("indent"): self.stream.write("\n") def endObject(self, obj): # self._current has the field data indent = self.options.get("indent") if not self.first: self.stream.write(",") if not indent: self.stream.write(" ") if indent: self.stream.write("\n") json.dump(self.getDumpObject(obj), self.stream, cls=TheoryJSONEncoder, **self.jsonKwargs) self._current = None def getvalue(self): # Grand-parent super return super(PythonSerializer, self).getvalue() def Deserializer(streamOrString, **options): """ Deserialize a stream or string of JSON data. 
""" if not isinstance(streamOrString, (bytes, six.stringTypes)): streamOrString = streamOrString.read() if isinstance(streamOrString, bytes): streamOrString = streamOrString.decode('utf-8') try: objects = json.loads(streamOrString) for obj in PythonDeserializer(objects, **options): yield obj except GeneratorExit: raise except Exception as e: # Map to deserializer error six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2]) class TheoryJSONEncoder(json.JSONEncoder): """ JSONEncoder subclass that knows how to encode date/time and decimal types. """ def default(self, o): # See "Date Time String Format" in the ECMA-262 specification. if isinstance(o, datetime.datetime): r = o.isoformat() if o.microsecond: r = r[:23] + r[26:] if r.endswith('+00:00'): r = r[:-6] + 'Z' return r elif isinstance(o, datetime.date): return o.isoformat() elif isinstance(o, datetime.time): if isAware(o): raise ValueError("JSON can't represent timezone-aware times.") r = o.isoformat() if o.microsecond: r = r[:12] return r elif isinstance(o, decimal.Decimal): return str(o) else: return super(TheoryJSONEncoder, self).default(o) # Older, deprecated class name (for backwards compatibility purposes). DateTimeAwareJSONEncoder = TheoryJSONEncoder
grapemix/theory
theory/core/serializers/json.py
json.py
py
3,323
python
en
code
1
github-code
6
[ { "api_name": "theory.core.serializers.python.Serializer", "line_number": 21, "usage_type": "name" }, { "api_name": "json.__version__.split", "line_number": 28, "usage_type": "call" }, { "api_name": "json.__version__", "line_number": 28, "usage_type": "attribute" }, {...
9224541444
from flask import Flask from flask import request, jsonify import json, os, util, pickle app = Flask(__name__) SIMULATION_RESULT_PATH = './sim_result' from flask_cors import CORS CORS(app) def load(path): with open(path, 'rb') as f: obj = pickle.load(f) return obj @app.route("/") def hello_world(): return "<p>InterSim Beta Server is running.</p>" @app.route('/vis', methods=['GET', 'POST']) def visualization_get(): sim_name = request.args.get('sim') scene_id = request.args.get('sceneid') file_id = request.args.get('fileid') if request.method == 'GET': with open(f"sim_result/{sim_name}/json/{file_id}.json", "r") as json_file: my_dict = json.load(json_file) keys = list(my_dict.keys()) my_dict['selected_scene_index'] = keys.index(scene_id) return jsonify(my_dict) @app.route('/list_scenarios') def get_simulations(): return summary_simulations() @app.route('/list_simulation_selection') def get_simulation_selection(): dataset = request.args.get('dataset') return list_simulation_selection(dataset=dataset) @app.route('/list_senarios') def get_scenarios_list(): simulation_name = request.args.get('simulation') return list_scenarios(simulation_name) def check_path_valid(path): if not os.path.isdir(path): return False if not os.path.exists(os.path.join(path, 'sim.info')): return False return True def summary_simulations(path=SIMULATION_RESULT_PATH): html_str = "" # loop all simulations and load their info and add to the html table for each_path in os.listdir(path): if not check_path_valid(os.path.join(path, each_path)): continue sim_info = load(os.path.join(path, each_path, 'sim.info')) # dataset_with_map = '-' + sim_info['map_info'] if 'map_info' in sim_info and sim_info['map_info'] is not None else '' # dataset_with_map = sim_info['dataset'] + dataset_with_map dataset_with_map = sim_info['dataset'] html_str += f"<tr><td>{sim_info['name']}</td>" \ f"<td>{sim_info['task']}</td>" \ f"<td>{dataset_with_map}</td>" \ f"<td>{sim_info['planner']}</td>" \ 
f"<td>{sim_info['predictor']}</td>" \ f"<td>{sim_info['status']}</td>" \ f"<td>{sim_info['starting_time']}</td>" html_str += f"<td>{sim_info['ending_time']}</td>" if sim_info['ending_time'] is not None else "<td>-</td>" # add action drop html_str += f''' <td> <div class="dropdown"> <a class="dropdown-toggle icon-burger-mini" href="#" role="button" id="dropdownMenuLink" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false"> </a> <div class="dropdown-menu dropdown-menu-right" aria-labelledby="dropdownMenuLink"> <a class="dropdown-item" href="list.html?task={sim_info['task']}&dataset={dataset_with_map}&sim={each_path}">Detail</a> </div> </div> </td> ''' html_str += "</tr>" return html_str def list_simulation_selection(dataset=None, path=SIMULATION_RESULT_PATH): if dataset is None: return html_str = "" for each_path in os.listdir(path): if not check_path_valid(os.path.join(path, each_path)): continue if dataset not in each_path: continue html_str += f"<option value=\"{each_path}\">{each_path}</option>" return html_str def list_scenarios(simulation_name=None): if simulation_name is None: return simulation_path = os.path.join(SIMULATION_RESULT_PATH, simulation_name) if not os.path.exists(simulation_path): return html_str = """ <thead> <tr> <th>Scenario id</th> <th>Collsion Rate</th> <th>Progress (m)</th> <th></th> </tr> </thead> <tbody id="scenario_list"> """ for each_playback_path in os.listdir(os.path.join(simulation_path, 'playback')): loaded_playback = load(os.path.join(simulation_path, 'playback', each_playback_path)) for each_scenario_id in loaded_playback: html_str += "<tr class=\"list-group-item-action\">" metric_rst = loaded_playback[each_scenario_id]['metrics'] task = loaded_playback[each_scenario_id]['info']['task'] dataset = loaded_playback[each_scenario_id]['info']['dataset'] collision_rate = len(metric_rst['collided_pairs']) progress = metric_rst['progress'] # jerk = "N/A" # metric_rst['jerk'] html_str += f""" <td>{each_scenario_id}</td> 
<td>{collision_rate}</td> <td>{progress}</td> """ file_name = each_playback_path.split('.playback')[0] html_str += f""" <td> <div class="dropdown"> <a class="dropdown-toggle icon-burger-mini" href="#" role="button" id="dropdownMenuLink" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false"> </a> <div class="dropdown-menu dropdown-menu-right" aria-labelledby="dropdownMenuLink"> <a class="dropdown-item" href="visualization-detail.html?task={task}&sim={simulation_name}&fileid={file_name}&sceneid={each_scenario_id}">Visualize</a> </div> </div> </td> </tr> """ html_str += """</tbody> </table>""" return html_str
Tsinghua-MARS-Lab/InterSim
simulator/dashboard_server.py
dashboard_server.py
py
5,757
python
en
code
119
github-code
6
[ { "api_name": "flask.Flask", "line_number": 4, "usage_type": "call" }, { "api_name": "flask_cors.CORS", "line_number": 8, "usage_type": "call" }, { "api_name": "pickle.load", "line_number": 12, "usage_type": "call" }, { "api_name": "flask.request.args.get", "l...
72307131709
import apace as ap import matplotlib.pyplot as plt import numpy as np from fodo import make_fodo from master_thesis import figure_path angles = [0, np.pi / 8, np.pi / 4] fodo = make_fodo(angle=angles[1]) d1, b1, q1, q2 = (fodo[name] for name in ("d1", "b1", "q1", "q2")) twiss = ap.Twiss(fodo) steps = 1000 lengths = [0.5, 1.0, 1.5] n_rows, n_cols = len(lengths) + 1, len(angles) fig, axs = plt.subplots( nrows=n_rows, ncols=n_cols, figsize=(4 * n_cols, 2.5 * n_rows), gridspec_kw={"height_ratios": [0.01, 1, 1, 1]}, ) for ax, angle in zip(axs[0], angles): ax.axis("off") ax.set_title( f"Dipole angle ({np.degrees(angle)}° per cell)", fontweight="bold", pad=0 ) for column, angle in zip(axs[1:].T, angles): b1.angle = angle b1.e1 = b1.e2 = 0.5 * angle for ax, length in zip(column, lengths): d1.length = length # breakpoint() extent = 0, 2, 0, -2 results = np.empty((steps, steps)) for i, q1.k1 in enumerate(np.linspace(*extent[:2], steps)): for j, q2.k1 in enumerate(np.linspace(*extent[2:], steps)): try: results[i, j] = np.mean(twiss.beta_x) + np.mean(twiss.beta_y) except ap.UnstableLatticeError: results[i, j] = np.nan image = ax.imshow( results.T, extent=extent, origin="lower", vmin=0, vmax=30, cmap="cool" ) ax.set_xlabel(f"$k_\\mathrm{{{q1.name}}}$ / m$^{{-2}}$") ax.set_ylabel(f"$k_\\mathrm{{{q2.name}}}$ / m$^{{-2}}$") ax.set_title(f"cell length: {fodo.length} m") colorbar = fig.colorbar(image, ax=ax) colorbar.ax.set_title(r"$\beta_\mathrm{mean}$", fontsize=12, pad=10) plt.tight_layout() plt.savefig(figure_path / "necktie-plot.svg")
andreasfelix/master-thesis
code/lattice-design/fodo/necktie_plot.py
necktie_plot.py
py
1,796
python
en
code
0
github-code
6
[ { "api_name": "numpy.pi", "line_number": 8, "usage_type": "attribute" }, { "api_name": "fodo.make_fodo", "line_number": 9, "usage_type": "call" }, { "api_name": "apace.Twiss", "line_number": 11, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.subplots", ...
39159138446
from numpy import squeeze, real, mean, pi, float16, array, float16, reshape, float32 from scipy import ndimage as ndi from skimage.filters import gabor_kernel from skimage.feature import hog from skimage import feature import cv2 import numpy as np from skimage.transform import rescale, resize, downscale_local_mean import pywt def hogFeature(normalizedIrisPatch, regions): # regions: [(x1, x2), (x3, x4), (x5, x6), ...] upperCutHeight = 10 # HOG Features hogFea = [] for reg in regions: croppedImage = normalizedIrisPatch[upperCutHeight:, reg[0]:reg[1]] hog_cur = hog(croppedImage, orientations=6, pixels_per_cell=(32, 32), cells_per_block=(1, 1)) hog_cur = array(hog_cur, float32) hogFea.append(hog_cur) hogFea = array(hogFea, dtype=float32) hogFea = reshape(hogFea, (hogFea.shape[0] * hogFea.shape[1],1)) hogFea = hogFea.tolist() return hogFea def lbpFeature(normalizedIrisPatch, regions): # regions: [(x1, x2), (x3, x4), (x5, x6), ...] P = 16 upperCutHeight = 10 # LBP Features lbpFea = [] for reg in regions: croppedImage = normalizedIrisPatch[upperCutHeight:, reg[0]:reg[1]] lbp = feature.local_binary_pattern(croppedImage, 16, 2, method='uniform') hist, _ = np.histogram(lbp, normed=True, bins=P + 2, range=(0, P + 2)) lbpFea.append(hist) lbpFea = array(lbpFea, dtype=float32) lbpFea = reshape(lbpFea, (lbpFea.shape[0] * lbpFea.shape[1],1)) lbpFea = lbpFea.tolist() return lbpFea def gaborFeature(normalizedIrisPatch, regions): # regions: [(x1, x2), (x3, x4), (x5, x6), ...] 
upperCutHeight = 10 # Gabor Features kernels = [] freqs = [0.1, 0.2, 0.3, 0.4, 0.5] nTheta = 8 for theta in range(nTheta): theta = theta / float16(nTheta) * pi sigma = 1 for frequency in freqs: kernel = real(gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma)) kernels.append(kernel) gaborFea = [] for reg in regions: croppedImage = normalizedIrisPatch[upperCutHeight:, reg[0]:reg[1]] gaborFea_cur = [] for k, kernel in enumerate(kernels): filteredIris = ndi.convolve(croppedImage, kernel, mode='wrap') gaborFea_cur.append(mean(filteredIris * filteredIris)) gaborFea_cur = array(gaborFea_cur, float32) gaborFea.append(gaborFea_cur) gaborFea = array(gaborFea, dtype=float32) gaborFea = reshape(gaborFea, (gaborFea.shape[0] * gaborFea.shape[1],1)) gaborFea =gaborFea.tolist() return gaborFea def extract_image_feature(image, regions, downSampleSize): # regions: [(x1, x2), (x3, x4), (x5, x6), ...] upperCutHeight = 10 # Pixel Features pixelFea = [] for reg in regions: croppedImage = image[upperCutHeight:, reg[0]:reg[1]] downSampledReg = rescale(croppedImage, 1.0 / float16(downSampleSize), preserve_range=True) pixelFea.append(reshape(downSampledReg, (downSampledReg.shape[0]*downSampledReg.shape[1],))) pixelFea = array(pixelFea, dtype=float32) pixelFea = reshape(pixelFea, (pixelFea.shape[0]*pixelFea.shape[1], 1)) pixelFea = pixelFea.tolist() return pixelFea
NaghmeNazer/diabetes-iridology
featureExtraction.py
featureExtraction.py
py
3,245
python
en
code
6
github-code
6
[ { "api_name": "skimage.feature.hog", "line_number": 19, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 20, "usage_type": "call" }, { "api_name": "numpy.float32", "line_number": 20, "usage_type": "argument" }, { "api_name": "numpy.array", "...
7233973656
import os import json import numpy as np from ortools.sat.python import cp_model def solve(all_block, wafer_width, wafer_height): ### wafer sampling # Number of blocks n = len(all_block) # wafer Variables all_wafer_x_st, all_wafer_y_st, all_wafer_x_ed, all_wafer_y_ed, sampled = [], [], [], [], [] for i, block in enumerate(all_block): if block['x'] == None: all_wafer_x_st.append(model.NewIntVar(0, wafer_width - block['w'], f"wafer_x{i}")) else: all_wafer_x_st.append(model.NewIntVar(block['x'], block['x'], f"wafer_x{i}")) all_wafer_x_ed.append(model.NewIntVar(-wafer_width, wafer_width, f"wafer_x_end{i}")) if block['y'] == None: all_wafer_y_st.append(model.NewIntVar(0, wafer_height - block['h'], f"wafer_y{i}")) else: all_wafer_y_st.append(model.NewIntVar(block['y'], block['y'], f"wafer_y{i}")) all_wafer_y_ed.append(model.NewIntVar(-wafer_height, wafer_height, f"wafer_y_end{i}")) if block['x'] == None and block['y'] == None: sampled.append(model.NewBoolVar(f"sampled_{i}")) else: sampled.append(model.NewConstant(1)) # wafer width & height constraints for i, block in enumerate(all_block): model.Add(all_wafer_x_ed[i] == all_wafer_x_st[i] + block['w']).OnlyEnforceIf(sampled[i]) model.Add(all_wafer_y_ed[i] == all_wafer_y_st[i] + block['h']).OnlyEnforceIf(sampled[i]) model.Add(all_wafer_x_ed[i] <= wafer_width).OnlyEnforceIf(sampled[i]) model.Add(all_wafer_y_ed[i] <= wafer_height).OnlyEnforceIf(sampled[i]) # wafer Non-overlapping constraints for i in range(n): for j in range(i + 1, n): wafer_bx_ij = model.NewBoolVar(f"wafer_bx_{i}_{j}") wafer_bx_ji = model.NewBoolVar(f"wafer_bx_{j}_{i}") wafer_by_ij = model.NewBoolVar(f"wafer_by_{i}_{j}") wafer_by_ji = model.NewBoolVar(f"wafer_by_{j}_{i}") model.Add(all_wafer_x_ed[i] <= all_wafer_x_st[j] + wafer_width * wafer_bx_ij) model.Add(all_wafer_x_ed[j] <= all_wafer_x_st[i] + wafer_width * wafer_bx_ji) model.Add(all_wafer_y_ed[i] <= all_wafer_y_st[j] + wafer_height * wafer_by_ij) model.Add(all_wafer_y_ed[j] <= all_wafer_y_st[i] + 
wafer_height * wafer_by_ji) model.AddBoolOr([wafer_bx_ij.Not(), wafer_bx_ji.Not(), wafer_by_ij.Not(), wafer_by_ji.Not()]) ### place to sample panel panel_width = 6 panel_height = 6 # panel Variables all_panel_x_st, all_panel_y_st, all_panel_x_ed, all_panel_y_ed, on_panel = [], [], [], [], [] for i, block in enumerate(all_block): all_panel_x_st.append(model.NewIntVar(0, panel_width - block['w'], f"panel_x{i}")) all_panel_x_ed.append(model.NewIntVar(-panel_width, panel_width, f"panel_x_end{i}")) all_panel_y_st.append(model.NewIntVar(0, panel_height - block['h'], f"panel_y{i}")) all_panel_y_ed.append(model.NewIntVar(-panel_height, panel_height, f"panel_y_end{i}")) # on_panel.append(model.NewBoolVar(f"on_panel_{i}")) # panel width & height constraints for i, block in enumerate(all_block): model.Add(all_panel_x_ed[i] == all_panel_x_st[i] + block['w']).OnlyEnforceIf(sampled[i]) model.Add(all_panel_y_ed[i] == all_panel_y_st[i] + block['h']).OnlyEnforceIf(sampled[i]) model.Add(all_panel_x_ed[i] <= panel_width).OnlyEnforceIf(sampled[i]) model.Add(all_panel_y_ed[i] <= panel_height).OnlyEnforceIf(sampled[i]) # panel Non-overlapping constraints for i in range(n): for j in range(i + 1, n): panel_bx_ij = model.NewBoolVar(f"panel_bx_{i}_{j}") panel_bx_ji = model.NewBoolVar(f"panel_bx_{j}_{i}") panel_by_ij = model.NewBoolVar(f"panel_by_{i}_{j}") panel_by_ji = model.NewBoolVar(f"panel_by_{j}_{i}") model.Add(all_panel_x_ed[i] <= all_panel_x_st[j] + panel_width * panel_bx_ij) model.Add(all_panel_x_ed[j] <= all_panel_x_st[i] + panel_width * panel_bx_ji) model.Add(all_panel_y_ed[i] <= all_panel_y_st[j] + panel_height * panel_by_ij) model.Add(all_panel_y_ed[j] <= all_panel_y_st[i] + panel_height * panel_by_ji) model.AddBoolOr([panel_bx_ij.Not(), panel_bx_ji.Not(), panel_by_ij.Not(), panel_by_ji.Not()]) # panel must be filled by blocks model.Add(sum(sampled[i] * block['w'] * block['h'] for i, block in enumerate(all_block)) == panel_width * panel_height) # Objective function wafer_area = 
wafer_width * wafer_height blocks_area = model.NewIntVar(0, wafer_area, "blocks_area") model.Add( blocks_area == sum( sampled[i] * block['w'] * block['h'] for i, block in enumerate(all_block))) num_blocks_sampled = model.NewIntVar(0, n, "num_blocks_sampled") model.Add(num_blocks_sampled == sum(sampled[i] for i, block in enumerate(all_block))) scale = 1000000 # wafer_coverage wafer_coverage = model.NewIntVar(0, 1 * scale, "wafer_coverage") model.AddDivisionEquality(wafer_coverage, blocks_area * scale, wafer_area) # block utilization block_utilization = model.NewIntVar(0, 1 * scale, "block_utilization") model.AddDivisionEquality( block_utilization, num_blocks_sampled * scale, n) # model.Maximize(wafer_coverage * (1 / scale) - # block_utilization * (1 / scale)) # model.Maximize(wafer_coverage * (1 / scale)) model.Maximize(-block_utilization * (1 / scale)) # Solve the model solver = cp_model.CpSolver() status = solver.Solve(model) # Print if status == cp_model.OPTIMAL: wafer_positions = [(solver.Value(all_wafer_x_st[i]), solver.Value(all_wafer_y_st[i])) for i in range(n)] panel_positions = [(solver.Value(all_panel_x_st[i]), solver.Value(all_panel_y_st[i])) for i in range(n)] print(f"wafer_positions: {wafer_positions}\n" f"panel_positions: {panel_positions}\n" f"num_blocks_sampled: {solver.Value(num_blocks_sampled)}\n" f"sampled: {[solver.Value(sampled[i]) for i in range(n)]}\n" f"wafer_coverage: {solver.Value(wafer_coverage) / scale}\n" f"block_utilization: {solver.Value(block_utilization) / scale}\n" f"objective: {solver.ObjectiveValue()}") all_block_sampled = [] for i, block in enumerate(all_block): if not solver.Value(sampled[i]): continue block['x'] = solver.Value(all_wafer_x_st[i]) block['y'] = solver.Value(all_wafer_y_st[i]) all_block_sampled.append(block) result = {} result['width'] = wafer_width result['height'] = wafer_height result["block"] = all_block_sampled with open(os.path.join(result_path, file_name), 'w') as fp: json.dump(result, fp, indent=4) elif 
cp_model.INFEASIBLE: print("INFEASIBLE") if __name__ == "__main__": data_path = "block_data" result_path = "result" # file_name = "0004.json" file_name = "0005.json" # file_name = "0001_d=10.json" # Create the model model = cp_model.CpModel() with open(os.path.join(data_path, file_name), 'r') as fp: data = json.load(fp) wafer_width = data["width"] wafer_height = data["height"] all_block = data["block"] solve(all_block, wafer_width, wafer_height)
Jerry-Github-Cloud/OR-Tools-Code
AdvacneProcess/advance_process_1.py
advance_process_1.py
py
7,607
python
en
code
0
github-code
6
[ { "api_name": "ortools.sat.python.cp_model.CpSolver", "line_number": 121, "usage_type": "call" }, { "api_name": "ortools.sat.python.cp_model", "line_number": 121, "usage_type": "name" }, { "api_name": "ortools.sat.python.cp_model.OPTIMAL", "line_number": 125, "usage_type"...
650430067
#! /bin/python import os import sys import json import numpy as np import luigi import vigra import nifty import nifty.tools as nt import cluster_tools.utils.volume_utils as vu import cluster_tools.utils.function_utils as fu from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask # # Orphan Filter Tasks # class OrphanAssignmentsBase(luigi.Task): """ OrphanAssignments base class """ task_name = 'orphan_assignments' src_file = os.path.abspath(__file__) allow_retry = False graph_path = luigi.Parameter() graph_key = luigi.Parameter() assignment_path = luigi.Parameter() assignment_key = luigi.Parameter() output_path = luigi.Parameter() output_key = luigi.Parameter() relabel = luigi.BoolParameter(default=False) # dependency = luigi.TaskParameter() def requires(self): return self.dependency def run_impl(self): # get the global config and init configs shebang = self.global_config_values()[0] self.init(shebang) # load the task config config = self.get_task_config() # update the config with input and graph paths and keys # as well as block shape config.update({'assignment_path': self.assignment_path, 'assignment_key': self.assignment_key, 'graph_path': self.graph_path, 'graph_key': self.graph_key, 'output_path': self.output_path, 'output_key': self.output_key, 'relabel': self.relabel}) n_jobs = 1 # prime and run the jobs self.prepare_jobs(n_jobs, None, config) self.submit_jobs(n_jobs) # wait till jobs finish and check for job success self.wait_for_jobs() self.check_jobs(n_jobs) class OrphanAssignmentsLocal(OrphanAssignmentsBase, LocalTask): """ OrphanAssignments on local machine """ pass class OrphanAssignmentsSlurm(OrphanAssignmentsBase, SlurmTask): """ OrphanAssignments on slurm cluster """ pass class OrphanAssignmentsLSF(OrphanAssignmentsBase, LSFTask): """ OrphanAssignments on lsf cluster """ pass # # Implementation # def orphan_assignments(job_id, config_path): fu.log("start processing job %i" % job_id) fu.log("reading config from %s" % config_path) # get 
the config with open(config_path) as f: config = json.load(f) # load from config assignment_path = config['assignment_path'] assignment_key = config['assignment_key'] graph_path = config['graph_path'] graph_key = config['graph_key'] output_path = config['output_path'] output_key = config['output_key'] relabel = config['relabel'] n_threads = config.get('threads_per_job', 1) # load the uv-ids and assignments with vu.file_reader(graph_path) as f: ds = f['%s/edges' % graph_key] ds.n_threads = n_threads uv_ids = ds[:] with vu.file_reader(assignment_path) as f: ds = f[assignment_key] ds.n_threads = n_threads chunks = ds.chunks assignments = ds[:] n_new_nodes = int(assignments.max()) + 1 # find the new uv-ids edge_mapping = nt.EdgeMapping(uv_ids, assignments, numberOfThreads=n_threads) new_uv_ids = edge_mapping.newUvIds() # find all orphans = segments that have node degree one ids, node_degrees = np.unique(new_uv_ids, return_counts=True) orphans = ids[node_degrees == 1] n_orphans = len(orphans) fu.log("Found %i orphans of %i clusters" % (n_orphans, n_new_nodes)) # make graph for fast neighbor search graph = nifty.graph.undirectedGraph(n_new_nodes) graph.insertEdges(new_uv_ids) orphan_assignments = np.array([next(graph.nodeAdjacency(orphan_id))[0] for orphan_id in orphans],) assert len(orphan_assignments) == n_orphans, "%i, %i" % (len(orphan_assignments), n_orphans) assignments[orphans] = orphan_assignments.astype('uint64') if relabel: vigra.analysis.relabelConsecutive(assignments, out=assignments, start_label=1, keep_zeros=True) with vu.file_reader(output_path) as f: ds = f.require_dataset(output_key, shape=assignments.shape, chunks=chunks, compression='gzip', dtype='uint64') ds[:] = assignments fu.log_job_success(job_id) if __name__ == '__main__': path = sys.argv[1] assert os.path.exists(path), path job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1]) orphan_assignments(job_id, path)
constantinpape/cluster_tools
cluster_tools/postprocess/orphan_assignments.py
orphan_assignments.py
py
4,673
python
en
code
32
github-code
6
[ { "api_name": "luigi.Task", "line_number": 22, "usage_type": "attribute" }, { "api_name": "os.path.abspath", "line_number": 27, "usage_type": "call" }, { "api_name": "os.path", "line_number": 27, "usage_type": "attribute" }, { "api_name": "luigi.Parameter", "l...
26070327512
import re from validate_email import validate_email TAG_RE = re.compile(r'<[^>]+>') def remove_tags(text): return TAG_RE.sub('', text) class Email(): EMAIL_FIELDS = ["to", "from"] FIELDS = "to to_name from from_name subject body".split() def __init__(self, raw_data): self.populate_fields(raw_data) self.validate_emails() self.sanitize_body() def populate_fields(self, raw_data): fields = "to to_name from from_name subject body".split() for key in self.FIELDS: if raw_data.has_key(key) and isinstance(raw_data[key], basestring): setattr(self, key, str(raw_data[key])) else: raise Exception("Error, invalid data for '{}'.".format(key)) def validate_emails(self): for field in self.EMAIL_FIELDS: address = getattr(self, field) if not validate_email(address): raise Exception("Error, invalid email '{}'".format(address)) def sanitize_body(self): self.body = remove_tags(self.body)
jasonwang0/email-service
lib/email.py
email.py
py
976
python
en
code
0
github-code
6
[ { "api_name": "re.compile", "line_number": 4, "usage_type": "call" }, { "api_name": "validate_email.validate_email", "line_number": 28, "usage_type": "call" } ]
14095916252
#!/usr/bin/env python # coding: utf-8 # In[9]: import json with open('file1.json','r') as a: data1 = a.read() obj1 = json.loads(data1) with open('file2.json','r') as a: data2 = a.read() obj2 = json.loads(data2) dlt = {i: obj1[i] for i in obj1 if i in obj2 and obj1[i] != obj2[i]} if len(dlt): print ("Есть различие!\nJSON 1 | JSON 2") for key, value in dlt.items(): print (key, "->", value, '|',key, "->", obj2[key]) # In[ ]:
Ventelj/Test-Task
test.py
test.py
py
471
python
en
code
0
github-code
6
[ { "api_name": "json.loads", "line_number": 10, "usage_type": "call" }, { "api_name": "json.loads", "line_number": 13, "usage_type": "call" } ]
12960757699
from popsycle import synthetic import numpy as np import matplotlib.pyplot as plt from astropy.table import Table import h5py def test_h5_output(extra_col= True): """" Parameters ---------- extra_col : boolean, defaults to False Tells the code whether or not the new h5 file will have additional columns (ie does the new version of popsycle give more information than before """ #find the test files test_data_dir = '/u/samrose/scratch/test_files/' ebf_file= test_data_dir + 'h5_reference.ebf' reference_h5_file= test_data_dir + 'h5_reference.h5' #create the new h5 file by running popsycle synthetic.perform_pop_syn(ebf_file = ebf_file, output_root = 'test', iso_dir = '/u/casey/scratch/work/microlens/popsycle_test/isochrones/', bin_edges_number = None, overwrite = True, seed=42); #read in the data from the reference h5 file hfr = h5py.File(reference_h5_file, 'r') ref_dset = np.concatenate((hfr['l0b0'], hfr['l0b1'], hfr['l1b0'], hfr['l1b1']), axis=1) hfr.close() #read in the data from the test h5 file created by popsycle hft = h5py.File('test.h5', 'r') test_dset = np.concatenate((hft['l0b0'], hft['l0b1'], hft['l1b0'], hft['l1b1']), axis=1) hft.close() #see if we have the right number of columns if test_dset.shape[0] != ref_dset.shape[0] and not extra_col: assert test_dset.shape[0] == ref_dset.shape[0], "the h5 files are not the same size. 
Run again with extra_col=True if you have added columns)" #test to see whether the files are the same matched_col=0 #initialize matched_col counter for i in range(0, ref_dset.shape[0]): test_col = test_dset[i,:] ref_col = ref_dset[i, :] if test_col.all() == ref_col.all(): matched_col = matched_col+1 #check to see if disagreements are because of nans else: bad_idxs = np.where(ref_col != test_col) ref_nan_idx = np.where(ref_col == np.nan) test_nan_idx = np.where(test_col == np.nan) if test_nan_idx.all() == ref_nan_idx.all() and bad_idxs.all() == ref_nan_idx.all(): matched_col = matched_col+1 else: matched_col= matched_col assert test_nan_idx.all() == ref_nan_idx.all(), "Files do not have nan values at the same indices" assert bad_idxs.all() == ref_nan_idx.all(), "Coulumns disagree at non-nan values" assert matched_col == ref_dset.shape[0], "The new test h5 file does not match the reference file!" return
jluastro/PopSyCLE
popsycle/tests/test_h5_output.py
test_h5_output.py
py
2,763
python
en
code
13
github-code
6
[ { "api_name": "popsycle.synthetic.perform_pop_syn", "line_number": 23, "usage_type": "call" }, { "api_name": "popsycle.synthetic", "line_number": 23, "usage_type": "name" }, { "api_name": "h5py.File", "line_number": 29, "usage_type": "call" }, { "api_name": "numpy...
24200669844
import logging import threading import types from collections import namedtuple from hashlib import sha256 from time import sleep, time from goTenna.payload import BinaryPayload, CustomPayload from termcolor import colored import config from utilities import de_segment, naturalsize logger = logging.getLogger("MSGS") mesh_logger = logging.getLogger("MESH") def handle_message(conn, queue): """ Handle messages received over the mesh network :param conn: the lntenna.gotenna.Connection instance :param queue: a queue.Queue() containing messages :return: result of message handling """ while True: if queue.empty(): sleep(0.15) else: message = queue.get().message if isinstance(message.payload, CustomPayload): print(message) elif isinstance(message.payload, BinaryPayload): payload = message.payload._binary_data digest = sha256(payload).hexdigest() conn.bytes_received += len(payload) if config.DEBUG: mesh_logger.info( colored( f"Received {naturalsize(len(payload))} - {digest}", "cyan" ) ) else: mesh_logger.info( colored(f"Received {naturalsize(len(payload))}", "cyan") ) if not payload[0:4] in config.VALID_MSGS: logger.error( "Message magic not found in VALID_MSGS. Discarding message" ) return conn.events.send_via_socket.put(payload[4:]) else: payload = message.payload.message # test for jumbo: jumbo = True if payload.startswith("sm/") else False if jumbo: handle_jumbo_message(conn, message) return else: logger.error("Unhandled payload type received:") logger.error(payload) def handle_jumbo_message(conn, message): """Handle a jumbo message received. 
""" payload = message.payload.message # TODO: this cuts out all sender and receiver info -- ADD SENDER GID logger.info(f"Received jumbo message fragment") prefix, seq, length, msg = payload.split("/") # if a jumbo monitor thread is not running, start one if conn.jumbo_thread.is_alive(): pass else: conn.events.jumbo_len = length conn.jumbo_thread = None conn.jumbo_thread = threading.Thread( target=monitor_jumbo_msgs, daemon=True, args=[conn] ) conn.jumbo_thread.start() # add the message to the events.jumbo queue conn.events.jumbo.append(payload) return def monitor_jumbo_msgs(conn, timeout=210): logger.debug("Starting jumbo message monitor thread") start = time() missing = True while True and time() < start + timeout: # logger.info( # f"received: {len(conn.events.jumbo)} of {conn.events.jumbo_len} " # f"jumbo messages" # ) if ( len(conn.events.jumbo) == int(conn.events.jumbo_len) and len(conn.events.jumbo) is not 0 ): missing = False # give handle_message the attributes it expects jumbo_message = types.SimpleNamespace() jumbo_message.payload = types.SimpleNamespace() # reconstruct the jumbo message jumbo_message.payload.message = de_segment(conn.events.jumbo) # send it back through handle_message logger.info(f"Jumbo message payload reconstituted") handle_message(conn, jumbo_message) break sleep(0.2) # reset jumbo events after timeout conn.events.init_jumbo() if missing: logger.error( "Did not receive all jumbo messages require for re-assembly. " "Please request the message again from the remote host." ) return """ Message Structure: Size | Description ----------------------- 4 | Magic / Protocol 16 | Host 2 | Port 4 | Checksum / Peer (ID) This will associate this checksum (peer) with this ip address/port configuration, for this protocol. Future messages must all be prefixed with `Checksum`. Messages not prefixed with a valid Magic or Checksum will be discarded. 
""" # checksums = {} # Peer = namedtuple("Peer", ["host", "port", "protocol"]) # # # def handle_binary_msg(msg): # # throw away the message if it's not in magic or the checksum DB # prefix = msg[0:4] # if prefix not in MAGIC and checksums: # print(f"Message prefix unknown: {msg[0:4]}") # return # # if prefix in MAGIC: # if not len(msg) == 26: # print(f"Invalid message length for magic negotiation: {len(msg)}") # return # # add the host, port, protocol to the peer's entry in checksums # checksums[prefix] = Peer(msg[4:20], msg[20:22], msg[0:4]) # print(f"Peer {prefix} added to in-memory peer dictionary") # # elif prefix in checksums: # # if ltng protocol, just strip the header and return it for now # if checksums[prefix] == b"ltng": # print(f"Peer {prefix}'s message stripped and returned") # return msg[4:]
willcl-ark/lightningtenna
lightningtenna/messages.py
messages.py
py
5,494
python
en
code
10
github-code
6
[ { "api_name": "logging.getLogger", "line_number": 15, "usage_type": "call" }, { "api_name": "logging.getLogger", "line_number": 16, "usage_type": "call" }, { "api_name": "time.sleep", "line_number": 28, "usage_type": "call" }, { "api_name": "goTenna.payload.Custom...
24998792911
import time from osv import osv from osv import fields from tools import config from tools.translate import _ from datetime import datetime from datetime import timedelta class hr_payroll_declar(osv.osv): ''' Decleration Form ''' _name = 'hr.payroll.declare' _description = 'Decleration Form' _columns = { 'name':fields.char('Name', size=1024, required=False), 'company_id':fields.many2one('res.company', 'Company', required=True), 'employee_id':fields.many2one('hr.employee', 'Employee', required=True), 'income_sal': fields.float('Income by Salary', digits=(16, int(config['price_accuracy'])), readonly=True), 'income_ids':fields.one2many('hr.payroll.declare.line', 'income_id', 'Source Of Income', required=False), 'investment_ids':fields.one2many('hr.payroll.declare.line', 'invest_id', 'Investments', required=False), 'claim_ids':fields.one2many('hr.payroll.declare.line', 'claim_id', 'Allowance to Claime', required=False), 'date': fields.date('Date'), 'income': fields.float('Taxable Income', digits=(16, int(config['price_accuracy'])), readonly=True), 'investment': fields.float('Total Investment', digits=(16, int(config['price_accuracy'])), readonly=True), 'claims': fields.float('Total Allowance Claims', digits=(16, int(config['price_accuracy'])), readonly=True), 'state':fields.selection([ ('draft','Draft'), ('pending','Waiting for Review'), ('pending','Approved by HR'), ('done','Confirm'), ],'State', select=True, readonly=True), 'note': fields.text('Description'), } def get_basic(self, cr, uid, ids, context): res = {} for rs in self.browse(cr, uid, ids, context): period_id = self.pool.get('account.period').search(cr,uid,[('date_start','<=',time.strftime('%Y-%m-%d')),('date_stop','>=',time.strftime('%Y-%m-%d'))])[0] fiscalyear_id = self.pool.get('account.period').browse(cr, uid, period_id).fiscalyear_id sql_req= ''' SELECT c.id as id, c.wage as wage, function as function, c.date_start as start, c.date_end as end FROM hr_contract c LEFT JOIN hr_employee emp on 
(c.employee_id=emp.id) LEFT JOIN hr_contract_wage_type cwt on (cwt.id = c.wage_type_id) LEFT JOIN hr_contract_wage_type_period p on (cwt.period_id = p.id) WHERE (emp.id=%s) AND (date_start >= %s) AND (date_end IS NULL OR date_end <= %s) ''' cr.execute(sql_req, (rs.employee_id.id, fiscalyear_id.date_start, fiscalyear_id.date_stop)) contracts = cr.dictfetchall() if not contracts: raise osv.except_osv(_('Contract Error !'), _('No Contract Defined for : %s ' % (rs.employee_id.name))) total = 0.0 line_ids = [] for lines in rs.claim_ids: line_ids += [lines.head_id.id] for ct in contracts: allow = 0.0 d1 = ct['start'] d2 = ct['end'] or fiscalyear_id.date_stop td = datetime.fromtimestamp(time.mktime(time.strptime(d2, '%Y-%m-%d'))) - datetime.fromtimestamp(time.mktime(time.strptime(d1, '%Y-%m-%d'))) total += (td.days / 30) * ct['wage'] # ct = self.pool.get('hr.contract').browse(cr, uid, ct['id']) # for line in ct.function.line_ids: # if line.category_id.id in line_ids: # if line.amount_type == 'fix': # allow += (td.days / 30) * line.amount # elif line.amount_type == 'per': # allow += (total * line.amount) # print 'XXXXXXXXXXXXXXXXXXXXXXX : ', line.name, allow res[rs.id] = total return res def write(self, cr, user, ids, vals, context=None): res = self.get_basic(cr, user, ids, context) for id in ids: vals['income_sal'] = res[id] super(hr_payroll_declar, self).write(cr, user, [id], vals, context) return res hr_payroll_declar() class hr_payroll_declare_line(osv.osv): ''' Decleration Line ''' _name = 'hr.payroll.declare.line' _description = 'Decleration Line' def _function_call(self, cr, uid, ids, field_names, arg, context={}): res = {} for rs in self.browse(cr, uid, ids, context): val = 0.0 if rs.income_id: pass elif rs.invest_id: pass elif rs.claim_id: if rs.head_id.calc_type == 'min_max': if rs.amount < rs.head_id.min: val = rs.head_id.min elif rs.amount >= rs.head_id.min and rs.amount <= rs.head_id.max: val = rs.amount elif rs.amount > rs.head_id.max: val = rs.head_id.max 
res[rs.id] = val return res _columns = { 'name':fields.char('Name', size=64, required=False), 'note': fields.text('Description'), 'income_id':fields.many2one('hr.payroll.declare', 'Income', required=False), 'invest_id':fields.many2one('hr.payroll.declare', 'Investment', required=False), 'claim_id':fields.many2one('hr.payroll.declare', 'Allowance Claims', required=False), 'amount': fields.float('Amount', digits=(16, int(config['price_accuracy']))), 'allow': fields.float('Allowence', digits=(16, int(config['price_accuracy']))), 'allow_amount': fields.function(_function_call, method=True, type='float', digits=(16, int(config['price_accuracy'])), string='Allow Amount'), 'head_id':fields.many2one('hr.allounce.deduction.categoty', 'Allowance / Deduction', required=True), } hr_payroll_declare_line() class payment_category(osv.osv): ''' Allowance Deduction Categoty ''' _inherit = 'hr.allounce.deduction.categoty' _columns = { 'calc_type':fields.selection([ ('min_max','Min / Max'), ('stmt','List of Calculations'), ('range','Selection from Range'), ],'Calculation Type', select=True, readonly=False), 'min': fields.float('Min Value', digits=(16, int(config['price_accuracy']))), 'max': fields.float('Max Value', digits=(16, int(config['price_accuracy']))), 'stmt_ids':fields.one2many('hr.payroll.declare.stmt', 'category_id', 'Functions', required=False), 'stmt_select':fields.selection([ ('min','Minimum'), ('max','Maximum'), ('avg','Average'), ],'Selection Method', select=True, readonly=False), } _defaults = { 'stmt_select': lambda *a: 'min', 'calc_type': lambda *a: 'min_max' } payment_category() class payment_stmt(osv.osv): ''' Open ERP Model ''' _name = 'hr.payroll.declare.stmt' _description = 'Payroll Calculations' _columns = { 'category_id':fields.many2one('hr.allounce.deduction.categoty', 'Category', required=True), 'name':fields.char('Expression', size=1024, required=True, readonly=False), 'sequence': fields.integer('Sequence'), 'active':fields.boolean('Active', 
required=False), } _defaults = { 'sequence': lambda *a: 5, 'active': lambda *a: True } payment_stmt()
factorlibre/openerp-extra-6.1
hr_payroll_declare/hr_payroll_declare.py
hr_payroll_declare.py
py
7,785
python
en
code
9
github-code
6
[ { "api_name": "osv.osv.osv", "line_number": 10, "usage_type": "attribute" }, { "api_name": "osv.osv", "line_number": 10, "usage_type": "name" }, { "api_name": "osv.fields.char", "line_number": 18, "usage_type": "call" }, { "api_name": "osv.fields", "line_numbe...
11370435084
import torch import torchvision import gym import random import torch.nn as nn import torch from torch.autograd import Variable import torch.autograd as autograd import torch.nn.functional as F import gym import random import heapq from gym.envs.registration import register register( id='FrozenLakeNotSlippery-v0', entry_point='gym.envs.toy_text:FrozenLakeEnv', kwargs={'map_name': '4x4', 'is_slippery': False}, max_episode_steps=100, reward_threshold=0.78, # optimum = .8196 ) # env = gym.make('FrozenLake8x8-v0') # env = gym.make('FrozenLake-v0') env = gym.make('FrozenLakeNotSlippery-v0') env.render() use_cuda = torch.cuda.is_available() FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor Tensor = FloatTensor class value_net(nn.Module): def __init__(self): super(value_net, self).__init__() bias_on = True self.linear1 = nn.Linear(16, 20, bias=bias_on) self.linear2 = nn.Linear(20, 40, bias=bias_on) self.linear3 = nn.Linear(40, 1, bias=bias_on) # self.dropout = nn.Dropout(p=0.5) def forward(self, x): # --- 0000 ---- 0000 >>> z-score normalization x = self.linear1(x) x_avg = torch.sum(x) / 20 x_minus_x_avg = x - x_avg x_std = torch.sum(torch.pow(x_minus_x_avg, 2)) / 20 epsilon = 0.0000001 x_norm = (x_minus_x_avg) / (torch.sqrt(x_std) + epsilon) x = torch.tanh(x_norm) x = self.linear2(x) # x_avg = torch.sum(x) / 40 # x_minus_x_avg = x - x_avg # x_std = torch.sum(torch.pow(x_minus_x_avg, 2)) / 40 # x_norm = (x_minus_x_avg) / (torch.sqrt(x_std) + epsilon) x = torch.tanh(x) x = self.linear3(x) return x.view(-1, 1) class policy_net(nn.Module): def __init__(self): super(policy_net, self).__init__() bias_on = True self.linear1 = nn.Linear(16, 20, bias=bias_on) self.linear2 = nn.Linear(20, 40, bias=bias_on) self.linear3 = nn.Linear(40, 4, bias=bias_on) # self.dropout = nn.Dropout(p=0.5) def forward(self, x): # --- 0000 ---- 
0000 >>> z-score normalization x = self.linear1(x) x_avg = torch.sum(x) / 20 x_minus_x_avg = x - x_avg x_std = torch.sum(torch.pow(x_minus_x_avg, 2)) / 20 epsilon = 0.0000001 x_norm = (x_minus_x_avg) / (torch.sqrt(x_std) + epsilon) x = torch.tanh(x_norm) x = self.linear2(x) # x_avg = torch.sum(x) / 40 # x_minus_x_avg = x - x_avg # x_std = torch.sum(torch.pow(x_minus_x_avg, 2)) / 40 # x_norm = (x_minus_x_avg) / (torch.sqrt(x_std) + epsilon) x = torch.tanh(x) x = self.linear3(x) return x.view(-1, 4) from collections import namedtuple Transition = namedtuple('Transition', ('state', 'action', 'log_prob', 'action_prob', 'log_action_prob', 'next_state', 'reward', 'entropy_impact', 'done')) class ReplayMemory(object): def __init__(self, capacity): self.capacity = capacity self.memory = [] self.position = 0 def push(self, *args): """Saves a transition.""" if len(self.memory) < self.capacity: self.memory.append(None) self.memory[self.position] = Transition(*args) self.position = (self.position + 1) % self.capacity def sample(self, batch_size): return random.sample(self.memory, batch_size) def __len__(self): return len(self.memory) class ReplayMemoryNoReplacement(object): def __init__(self, capacity): self.h = [] def push(self, *args): random_index = random.random() heapq.heappush(self.h, (random_index, Transition(*args))) def sample(self, batch_size): result = [] for i in range(batch_size): result.append(heapq.heappop(self.h)[1]) return result def __len__(self): return len(self.h) class ReplayMemoryNew(object): def __init__(self, capacity): self.h = [] self.capacity = capacity def push(self, *args): tran = Transition(*args) self.push_transition(tran) def push_transition(self, tran): if self.capacity <= len(self.h): heapq.heappop(self.h) random_index = random.random() heapq.heappush(self.h, (random_index, tran)) def sample(self, batch_size): result = [] for i in range(batch_size): el = heapq.heappop(self.h)[1] result.append(el) heapq.heappush(self.h, (random.random(), el)) 
return result def __len__(self): return len(self.h) def print_v_table(): for i in range(16): # st = np.array(get_state_repr(i)) # st = np.expand_dims(st, axis=0) st = get_state_repr(i) v_net.eval() action_probs = v_net(FloatTensor(st)) # action_probs = F.softmax(action_probs, dim=1) outp = " state (" + str(i) + ") " n = 0 for tensr in action_probs: for cell in tensr: outp = outp + " A[" + str(n) + "]:(" + str(cell.item()) + ")" n += 1 print(outp) def print_pi_table(): for i in range(16): # st = np.array(get_state_repr(i)) # st = np.expand_dims(st, axis=0) st = get_state_repr(i) pi_net.eval() action_probs = pi_net(FloatTensor(st)) action_probs = F.softmax(action_probs, dim=1) outp = " state (" + str(i) + ") " n = 0 for tensr in action_probs: for cell in tensr: outp = outp + " A[" + str(n) + "]:(" + str(cell.item()) + ")" n += 1 print(outp) # def get_state_repr(state_idx): # return state_idx * 13 import gym import numpy as np import torch.optim as optim from torch.distributions import Categorical import random random.seed(1999) import math import torch from torch.optim.lr_scheduler import StepLR # custom weights initialization def weights_init(m): classname = m.__class__.__name__ # print classname # print q_net if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) if not m.bias is None: m.bias.data.normal_(0.0, 0.02) def get_state_repr(state_idx): state = np.zeros(16) state[state_idx] = 1 return state def get_index_repr(state): return np.argwhere(state==1).item() # if gpu is to be used device = torch.device("cuda" if torch.cuda.is_available() else "cpu") BATCH_SIZE = 300 GAMMA = 0.99 TARGET_UPDATE = 1000 PRINT_OUT_TIMES = 1000 ENTROPY_REDUCTION_STEPS = 100000.0 NUM_EPISODES = 10000000 # NUM_STEPS_VALUE_FUNCTION_LEARNS = NUM_EPISODES #NUM_STEPS_VALUE_FUNCTION_LEARNS = (ENTROPY_REDUCTION_STEPS * 1) NUM_STEPS_VALUE_FUNCTION_LEARNS = 1 v_net = value_net() v_net.apply(weights_init) v_net.to(device) target_v_net = value_net() 
target_v_net.load_state_dict(v_net.state_dict()) target_v_net.to(device) pi_net = policy_net() pi_net.apply(weights_init).to(device) # prepare for optimizer, merge both networks parameters # parameters = set() # for net_ in [v_net, pi_net]: # parameters |= set(net_.parameters()) # optimizer = optim.RMSprop(online_net.parameters(), lr=0.001) # optimizer = optim.Adam(parameters, lr=0.0001) v_optimizer = optim.Adam(v_net.parameters(), lr=0.0001) pi_optimizer = optim.Adam(pi_net.parameters(), lr=0.00001) # scheduler = StepLR(v_optimizer, step_size=10000, gamma=0.5) MEMORY_SIZE = 2000 # memory = ReplayMemoryNoReplacement(MEMORY_SIZE) memory = ReplayMemoryNew(MEMORY_SIZE) # memory = ReplayMemory(MEMORY_SIZE) value_loss_cum = [] def optimize(k): if len(memory) < BATCH_SIZE: return transitions = memory.sample(BATCH_SIZE) # Transpose the batch (see http://stackoverflow.com/a/19343/3343043 for # detailed explanation). batch = Transition(*zip(*transitions)) # Compute a mask of non-final states and concatenate the batch elements # final_mask = torch.tensor(tuple(map(lambda d: d is True,batch.done)), device=device, dtype=torch.bool).unsqueeze(1) # final_mask_list = [d for d in batch.done if d is True] final_mask = torch.tensor(tuple(map(lambda d: get_index_repr(d) in [5,7,11,12], batch.next_state)), device=device, dtype=torch.bool).unsqueeze(1) final_mask_list = [d for d in batch.done if get_index_repr(d) in [5,7,11,12]] # Compute states that are final. 
# next_state_final_mask = torch.tensor(tuple(map(lambda d: (d) in [5,7,11,12,15], # batch.next_state)), device=device, dtype=torch.uint8).unsqueeze(1) # next_state_finak_list = [d for d in batch.next_state if d in [5,7,11,12,15] ] # Unpack the parameters from the memory state_batch = FloatTensor(batch.state) state_batch = state_batch.view(BATCH_SIZE, 16) next_state_batch = FloatTensor(batch.next_state) next_state_batch = next_state_batch.view(BATCH_SIZE, 16) action_batch = LongTensor(batch.action).view(BATCH_SIZE, 1) reward_batch = Tensor(batch.reward).view(BATCH_SIZE, 1) entropy_impact_batch = FloatTensor(batch.entropy_impact).view(BATCH_SIZE, 1) # log_prob_batch = torch.cat(batch.log_prob).view(BATCH_SIZE, 1) # action_probs_batch = torch.cat(batch.action_prob).view(BATCH_SIZE,4) # log_action_probs_batch = torch.cat(batch.log_action_prob).view(BATCH_SIZE,4) # FIRST , calculate V(next_state)and backpropagate MSE on V target_v_net.eval() v_next = target_v_net(next_state_batch).detach() # v_next[next_state_final_mask] = torch.zeros(len(next_state_finak_list), device=device).view(len(next_state_finak_list)) v_next[final_mask] = torch.zeros(len(final_mask_list), device=device).view(len(final_mask_list)) ##HACK FIXING expected value # v_current_fixed = [get_expected_value_fixed(_st) for _st in batch.state] # v_current_fixed = FloatTensor(v_current_fixed).view(BATCH_SIZE,1) ##HACK FIXING expected value ##HACK FIXING current value # v_next_fixed = [get_expected_value_fixed(_st) for _st in batch.next_state] # v_next_fixed = FloatTensor(v_next_fixed).view(BATCH_SIZE,1) # v_next = v_next_fixed ##HACK FIXING current value expected_value = reward_batch + v_next * GAMMA ##HACK FIXING expected value # expected_value = expected_value_fixed ##HACK FIXING expected value # calculate V(current_state) #if k <= NUM_STEPS_VALUE_FUNCTION_LEARNS: # v_net.train() #else: # v_net.eval() v_net.train() v_current = v_net(state_batch) # backpropagate: value_loss = torch.sum((expected_value - 
v_current) ** 2) v_optimizer.zero_grad() value_loss.backward() # keep graph for policy net optimizer v_optimizer.step() # if k <= NUM_STEPS_VALUE_FUNCTION_LEARNS: # v_optimizer.zero_grad() # # value_loss.backward(retain_graph=True) # keep graph for policy net optimizer # value_loss.backward() # keep graph for policy net optimizer # v_optimizer.step() # scheduler.step() value_loss_cum.append(value_loss.item()) v_current = v_current.detach() ##HACK FIXING expected value # v_current = v_current_fixed ##HACK FIXING expected value # SECOND, calculate gradient loss: # H(X) = P(X) log ( P(X) ) # calculate the action probability actions_distr = pi_net(state_batch) actions_prob_batch = torch.softmax(actions_distr, dim=1) log_actions_prob_batch = torch.log_softmax(actions_distr, dim=1) action_batch = action_batch action_mask = FloatTensor(BATCH_SIZE, 4).zero_() action_mask.scatter_(1, action_batch, 1) # This will have shape (BATCH_SIZE, 4), and its contents will be # like : [[0,0,1,0],[1,0,0,0],...] 
# log_prob_batch = log_actions_prob_batch.gather(1,action_batch) log_prob_batch = torch.sum(log_actions_prob_batch * action_mask, dim=1).view(BATCH_SIZE, 1) # sum up across rows (ending tensor is shape (BATCH_SIZE, 1)) entropy = entropy_impact_batch * torch.sum(actions_prob_batch * log_actions_prob_batch) #policy_loss = torch.sum(-log_prob_batch * (expected_value - v_current) + entropy) policy_loss = torch.sum(-log_prob_batch * (expected_value - v_current)) pi_optimizer.zero_grad() policy_loss.backward() pi_optimizer.step() return policy_loss.item(), value_loss.item() score = [] times_trained = 0 times_reach_goal = 0 steps_done = 0 policy_loss_avg = [1.0] v_loss_avg = [1.0] TARGET_UPDATE = 1000 for k in range(NUM_EPISODES): done = False observation = env.reset() # observation, reward, done, info = env.step(env.action_space.sample()) # take a random action reward = 0 episode_step = 0 # print("b") I = 1.0 # entropy_impact = (ENTROPY_REDUCTION_STEPS - k) / ENTROPY_REDUCTION_STEPS if k == 0: entropy_impact = 1.0 else: entropy_impact = min(1, (1 / (k * 0.005))) if k > ENTROPY_REDUCTION_STEPS: entropy_impact = 0.0 # test entropy always 0 # entropy_impact = 0.0 # entropy_impact = 0.0 # if entropy_impact < 0.0: # entropy_impact = 0 while not done: # print("c") steps_done += 1 # Get action from pi # np_observation = np.array(get_state_repr(observation)) # np_observation = np.expand_dims(np_observation, axis=0) np_observation = get_state_repr(observation) # print(np_observation) observation_tensor = FloatTensor(np_observation) # action distribution pi_net.eval() action_distr = pi_net(observation_tensor) action_probs = torch.softmax(action_distr, dim=1) log_action_probs = 0 # log_action_probs = F.log_softmax(action_distr, dim=1) # Decide on an action based on the distribution m = Categorical(action_probs) action = m.sample() log_prob = m.log_prob(action).unsqueeze(1) # break # Execute action in environment. 
old_state = observation observation, reward, done, info = env.step(action.item()) new_state = observation if k % 5000 == 0: # print("old_state != new_state") # print(old_state != new_state) # print("oldstate " + str(old_state) + " newstate " + str(new_state)) print("action_dist ") print(action_probs) print("On state=" + str(old_state) + ", selected action=" + str(action.item())) print("new state=" + str(new_state) + ", done=" + str(done) + \ ". Reward: " + str(reward)) # Perform one step of the optimization # policy_loss, value_loss = optimize_model(I, \ # old_state, \ # log_prob, \ # log_actions_probs, \ # action_probs, \ # reward, \ # new_state, \ # entropy_impact, \ # done) # I = I * GAMMA # if (not done) or (done and new_state in [5,7,11,12,15]): memory.push(get_state_repr(old_state), action.item(), log_prob, action_probs, log_action_probs, get_state_repr(new_state), reward, entropy_impact, done) if len(memory) >= MEMORY_SIZE: policy_loss, value_loss = optimize(k) if len(policy_loss_avg) < PRINT_OUT_TIMES: policy_loss_avg.append(policy_loss) v_loss_avg.append(value_loss) else: policy_loss_avg[episode_step % PRINT_OUT_TIMES] = policy_loss v_loss_avg[episode_step % PRINT_OUT_TIMES] = value_loss times_trained = times_trained + 1 episode_step += 1 # env.render() if k % PRINT_OUT_TIMES == 0: print_pi_table() print_v_table() if len(score) < 100: score.append(reward) else: score[k % 100] = reward if k % TARGET_UPDATE == 0: target_v_net.load_state_dict(v_net.state_dict()) if k % PRINT_OUT_TIMES == 0: print("Episode {} finished after {} . Running score: {}. Policy_loss: {}, Value_loss: {}. Times trained: \ {}. Times reached goal: {}. 
\ Steps done: {}.".format(k, episode_step, np.mean(score), np.mean(policy_loss_avg), np.mean(v_loss_avg), times_trained,times_reach_goal, steps_done)) # print("policy_loss_avg") # print(policy_loss_avg) # print("value_loss_avg") # print(v_loss_avg) # print("times_reach_goal") # print(times_reach_goal) times_trained = 0 times_reach_goal = 0 # print("Game finished. " + "-" * 5) # print(len(episode_series)) # for param in net.parameters(): # print(param.data) if reward > 0.0: times_reach_goal = times_reach_goal + 1
ssainz/reinforcement_learning_algorithms
non_jupyter/Frozen_Lake_Actor_Critic_Batch_NoReplacement.py
Frozen_Lake_Actor_Critic_Batch_NoReplacement.py
py
17,568
python
en
code
0
github-code
6
[ { "api_name": "gym.envs.registration.register", "line_number": 17, "usage_type": "call" }, { "api_name": "gym.make", "line_number": 27, "usage_type": "call" }, { "api_name": "torch.cuda.is_available", "line_number": 30, "usage_type": "call" }, { "api_name": "torch...
28969370099
from django.urls import path from apps.cafes.urls import CAFE_URL_KEYWORD from apps.products import views CATEGORY_LIST_URL_NAME = "category-list" CATEGORY_DETAIL_URL_NAME = "category-detail" CATEGORY_URL_KEYWORD = "category_id" OPTION_GROUP_LIST_URL_NAME = "optiongroup-list" OPTION_GROUP_DETAIL_URL_NAME = "optiongroup-detail" OPTION_GROUP_URL_KEYWORD = "optiongroup_id" PRODUCT_LIST_URL_NAME = "product-list" PRODUCT_DETAIL_URL_NAME = "product-detail" PRODUCT_URL_KEYWORD = "product_id" urlpatterns = [ path( f"<uuid:{CAFE_URL_KEYWORD}>/categories/", views.CategoryAPIViewSet.as_view({"get": "list", "post": "create"}), name=CATEGORY_LIST_URL_NAME, ), path( f"<uuid:{CAFE_URL_KEYWORD}>/categories/<int:{CATEGORY_URL_KEYWORD}>/", views.CategoryAPIViewSet.as_view( {"get": "retrieve", "put": "update", "delete": "destroy"} ), name=CATEGORY_DETAIL_URL_NAME, ), path( f"<uuid:{CAFE_URL_KEYWORD}>/optiongroups/", views.OptionGroupAPIViewSet.as_view({"get": "list", "post": "create"}), name=OPTION_GROUP_LIST_URL_NAME, ), path( f"<uuid:{CAFE_URL_KEYWORD}>/optiongroups/<int:{OPTION_GROUP_URL_KEYWORD}>/", views.OptionGroupAPIViewSet.as_view( {"get": "retrieve", "put": "update", "delete": "destroy"} ), ), path( f"<uuid:{CAFE_URL_KEYWORD}>/products/", views.ProductAPIViewSet.as_view({"get": "list", "post": "create"}), name=PRODUCT_LIST_URL_NAME, ), path( f"<uuid:{CAFE_URL_KEYWORD}>/products/<int:{PRODUCT_URL_KEYWORD}>/", views.ProductAPIViewSet.as_view( {"get": "retrieve", "put": "update", "delete": "destroy"} ), name=PRODUCT_DETAIL_URL_NAME, ), ]
TGoddessana/cafehere
apps/products/urls.py
urls.py
py
1,800
python
en
code
0
github-code
6
[ { "api_name": "django.urls.path", "line_number": 19, "usage_type": "call" }, { "api_name": "apps.cafes.urls.CAFE_URL_KEYWORD", "line_number": 20, "usage_type": "name" }, { "api_name": "apps.products.views.CategoryAPIViewSet.as_view", "line_number": 21, "usage_type": "call...
31434243930
#!/usr/bin/python3 # Coding: utf-8 # Author: Rogen # Description: 專家系統功能集 from os import walk from tkinter import * from PIL import ImageTk, Image from tkinter import ttk, messagebox, font, filedialog from tkintertable.TableModels import TableModel from tkintertable.Tables import TableCanvas from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk from matplotlib.figure import Figure from GUI_language import * from Table4Results import TkSheet from openpyxl import load_workbook import matplotlib.pyplot as plt import matplotlib.backends._tkagg import pandas as pd import graphviz as gz import os, re, subprocess, csv, shutil class RuleStruct(object): """docstring for RuleStruct""" def __init__(self, sheet_name='SiteEvaluation'): os.environ["PATH"] += os.pathsep + './Graphviz2.38/bin/' self.rules_database_path = './Diagnosis_Rules.xlsm' self.rule_srtucture_graph = './RuleGraph/' self.dataset_path = './Dataset/' self.sheet_name = sheet_name self.all_diagnosis = [] self.OpenRuleFile() # 讀取規則庫excel def OpenRuleFile(self): rule_counter = 0 del_nodes = [] self.subtitle = {} self.subtitle[1] = {} # Storage chinese subtitle self.subtitle[2] = {} # Storage english subtitle self.df = pd.read_excel(self.rules_database_path, header=0, sheet_name=self.sheet_name, encoding='utf_8_sig', converters = {'Rule':str, 'Node':int, 'Question':str, 'Yes':str, 'No':str, 'Unknown':str, 'Pictures':str}) self.rulebase_list = pd.ExcelFile(self.rules_database_path).sheet_names for rule_num in map(str, self.df.iloc[:, 0]): # Rule column if re.search(r'Main Branch', rule_num): del_nodes.append(rule_counter) sp_rule_num = rule_num.split(': ') en_subtitle = sp_rule_num[1] ch_subtitle = self.df.iloc[rule_counter,9] # Chinese question column elif rule_num == 'nan': del_nodes.append(rule_counter) else: self.subtitle[1][self.df.Node[rule_counter]] = ch_subtitle self.subtitle[2][self.df.Node[rule_counter]] = en_subtitle rule_counter += 1 self.df.drop(del_nodes, inplace=True) 
self.df.reset_index(drop=True, inplace=True) # 規則關係圖形化 (會搭配Graphviz2.38套件來畫圖,只會在新增、篩除、修改規則時使用) def __graph__(self, master): def senten_cut(sentence): line = [] temp = [] count = 1 senten = '' sp_sen = sentence.split(' ') for word in sp_sen: if count == len(sp_sen): temp.append(word) line.append(' '.join(temp)) senten = '\n'.join(line) elif count % 6 != 0: temp.append(word) else: line.append(' '.join(temp)) temp = [] temp.append(word) count += 1 return(senten) # 規則關係圖x、y軸 def scrolled_canvas(frame): w,h = frame.maxsize() frame.title('Rule Structure Graph') canv = Canvas(frame, relief=SUNKEN) sbarV = Scrollbar(frame, orient=VERTICAL, command=canv.yview) sbarH = Scrollbar(frame, orient=HORIZONTAL, command=canv.xview) im = Image.open(self.rule_srtucture_graph + 'Graph.png') im = ExpertSystemFunctions(1,self.sheet_name).image_resize(im, w) im2 = ImageTk.PhotoImage(im) width,height = self.im.size canv.config(scrollregion=(0,0,width,height), width=width, height=height, yscrollcommand=sbarV.set, xscrollcommand=sbarH.set, highlightthickness=0) canv.create_image(0,0,anchor="nw",image=im2) sbarV.pack(side=RIGHT, fill=Y) sbarH.pack(side=BOTTOM, fill=X) canv.pack(side=LEFT, expand=YES, fill=BOTH) dot = gz.Digraph() for row in range(len(self.df.index)): l = self.df.iloc[row].tolist() dot.node(str(l[1]), senten_cut(str(l[1])+': '+l[2])) # Original_Node if str(l[3]) == 'nan' or str(l[4]) == 'nan': pass else: l[3] = l[3].replace('#','') l[4] = l[4].replace('#','') if re.search(r'\d+:.+', l[3]): # Yes_Node sp_ = l[3].split(':') dot.node(sp_[0], senten_cut(l[3])) dot.edge(str(l[1]), sp_[0], label='yes') else: dot.edge(str(l[1]), l[3], label='yes') if re.search(r'\d+:.+', l[4]): # No_Node sp_ = l[4].split(':') dot.node(sp_[0], senten_cut(l[4])) dot.edge(str(l[1]), sp_[0], label='No') else: dot.edge(str(l[1]), l[4], label='No') dot.render(self.rule_srtucture_graph + 'Graph', format='png') # dot.view('test') self.rule_win = Toplevel(master) scrolled_canvas(self.rule_win) # 診斷結果表格顯示 def 
__table__(self, master): try: if self.rule_win.state() == 'normal': pass except: data = {} colnums = ['Rule','Node','Question','Yes','No'] rule_dict = self.df.ix[:,'Rule':'No'] for r in range(len(rule_dict.index)): plice = {} for c in range(len(rule_dict.columns)): if rule_dict.iloc[r,c] == 'nan': plice[rule_dict.columns[c]] = ' ' else: plice[rule_dict.columns[c]] = rule_dict.iloc[r,c] data[str(r)] = plice self.rule_win = Toplevel(master) frame = Frame(self.rule_win) frame.pack() model = TableModel() for key in colnums: model.addColumn(key) #sort the columns model.importDict(data) table = TableCanvas(frame, model=model, width=800, height=500, rowheight=20, editable=False, cellbackgr='#E3F6CE', reverseorder=1, rowselectedcolor='yellow') table.createTableFrame() table.sortTable(columnName='Rule') def __destroy__(self): try: if self.rule_win.state() == 'normal': self.rule_win.destroy() except: pass class ExpertSystemFunctions(RuleStruct): # 全域變數宣告 global _rulebase_diagnosis_recode, _rulebase_diagnosis_done, _answer_dict _rulebase_diagnosis_recode = {} _rulebase_diagnosis_done = [] _answer_dict = {} def __init__(self, ver, sheet_name, code): super(ExpertSystemFunctions, self).__init__(sheet_name) self.sheet_name = sheet_name self.language = ver self.internal_code = code self.answer_store = {} self.answer_diagnosis = '' self.query = '' self.Yes_score = 0 self.No_score = 0 self.tree_iterater = 0 self.optiontree_iterater = 0 self.photo_image_counter = 0 self.note_pointer = 0 self.save_path = '.\\Save' self.photo_path = '.\\Photo\\' + self.sheet_name self.photo_temp = '.\\Photo\\TempFile' self.image_nonavailable = '.\\Photo\\Interface\\img_not_available.png' self.hidden_answer = '.\\Temp\\Answers.TXT' self.GuiInitiation() # 專家系統啟動前超參數設定 def GuiInitiation(self): if self.language == 1: ch = Chinese() self.interface = ch.ES_GUI_Interface() self.Q_list = self.df.中文問題 elif self.language == 2: en = English() self.interface = en.ES_GUI_Interface() self.Q_list = self.df.Question 
self.cur_node = self.df.Node[0] self.query = self.Q_list[0] # 專家系統離開設定 def GuiClose(self): # Update the SiteEva_Table.csv self.siteval_df = pd.read_csv('./Dataset/SiteEva_Table.csv') with open('CropVISTMapInfo.txt','r',encoding='utf-8') as file: lines = file.readlines() inter_code = lines[6].split(r' = ')[1].replace('\n','') index = self.siteval_df.index[self.siteval_df['Internal_Code'] == inter_code] center_location = re.split(r'[,|=| ]+',lines[3]) NS_direction = lines[4].split(' = ')[1].replace('\n','') EW_direction = lines[5].split(' = ')[1].replace('\n','') self.siteval_df.iloc[index,2:5] = center_location[2:5] self.siteval_df.iloc[index,6:9] = center_location[6:9] self.siteval_df.iloc[index,13:15] = [EW_direction,NS_direction] self.siteval_df.to_csv('./Dataset/SiteEva_Table.csv', index=False, encoding='utf_8_sig') print(center_location,EW_direction,NS_direction,inter_code) # Delete the figures of TempFile folder for root, dirs, files in walk(self.photo_temp): for f in files: fullpath = os.path.join(root,f) if os.path.isfile(fullpath): os.remove(fullpath) # Delete the diagnosed csv files for root, dirs, files in walk(self.save_path): for f in files: fullpath = os.path.join(root,f) if os.path.isfile(fullpath) and re.search(r'.+_Diagnosis\.csv', f): os.remove(fullpath) # Listing the full diagnosed results of rulebases into excel file # self.diagnosis_export() #---------- Photograph Controled Area ----------# # 第一張圖片設定 def pri_photo(self): if str(self.df.Pictures[0]) == 'nan': self.photo_images = [self.image_nonavailable] else: priphoto_folder = os.path.join(self.photo_path, self.df.Pictures[0]) exist = self.node_folder_exist(priphoto_folder) if exist: self.photo_images = [os.path.join(priphoto_folder, _) for _ in os.listdir(priphoto_folder)] if len(self.photo_images) == 0: self.photo_images = [self.image_nonavailable] else: self.photo_images = [self.image_nonavailable] self.im = Image.open(self.photo_images[0]) image_file = 
ImageTk.PhotoImage(self.image_resize(self.im)) self.figure_title(self.photo_images[0]) return(image_file) # 專家系統第一張圖片設定 def figure_iterator(self,state): if len(self.photo_images) < 2: pass else: if state == 'forward' and self.photo_image_counter < len(self.photo_images)-1: self.photo_image_counter += 1 elif state == 'back' and self.photo_image_counter > 0: self.photo_image_counter -= 1 self.im = Image.open(self.photo_images[self.photo_image_counter]) self.iterative_image = ImageTk.PhotoImage(self.image_resize(self.im)) self.fig_label.config(image=self.iterative_image) self.fig_label.update_idletasks() self.figure_title(self.photo_images[self.photo_image_counter]) # 下一張圖片 def next_figure(self): self.photo_image_counter = 0 self.photo_folder = self.df.Pictures[self.df.Node == self.cur_node].tolist()[0] if str(self.photo_folder) == 'nan': self.photo_images = [self.image_nonavailable] else: self.photo_fullpath = os.path.join(self.photo_path, self.photo_folder) if re.match(r'^N\d\d$',self.photo_folder): try: self.temp_path = os.path.join(self.photo_temp, self.photo_folder) self.photo_images = [os.path.join(self.temp_path, _) for _ in os.listdir(self.temp_path)] except FileNotFoundError as e: self.photo_images = '' else: exist = self.node_folder_exist(self.photo_fullpath) if exist: self.photo_images = [os.path.join(self.photo_fullpath, _) for _ in os.listdir(self.photo_fullpath)] else: self.photo_images = [self.image_nonavailable] # If the node's folder exists, there are no picture in folder if len(self.photo_images) == 0: self.photo_images = [self.image_nonavailable] self.im = Image.open(self.photo_images[0]) self.next_image = ImageTk.PhotoImage(self.image_resize(self.im)) self.fig_label.config(image=self.next_image) self.fig_label.update_idletasks() self.figure_title(self.photo_images[0]) # 圖片控制(放大、截圖、移動等) def figure_magnification(self, image): # Setting figure size and quality f = Figure(figsize=(5,3), dpi=150) a = f.add_subplot(111) # Plotting figure # img_arr = 
matplotlib.image.imread('figure path') a.imshow(image) a.axis('off') a.axes.get_xaxis().set_visible(False) a.axes.get_yaxis().set_visible(False) # Display the graphics on the tkinter window canvas = FigureCanvasTkAgg(f, master=self.photo_win) canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=1) # Putting the toolbar of matplotlib graphics on the tkinter window toolbar = NavigationToolbar2Tk(canvas, self.photo_win) toolbar.update() canvas._tkcanvas.pack(side=BOTTOM, fill=BOTH, expand=True) # 判斷資料夾是否存在 def node_folder_exist(self, path): result = 1 if os.path.exists(path) else 0 return(result) # 圖像尺寸縮放 def image_resize(self, image): w, h = image.size f1 = self.label_width/w f2 = self.label_height/h factor = min([f1, f2]) width = int(w*factor) height = int(h*factor) return(image.resize((width, height), Image.ANTIALIAS)) #---------- Query Controled Area ----------# # 圖片路徑 def figure_title(self,name): self.figframe.config(text=name) # 問題標題 def query_title(self,note): def branch(n): return(self.subtitle[self.language][n]) if len(str(note)) == 1: #main branch return(branch(note)) else: #secondary branch m = int(re.match(r'^(\d)',str(note)).group(0)) main_branch = branch(m) sec_branch = branch(note) if sec_branch == '': return(main_branch) else: return('%s-%s' % (main_branch,sec_branch)) # 初始診斷問題 def pri_query(self): self.unknown_button_control(self.cur_node) # The node is been hidden if str(self.df.Hidden_Answer[self.df.Node == self.cur_node].tolist()[0]) != 'nan': ans, error = self.note_hidden(0) if error == 0: # Hidden node have answer self.user_answer.set(ans) self.next_step() else: # Hidden node don't have answer, but user loaded previous study self.loading_answer() # The node isn't been hidden else: self.loading_answer() # 上一步 def back_step(self): array = [k for k in sorted(self.answer_store.keys()) if int(k) % 100 != 0] self.cur_node = int(array[-1]) self.next_node = self.cur_node self.query = self.answer_store[str(self.cur_node)][1] 
self.querylabel.config(text=self.query) del self.answer_store[str(self.cur_node)] # delete the current node self.next_figure() self.figure_descript() self.query_descript() # Button crontrol self.r1.config(state=ACTIVE) self.r2.config(state=ACTIVE) self.next_button.config(state=ACTIVE) self.submit_button.config(state=DISABLED) self.unknown_button_control(self.cur_node) self.note_pointer -= 1 if self.note_pointer == 0: self.back_button.config(state=DISABLED) try: if self.rulepath_win.state() == 'normal': self.option_record('onebyone',-1) # Delete one answer of rule path except: pass #---- Record the current note info and give next note ----# # 下一步 def next_step(self): yesScore = '-'; noScore = '-'; diag = '-' if self.user_answer.get() == 'None': messagebox.showerror('ERROR', 'The option is empty!') else: curIdx = self.df.index[self.df.Node == self.cur_node].tolist()[0] # Cumulative yes/no score if self.user_answer.get() == 'yes': self.next_node = self.df.Yes[curIdx] if str(self.df.Yes診斷[curIdx]) != 'nan': if self.language == 1: diag = self.df.Yes診斷[curIdx] elif self.language == 2: # If the english version of diagnosis is presented, it will be changed... diag = self.df.Yes診斷[curIdx] if str(self.df.Yes_score[curIdx]) != 'nan': self.Yes_score += self.df.Yes_score[curIdx] yesScore = self.df.Yes_score[curIdx] elif self.user_answer.get() == 'no': self.next_node = self.df.No[curIdx] if str(self.df.No診斷[curIdx]) != 'nan': if self.language == 1: diag = self.df.No診斷[curIdx] elif self.language == 2: # If the english version of diagnosis is presented, it will be changed... 
diag = self.df.No診斷[curIdx] if str(self.df.No_score[curIdx]) != 'nan': self.No_score += self.df.No_score[curIdx] noScore = self.df.No_score[curIdx] else: self.next_node = self.df.Unknown[curIdx] # Recode the solution way of each note solution = '-' if str(self.df.處理對策[curIdx]) == 'nan' else self.df.處理對策[curIdx] diag = self.answer_diagnosis if self.answer_diagnosis != '' else diag self.answer_store[str(self.cur_node)] = [self.cur_node, self.query, str(self.user_answer.get()), yesScore, noScore, diag, solution] self.next_node = int(self.next_node.replace('#','')) # Button controled if self.next_node == 0: self.done() self.unknown_button_control(self.next_node) self.back_button.config(state=ACTIVE) self.analyze_button.config(state=DISABLED) try: if self.rulepath_win.state() == 'normal': self.option_record('onebyone',1) # Inserting one rule in the rule path except: pass # the Order of code can not change!!! self.answer_diagnosis = '' self.next_query(curIdx) self.query_descript() self.figure_descript() self.note_pointer += 1 #---- Goto the next query, and resetting the figures and buttons ----# # 下一個診斷問題 def next_query(self, curIdx): next_node_idx = self.df.index[self.df.Node == self.next_node].tolist()[0] if str(self.Q_list[next_node_idx]) == 'nan': # Ending this section of all diagnosis self.querylabel.config(text=self.interface['diag_complete']['done']) else: # Hidden node find corresponding answer if str(self.df.Hidden_Answer[next_node_idx]) != 'nan': ans, error = self.note_hidden(next_node_idx) if error == 0: # Entry the chapter of diagnosis after hidden node if self.next_node % 100 == 0: self.diag_summary(next_node_idx) else: self.query = self.Q_list[next_node_idx] self.cur_node = self.df.Node[next_node_idx] self.user_answer.set(ans) self.next_step() # Error replace hidden node not find corresponding answer if str(self.df.Hidden_Answer[next_node_idx]) == 'nan' or error == 1: # Entry the chapter of diagnosis if self.next_node % 100 == 0: 
self.diag_summary(next_node_idx) else: self.cur_node = self.df.Node[next_node_idx] self.query = self.Q_list[next_node_idx] self.querylabel.config(text=self.query) querytitle = self.query_title(self.cur_node) self.queryframe.config(text=querytitle) self.user_answer.set(None) self.next_figure() self.loading_answer() # 診斷結束 def done(self): self.submit_button.config(state=ACTIVE) self.next_button.config(state=DISABLED) self.r1.config(state=DISABLED) self.r2.config(state=DISABLED) self.r3.config(state=DISABLED) self.querylabel.config(text=self.interface['diag_complete']['done']) # "分析"按鈕控制 def analyze_button_control(self, option): if option == 1 and self.sheet_name == 'SiteEvaluation' and (self.cur_node == 1101 or self.cur_node == 1201): self.analyze_button.config(state=ACTIVE) self.next_button.config(state=DISABLED) elif option == 2: self.analyze_button.config(state=DISABLED) self.next_button.config(state=ACTIVE) # "未知"按鈕控制 def unknown_button_control(self, note): if str(self.df.Unknown[self.df.Node == note].tolist()[0]) == 'nan': self.r3.config(state=DISABLED) else: self.r3.config(state=ACTIVE) # 診斷結果分數計算 def diag_summary(self,next_node_idx): self.cur_node = self.df.Node[next_node_idx] self.query = self.Q_list[next_node_idx] if self.df.Yes_score[next_node_idx] != 0 and str(self.df.Yes_score[next_node_idx]) != 'nan': y = round(self.Yes_score*100/self.df.Yes_score[next_node_idx]) y = [100 if y >= 100 else y][0] else: y = 0 if self.df.No_score[next_node_idx] != 0 and str(self.df.No_score[next_node_idx]) != 'nan': n = round(self.No_score*100/self.df.No_score[next_node_idx]) n = [100 if n >= 100 else n][0] else: n = 0 self.Yes_score = 0 self.No_score = 0 self.answer_store[str(self.cur_node)] = [self.cur_node, self.query, '*', str(y)+'%', str(n)+'%', '-', '-'] # It's end note, but it is deprecated if self.df.No[next_node_idx] == 'Max Possible' and str(self.df.Yes[next_node_idx]) == 'nan': self.done() elif self.df.No[next_node_idx] == 'Max Possible' and 
str(self.df.Yes[next_node_idx]) != 'nan': self.next_node = self.df.Yes[next_node_idx] self.next_node = int(self.next_node.replace('#','')) next_node_idx = self.df.index[self.df.Node == self.next_node].tolist()[0] # It's end note (#node:0) if self.df.Node[next_node_idx] == 0: self.done() else: if str(self.df.Hidden_Answer[next_node_idx]) != 'nan': ans, error = self.note_hidden(next_node_idx) if error == 0: self.cur_node = self.df.Node[next_node_idx] self.query = self.Q_list[next_node_idx] self.user_answer.set(ans) self.next_step() if str(self.df.Hidden_Answer[next_node_idx]) == 'nan' or error == 1: self.query = self.Q_list[next_node_idx] self.cur_node = self.df.Node[next_node_idx] self.querylabel.config(text=self.query) self.unknown_button_control(self.next_node) self.loading_answer() #---------- Output Controled Area ----------# # 儲存診斷結果 def save_diagnosis(self): mode = 'a' if os.path.exists('%s/Diagnosis_%s.xlsx' % (self.save_path, self.internal_code)) else 'w' with pd.ExcelWriter('%s/Diagnosis_%s.xlsx' % (self.save_path, self.internal_code), engine='openpyxl', mode=mode) as writer: save_df = pd.read_csv('%s/%s_Diagnosis.csv' % (self.save_path, self.sheet_name), delimiter=",") save_df.to_excel(writer, sheet_name=self.sheet_name, index = None) writer.save() self.save.config(state=DISABLED) # 所有規則庫診斷結果匯出成一個CSV檔 def diagnosis_export(self): # Export full rulebased diagnosis into csv file file = self.save_path+'/Diagnosis-Export.csv' flag = [True for f in os.listdir(self.save_path) if re.search('tmp-.+',f)] if not True in flag: messagebox.showinfo('ERROR','No any diagnosed output!') else: if os.path.exists(file): os.remove(file) with open(file, 'w+', encoding='utf_8_sig', newline='') as d: out_csv = csv.writer(d, quoting=csv.QUOTE_ALL) for i in self.rulebase_list: if os.path.exists(self.save_path+'/tmp-'+i): out_lines = [ [self.interface['ruledb_name'][i]], ['-'*50], [self.interface['done_title']['diagnosis'], self.interface['done_title']['yescore'], 
self.interface['done_title']['noscore']], ['-'*50] ] out_csv.writerows(out_lines) with open(self.save_path+'/tmp-'+i, 'r', encoding='utf_8_sig', newline='') as t: data = csv.reader(t, delimiter='\t') out_csv.writerows(data) out_csv.writerow(['_'*50]) out_csv.writerow(['\n'*2]) os.remove(self.save_path+'/tmp-'+i) # Including the "Diagnosis-Export" file into excel file # with pd.ExcelWriter('%s/Diagnosis_%s.xlsx' % (self.save_path, self.internal_code), engine='openpyxl', mode='a') as writer: # diagnosis_df = pd.read_csv(file, delimiter='\t') # diagnosis_df.to_excel(writer, sheet_name='Diagnosis-Export', index = None) # writer.save() # os.remove(file) messagebox.showinfo('INFO','Output have done.') # 診斷結果表格設置 def diagnosis_done(self,tree): for key in sorted(self.answer_store.keys()): if self.answer_store[key][2] == '*' or len(key) == 1: branchs = 'main_branch' if len(key) == 1 else 'secondary_branch' tree.insert('',self.tree_iterater,values=['']*7) tree.insert('',self.tree_iterater+1,values=self.answer_store[key][:7],tags=(branchs,)) self.tree_iterater += 2 else: if self.answer_store[key][2] == 'no' or self.answer_store[key][2] == 'unknown': self.answer_store[key][6] = '-' tree.insert('',self.tree_iterater,values=self.answer_store[key][:7]) self.tree_iterater += 1 with open(self.save_path+'/tmp-'+self.sheet_name, 'w+', encoding='utf_8_sig') as f: tag = 0 for key in sorted(self.answer_store.keys()): ans = self.answer_store[key] if ans[5] != '-': f.write('\t'.join(map(str,[ans[5],ans[3],ans[4]])) + '\n') tag = 1 elif ans[2] == '*': # f.write('\n') f.write('\t'.join(map(str,[ans[1],ans[3],ans[4]])) + '\n') tag = 1 if tag == 0: f.write(self.interface['done_unknown']['unknown']) f.write('\n') # Save diagnosis results in csv file with open(self.save_path+'/'+self.sheet_name+'_Diagnosis.csv', 'w', encoding='utf_8_sig', newline='') as out_csv: out_writer = csv.writer(out_csv, quoting=csv.QUOTE_ALL) out_writer.writerow(['Node','Question','Answer','Yes score','No 
score','Diagnosis','Solution']) for key in sorted(self.answer_store.keys()): out_writer.writerow(map(str,self.answer_store[key])) # Rulebase OptioinMenu Controled Area if self.sheet_name == 'SiteEvaluation': for i in range(len(self.rulebase_list)): s = ACTIVE if i == 1 else DISABLED self.next_rulebase['menu'].entryconfigure(i, state=s) elif self.sheet_name == 'Soils': for j in range(len(self.rulebase_list)): s = DISABLED if j <= 1 else ACTIVE self.next_rulebase['menu'].entryconfigure(j, state=s) else: for index in _rulebase_diagnosis_done+[0]: # Adding "[0]" into list because the 'SiteEvaluate' rulebase does not include in list. self.next_rulebase['menu'].entryconfigure(index, state=DISABLED) if len(_rulebase_diagnosis_done) == 4: self.submit.config(state=DISABLED) # Recodeing this diagosed results _rulebase_diagnosis_recode[self.sheet_name] = self.answer_store # 診斷結果表格(使用TkSheet外部套件設置,沒用到) def table4result(self): result = []; tag = []; i = 0 for key in sorted(self.answer_store.keys()): if self.answer_store[key][2] == '*' or len(key) == 1: i+=1 branchs = 'main_branch' if len(key) == 1 else 'secondary_branch' result.append(['']*7) tag.append([i,branchs]) result.append(self.answer_store[key]) i+=1 ts = TkSheet(result, tag, self.interface) ts.mainloop() # 將儲存的結果用excel開啟 (方便使用者查閱) def open_csv_excel(self): os.startfile("%s/Save/%s_Diagnosis.csv" % (os.getcwd(),self.sheet_name)) # command_line = 'C:/Program Files/Microsoft Office/root/Office16/EXCEL.EXE %s/Save/%s_Diagnosis.csv' % (os.getcwd(),self.sheet_name) # subprocess.Popen(command_line) # 給新增、刪除、修改規則使用 (沒用到) def diagnosis_node_index(self,index): No_Node = self.df.No.dropna().tolist() No_Node.remove('Max Possible') yesDiag = [i for i in self.df.Yes.dropna().tolist() if int(re.search(r'#(\d+)',i).group(1)) == index.get()] noDiag = [i for i in No_Node if int(re.search(r'#(\d+)',i).group(1)) == index.get()] return(yesDiag, noDiag) #---------- Other Controled Area ----------# # 紀錄使用者回答規則路徑 def option_record(self, 
condiction, step=None): if condiction == 'showhand': count = 0 for key in sorted(self.answer_store.keys()): if int(key) % 100 != 0: self.optiontree.insert('','end',values=self.answer_store[key][:3]) else: count += 1 self.optiontree_iterater = len(self.answer_store)-count else: if step == 1: if self.cur_node % 100 != 0: self.optiontree.insert('',self.optiontree_iterater,values=self.answer_store[str(self.cur_node)][:3]) self.optiontree_iterater += 1 else: tree_items = self.optiontree.get_children() self.optiontree.delete(tree_items[-1]) self.optiontree_iterater -= 1 # 處理規則中隱藏的節點,加速專家系統的診斷 def note_hidden(self,note_index): note_hindden_error = 0 negative_flag = 0 answer = '' answer_list = {'Y':'yes','N':'no','U':'unknown'} opposite_answer = {'yes':'no','no':'yes','unknown':'unknown'} correspondence = self.df.Hidden_Answer[note_index] sp_correspondence = correspondence.split('-') if re.match('Answer', sp_correspondence[0]): csvfile = pd.read_csv(self.hidden_answer, delimiter=',') index = csvfile.index[csvfile['Q ID'] == int(sp_correspondence[1])].tolist()[0] answer = csvfile.iloc[index,3] answer = answer_list[answer] if re.match(r'[yes|no]',answer) and str(csvfile.iloc[index,4]) != 'nan': self.answer_diagnosis = csvfile.iloc[index,4] else: if re.match('Negative', sp_correspondence[0]): rulebase = sp_correspondence[0].split(' ')[1] negative_flag = 1 else: rulebase = sp_correspondence[0] note = sp_correspondence[1] # If the note's answer don't recode in the base, progress must search in the dictionary of rulebase diagnosis if rulebase != self.sheet_name: dictionary = _rulebase_diagnosis_recode[rulebase] # To confirm whether the correspond answer exists in dictory or not try: answer = opposite_answer[dictionary[note][2]] if negative_flag == 1 else dictionary[note][2] except: note_hindden_error = 1 else: try: answer = opposite_answer[self.answer_store[note][2]] if negative_flag == 1 else self.answer_store[note][2] except: note_hindden_error = 1 return(answer, 
note_hindden_error) # 問題的原因描述 def query_descript(self): self.query_desc_text.delete(1.0,END) texture = self.df.問題說明[self.next_node == self.df.Node].tolist()[0] if str(texture) == 'nan': self.query_desc_text.insert(1.0,'') else: self.query_desc_text.insert(1.0,texture) # 圖片的內容描述 def figure_descript(self): self.fig_desc_text.delete(1.0,END) texture = self.df.圖片說明[self.next_node == self.df.Node].tolist()[0] if str(texture) == 'nan': self.fig_desc_text.insert(1.0,'') else: self.fig_desc_text.insert(1.0,texture) # 程序重啟動 def program_restart(self): super().__destroy__() # Destroy rule structure graph self.windows.destroy() # Destroy main windows self.__init__(self.language, self.sheet_name, self.internal_code) # To get the newest dataframe self.gui() # 下一個診斷規則庫 def next_rulesbase_diag(self): sheet = self.rulebase.get() if sheet == '': messagebox.showwarning('WARNNING','You must choose one of rulebases!') else: _rulebase_diagnosis_done.append(self.rulebase_list.index(sheet)) self.windows.destroy() # Destroy windows must be the first step self.sheet_name = sheet self.__init__(self.language, self.sheet_name, self.internal_code) self.gui() # 外部程式連接 (UVA跟衛星影像圖分析程式) def external_link(self): if self.language == 1: CropVISTMapInfo = 'CropVISTMapInfoTWN.exe' UVA_Analysis = 'UAV_Analysis.exe' elif self.language == 2: CropVISTMapInfo = 'CropVISTMapInfoENG.exe' UVA_Analysis = 'UAV_Analysis.exe' exe = UVA_Analysis if str(self.cur_node) == '1101' else CropVISTMapInfo # p = subprocess.run(exe, shell=True) p = subprocess.call(exe, shell=True) # Checking progress does exist or not # command_line = 'TASKLIST', '/FI', 'imagename eq %s.exe' % exe # output = subprocess.check_output(command_line).decode() # last_line = output.strip().split('\r\n')[-1] # if not last_line.lower().startswith(exe.lower()): # self.program_restart() # Move figure of UVA/VIST to TempFile folder for f in os.listdir(self.photo_temp): if os.path.isfile(os.path.join(self.photo_temp,f)): findout = 
re.search(r'[NDVI|NDWI|SWC|\d+]_(\d)-(N\d+)',f) if findout: note_folder = findout.group(2) if not os.path.exists('%s/%s'% (self.photo_temp,note_folder)): os.mkdir('%s/%s'% (self.photo_temp,note_folder)) shutil.move(os.path.join(self.photo_temp,f),os.path.join(self.photo_temp,note_folder,f)) self.next_figure() self.next_button.config(state=ACTIVE) # 外部診斷結果匯入 def save_import(self): temp = {} file = filedialog.askopenfilename(initialdir = "./Save", title='Select Input file', filetype=[("excel file","*.xls"),("excel file","*.xlsx")]) xl = pd.ExcelFile(file) for sheet in xl.sheet_names: df = xl.parse(sheet) df = df.drop(df.index[df.Answer == '*']) df = df.reset_index(drop=True) df = df[['Node','Answer']] for i in range(len(df)): temp[df.Node[i]] = df.Answer[i] _answer_dict[sheet] = temp self.program_restart() # 外部診斷結果查詢 def loading_answer(self): try: answer = _answer_dict[self.sheet_name][self.cur_node] self.user_answer.set(answer) except Exception as e: pass # 衛星航拍圖開啟介面 def open_temp_images(self): def openimage(): types = imagename.get() if types == 'None': messagebox.showerror('ERROR','Please choose which image would you like to view!') else: if types == 'uva': floder = 'N11' elif types == 'ndvi': floder = 'N12' elif types == 'ndwi': floder = 'N13' elif types == 'swc': floder = 'N15' elif types == 'irrig': floder = 'N16' else: floder = 'N17' images = [os.path.join(self.photo_temp,floder, _) for _ in os.listdir(os.path.join(self.photo_temp,floder))] for image in images: im = Image.open(image) im.show() showtempimages = Toplevel(self.windows) showtempimages.title('Show Figures') showtempimages.geometry('320x120') imagename = StringVar() imagename.set(None) option_frame = LabelFrame(showtempimages, text='Please choose one type of figure') option_frame.place(x=10,y=10) for folder in ['N11','N12','N13','N15','N16','N17']: if not os.path.exists(os.path.join(self.photo_temp, folder)): os.mkdir('./%s/%s' % (self.photo_temp,folder)) locals()['%s_state' % folder] = ACTIVE if 
os.listdir(os.path.join(self.photo_temp, folder)) else DISABLED uva = Radiobutton(option_frame, text='UVA', variable=imagename, value='uva', state=locals()['%s_state' % 'N11']) uva.grid(row=0, column=0, padx=3, pady=1) ndvi = Radiobutton(option_frame, text='NDVI', variable=imagename, value='ndvi', state=locals()['%s_state' % 'N12']) ndvi.grid(row=0, column=1, padx=3, pady=1) ndwi = Radiobutton(option_frame, text='NDWI', variable=imagename, value='ndwi', state=locals()['%s_state' % 'N13']) ndwi.grid(row=0, column=2, padx=3, pady=1) swc = Radiobutton(option_frame, text='SWC', variable=imagename, value='swc', state=locals()['%s_state' % 'N15']) swc.grid(row=1, column=0, padx=3, pady=1) irrig = Radiobutton(option_frame, text='Irrigation', variable=imagename, value='irrig', state=locals()['%s_state' % 'N16']) irrig.grid(row=1, column=1, padx=3, pady=1) msavi = Radiobutton(option_frame, text='MSAVI', variable=imagename, value='msavi', state=locals()['%s_state' % 'N17']) msavi.grid(row=1, column=2, padx=3, pady=1) showbutton = Button(showtempimages, text='Show Figure',command=openimage) showbutton.place(x=200,y=90)
NCHU-rogen/ExpertSystem_Project
ExpertSystem_Functions.py
ExpertSystem_Functions.py
py
33,877
python
en
code
0
github-code
6
[ { "api_name": "os.environ", "line_number": 28, "usage_type": "attribute" }, { "api_name": "os.pathsep", "line_number": 28, "usage_type": "attribute" }, { "api_name": "pandas.read_excel", "line_number": 44, "usage_type": "call" }, { "api_name": "pandas.ExcelFile", ...
41489732999
import numpy as np from gym import spaces import gym import json import pickle class StateNormWrapper(gym.Wrapper): """ Normalize state value for environments. """ def __init__(self, env, file_name): super(StateNormWrapper, self).__init__(env) with open(file_name, "r") as read_file: rl_confs = json.load(read_file) # hyperparameters for rl training print(env.spec.id) data_path_prefix = rl_confs["data_collect_confs"]["data_path"]+env.spec.id.split("-")[0].lower()+'/' with open(data_path_prefix+'state_info.pkl', 'rb') as f: self.state_stats=pickle.load(f) def norm(self, s): mean = self.state_stats['mean'] std = self.state_stats['std'] s = (s-mean)/std return s def step(self, a): observation, reward, done, info = self.env.step(a) return self.norm(observation), reward, done, info def reset(self, **kwargs): observation = self.env.reset(**kwargs) return self.norm(observation) def render(self, **kwargs): pass if __name__ == '__main__': import matplotlib.pyplot as plt # test # EnvName = 'CartPole-v1' EnvName = 'LunarLander-v2' env = StateNormWrapper(gym.make(EnvName), file_name="rl_train.json") for _ in range(10): env.reset() for _ in range(1000): # env.render() a = env.action_space.sample() s, r, d, _ = env.step(a) # take a random action if d: break print(s) # print(s.shape) env.close()
quantumiracle/Cascading-Decision-Tree
src/rl/env_wrapper.py
env_wrapper.py
py
1,617
python
en
code
32
github-code
6
[ { "api_name": "gym.Wrapper", "line_number": 7, "usage_type": "attribute" }, { "api_name": "json.load", "line_number": 14, "usage_type": "call" }, { "api_name": "pickle.load", "line_number": 18, "usage_type": "call" }, { "api_name": "gym.make", "line_number": 4...
19547650475
import logging import multiprocessing import os from subprocess import run from Bio import SeqIO, AlignIO from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from constants import CD_HIT_CLUSTER_REPS_OUTPUT_FILE, CLUSTERS_NT_SEQS_DIR, CLUSTERS_ALIGNMENTS_DIR, \ NUMBER_OF_PROCESSES, FASTA_FILE_TYPE, ALIGNMENTS_FOR_TREE_DIR, DATA_DIR, ALIGNMENT_STRAIN_PATTERN, STRAINS_COUNT from data_analysis import build_strain_names_map from logging_config import worker_configurer def perform_clustering_on_proteins(aggregated_proteins_file_path): """Run the CD-HIT program to perform clustering on the strains""" logger = logging.getLogger() logger.info("Running CD-HIT on combined proteins file to create clustering") cd_hit_args = " ".join(["cd-hit", "-i", aggregated_proteins_file_path, "-o", CD_HIT_CLUSTER_REPS_OUTPUT_FILE, "-c 0.70", "-n 5", "-M 16000", "-g 1", "-p 1"]) cd_hit_return_code = run(cd_hit_args, shell=True).returncode logger.info("Finished running CD-HIT with return code %d" % cd_hit_return_code) return cd_hit_return_code def perform_clustering_on_cds(input_file, output_file): """Run the CD-HIT-EST program to perform clustering on the strains representatives and pseudogenes""" logger = logging.getLogger() logger.info("Running CD-HIT-EST on combined representative and pseudogene cds file to create clustering") cd_hit_est_args = " ".join(["cd-hit-est", "-i", input_file, "-o", output_file, "-c 0.8", "-n 5", "-M 16000", "-g 1", "-p 1", "-d 30"]) cd_hit_est_return_code = run(cd_hit_est_args, shell=True).returncode logger.info("Finished running CD-HIT with return code %d" % cd_hit_est_return_code) return cd_hit_est_return_code def perform_alignment_on_core_clusters(log_queue): """Run MAFFT & Gblocks tools on fasta files of protein nucleotide seqs for each core cluster""" logger = logging.getLogger(__name__) logger.info("Running MAFFT & Gblocks on core clusters for alignment") if not os.path.exists(CLUSTERS_NT_SEQS_DIR): logger.error("No clusters dir found, exiting") 
exit(1) if not os.path.exists(CLUSTERS_ALIGNMENTS_DIR): os.makedirs(CLUSTERS_ALIGNMENTS_DIR) job_queue = multiprocessing.Queue() prepare_alignment_jobs(job_queue) workers = [ multiprocessing.Process(target=perform_alignment_and_pruning, args=(i, job_queue, worker_configurer, log_queue)) for i in range(NUMBER_OF_PROCESSES)] for w in workers: w.start() job_queue.put(None) for w in workers: w.join() logger.info("Finished running MAFFT for all clusters") def prepare_alignment_jobs(job_queue): """Put all downloaded strain dirs in job queue for workers""" core_clusters = os.listdir(CLUSTERS_NT_SEQS_DIR) for cluster_file in core_clusters: job_queue.put(cluster_file) def perform_alignment_and_pruning(worker_id, job_queue, configurer, log_queue): """ Perform MAFFT alignment and Gblocks pruning for a core cluster fasta file """ configurer(log_queue) logger = logging.getLogger(__name__ + "_worker_" + str(worker_id)) while True: cluster_file = job_queue.get() if cluster_file is None: job_queue.put(None) break logger.info("Running MAFFT for %s" % cluster_file) alignment_stdout = open("alignment_stdout.log", "w") alignment_stderr = open("alignment_stderr.log", "w") cluster_alignment_filename = cluster_file + "_alignment" if not os.path.exists(os.path.join(CLUSTERS_ALIGNMENTS_DIR, cluster_alignment_filename)): cluster_alignment_file = open(os.path.join(CLUSTERS_ALIGNMENTS_DIR, cluster_alignment_filename), 'w') mafft_args = " ".join(["mafft", "--auto", os.path.join(CLUSTERS_NT_SEQS_DIR, cluster_file)]) mafft_return_code = run(mafft_args, shell=True, stdout=cluster_alignment_file, stderr=alignment_stderr).returncode logger.info("Finished running MAFFT for %s with return code %d" % (cluster_file, mafft_return_code)) cluster_alignment_file.close() logger.info("Running GBlocks for %s" % cluster_file) gblocks_args = " ".join(["Gblocks", os.path.join(CLUSTERS_ALIGNMENTS_DIR, cluster_alignment_filename), "-t=d", "-b5=a", "-p=n"]) gblocks_return_code = run(gblocks_args, shell=True, 
stdout=alignment_stdout, stderr=alignment_stderr).returncode logger.info( "Finished running Gblocks for alignment %s with return code %d" % (cluster_alignment_filename, gblocks_return_code)) def prepare_alignments_for_tree(log_queue): """Edit each alignment to remove invariant positions, pad missing strain seqs & concatenate all alignments""" logger = logging.getLogger(__name__) logger.info("Preparing core clusters alignments for tree") if not os.path.exists(CLUSTERS_ALIGNMENTS_DIR): logger.error("No alignments dir found, exiting") exit(1) if not os.path.exists(ALIGNMENTS_FOR_TREE_DIR): os.makedirs(ALIGNMENTS_FOR_TREE_DIR) job_queue = multiprocessing.Queue() prepare_alignment_editing_jobs(job_queue) workers = [ multiprocessing.Process(target=perform_alignment_editing, args=(i, job_queue, worker_configurer, log_queue)) for i in range(NUMBER_OF_PROCESSES)] for w in workers: w.start() job_queue.put(None) for w in workers: w.join() logger.info("Finished editing all alignments, concatenating") edited_alignment_files = os.listdir(ALIGNMENTS_FOR_TREE_DIR) concatenated_alignment = None concatenated_alignment_file = os.path.join(DATA_DIR, "all_alignments") for edited_alignment_file in edited_alignment_files: logger.info("Concatenating alignment %s" % edited_alignment_file) with open(os.path.join(ALIGNMENTS_FOR_TREE_DIR, edited_alignment_file), "r") as f: edited_alignment = AlignIO.read(f, FASTA_FILE_TYPE) if not concatenated_alignment: concatenated_alignment = edited_alignment[:, :] else: concatenated_alignment += edited_alignment[:, :] AlignIO.write(concatenated_alignment, open(concatenated_alignment_file, "w"), FASTA_FILE_TYPE) logger.info("Finished concatenating all alignments, written to %s" % concatenated_alignment_file) def prepare_alignment_editing_jobs(job_queue): """Put all downloaded strain dirs in job queue for workers""" alignments = os.listdir(CLUSTERS_ALIGNMENTS_DIR) for alignment_file in alignments: if alignment_file.endswith("-gb"): 
job_queue.put(alignment_file) def perform_alignment_editing(worker_id, job_queue, configurer, log_queue): """ Perform alignment editing """ configurer(log_queue) logger = logging.getLogger(__name__ + "_worker_" + str(worker_id)) while True: alignment_file = job_queue.get() if alignment_file is None: job_queue.put(None) break logger.info("Editing alignment %s" % alignment_file) alignment = AlignIO.read(open(os.path.join(CLUSTERS_ALIGNMENTS_DIR, alignment_file), "r"), FASTA_FILE_TYPE) edited_alignment = None for col_idx in range(alignment.get_alignment_length()): col = alignment[:, col_idx:col_idx + 1] col_str = alignment[:, col_idx] if not all(c == col_str[0] for c in col_str): if not edited_alignment: edited_alignment = col else: edited_alignment += col alignment_seq_len = edited_alignment.get_alignment_length() logger.info("alignment_seq_len = %d" % alignment_seq_len) strain_idx = 0 while strain_idx < STRAINS_COUNT: logger.info("in while - strain_idx = %d" % strain_idx) if len(edited_alignment) > strain_idx: seq = edited_alignment[strain_idx] seq_strain_idx = int(ALIGNMENT_STRAIN_PATTERN.match(seq.id).group(1)) logger.info("checking if strain idx %d < seq_strain_idx %d" % (strain_idx, seq_strain_idx)) if strain_idx < seq_strain_idx: for i in range(seq_strain_idx - strain_idx): logger.info("adding padded seq at idx %d" % (strain_idx + i)) edited_alignment._records.insert(strain_idx + i, SeqRecord(Seq(alignment_seq_len * '-'), id="[%d] padding" % (strain_idx + i))) strain_idx += (seq_strain_idx - strain_idx + 1) continue strain_idx += 1 else: logger.info("adding padded seq at end of alignment list") edited_alignment.append(SeqRecord(Seq(alignment_seq_len * '-'), id="[%d] padding" % strain_idx)) strain_idx += 1 alignment_file_edited = os.path.join(ALIGNMENTS_FOR_TREE_DIR, alignment_file) logger.info("Finished padding alignment - writing to file %s" % alignment_file_edited) AlignIO.write(edited_alignment, open(alignment_file_edited, "w"), FASTA_FILE_TYPE) def 
format_concatenated_alignment(): logger = logging.getLogger(__name__) strain_names_map = build_strain_names_map() tree_alignment = AlignIO.read(open(os.path.join(DATA_DIR, "all_alignments"), "r"), FASTA_FILE_TYPE) tree_alignment_filtered = AlignIO.MultipleSeqAlignment([]) for id, strain in zip(range(STRAINS_COUNT), tree_alignment): if all(c == '-' for c in strain.seq): logger.info("skipping filtered strain %d" % id) else: logger.info("adding id to strain %d" % id) strain.id = "[" + str(id) + "]" + strain_names_map[id] strain.description = '' tree_alignment_filtered.append(strain) AlignIO.write(tree_alignment_filtered, open(os.path.join(DATA_DIR, "filtered_tree_alignment"), "w"), FASTA_FILE_TYPE)
yarivz/pa-pseudogene
external_tools.py
external_tools.py
py
9,994
python
en
code
0
github-code
6
[ { "api_name": "logging.getLogger", "line_number": 18, "usage_type": "call" }, { "api_name": "constants.CD_HIT_CLUSTER_REPS_OUTPUT_FILE", "line_number": 20, "usage_type": "name" }, { "api_name": "subprocess.run", "line_number": 22, "usage_type": "call" }, { "api_na...
41491508331
# ----------- Import statements ------------ import math; import numpy; import matplotlib.pyplot as plt; # ------------ Custom functions ------------ # A-F sums def A(xlist, ylist, y_uncert): A = 0; for i in range(len(xlist)): A += xlist[i] / (y_uncert[i])**2; return A; def B(xlist, ylist, y_uncert): B = 0; for i in range(len(xlist)): B += 1.0 / (y_uncert[i])**2; return B; def C(xlist, ylist, y_uncert): C = 0; for i in range(len(xlist)): C += ylist[i] / (y_uncert[i])**2; return C; def D(xlist, ylist, y_uncert): D = 0; for i in range(len(xlist)): D += (xlist[i])**2 / (y_uncert[i])**2; return D; def E(xlist, ylist, y_uncert): E = 0; for i in range(len(xlist)): E += (xlist[i]) * (ylist[i]) / (y_uncert[i])**2; return E; def F(xlist, ylist, y_uncert): F = 0; for i in range(len(xlist)): F += (ylist[i])**2 / (y_uncert[i])**2; return F; # chi-square def s_m(xlist, ylist, y_uncert, a, b): s_m = 0; for i in range(len(xlist)): s_m += (ylist[i] - a*xlist[i] - b)**2 / (y_uncert[i])**2; return s_m; # average y-value def avg(alist): avg = 0; for i in range(len(alist)): avg += ylist[i]; return avg; # coefficient of determination (r^2) def r2(xlist, ylist, y_uncert, a, b, y_avg): r2 = 0; num = 0; denom = 0; for i in range(len(xlist)): num += (a*xlist[i] + b - y_avg)**2; denom += (ylist[i] - y_avg)**2; r2 = num / denom; return r2; # ------------ Hardcode section ------------ # Hardcode these values xlist = [65, 75, 85, 95, 105]; x_uncert = []; ylist = [-20, 17, 42, 94, 127]; y_uncert = [1, 1, 1, 1, 1]; # -------------- Main program -------------- # Calculate average y-value y_avg = avg(ylist); # Assign the A-F sum values to variables A-F A = A(xlist, ylist, y_uncert); B = B(xlist, ylist, y_uncert); C = C(xlist, ylist, y_uncert); D = D(xlist, ylist, y_uncert); E = E(xlist, ylist, y_uncert); F = F(xlist, ylist, y_uncert); # y = ax + b is the best-fit line a = (B*E - A*C) / (B*D - A*A) b = (C*D - A*E) / (B*D - A*A) # Calculate chi-square s_m = s_m(xlist, ylist, y_uncert, a, b); # 
Calculate degrees of freedom ndf = len(xlist) - 2; # 2 parameters: a,b # Calculate closeness of fit (should be as close to 1 as possible) fit = s_m / ndf; # Calculate coefficient of determination r^2 and r r2 = r2(xlist, ylist, y_uncert, a, b, y_avg); if (a > 0): r = math.sqrt(r2); else: r = -math.sqrt(r2); # ------------ Console output ------------- # Print the linear regression model print('T = ' + str(b) + ' + ' + str(a) +'P'); # equation y=ax+b print("A = " + str(b)); # a-value print("B = " + str(a)); # b-value print("S_m = " + str(s_m) + " (chi-square)"); # chi-square print("ndf = " + str(ndf)); # degrees of freedom print("S_m/ndf = " + str(fit)); # closeness of fit (chi-square) print("p-value ~ 0 due to large S_m"); # p-value print("coeff. of determ.: r^2 = " + str(r2)); # coefficient of determination print("correlation coeff.: r = " + str(r)); # correlation coefficient print("\n"); print("Absolute zero (accepted): -273.15 C"); # theoretical value of absolute zero print("Absolute zero (fitted): " + str(b) + " C"); # experimental value of absolute zero # ------------- File output --------------- f = open("shi_homework05_results.txt", "w+") # Print the linear regression model f.write('T = ' + str(b) + ' + ' + str(a) +'P' + "\n"); # equation y=ax+b f.write("A = " + str(b) + "\n"); # a-value f.write("B = " + str(a) + "\n"); # b-value f.write("S_m = " + str(s_m) + " (chi-square)" + "\n"); # chi-square f.write("ndf = " + str(ndf) + "\n"); # degrees of freedom f.write("S_m/ndf = " + str(fit) + "\n"); # closeness of fit (chi-square) f.write("p-value ~ 0 due to large S_m" + "\n"); # p-value f.write("coeff. 
of determ.: r^2 = " + str(r2) + "\n"); # coefficient of determination f.write("correlation coeff.: r = " + str(r) + "\n"); # correlation coefficient f.write("\n"); f.write("Absolute zero (accepted): -273.15 C" + "\n"); # theoretical value of absolute zero f.write("Absolute zero (fitted): " + str(b) + " C" + "\n"); # experimental value of absolute zero f.close(); # ------------ Plotting output ------------- # Set parameters for plot print("range of x-values: [" + str(min(xlist)) + " , " + str(max(xlist)) + "]"); print("range of y-values: [" + str(min(ylist)) + " , " + str(max(ylist)) + "]"); print("Enter min and max for axes of plot:"); xmin = float(input("xmin: ")); xmax = float(input("xmax: ")); ymin = float(input("ymin: ")); ymax = float(input("ymax: ")); equation = 'y=' + str(a) + 'x+' + str(b); # Plot the axes and labels (need to hardcode xlabel and ylabel) plt.title("Temperature vs Pressure"); plt.xlabel("P (mm Hg)"); plt.ylabel("T (degrees C)"); plt.axis([xmin, xmax, ymin, ymax]); # Plot the data points plt.plot(xlist, ylist, 'ro'); # Plot the best-fit line x = numpy.linspace(xmin,xmax,100); y = a*x+b; plt.plot(x, y, '-r', label=equation); plt.legend(loc='upper left'); plt.show();
henryshi1/phy-153
Homework/hw05/shi_homework05.py
shi_homework05.py
py
5,372
python
en
code
0
github-code
6
[ { "api_name": "math.sqrt", "line_number": 107, "usage_type": "call" }, { "api_name": "math.sqrt", "line_number": 109, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.title", "line_number": 162, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", ...
35696320275
from flask import Flask ,request,Response,session,jsonify,render_template,redirect,url_for from flask.json import JSONDecoder from google.protobuf import message from keras.utils.generic_utils import default from db import create_db,db from models import imgModel,User from flask_restful import marshal_with,fields,abort import os from werkzeug.utils import redirect, secure_filename from keras.models import load_model from keras.preprocessing import image import keras import numpy as np import pandas as pd from flask_cors import CORS,cross_origin import base64 from io import BytesIO from PIL import Image from datetime import datetime from sqlalchemy import desc import matplotlib.pyplot as plt import seaborn as sns from pyrebase import pyrebase import pathlib import urllib.request import matplotlib matplotlib.use('Agg') app = Flask(__name__) CORS(app) cors = CORS(app, resources={r"/mobile/*": {"origins": '*'}}) UPLOAD_FOLDER=os.path.join('static','images') app.config['CORS HEADERS'] = 'Content-Type' app.config['SQLALCHEMY_DATABASE_URI']="sqlite:///imgDb.db" app.config['SQLALCHEMY_TRACK_MODIFICATIONS']= True app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER app.config['SECRET_KEY']="gyankoissah" firebaseConfig = { "apiKey": "AIzaSyDbZhN0J_vIeursbhHDLC0Byze4-CM_WR4", "authDomain": "dronetry-cbc09.firebaseapp.com", "databaseURL": "https://dronetry-cbc09-default-rtdb.firebaseio.com", "projectId": "dronetry-cbc09", "storageBucket": "dronetry-cbc09.appspot.com", "messagingSenderId": "475234377420", "appId": "1:475234377420:web:de636bed729d33c4ccac69", "measurementId": "G-EGHW1E7PFH", "serviceAccount": "service.json" }; firebase=pyrebase.initialize_app(firebaseConfig) database=firebase.database() storage=firebase.storage() create_db(app) resource_fields ={ "id":fields.Integer, "name":fields.String, "mimetype":fields.String, "img":fields.String } coffeModel =keras.models.load_model("files/CoffeModel.h5") cottonModel =keras.models.load_model("files/CottonModel.h5") cocoaModel = 
keras.models.load_model("files/CocoaModel.h5") def getPrediction(plant,filename): test_image = keras.preprocessing.image.load_img("static/images/"+filename,target_size=(256,256,3)) test_image = keras.preprocessing.image.img_to_array(test_image) test_image = np.expand_dims(test_image,axis=0) if plant == "coffe": prediction = coffeModel.predict(test_image) return prediction elif plant == "cotton": prediction = cottonModel.predict(test_image) return prediction elif plant =="cocoa": prediction = cocoaModel.predict(test_image) return prediction def getUserPosts(id): posts=imgModel.query.filter(imgModel.user==id).order_by(desc(imgModel.id)) data=[] for image in posts: data.append({'id':str(image.id),'image':image.name,'prediction':image.prediction,"crop":image.crop}) print(len(data)) return data def dataToDataframe(plant): user_id=session['user_info']['id'] posts=imgModel.query.filter((imgModel.user==user_id) & (imgModel.crop==plant)) predictions=[] for data in posts: predictions.append(data.prediction) if len(predictions) == 0: return "No file" else: if plant =='cotton': if os.path.exists("static/graphs/{}cotton.png".format(user_id)): os.remove("static/graphs/{}cotton.png".format(user_id)) picture=sns.countplot(x=predictions) plt.title("cotton") plt.xticks(rotation=25) plt.savefig("static/graphs/{}cotton.png".format(user_id)) return "file" else: picture=sns.countplot(x=predictions) plt.title("cotton") plt.xticks(rotation=25) plt.savefig("static/graphs/{}cotton.png".format(user_id)) return "file" elif plant == 'coffe': if os.path.exists("static/graphs/{}coffe.png".format(user_id)): os.remove("static/graphs/{}coffe.png".format(user_id)) picture=sns.countplot(x=predictions) plt.title("coffe") plt.xticks(rotation=25) plt.savefig("static/graphs/{}coffe.png".format(user_id)) return "file" else: picture=sns.countplot(x=predictions) plt.title("coffe") plt.xticks(rotation=25) plt.savefig("static/graphs/{}coffe.png".format(user_id)) return "file" elif plant=='cocoa': if 
os.path.exists("static/graphs/{}cocoa.png".format(user_id)): os.remove("static/graphs/{}cocoa.png".format(user_id)) picture=sns.countplot(x=predictions) plt.xticks(rotation=25) plt.title("cocoa") plt.savefig("static/graphs/{}cocoa.png".format(user_id)) return "file" else: picture=sns.countplot(x=predictions) plt.title("cocoa") plt.xticks(rotation=25) plt.savefig("static/graphs/{}cocoa.png".format(user_id)) return "file" @app.route("/home",methods=['GET','POST']) def home(): if request.method == 'POST': mail=request.form.get('email') passw=request.form.get('password') user = User.query.filter((User.email==mail) & (User.password==passw)).first() if user: session['user_info']={'id':user.id,'username':user.fullname,'contact':user.contact,'town':user.town} data=getUserPosts(user.id) return render_template('index.html',user_data=session['user_info'],posts=data) else: return render_template("login.html") else: if 'user_info' in session: id=session['user_info']['id'] data=getUserPosts(id) return render_template('index.html',user_data=session['user_info'],posts=data) else: return render_template('login.html') @app.route("/figure/<int:num>") def getFigure(num): user_id=session['user_info']['id'] if num==1: file=dataToDataframe("cocoa") if file == "file": return render_template("figure.html",crop='cocoa',user_data=session['user_info'],path="static/graphs/{}cocoa.png".format(user_id)) else: return render_template("figure.html",crop='no crop',user_data=session['user_info']) elif num==2: file=dataToDataframe("cotton") if file =='file': return render_template("figure.html",crop='cotton',user_data=session['user_info'],path="static/graphs/{}cotton.png".format(user_id)) else: return render_template("figure.html",crop='no crop',user_data=session['user_info']) elif num == 3: file=dataToDataframe("coffe") if file =='file': return render_template("figure.html",crop='coffe',user_data=session['user_info'],path="static/graphs/{}coffe.png".format(user_id)) else: return 
render_template("figure.html",crop='no crop',user_data=session['user_info']) else: return("index.html") @app.route("/web/login",methods=['GET','POST']) def Login(): return render_template("login.html") @app.route("/web/register",methods=['GET','POST']) def webRegister(): if request.method =='POST': username=request.form.get("username") phone=request.form.get("contact") city = request.form.get('town') mail = request.form.get('email') passw = request.form.get('password') new_user=User(email=mail,password=passw,fullname=username,contact=phone,town=city) db.session.add(new_user) db.session.commit() return render_template("login.html") else: return render_template("register.html") @app.route("/crop/<int:num>",methods=['GET']) def handleCrop(num): if 'user_info' in session: if num == 1: return render_template('upload.html',crop='cocoa',user_data=session['user_info']) elif num == 2: return render_template('upload.html',crop='cotton',user_data=session['user_info']) elif num == 3: return render_template('upload.html',crop='coffe',user_data=session['user_info']) else: return "sorry" @app.route("/upload",methods=['POST']) def upload(): if request.method=="POST": picture = request.files['photo'] plant = str(request.form['crop']) if plant == "cotton": classes=["diseased cotton leaf","diseased cotton plant","fresh cotton leaf","fresh cotton plant"] elif plant == "coffe": classes=["cercospora","healthy","miner","phoma","rust"] elif plant=="cocoa": classes=["blackpod ","frosty pod rot","healthy"] if not picture: return {"results":"No is file"} filename=secure_filename(picture.filename) picture.save(os.path.join(app.config['UPLOAD_FOLDER'], filename)) fullname=os.path.join(app.config['UPLOAD_FOLDER'],filename) prediction = getPrediction(plant,filename) pred= classes[prediction[0].argmax()] user_id=session['user_info']['id'] img=imgModel(img=picture.read(),name=filename,mimetype=picture.mimetype,crop=plant,user=int(user_id),prediction=pred) db.session.add(img) db.session.commit() 
return {"status":pred} else: return render_template("home.html") @app.route("/droneimages",methods=['GET']) def handleDroneImages(): images=[] files = storage.list_files() for file in files: url=storage.child(file.name).get_url(None) images.append(url) if len(images)==0: image='' else: image=images[-1] user_id=session['user_info']['id'] return render_template("droneimages.html",user_data=session['user_info'],link=image,id=user_id) @app.route("/logout",methods=['GET']) def logout(): if 'user_info' in session: session.pop("user_info",default=None) return redirect(url_for('Login')) else: return redirect(url_for('Login')) @app.route("/mobile/upload",methods=['POST','GET']) @cross_origin() def uploadMobile(): if request.method=="POST": data = request.get_json(force=True) picture=data['imageUrl'] plant=data['crop'] starter = picture.find(',') image_data = picture[starter+1:] image_data = bytes(image_data, encoding="ascii") picture = Image.open(BytesIO(base64.b64decode(image_data))) now=datetime.now() date_time = now.strftime("%m%d%Y%H%M%S") filename=str(date_time)+"image.jpg" picture.save('static/images/'+filename) if plant == "cotton": classes=["diseased cotton leaf","diseased cotton plant","fresh cotton leaf","fresh cotton plant"] elif plant == "coffe": classes=["cercospora","healthy","miner","phoma","rust"] elif plant=="cocoa": classes=["blackpod","frosty pod rot","healthy"] if not picture: return {"results":"No is file"} prediction = getPrediction(plant,filename) pred= classes[prediction[0].argmax()] img=imgModel(img=image_data,name=filename,mimetype='jpg',crop=plant,user=int(data['user_id']),prediction=pred) db.session.add(img) db.session.commit() data={"status":pred} return jsonify(data),200 if request.method=="GET": data = request.get_json(force=True) picture=data['imageUrl'] plant=data['crop'] starter = picture.find(',') image_data = picture[starter+1:] image_data = bytes(image_data, encoding="ascii") picture = Image.open(BytesIO(base64.b64decode(image_data))) 
now=datetime.now() date_time = now.strftime("%m%d%Y%H%M%S") filename=str(date_time)+"image.jpg" picture.save('static/images/'+filename) if plant == "cotton": classes=["diseased cotton leaf","diseased cotton plant","fresh cotton leaf","fresh cotton plant"] elif plant == "coffe": classes=["cercospora","healthy","miner","phoma","rust"] elif plant=="cocoa": classes=["blackpod","frosty pod rot","healthy"] if not picture: return {"results":"No is file"} prediction = getPrediction(plant,filename) pred= classes[prediction[0].argmax()] img=imgModel(img=image_data,name=filename,mimetype='jpg',crop=plant,user=int(session['user_info']['id']),prediction=pred) db.session.add(img) db.session.commit() data={"status":pred} return jsonify(data),200 @app.route("/mobile/droneImage",methods=['POST']) @cross_origin() def handleDroneImageCapture(): if request.method=='POST': data = request.get_json(force=True) plant=data['crop'] now=datetime.now() date_time = now.strftime("%m%d%Y%H%M%S") filename=str(date_time)+"image.jpg" urllib.request.urlretrieve(data['imageUrl'],'static/images/'+filename) picture=Image.open('static/images/'+filename) if plant == "cotton": classes=["diseased cotton leaf","diseased cotton plant","fresh cotton leaf","fresh cotton plant"] elif plant == "coffe": classes=["cercospora","healthy","miner","phoma","rust"] elif plant=="cocoa": classes=["blackpod","frosty pod rot","healthy"] if not picture: return {"results":"No is file"} prediction = getPrediction(plant,filename) pred= classes[prediction[0].argmax()] img=imgModel(img=filename,name=filename,mimetype='jpg',crop=plant,user=int(data['user_id']),prediction=pred) db.session.add(img) db.session.commit() data={"status":pred} return jsonify(data),200 @app.route('/mobile/create-user-mobile',methods=['POST']) @cross_origin() def createUser(): if request.method == "POST": data = request.get_json(force=True) user = User.query.filter_by(email=data['email']).first() if user: return {"error":"User already exist"} 
new_user=User(email=data['email'],password=data['password'],fullname=data['fullName'],contact=data['contact'],town=data['town']) db.session.add(new_user) db.session.commit() return {'id':new_user.id,'email':new_user.email,'password':new_user.password} @app.route('/mobile/user-mobile-login',methods=['POST']) @cross_origin() def logUserIn(): if request.method=="POST": data=request.get_json(force=True) user = User.query.filter((User.email==data['email']) & (User.password==data['password'])).first() if user : return {'id':user.id,'email':user.email} else: return {"error":"user not found"} @app.route("/mobile/get-user-posts/<int:id>",methods=['GET']) @cross_origin() def getUserImages(id): posts=imgModel.query.filter_by(user=id).order_by(desc(imgModel.id)) data=[] for image in posts: data.append({'id':str(image.id),'image':image.name,'prediction':image.prediction,"crop":image.crop}) return {'data':data} @app.route("/mobile/get-user-graph",methods=['GET','POST']) @cross_origin() def getUsergraphMobile(): if request.method =='POST': data=request.get_json(force=True) plant=str(data['plant']) id=int(data['user_id']) posts=imgModel.query.filter((imgModel.user==id) & (imgModel.crop==plant)) predictions=[] for data in posts: predictions.append(data.prediction) if len(predictions) == 0: return {'path':'no file'} else: if plant =='cotton': if os.path.exists("static/graphs/{}cotton.png".format(id)): os.remove("static/graphs/{}cotton.png".format(id)) picture=sns.countplot(x=predictions) plt.title("cotton") plt.xticks(rotation=20, ha='right') plt.savefig("static/graphs/{}cotton.png".format(id)) return {'path':'static/graphs/{}cotton.png'.format(id)} else: picture=sns.countplot(x=predictions) plt.title("cotton") plt.xticks(rotation=20, ha='right') plt.savefig("static/graphs/{}cotton.png".format(id)) return {'path':'static/graphs/{}cotton.png'.format(id)} elif plant == 'coffe': if os.path.exists("static/graphs/{}coffe.png".format(id)): os.remove("static/graphs/{}coffe.png".format(id)) 
picture=sns.countplot(x=predictions) plt.title("coffe") plt.savefig("static/graphs/{}coffe.png".format(id)) return {'path':'static/graphs/{}coffe.png'.format(id)} else: picture=sns.countplot(x=predictions) plt.title("coffe") plt.savefig("static/graphs/{}coffe.png".format(id)) return {'path':'static/graphs/{}coffe.png'.format(id)} elif plant=="cocoa": if os.path.exists("static/graphs/{}cocoa.png".format(id)): os.remove("static/graphs/{}cocoa.png".format(id)) picture=sns.countplot(x=predictions) plt.title("coffe") plt.savefig("static/graphs/{}cocoa.png".format(id)) return {'path':'static/graphs/{}cocoa.png'.format(id)} else: picture=sns.countplot(x=predictions) plt.title("coffe") plt.savefig("static/graphs/{}cocoa.png".format(id)) return {'path':'static/graphs/{}cocoa.png'.format(id)} if __name__ == "__main__": app.run(debug=True)
yussif-issah/finalwork
main.py
main.py
py
18,083
python
en
code
0
github-code
6
[ { "api_name": "matplotlib.use", "line_number": 27, "usage_type": "call" }, { "api_name": "flask.Flask", "line_number": 29, "usage_type": "call" }, { "api_name": "flask_cors.CORS", "line_number": 30, "usage_type": "call" }, { "api_name": "flask_cors.CORS", "lin...
41380069765
from functools import wraps import time from utils.mics import colorstr def fun_run_time(func): ''' 装饰器,用于获取函数的执行时间 放在函数前,如 @fun_run_time() def xxx(): ''' @wraps(func)#可删去,是用来显示原始函数名的 def _inner(*args, **kwargs): s_time = time.time() ret = func(*args, **kwargs) e_time = time.time() # print(colorstr("\t----function [{}] costs {} s".format(func.__name__, e_time-s_time), 'yellow')) return ret return _inner def tic(): ''' 开始计时。 t = tic() ''' s_time = time.time() return s_time def toc(s_time, word='tic-toc', act_number = 1, mute=True): ''' 结束计时,返回毫秒数。 t = toc(t, '模块函数名', '处理次数', True)\n mute代表不打印。 ''' e_time = time.time() temp = int((e_time-s_time)*1000) if not mute: if act_number > 1: print(colorstr(f"\t----module [{word}] costs {temp} ms, for {act_number} actions, ({int(temp/act_number)}ms/action)", 'yellow')) else: print(colorstr(f"\t----module [{word}] costs {temp} ms", 'yellow')) return temp
Backlory/motionDetection
utils/timers.py
timers.py
py
1,229
python
en
code
0
github-code
6
[ { "api_name": "time.time", "line_number": 14, "usage_type": "call" }, { "api_name": "time.time", "line_number": 16, "usage_type": "call" }, { "api_name": "utils.mics.colorstr", "line_number": 18, "usage_type": "call" }, { "api_name": "functools.wraps", "line_n...
39005364665
import numpy as np import datetime import math def anagram(s1,s2): s1=list(s1) s2=list(s2) if len(s1)==(len(s2)): s1=set(s1) s2=set(s2) s3=set() if s1^s2==s3: print("Anagram") else: print("not an anagram") else: print("String are ****NOT*** Anagram") def primerange(num): newarr=[] for num in range(0,num+1): if num>1: for i in range(2,num): if num%i==0: break else: #print(num,' ',sep=',',end='') newarr.append(num) print(newarr) def primeanagram(num): cnt=0 newarr=[] new=[] for num in range(0,num+1): if num>1: for i in range(2,num): if num%i==0: break else: #print(num,' ',sep=',',end='') newarr.append(num) print(newarr) for i in range(0,len(newarr)): for j in range(i+1,len(newarr)): newarr[i]=str(newarr[i]) newarr[j]=str(newarr[j]) if len(newarr[i])== len(newarr[j]): s1=set(newarr[i]) s2=set(newarr[j]) s3=set() if s1^s2==s3: print(" ******Anagram******") cnt+=1 new.append(s1) new.append(s2) print("Anagram:-",new) print("total count",cnt) else: print("not an anagram") else: print("String are ****NOT*** Anagram") for i in range(0,len(newarr)): newarr[i]=str(newarr[i]) newarr[j]=newarr[::-1] if len(newarr[i])==len(newarr[j]): if newarr[i]==newarr[j]: print("palindrome") cnt+=1 print(cnt) else: print() else: print() def insertionsort(alist): # alist=alist.split(" ") for i in range(0,len(alist)): print(len(alist)) current=alist[i] while i>0 and alist[i-1]>current: alist[i]=alist[i-1] i=i-1 alist[i]=current print (alist) def bubblesort(alist): # alist=alist.split(" ") for i in range(1,len(alist)): for j in range(i): if alist[j]>alist[j+1]: temp=alist[j] alist[j]=alist[j+1] alist[j+1]=temp print(alist) print(len(alist)) def convert(string): li=list(string.split(" ")) return li def binaryserach(alist,key,length): start=0 end=length-1 mid=0 print(start,end) while start<=end: mid=end//2 if key == (alist[mid]): print("\nEntered number is present at position",key,mid) return -1 elif key<alist[mid]: end=mid-1 elif key > alist[mid]: start=mid +1 print("\n Element not found") def 
dayofweek(m,d,y): # m=int(input("Enter the month :")) # d=int(input("Enter the date :")) # y=int(input("Enter the year :")) today=datetime.datetime(y,m,d) Day=today.weekday() print(Day) yo=y-(14-m)/12 x=yo +(yo/4)-(yo/100)+(yo/400) print(yo,x) mo= m+12*((14-m)/12)-2 do=(d+x+(31*mo/12))%7 print(x,mo,do) d1=math.floor(do) print(d1) if Day==0: print("Monday") elif Day ==1: print("Tuesday") elif Day ==2: print("Wednesday") elif Day ==3: print("Thursday") elif Day ==4: print("Friday") elif Day ==5: print("Saturday") else: print("Sunday") if d1==1: print("Monday") elif d1 ==2: print("Tuesday") elif d1 ==3: print("Wednesday") elif d1 ==4: print("Thursday") elif d1 ==5: print("Friday") elif d1 ==6 : print("Saturday") else: print("Sunday") def tempCon(c,f): a=c*9/5 +32 print("Celsius to fahrenheit: ",a) b = (f-32)*5/9 print("fahrenheit to Celsius: ",b) def monpay(Y,R,P): r=R/(12*100) n=Y*12 p1=P*r p2=math.pow(1/(1+r),n) p3=1-p2 print("Enter the number of years in months :- ",n) print("Enter the rate of interset ") print("Payment to be paid monthly:",p1/p3) print("Total amount to be paid back all together",(p1/p3)*n) print(n,r) print(p1,p2) def dectobinary(n): binaryarr=[0]*8 i=0 while n>0: binaryarr[i]=n%2 n=int(n/2) i+=1 for j in range(7,-1,-1): print(binaryarr[j],end=" ") return binaryarr def swap(dec): j=7 for i in range(3,-1,-1): temp=dec[i] dec[i]=dec[j] dec[j]=temp j-=1 print() for j in range(7,-1,-1): print(dec[j],end=" ") def bintodec(binaryarr): for i in range(0,len(binaryarr)): if binaryarr[i]==1: k=math.pow(2,i) print(k) elif binaryarr[i]==0: print() def mergesort(alist): if len(alist)>1: mid=len(alist)//2 lefthalf=alist[:mid] righthalf=alist[mid:] mergesort(lefthalf) mergesort(righthalf) print(mid) print(lefthalf) print(righthalf) for i in range(1,len(lefthalf)): for j in range(i): if lefthalf[j]> lefthalf[j+1]: temp=lefthalf[j] lefthalf[j]=lefthalf[j+1] lefthalf[j+1]=temp i+=1 print(lefthalf) for i in range(1,len(righthalf)): for j in range(i): if righthalf[j] 
> righthalf[j+1]: temp=righthalf[j] righthalf[j]=righthalf[j+1] righthalf[j+1]=temp print(righthalf) for i in range(1,len(alist)): for j in range(0,i): if alist[j]>alist[j+1]: temp=alist[j] alist[j]=alist[j+1] alist[j+1]=temp print(alist) def vendmac(notes): print("Amount Enterds into vebding machine",notes) no=[] n1=[1000,500,200,100,50,20,10,5,2,1] i=-1 while notes>=0: if i<len(n1)-1: i+=1 if notes>= n1[i]: notes=notes-n1[i] print(n1[i]) i=-1
Rohan2596/Python_1_moth
Python_1_Month/Algorithms_programs/AlogoUtility.py
AlogoUtility.py
py
4,962
python
en
code
0
github-code
6
[ { "api_name": "datetime.datetime", "line_number": 123, "usage_type": "call" }, { "api_name": "math.floor", "line_number": 132, "usage_type": "call" }, { "api_name": "math.pow", "line_number": 174, "usage_type": "call" }, { "api_name": "math.pow", "line_number"...
27213609715
from collections import deque, defaultdict def bfs(n, adj): visited = [False] * (n+1) min_dist = [1e9] * (n+1) visited[1] = True min_dist[1] = 0 q = deque([1]) while q: cur = q.popleft() for a in adj[cur]: if not visited[a]: q.append(a) visited[a] = True min_dist[a] = min_dist[cur]+1 max_dist = max(min_dist[1:]) return min_dist.count(max_dist) def solution(n, edge): edge.sort() adj = defaultdict(list) for start, end in edge: adj[start].append(end) adj[end].append(start) return bfs(n, adj)
hammii/Algorithm
Programmers_python/가장_먼_노드.py
가장_먼_노드.py
py
677
python
en
code
2
github-code
6
[ { "api_name": "collections.deque", "line_number": 8, "usage_type": "call" }, { "api_name": "collections.defaultdict", "line_number": 25, "usage_type": "call" } ]
33344135925
import os import logging from pathlib import Path from llama_index import ( GPTSimpleVectorIndex, GPTSimpleKeywordTableIndex, SimpleDirectoryReader ) from llama_index.indices.composability import ComposableGraph # Initialise Logger logging.basicConfig(level=logging.INFO, format="[{asctime}] - {funcName} - {message}", style='{') logger = logging.getLogger("BUILD_INDEX") openai_api_key = os.environ.get('OPENAI_API_KEY') # Load Documents cv_root_directory = Path()/'data' for directory_index in range(1,4): document = SimpleDirectoryReader(cv_root_directory/f'cv{directory_index}').load_data() index = GPTSimpleVectorIndex.from_documents(document) index_file = Path()/'data'/f'cv_{directory_index}_index.json' # save index to disk index.save_to_disk(index_file) # Select one index to prove need for composability # load index from disk index = GPTSimpleVectorIndex.load_from_disk(cv_root_directory/'cv_1_index.json') # Query index spock_address = index.query("Where does Spock Sarek Live ?") logger.info(spock_address) uhura_address = index.query("Where does Uhura Live ?") logger.info(uhura_address) # Compose indices for query # Generate indices from files index_cv_1 = GPTSimpleVectorIndex.load_from_disk(cv_root_directory/'cv_1_index.json') index_cv_2 = GPTSimpleVectorIndex.load_from_disk(cv_root_directory/'cv_2_index.json') index_cv_3 = GPTSimpleVectorIndex.load_from_disk(cv_root_directory/'cv_3_index.json') # Write up summaries cv_1_summary="Curriculum Vitae of Nyota Uhura" cv_2_summary="Curriculum Vitae of Spock Sarek" cv_3_summary="Curriculum Vitae of James T. 
Kirk" # set query config query_configs = [ { "index_struct_type": "simple_dict", "query_mode": "default", "query_kwargs": { "similarity_top_k": 1 } }, { "index_struct_type": "keyword_table", "query_mode": "simple", "query_kwargs": {} }, ] index_all_cvs = ComposableGraph.from_indices( GPTSimpleKeywordTableIndex, [index_cv_1, index_cv_2, index_cv_3], index_summaries=[cv_1_summary, cv_2_summary, cv_3_summary], max_keywords_per_chunk=50 ) # Query again across indices spock_address = index_all_cvs.query("Where does Spock Sarek Live ?") uhura_actress = index_all_cvs.query("Who played Nyota Uhura ?") kirk_players = index_all_cvs.query("Where has James Kirk been portrayed ?") logger.info(spock_address) logger.info(uhura_actress) logger.info(kirk_players)
gilgamesh7/iliad_llama
04_local_data_update_index.py
04_local_data_update_index.py
py
2,482
python
en
code
0
github-code
6
[ { "api_name": "logging.basicConfig", "line_number": 13, "usage_type": "call" }, { "api_name": "logging.INFO", "line_number": 13, "usage_type": "attribute" }, { "api_name": "logging.getLogger", "line_number": 14, "usage_type": "call" }, { "api_name": "os.environ.ge...
73439288188
import argparse from subcommands.setup.parser import parser as setup_parser from subcommands.export.parser import parser as export_parser from subcommands.info.parser import parser as info_parser from subcommands.process.parser import parser as process_parser from subcommands.prune.parser import parser as prune_parser from subcommands.version.parser import parser as version_parser if __name__ == "__main__": parser = argparse.ArgumentParser( description='Runs data processing live for incoming data' ) subparsers = parser.add_subparsers() subparsers.add_parser( name='setup', help='Generate config files for setting up Hotspur', parents=[setup_parser] ) subparsers.add_parser( name='process', help='Automatically find and process EM data', parents=[process_parser] ) subparsers.add_parser( name='info', help='Retrieve info about projects and sessions', parents=[info_parser] ) subparsers.add_parser( name='export', help='Export data alongside Relion metadata star files', parents=[export_parser] ) subparsers.add_parser( name='prune', help='Remove processed data and databases for projects or sessions', parents=[prune_parser] ) subparsers.add_parser( name='version', help='Print the current version', parents=[version_parser] ) args = parser.parse_args() if 'config' in args: from utils.config import load_config load_config(args.config) if 'func' in args: args.func(args) else: parser.print_help()
zruan/hotspur_command
hotspur.py
hotspur.py
py
1,681
python
en
code
0
github-code
6
[ { "api_name": "argparse.ArgumentParser", "line_number": 13, "usage_type": "call" }, { "api_name": "subcommands.setup.parser.parser", "line_number": 21, "usage_type": "name" }, { "api_name": "subcommands.process.parser.parser", "line_number": 26, "usage_type": "name" }, ...
20289549716
from stat_arb.src.data_loader.dao.dataframe.RawPostgresSampledDataLoader import RawPostgresSampledDataLoader from stat_arb.src.data_loader.dao.dataframe.ClickhouseTradesDataLoader import ClickhouseTradesDataLoader from stat_arb.src.data_loader.database import database_config from datetime import datetime from stat_arb.src.data_loader.general.Interval import Interval from stat_arb.src.data_loader.general.SamplingSchemas import SamplingSchemas from static_data import PATH # queries = [ # {'source': 'MOEX_DIRECT', 'instrument': 'USD/RUB_T+1', 'size': 1_000_000}, # {'source': 'MOEX_DIRECT', 'instrument': 'EUR/USD_T+1', 'size': 1_000_000}, # {'source': 'MOEX_DIRECT', 'instrument': 'CNH/RUB_T+1', 'size': 1_000_000}, # {'source': 'RBI', 'instrument': 'EUR/USD_T+2', 'size': 1_000_000}, # {'source': 'RBI', 'instrument': 'USD/CNH_T+2', 'size': 1_000_000}, # ] queries = [ {'source': 'MOEX', 'instrument': 'USD/RUB_T+1', 'size': 3_000_000} ] interval = Interval(datetime(2021, 1, 1), datetime(2021, 12, 31)) def load_data(query: dict, interval: Interval): print('loading:\n', query, '\n', interval, '\n') with database_config.sql_engine_fxet_db1.connect() as connection: loader = RawPostgresSampledDataLoader(connection.connection.connection) vwap = loader.load_vwap_for_interval(query['source'], query['instrument'], interval, SamplingSchemas.FIRST_PRICE_PREDICTION_SCHEMA, query['size']) return vwap if __name__ == '__main__': for q in queries: source = q['source'].split('_')[0].lower() instrument = q['instrument'].split('_')[0].replace('/', '').upper() spot_data = load_data(q, interval) spot_data.to_csv(f'{PATH}/{source}/{instrument}.csv')
v-buchkov/statistical_arbitrage_backtester
download_hourly_data.py
download_hourly_data.py
py
1,939
python
en
code
2
github-code
6
[ { "api_name": "stat_arb.src.data_loader.general.Interval.Interval", "line_number": 21, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 21, "usage_type": "call" }, { "api_name": "stat_arb.src.data_loader.general.Interval.Interval", "line_number": 24, ...
22799615615
from django.shortcuts import render from django.http import HttpResponse from myapp.models import City,Country,Person from myapp.forms import PersonForm from django.shortcuts import redirect # Create your views here. def index(request): country=Country.objects.all() context={ 'country':country, } #return HttpResponse("hey%s"%slug) return render(request, 'myapp/home.html', context) def add_person(request): if request.method=="POST": form=PersonForm(request.POST) if form.is_valid(): form.save() return redirect("/") else: form=PersonForm() context={ 'form':form, } return render(request, 'myapp/person.html', context) def get_city(request,id): opt2_html = "" try: country=Country.objects.get(pk = id) city = City.objects.filter(country_id = country.id) # make_models = company.makemodel_set.all() for c in city: opt2_html += "<option value='"+str(c.id)+"'>"+c.name+"</option>" print(opt2_html) context={ 'country':country, 'city':city, } except: write_exception("Error in fetching options 2") return HttpResponse(opt2_html) # return render(request, 'myapp/home.html', context)
pappubca005/dynamic-dropdown
myapp/views.py
views.py
py
1,346
python
en
code
0
github-code
6
[ { "api_name": "myapp.models.Country.objects.all", "line_number": 10, "usage_type": "call" }, { "api_name": "myapp.models.Country.objects", "line_number": 10, "usage_type": "attribute" }, { "api_name": "myapp.models.Country", "line_number": 10, "usage_type": "name" }, ...
27578228523
#!/usr/bin/env python3 import argparse import configparser from pathlib import Path from rich import console import sys sys.path.append("/home/vermin/IdeaProjects/summalarva") from summalarva.openai_client import OpenAIClient from summalarva.orgnoter import OrgNoter console = console.Console() config = configparser.ConfigParser() argparser = argparse.ArgumentParser() argparser.add_argument("input", type=str, help="Input file") argparser.add_argument("--config", type=str, help="Config file") args = argparser.parse_args() input_path = Path(args.input).expanduser() if args.config: config.read(args.config) else: config.read(Path("~/.config/summalarva.ini").expanduser()) openai_api_key = config["openai"]["api_key"] if config["openai"]["host"]: openai_host = config["openai"]["host"] openai_client = OpenAIClient(openai_api_key, openai_host) else: openai_client = OpenAIClient(openai_api_key) console.print("Start processing file", args.input) summarises = openai_client.summarize_document(args.input) try: org_noter = OrgNoter(args.input) for page_num,summary in summarises.items(): org_noter.page_summarize_model_append(page_num, summary) console.print("Start create org noter") org_noter.create_note() except Exception as e: raise e summary_text = "" for page_num, summary in summarises.items(): summary_text += f"Page {page_num}\n\n{summary}\n\n" with open("summary.txt", "w") as f: f.write(summary_text)
nhannht/summalarva
summalarva/summarize_pdf.py
summarize_pdf.py
py
1,484
python
en
code
1
github-code
6
[ { "api_name": "sys.path.append", "line_number": 9, "usage_type": "call" }, { "api_name": "sys.path", "line_number": 9, "usage_type": "attribute" }, { "api_name": "rich.console", "line_number": 13, "usage_type": "name" }, { "api_name": "rich.console.Console", "...
10548067106
#!/usr/bin/env python3 """ From a set of zone transits representing trips between stops, work out the effective trip time for a passenger arriving at the the origin every minute from the departure time of the first bus to the departure time of the last one """ import collections import datetime import logging import sys import csv import isodate zones = ['milton_pandr_south', 'milton_pandr_north'] logger = logging.getLogger('__name__') header = [ 'Passenger_Arrival', 'Passenger_Wait', 'Bus_Departure', 'Bus_Arrival', 'Bus_Duration', 'Bus_Interval', 'Passenger_Duration', ] def process_zones(): for zone in zones: logger.debug('Processing %s', zone) # Read in... in_filename = 'transits-{}.csv'.format(zone) logger.info('Reading %s', in_filename) with open(in_filename, 'r', newline='') as in_file: input = csv.reader(in_file, dialect='excel', quoting=csv.QUOTE_ALL) next(input) # Skip headers previous_depart = None trip_table = collections.OrderedDict() for row in input: trip = {} raw_arrive, raw_duration, raw_distance = row trip['arrive'] = isodate.parse_datetime(raw_arrive) trip['duration'] = datetime.timedelta(seconds=float(raw_duration)) trip['depart'] = trip['arrive'] - trip['duration'] day = trip['depart'].date() trip['distance'] = float(raw_distance) trip['interval'] = (trip['depart'] - previous_depart).total_seconds() if previous_depart else None if day not in trip_table: trip_table[day] = [] trip_table[day].append(trip) previous_depart = trip['depart'] # ... 
write out step = datetime.timedelta(minutes=1) out_filename = 'trips-{}.csv'.format(zone) logger.info('writing %s', out_filename) with open(out_filename, 'w', newline='') as out_file: output = csv.writer(out_file, dialect='excel', quoting=csv.QUOTE_ALL) output.writerow(header) for day in trip_table: logger.info('Processing %s %s', zone, day) todays_trips = trip_table[day] # Find the minute before the first bus of the day start = todays_trips[0]['depart'].replace(second=0) # And the last departure of the day end = todays_trips[-1]['depart'] logger.debug("Start %s, end %s, step %s", start, end, step) # Step through the day from 'start' to 'end' in steps of 'step' # Find the next bus to depart after 'start' while start < end: # Find first departure after 'start' for row in todays_trips: logger.debug("row depart: %s, start: %s", row['depart'], start) if row['depart'] > start: wait = int((row['depart'] - start).total_seconds()) traveling = int((row['duration']).total_seconds()) trip_duration = wait + traveling output.writerow([ start, wait, row['depart'], row['arrive'], traveling, row['interval'], trip_duration, ]) break else: logger.error("No bus for a departure at %s", start) start = start + step def main(): logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO) logger.info('Start') process_zones() logger.info('Stop') if __name__ == "__main__": main()
SmartCambridge/milton_road_study
initial_investigation/expand_transits.py
expand_transits.py
py
4,037
python
en
code
0
github-code
6
[ { "api_name": "logging.getLogger", "line_number": 20, "usage_type": "call" }, { "api_name": "csv.reader", "line_number": 45, "usage_type": "call" }, { "api_name": "csv.QUOTE_ALL", "line_number": 45, "usage_type": "attribute" }, { "api_name": "collections.OrderedDi...
30704086585
from collections import deque with open('day6.txt') as day6: lines = day6.readlines() target_size = 14 current = 0 buffer = deque([''] * target_size) for line in lines: for char in line: current = current + 1 buffer.popleft() buffer.append(char) if current > target_size and len(set(buffer)) == target_size: print(current) break
shanetreacy/aoc2022
day6aoc.py
day6aoc.py
py
365
python
en
code
0
github-code
6
[ { "api_name": "collections.deque", "line_number": 9, "usage_type": "call" } ]
22734703340
# -*- coding: utf-8 -*- import json import os import sys import xbmc import xbmcvfs import xbmcaddon import xbmcgui import xbmcplugin import pyxbmct import requests import io import unicodedata import re import ast import sqlite3 import shutil import time from medias import Media, TMDB import medias import zipfile import threading import datetime from upNext import upnext_signal import widget from datetime import datetime from datetime import timedelta from bs4 import BeautifulSoup try: import iptv vIPTV = True except ImportError: vIPTV = False try: # Python 3 from urllib.parse import parse_qsl from util import * except ImportError: from urlparse import parse_qsl try: # Python 3 from urllib.parse import unquote, urlencode, quote unichr = chr except ImportError: # Python 2 from urllib import unquote, urlencode, quote try: # Python 3 from html.parser import HTMLParser except ImportError: # Python 2 from HTMLParser import HTMLParser pyVersion = sys.version_info.major pyVersionM = sys.version_info.minor if pyVersionM == 11: import cryptPaste11 as cryptage import scraperUPTO11 as scraperUPTO elif pyVersionM == 8: import cryptPaste8 as cryptage import scraperUPTO8 as scraperUPTO #import scraperUPTO elif pyVersionM == 9: import cryptPaste9 as cryptage import scraperUPTO9 as scraperUPTO elif pyVersionM == 10: import cryptPaste10 as cryptage import scraperUPTO10 as scraperUPTO else: notice(pyVersion) notice(pyVersionM) from cryptPaste import Crypt from pastebin import Pastebin from apiTraktHK import TraktHK import createbdhk import random import uptobox from strm import Strm, configureSTRM import feninfo try: BDMEDIA = xbmcvfs.translatePath('special://home/userdata/addon_data/plugin.video.sendtokodiU2P/medias.bd') BDMEDIANew = xbmcvfs.translatePath('special://home/userdata/addon_data/plugin.video.sendtokodiU2P/mediasNew.bd') except: pass import loadhk3 class FenInfo(pyxbmct.AddonFullWindow): #def __init__(self, numId, title="", typM="film"): def __init__(self, title=""): 
"""Class constructor""" # Call the base class' constructor. self.numId = title.split("*")[0] title = title.split("*")[1] self.typM = "film" super(FenInfo, self).__init__(title) # Set width, height and the grid parameters self.setGeometry(1250, 700, 50, 30) self.setBackground(xbmcvfs.translatePath('special://home/addons/plugin.video.sendtokodiU2P/resources/png/fond.png')) # Call set controls method self.set_controls() # Call set navigation method. self.set_navigation() # Connect Backspace button to close our addon. self.connect(pyxbmct.ACTION_NAV_BACK, self.close) def set_controls(self): """Set up UI controls""" notice(self.numId) self.colorMenu = '0xFFFFFFFF' size = self.getTaille() sql = "SELECT title, overview, year, genres, backdrop, popu, numId, poster, runtime, saga FROM filmsPub WHERE numId={}".format(self.numId) liste = createbdhk.extractMedias(sql=sql) try: title, overview, year, genre, backdrop, popu, numId, poster, duration, saga = liste[0] backdrop = "http://image.tmdb.org/t/p/" + size[1] + backdrop poster = "http://image.tmdb.org/t/p/" + size[0] + poster except: return #backdrop #image = pyxbmct.Image(backdrop, colorDiffuse='0x22FFFFFF') image = pyxbmct.Image(backdrop) self.placeControl(image, 0, 7, rowspan=42, columnspan=23) f = xbmcvfs.translatePath('special://home/addons/plugin.video.sendtokodiU2P/resources/png/fond.png') fond = pyxbmct.Image(f) self.placeControl(fond, 0, 0, rowspan=54, columnspan=30) #f = xbmcvfs.translatePath('special://home/addons/plugin.video.sendtokodiU2P/resources/png/fond.png') #fond = pyxbmct.Image(f) #self.placeControl(fond, 0, 14, rowspan=25, columnspan=15) logo = "" #logo, clearart, banner = extractFanart(self.numId) #urlFanart = "http://assets.fanart.tv/fanart/movie/" #clearlogo ou title if logo: imageLogo = pyxbmct.Image(urlFanart + logo) self.placeControl(imageLogo, 0, 0, rowspan=8, columnspan=7) else: label = pyxbmct.Label(title, font='font_MainMenu') self.placeControl(label, 2, 0, columnspan=15) #poster #if clearart: 
# imageClearart = pyxbmct.Image(urlFanart + clearart) # self.placeControl(imageClearart, 35, 25, rowspan=12, columnspan=5) #else: #poster = pyxbmct.Image(poster) #self.placeControl(poster, 29, 25, rowspan=24, columnspan=5) """ #menu paramstring = self.links.split("*") cr = cryptage.Crypt() dictResos = cr.extractReso([(x.split("#")[0].split("@")[0], x.split("#")[0].split("@")[1]) for x in paramstring]) dictResos = {x.split("#")[0].split("@")[1]: dictResos[x.split("#")[0].split("@")[1]] if dictResos[x.split("#")[0].split("@")[1]] else x.split("#")[1] for x in paramstring} self.paramstring = orderLiens(dictResos, paramstring) tabRelease = [dictResos[x.split("#")[0].split("@")[1]][2] for i, x in enumerate(self.paramstring)] self.tabNomLien = ["#%d [COLOR red][%s - %.2fGo][/COLOR] -- [Release: %s]" %(i + 1, dictResos[x.split("#")[0].split("@")[1]][0], (int(dictResos[x.split("#")[0].split("@")[1]][1]) / 1000000000.0), tabRelease[i]) for i, x in enumerate(self.paramstring)] """ #labelMenu = pyxbmct.Label('MENU', textColor=self.colorMenu) #self.placeControl(labelMenu, 31, 0, columnspan=10) self.menu = pyxbmct.List('font13', _itemHeight=30, _alignmentY=90) self.placeControl(self.menu, 29, 0, rowspan=12, columnspan=30) #, "Acteurs & Réalisateur" self.menu.addItems(["[COLOR blue]Bande Annonce[/COLOR]", "[COLOR green]Links[/COLOR]", "Saga", "Suggestions", "Similaires", "Studio"]) #self.menu.addItems(self.tabNomLien) self.connect(self.menu, lambda: self.listFunction(self.menu.getListItem(self.menu.getSelectedPosition()).getLabel())) #overview #labelSynop = pyxbmct.Label('SYNOPSIS', textColor=colorMenu) #self.placeControl(labelSynop, 14, 0, columnspan=10) self.synop = pyxbmct.TextBox('font13', textColor='0xFFFFFFFF') self.placeControl(self.synop, 10, 0, rowspan=16, columnspan=18) self.synop.setText("[COLOR green]SYNOPSIS: [/COLOR]" + overview) self.synop.autoScroll(4000, 2000, 3000) #============================================================================ ligne notation 
duree ========================================= ligneNot = 26 #duree text = "0xFFFFFFFF" current_time = datetime.now() future_time = current_time + timedelta(minutes=duration) heureFin = future_time.strftime('%H:%M') label = pyxbmct.Label('%s %d mns (se termine à %s)' %(year, duration, heureFin), textColor=self.colorMenu) self.placeControl(label, ligneNot, 0, columnspan=12) #notation label = pyxbmct.Label('%0.1f/10' %float(popu),textColor=self.colorMenu) self.placeControl(label, ligneNot, 12, columnspan=12) #self.slider = pyxbmct.Slider(orientation=xbmcgui.HORIZONTAL) #self.placeControl(self.slider, ligneNot + 1, 14, pad_y=1, rowspan=2, columnspan=4) #self.slider.setPercent(media.popu * 10) """ #play p = 0 self.buttonLinks = pyxbmct.Button("Links") self.placeControl(self.buttonLinks, 31, p, columnspan=3, rowspan=5) self.connect(self.buttonLinks, self.buttonFx) p += 3 self.buttonBa = pyxbmct.Button("Bande Annonce") self.placeControl(self.buttonBa, 31, p, columnspan=3, rowspan=5) p += 3 self.buttonSaga = pyxbmct.Button("Saga") self.placeControl(self.buttonSaga, 31, p, columnspan=3, rowspan=5) p += 3 self.buttonReco = pyxbmct.Button("Recommendations") self.placeControl(self.buttonReco, 31, p, columnspan=3, rowspan=5) p += 3 self.buttonSimi = pyxbmct.Button("Similaires") self.placeControl(self.buttonSimi, 31, p, columnspan=3, rowspan=5) p += 3 self.buttonStudio = pyxbmct.Button("Studio") self.placeControl(self.buttonStudio, 31, p, columnspan=3, rowspan=5) """ #fav HK self.radiobutton = pyxbmct.RadioButton("Ajouter Fav's HK", textColor=self.colorMenu) self.placeControl(self.radiobutton, 26, 25, columnspan=5, rowspan=3) self.connect(self.radiobutton, self.radio_update) if ADDON.getSetting("bookonline") != "false": listeM = widget.responseSite("http://%s/requete.php?name=%s&type=favs&media=movies" %(ADDON.getSetting("bookonline_site"), ADDON.getSetting("bookonline_name"))) listeM = [int(x) for x in listeM] else: listeM = list(widget.extractFavs()) if int(self.numId) in 
listeM: self.radiobutton.setSelected(True) self.radiobutton.setLabel("Retirer Fav's HK") #============================================================================================================================================= #genres label = pyxbmct.FadeLabel() self.placeControl(label, 26, 17, columnspan=5) label.addLabel(genre) # sagas self.sagaOk = False #sql = "SELECT s.title, s.poster, s.numId FROM saga AS s WHERE s.numId=(SELECT t.numIdSaga FROM sagaTitle AS t WHERE t.numId={})".format(self.numId) #saga = extractMedias(sql=sql) #notice(saga) saga =0 if saga: self.sagaOk = True label = pyxbmct.Label("SAGA", textColor=self.colorMenu) self.placeControl(label, 43, 22, columnspan=4) if saga[0][1]: imFoc = "http://image.tmdb.org/t/p/w92" + saga[0][1] else: imFoc = "" txRea = saga[0][0] self.bSaga = pyxbmct.Button(txRea, focusTexture=imFoc, noFocusTexture=imFoc, font="font10", focusedColor='0xFFFF0000', alignment=pyxbmct.ALIGN_CENTER, shadowColor='0xFF000000') self.placeControl(self.bSaga, 40, 24, rowspan=10, columnspan=2) self.connect(self.bSaga, lambda: self.affSaga(str(saga[0][2]))) self.setCasting() # cast #mdb = TMDB(__keyTMDB__) #liste = mdb.castFilm(self.numId) #notice(liste) #label = pyxbmct.Label('REALISATEUR:',textColor=colorMenu) #self.placeControl(label, 8, 0, columnspan=5) #rea = ", ".join([x[0] for x in liste if x[1] == "Réalisateur"]) #label = pyxbmct.Label(rea) #self.placeControl(label, 8, 4, columnspan=8) #label = pyxbmct.Label('ACTEURS:',textColor=colorMenu) #self.placeControl(label, 11, 0, columnspan=5) #acteurs = ", ".join(["%s (%s)" %(x[0], x[1]) for x in liste if x[1] != "Réalisateur"][:3]) #label = pyxbmct.FadeLabel(font="font12") #self.placeControl(label, 11, 4, columnspan=10) #label.addLabel(acteurs) def buttonFx(self): sql = "SELECT GROUP_CONCAT(l.link, '*') FROM filmsPubLink as l WHERE l.numId={}".format(self.numId) links = createbdhk.extractMedias(sql=sql, unique=1)[0] #self.close() #affLiens2({"u2p":self.numId, "lien": links}) 
#xbmc.executebuiltin('Dialog.Close(busydialog)') #xbmc.executebuiltin("RunPlugin(plugin://plugin.video.sendtokodiU2P/?action=afficheLiens&lien={}&u2p={})".format(links, self.numId), True) #xbmc.executebuiltin("ActivateWindow(10025,plugin://plugin.video.sendtokodiU2P/?action=afficheLiens&lien={}&u2p={},return)".format(links, self.numId)) #xbmc.executebuiltin("ActivateWindow(10025,plugin://plugin.video.sendtokodiU2P/?action=playHK&lien=IBSfuEFS2%404U3iXKqCCQZq%231080.Multi.WEB&u2p=852830,return)") #self.close() def affSaga(self, numId): showInfoNotification(numId) mediasHK({"famille": "sagaListe", "u2p": numId}) self.close() def getBa(self, params): numId = params["u2p"] typMedia = params["typM"] mdb = TMDB(KEYTMDB) tabBa = mdb.getNumIdBA(numId, typMedia) if tabBa: dialog = xbmcgui.Dialog() selectedBa = dialog.select("Choix B.A", ["%s (%s)" %(x[0], x[1]) for x in tabBa], 0, 0) if selectedBa != -1: keyBa = tabBa[selectedBa][2] xbmc.executebuiltin("RunPlugin(plugin://plugin.video.youtube/?action=play_video&videoid={})".format(keyBa), True) self.close() def setCasting(self): mdb = TMDB(KEYTMDB) liste = mdb.castFilm(self.numId) nbListe = len(liste) posy = 0 self.tabCast = [None] * 20 self.tabCast2 = [None] * 20 for i, casting1 in enumerate(liste[:10]): self.tabCast2[i] = casting1 if self.tabCast2[i][2]: imFoc = "http://image.tmdb.org/t/p/w342" + self.tabCast2[i][2] else: imFoc = "" txRea = "%s (%s)" %(self.tabCast2[i][0], self.tabCast2[i][1]) self.tabCast[i] = pyxbmct.Button(txRea, focusTexture=imFoc, noFocusTexture=imFoc, font="font10", focusedColor='0xFFFF0000', alignment=pyxbmct.ALIGN_CENTER, shadowColor='0xFF000000') self.placeControl(self.tabCast[i], 38, posy, rowspan=15, columnspan=3) self.connect(self.tabCast[i], lambda: self.affCastingFilmo(str(self.tabCast2[i][3]))) posy += 3 def affCastingFilmo(self, numId): mediasHK({"famille": "cast", "u2p": numId}) self.close() def getTaille(self): dictSize = {"Basse": ("w185/", "w780/"), "Moyenne": ("w342/", "w1280/"), 
"Haute": ("w500/", "original/")} v = ADDON.getSetting("images_sizes") return dictSize[v] def setAnimation(self, control): # Set fade animation for all add-on window controls control.setAnimations([('WindowOpen', 'effect=fade start=0 end=100 time=500',), ('WindowClose', 'effect=fade start=100 end=0 time=500',)]) def set_navigation(self): """Set up keyboard/remote navigation between controls.""" self.menu.controlUp(self.radiobutton) if self.tabCast: self.menu.controlDown(self.tabCast[0]) self.menu.controlLeft(self.tabCast[0]) self.radiobutton.controlUp(self.tabCast[0]) self.menu.controlRight(self.radiobutton) self.radiobutton.controlRight(self.menu) self.radiobutton.controlLeft(self.menu) self.radiobutton.controlDown(self.menu) self.setFocus(self.menu) if self.tabCast: for i in range(len([x for x in self.tabCast if x])): self.tabCast[i].controlUp(self.menu) self.tabCast[i].controlDown(self.radiobutton) if (i + 1) < len([x for x in self.tabCast if x]): self.tabCast[i].controlRight(self.tabCast[i + 1]) else: if self.sagaOk: self.tabCast[i].controlRight(self.bSaga) if (i - 1) > -1: self.tabCast[i].controlLeft(self.tabCast[i - 1]) def radio_update(self): # Update radiobutton caption on toggle #liste favs if self.radiobutton.isSelected(): self.radiobutton.setLabel("Retirer Fav's HK") gestionFavHK({"mode": "ajout", "u2p": self.numId, "typM": "movies"}) else: self.radiobutton.setLabel("Ajouter Fav's HK") gestionFavHK({"mode": "sup", "u2p": self.numId, "typM": "movies"}) def listFunction(self, tx): #lsite fonction du menu #self.close() if "Bande" in tx: self.getBa({"u2p": self.numId, "typM": "movie"}) elif "Links" in tx: sql = "SELECT GROUP_CONCAT(l.link, '*') FROM filmsPubLink as l WHERE l.numId={}".format(self.numId) links = createbdhk.extractMedias(sql=sql, unique=1)[0] #affLiens2({"u2p": self.numId, "lien": links}) 
#xbmc.executebuiltin("ActivateWindow(10025,plugin://plugin.video.sendtokodiU2P/?action=playHK&lien=IBSfuEFS2%404U3iXKqCCQZq%231080.Multi.WEB&u2p=852830,return)") #xbmc.executebuiltin("ActivateWindow(10025,plugin://plugin.video.sendtokodiU2P/?action=afficheLiens&lien={}&u2p={},return)".format(links, self.numId)) playMediaHK({"lien": "IBSfuEFS2@4U3iXKqCCQZq#1080.Multi.WEB", "u2p": "852830"}) elif "Simi" in tx: loadSimReco2({"u2p": self.numId, "typ": "Similaires", "typM": "movie"}) elif "Sugg" in tx: loadSimReco2({"u2p": self.numId, "typ": "Recommendations", "typM": "movie"}) class FenFilmDetail(pyxbmct.AddonDialogWindow): def __init__(self, title='', numId="", links=""): """Class constructor""" # Call the base class' constructor. super(FenFilmDetail, self).__init__(title) # Set width, height and the grid parameters self.numId = numId self.links = links self.setGeometry(1000, 560, 50, 30) # Call set controls method self.set_controls() # Call set navigation method. self.set_navigation() # Connect Backspace button to close our addon. 
self.connect(pyxbmct.ACTION_NAV_BACK, self.close) def set_controls(self): """Set up UI controls""" self.colorMenu = '0xFFFFFFFF' sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.numId={}".format(self.numId) movies = extractMedias(sql=sql) media = Media("movie", *movies[0]) #backdrop image = pyxbmct.Image(media.backdrop, colorDiffuse='0x22FFFFFF') self.placeControl(image, 0, 0, rowspan=50, columnspan=30) logo, clearart, banner = extractFanart(self.numId) urlFanart = "http://assets.fanart.tv/fanart/movie/" #clearlogo ou title if logo: imageLogo = pyxbmct.Image(urlFanart + logo) self.placeControl(imageLogo, 0, 0, rowspan=8, columnspan=7) else: label = pyxbmct.Label(media.title, font='font_MainMenu') self.placeControl(label, 2, 0, columnspan=15) #poster #if clearart: # imageClearart = pyxbmct.Image(urlFanart + clearart) # self.placeControl(imageClearart, 35, 25, rowspan=12, columnspan=5) #else: poster = pyxbmct.Image(media.poster) self.placeControl(poster, 0, 25, rowspan=23, columnspan=5) #menu paramstring = self.links.split("*") cr = cryptage.Crypt() dictResos = cr.extractReso([(x.split("#")[0].split("@")[0], x.split("#")[0].split("@")[1]) for x in paramstring]) dictResos = {x.split("#")[0].split("@")[1]: dictResos[x.split("#")[0].split("@")[1]] if dictResos[x.split("#")[0].split("@")[1]] else x.split("#")[1] for x in paramstring} self.paramstring = orderLiens(dictResos, paramstring) tabRelease = [dictResos[x.split("#")[0].split("@")[1]][2] for i, x in enumerate(self.paramstring)] self.tabNomLien = ["#%d [COLOR red][%s - %.2fGo][/COLOR] -- [Release: %s]" %(i + 1, dictResos[x.split("#")[0].split("@")[1]][0], (int(dictResos[x.split("#")[0].split("@")[1]][1]) / 1000000000.0), tabRelease[i]) for i, x in enumerate(self.paramstring)] #labelMenu = pyxbmct.Label('MENU', textColor=self.colorMenu) #self.placeControl(labelMenu, 31, 0, 
columnspan=10) self.menu = pyxbmct.List('font13', _itemHeight=30) self.placeControl(self.menu, 29, 0, rowspan=24, columnspan=25) #, "Acteurs & Réalisateur" #self.menu.addItems(["[COLOR blue]Bande Annonce[/COLOR]", "[COLOR green]Lire[/COLOR]", "Suggestions", "Similaires"]) self.menu.addItems(self.tabNomLien) self.connect(self.menu, lambda: self.listFunction(self.menu.getListItem(self.menu.getSelectedPosition()).getLabel())) #overview #labelSynop = pyxbmct.Label('SYNOPSIS', textColor=colorMenu) #self.placeControl(labelSynop, 14, 0, columnspan=10) self.synop = pyxbmct.TextBox('font13', textColor='0xFFFFFFFF') self.placeControl(self.synop, 10, 0, rowspan=16, columnspan=25) self.synop.setText("[COLOR green]SYNOPSIS: [/COLOR]" + media.overview) self.synop.autoScroll(1000, 2000, 3000) #============================================================================ ligne notation duree ========================================= ligneNot = 26 #duree text = "0xFFFFFFFF" current_time = datetime.now() future_time = current_time + timedelta(minutes=media.duration) heureFin = future_time.strftime('%H:%M') label = pyxbmct.Label('%s Durée: %d mns (se termine à %s)' %(media.year, media.duration, heureFin), textColor=self.colorMenu) self.placeControl(label, ligneNot, 0, columnspan=12) #notation label = pyxbmct.Label('Note %0.1f/10' %float(media.popu),textColor=self.colorMenu) self.placeControl(label, ligneNot, 12, columnspan=12) #self.slider = pyxbmct.Slider(orientation=xbmcgui.HORIZONTAL) #self.placeControl(self.slider, ligneNot + 1, 14, pad_y=1, rowspan=2, columnspan=4) #self.slider.setPercent(media.popu * 10) #fav HK self.radiobutton = pyxbmct.RadioButton("Ajouter Fav's HK", textColor=self.colorMenu) self.placeControl(self.radiobutton, 26, 20, columnspan=5, rowspan=3) self.connect(self.radiobutton, self.radio_update) if __addon__.getSetting("bookonline") != "false": listeM = widget.responseSite("http://%s/requete.php?name=%s&type=favs&media=movies" 
%(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"))) listeM = [int(x) for x in listeM] else: listeM = list(widget.extractFavs()) if int(self.numId) in listeM: self.radiobutton.setSelected(True) self.radiobutton.setLabel("Retirer Fav's HK") #============================================================================================================================================= #genres label = pyxbmct.Label(' GENRES') self.placeControl(label, 23, 26, columnspan=3) genres = [x.strip() for x in media.genre.split(",")] x, y = 25, 25 for i, genre in enumerate(genres): f = xbmcvfs.translatePath('special://home/addons/plugin.video.sendtokodiU2P/resources/png/genre/%s.png' %genre) image = pyxbmct.Image(f) pas = 9 self.placeControl(image, x, y, rowspan=pas, columnspan=3) if i % 2: x += (pas - 2) y -= 2 else: y += 2 if i == 3: break def set_navigation(self): """Set up keyboard/remote navigation between controls.""" self.menu.controlUp(self.radiobutton) #self.menu.controlDown(self.tabCast[0]) #self.menu.controlLeft(self.tabCast[0]) #self.radiobutton.controlUp(self.tabCast[0]) self.menu.controlRight(self.radiobutton) self.radiobutton.controlRight(self.menu) self.radiobutton.controlLeft(self.menu) self.radiobutton.controlDown(self.menu) self.setFocus(self.menu) def radio_update(self): # Update radiobutton caption on toggle #liste favs if self.radiobutton.isSelected(): self.radiobutton.setLabel("Retirer Fav's HK") gestionFavHK({"mode": "ajout", "u2p": self.numId, "typM": "movies"}) else: self.radiobutton.setLabel("Ajouter Fav's HK") gestionFavHK({"mode": "sup", "u2p": self.numId, "typM": "movies"}) def listFunction(self, tx): #lsite fonction du menu self.close() link = self.paramstring[self.tabNomLien.index(tx)] #showInfoNotification(link) # playMediaHK({"u2p":self.numId, "typM": "movies", "lien": link, "skin": "1"}) """ if "Bande" in tx: getBa({"u2p": self.numId, "typM": "movie"}) elif "Lire" in tx: affLiens2({"u2p": self.numId, "lien": 
self.links}) elif "Simi" in tx: loadSimReco2({"u2p": self.numId, "typ": "Similaires", "typM": "movie"}) elif "Sugg" in tx: loadSimReco2({"u2p": self.numId, "typ": "Recommendations", "typM": "movie"}) elif "Act" in tx: affCast2({"u2p": self.numId, "typM": "movie"}) """ class replacement_stderr(sys.stderr.__class__): def isatty(self): return False sys.stderr.__class__ = replacement_stderr def debug(content): log(content, xbmc.LOGDEBUG) def linkDownload1Fichier(key, linkUrl): params = { 'url' : linkUrl, 'inline' : 0, 'cdn' : 0, 'restrict_ip': 0, 'no_ssl' : 0, } url = 'https://api.1fichier.com/v1/download/get_token.cgi' r = requests.post(url, json=params, headers={'Authorization':'Bearer {}'.format(key),'Content-Type':'application/json'}) try: o = r.json() except JSONDecodeError: pass message = "" url = "" if 'status' in o: if o['status'] != 'OK': message = r.json()['message'] o['url'] = "" return o["url"], message else: #key out => No such user return url, message def notice(content): log(content, xbmc.LOGINFO) def log(msg, level=xbmc.LOGINFO): addon = xbmcaddon.Addon() addonID = addon.getAddonInfo('id') xbmc.log('%s: %s' % (addonID, msg), level) def showInfoNotification(message): xbmcgui.Dialog().notification("U2Pplay", message, xbmcgui.NOTIFICATION_INFO, 5000) def showErrorNotification(message): xbmcgui.Dialog().notification("U2Pplay", message, xbmcgui.NOTIFICATION_ERROR, 5000) def getkeyUpto(): addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") Apikey = addon.getSetting("keyupto") return Apikey def getkey1fichier(): addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") Apikey = addon.getSetting("key1fichier") return Apikey def getresos(): addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") resos = [addon.getSetting("resos")] timing = addon.getSetting("autoplay_delay") return resos, timing """ def getresosOld(): addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") resos = addon.getSetting("resos") try: resos, timing = resos.split("-") resos = [x.strip() for x 
in resos.split("=")[1].split(",")] timing = timing.split("=")[1] except: resos = ("720", "1080", "2160") timing = 0 return resos, timing """ def getNbMedias(): addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") nb = addon.getSetting("nb_items") return nb def getOrderDefault(): dictTrie = {"Année": " ORDER BY m.year DESC", "Titre": " ORDER BY m.title COLLATE NOCASE ASC", "Popularité": " ORDER BY m.popu DESC", "Date Release": " ORDER BY m.dateRelease DESC", "Date Added": " ORDER BY m.id DESC"} addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") ordre = addon.getSetting("lists_orderby") return dictTrie[ordre] def getkeyTMDB(): addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") Apikey = addon.getSetting("apikey") return Apikey def getkeyAlldebrid(): addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") Apikey = addon.getSetting("keyalldebrid") return Apikey def getkeyRealdebrid(): addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") Apikey = addon.getSetting("keyrealdebrid") return Apikey def gestionBD(*argvs): cnx = sqlite3.connect(xbmcvfs.translatePath('special://home/addons/plugin.video.sendtokodiU2P/resources/serie.db')) cur = cnx.cursor() cur.execute("""CREATE TABLE IF NOT EXISTS serie( `id` INTEGER PRIMARY KEY, numId TEXT, title TEXT, saison TEXT, reso TEXT, pos INTEGER, UNIQUE (numId, title, saison)) """) cnx.commit() if argvs[0] == "update": cur.execute("REPLACE INTO serie (numId, title, saison, reso, pos) VALUES (?, ?, ?, ?, ?)", argvs[1:]) cnx.commit() return True elif argvs[0] == "get": cur.execute("SELECT reso FROM serie WHERE title=? AND saison=?", argvs[1:]) reso = cur.fetchone() return reso elif argvs[0] == "getHK": cur.execute("SELECT reso FROM serie WHERE numId=? 
AND saison=?", argvs[1:]) reso = cur.fetchone() return reso elif argvs[0] == "last": cur.execute("SELECT numId, title, saison, reso FROM serie ORDER BY id DESC LIMIT 1") liste = cur.fetchone() if liste: return liste else: return ["", "", "", ""] cur.close() cnx.close() ''' def detailsMediaOld(params): notice(params) paramstring = params["lien"].split("*") typMedia = xbmc.getInfoLabel('ListItem.DBTYPE') if not typMedia: xbmc.executebuiltin("Dialog.Close(busydialog)") xbmc.sleep(500) typMedia = xbmc.getInfoLabel('ListItem.DBTYPE') #typMedia = params["typM"] numId = params["u2p"] u2p = params["u2p"] dialog = xbmcgui.Dialog() if u2p and numId != "divers": choix = ["Bande Annonce", "Lire", "Acteurs", "Similaires", "Recommendations"] selected = dialog.select("Choix", choix, 0, 0) if selected != -1: if u2p and numId != "divers": if "Bande Annonce" == choix[selected]: mdb = TMDB(__keyTMDB__) tabBa = mdb.getNumIdBA(numId, typMedia) if tabBa: selectedBa = dialog.select("Choix B.A", ["%s (%s)" %(x[0], x[1]) for x in tabBa], 0, 0) if selectedBa != -1: keyBa = tabBa[selectedBa][2] xbmc.executebuiltin("RunPlugin(plugin://plugin.video.youtube/?action=play_video&videoid={})".format(keyBa), True) return elif choix[selected] in ["Similaires", "Recommendations"]: loadSimReco(numId, typMedia, choix[selected]) return elif "Acteurs" == choix[selected]: affCast(numId, typMedia) elif "Lire" == choix[selected]: cr = cryptage.Crypt() dictResos = cr.extractReso([(x.split("#")[0].split("@")[0], x.split("#")[0].split("@")[1]) for x in paramstring]) dictResos = {x.split("#")[0].split("@")[1]: dictResos[x.split("#")[0].split("@")[1]] if dictResos[x.split("#")[0].split("@")[1]] else x.split("#")[1] for x in paramstring} tabNomLien = ["Lien N° %d (%s - %.2fGo)" %(i + 1, dictResos[x.split("#")[0].split("@")[1]][0], (int(dictResos[x.split("#")[0].split("@")[1]][1]) / 1000000000.0)) for i, x in enumerate(paramstring)] tabRelease = [dictResos[x.split("#")[0].split("@")[1]][2] for i, x in 
enumerate(paramstring)]
                    tabLiens = [(x, paramstring[i], tabRelease[i]) for i, x in enumerate(tabNomLien)]
                    affLiens(numId, typMedia, tabLiens)
'''


def getBa(params):
    """Let the user pick a trailer for params["u2p"] and play it via the YouTube addon."""
    numId = params["u2p"]
    typMedia = params["typM"]
    mdb = TMDB(__keyTMDB__)
    tabBa = mdb.getNumIdBA(numId, typMedia)
    if tabBa:
        dialog = xbmcgui.Dialog()
        selectedBa = dialog.select("Choix B.A", ["%s (%s)" %(x[0], x[1]) for x in tabBa], 0, 0)
        if selectedBa != -1:
            keyBa = tabBa[selectedBa][2]
            xbmc.executebuiltin("RunPlugin(plugin://plugin.video.youtube/?action=play_video&videoid={})".format(keyBa), True)


def gestionMedia(params):
    """Show a context menu of bookkeeping actions for one movie/show:
    continue-watching, history, HK favourites, Trakt, custom lists…
    The selected entry's handler is invoked with its parameter dict."""
    typMedia = params["typM"]
    numId = params["u2p"]
    if typMedia == "movie":
        media = "movies"
    else:
        media = "tvshow"
    dictA = {}
    # "On continue..." (continue-watching) list
    listeView = widget.extractOC()
    if int(numId) in listeView:
        #categories.append(("Retirer fav's-HK", {"action": "fav", "mode": "sup", "u2p": numId, "typM": "movies"}))
        dictA['Retirer de "On continue..."'] = (gestionoc, {"mode": "sup", "u2p": numId, "typM": media})
    else:
        dictA['Ajouter à "On continue..."'] = (gestionoc, {"mode": "ajout", "u2p": numId, "typM": media})
        #categories.append(("Ajouter fav's-HK", {"action": "fav", "mode": "ajout", "u2p": numId, "typM": "movies"}))
    # last-viewed history (online bookmarks service when enabled, local otherwise)
    if __addon__.getSetting("bookonline") != "false":
        listeView = widget.responseSite("http://%s/requete.php?name=%s&type=view&media=%s" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), typMedia))
        listeView = [int(x) for x in listeView]
    else:
        listeView = list(widget.extractIdInVu(t=media))
    if int(numId) in listeView:
        dictA["Retirer de l'historique"] = (supView, {"u2p": numId, "typM": media})
        #categories.append(("Retirer Last/View", {"action": "supView", "u2p": numId, "typM": "movies"}))
    # HK favourites
    if __addon__.getSetting("bookonline") != "false":
        listeM = widget.responseSite("http://%s/requete.php?name=%s&type=favs&media=%s" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), media))
        listeM = [int(x) for x in listeM]
    else:
        listeM = list(widget.extractFavs(t=media))
    if int(numId) in listeM:
        #categories.append(("Retirer fav's-HK", {"action": "fav", "mode": "sup", "u2p": numId, "typM": "movies"}))
        dictA["Retirer des Favoris HK"] = (gestionFavHK, {"mode": "sup", "u2p": numId, "typM": media})
    else:
        dictA["Ajouter aux Favoris HK"] = (gestionFavHK, {"mode": "ajout", "u2p": numId, "typM": media})
        #categories.append(("Ajouter fav's-HK", {"action": "fav", "mode": "ajout", "u2p": numId, "typM": "movies"}))
    # Trakt "mark watched", movies only, when the Trakt integration is active
    trk = actifTrakt()
    if trk and typMedia == "movie":
        dictA["Cocher Vu dans Trakt"] = (vuMovieTrakt, {"u2p": numId})
    # user-defined custom lists
    liste = widget.getListesV(typMedia)
    for l in liste:
        listeV = widget.getListesVdetail(l, typMedia)
        if int(numId) in listeV:
            dictA["Retirer de %s" %l] = (gestionListeV, {"mode": "sup", "u2p": numId, "typM": typMedia, "nom": l})
        else:
            dictA["Ajouter à %s" %l] = (gestionListeV, {"mode": "ajout", "u2p": numId, "typM": typMedia, "nom": l})
    dictA["Correction Certification"] = (correctCertif, {"u2p": numId, "typM": typMedia})
    dictA['Vider Historique'] = (delView, {"typM": media})
    dictA['Vider Favoris HK'] = (supFavHK, {"typM": media})
    tab = list(dictA.keys())
    dialog = xbmcgui.Dialog()
    ret = dialog.contextmenu(tab)
    if ret != -1:
        # invoke the chosen handler with its parameter dict
        argv = dictA[tab[ret]][1]
        dictA[tab[ret]][0](argv)


def gestionListeV(params):
    """Add to / remove from a user-defined list (params: nom, mode, u2p, typM)."""
    typMedia = params["typM"]
    numId = params["u2p"]
    mode = params["mode"]
    nom = params["nom"]
    widget.gestionListeVdetail(nom, numId, typMedia, mode)


def fenInfo(params):
    """Open the FenInfo modal window for params["u2p"] (media type defaults to "film")."""
    u2p = params["u2p"]
    title = ""
    try:
        typM = params["typm"]
    except :
        typM = "film"
    #xbmc.executebuiltin('Dialog.Close(busydialog)')
    #window = feninfo.FenInfo(title)
    #window = FenInfo("%s*%s" %(u2p, title))
    window = feninfo.FenInfo([u2p, title, typM])
    # Show the created window.
    window.doModal()
    del window


#def detailsMedia(params):
#    detailsMedia2(params)
#    time.sleep(0.5)
#    xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Input.ExecuteAction","params":{"action":"back"},"id":1}')
#    xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Input.ExecuteAction","params":{"action":"back"},"id":1}')
#    xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Input.ExecuteAction","params":{"action":"back"},"id":1}')
    #xbmc.executebuiltin('ReloadSkin')
#    xbmc.executebuiltin("Input.Back")
#    xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "Input.Back", "id": 1}')


def detailsMedia(params):
    """Movie detail entry point.

    With the "newfen" setting on, opens the FenInfo modal window; otherwise
    builds a classic Kodi directory of actions (trailer, play, favourites,
    similar/recommended…) for the movie params["u2p"].
    """
    notice("detailM")
    notice(params)
    if __addon__.getSetting("newfen") != "false":
        typM = "movie"
        title = ""
        numId = params["u2p"]
        #xbmc.executebuiltin('Dialog.Close(busydialog)')
        #window = feninfo.FenInfo(title)
        #window = FenInfo("%s*%s" %(u2p, title))
        window = feninfo.FenInfo([numId, title, typM])
        # Show the created window.
        window.doModal()
        del window
        #xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True)
        #xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Input.ExecuteAction","params":{"action":"back"},"id":1}')
    else:
        # resolve the concatenated link string: from params, else from the
        # paste DB or the legacy movieLink table
        try:
            links = params["lien"]
        except:
            try:
                if __addon__.getSetting("actifnewpaste") != "false":
                    sql = "SELECT GROUP_CONCAT(l.link, '*') FROM filmsPubLink as l WHERE l.numId={}".format(params["u2p"])
                    links = createbdhk.extractMedias(sql=sql, unique=1)[0]
                else:
                    sql = "SELECT link FROM movieLink WHERE numId={}".format(params["u2p"])
                    links = extractMedias(sql=sql, unique=1)[0]
            except:
                return False
        #typMedia = params["typM"]
        typMedia = "movie"
        numId = params["u2p"]
        u2p = params["u2p"]
        # metadata: movie table first, filmsPub as fallback
        try:
            sql = "SELECT title, overview, year, genre, backdrop, popu, numId, poster, runtime FROM movie WHERE numId={}".format(numId)
            liste = extractMedias(sql=sql)
        except:
            liste = []
        if liste:
            title, overview, year, genre, backdrop, popu, numId, poster, runtime = liste[0]
        else:
            sql = "SELECT title, overview, year, genres, backdrop, popu, numId, poster, runtime FROM filmsPub WHERE numId={}".format(numId)
            liste = createbdhk.extractMedias(sql=sql)
            try:
                title, overview, year, genre, backdrop, popu, numId, poster, runtime = liste[0]
            except:
                return
        try:
            int(runtime)
        except:
            runtime = 0
        overview = "%s\nsynopsis: %s \nAnnée: %s\nGenre: %s\nNote: %.2f\nDurée: %.d mns" %(title, overview[:150] + "...", year, genre, popu, runtime)
        #fnotice(overview)
        xbmcplugin.setPluginCategory(__handle__, "Menu")
        #xbmcplugin.setContent(__handle__, 'episodes')
        xbmcplugin.setContent(__handle__, 'videos')
        categories = [("[COLOR red]Bande Annonce[/COLOR]", {"action": "ba", "u2p": numId, "typM": typMedia}),
                      ("[COLOR green]Lire[/COLOR]", {"action": "afficheLiens", "lien": links, "u2p": numId})]
        """
        if __addon__.getSetting("actifnewpaste") != "false":
            sql = "SELECT t.saga FROM filmsPub AS t WHERE t.numId={}".format(u2p)
            saga = createbdhk.extractMedias(sql=sql, unique=1)
            if saga and saga[0]:
                categories += [("Saga", {"action": "mediasHKFilms", "famille": "sagaListe", "numIdSaga":saga[0]})]
        elif __addon__.getSetting("actifhk") != "false":
            sql = "SELECT s.title, s.poster, s.overview, s.numId FROM saga AS s WHERE s.numId=(SELECT t.numIdSaga FROM sagaTitle AS t WHERE t.numId={})".format(u2p)
            saga = extractMedias(sql=sql)
            if saga:
                categories += [("Saga", {"action": "MenuFilm", "famille": "sagaListe", "numIdSaga": saga[0][3]})]
        categories += [("Acteurs", {"action": "affActeurs", "u2p": numId, "typM": typMedia}),\
                       ("Similaires", {"action": "suggest", "u2p": numId, "typ": "Similaires", "typM": typMedia}),
                       ("Recommandations", {"action": "suggest", "u2p": numId, "typ": "Recommendations", "typM": typMedia})]
        """
        # last-viewed history entry
        if __addon__.getSetting("bookonline") != "false":
            listeView = widget.responseSite("http://%s/requete.php?name=%s&type=view&media=movie" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name")))
            listeView = [int(x) for x in listeView]
        else:
            listeView = list(widget.extractIdInVu(t="movies"))
        if int(numId) in listeView:
            categories.append(("Retirer Last/View", {"action": "supView", "u2p": numId, "typM": "movies"}))
        # HK favourites entry
        if __addon__.getSetting("bookonline") != "false":
            listeM = widget.responseSite("http://%s/requete.php?name=%s&type=favs&media=movies" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name")))
            listeM = [int(x) for x in listeM]
        else:
            listeM = list(widget.extractFavs(t="movies"))
        if int(numId) in listeM:
            categories.append(("Retirer fav's-HK", {"action": "fav", "mode": "sup", "u2p": numId, "typM": "movies"}))
        else:
            categories.append(("Ajouter fav's-HK", {"action": "fav", "mode": "ajout", "u2p": numId, "typM": "movies"}))
        categories += [("Acteurs", {"action": "affActeurs", "u2p": numId, "typM": typMedia}),\
                       ("Similaires", {"action": "suggest", "u2p": numId, "typ": "Similaires", "typM": typMedia}),
                       ("Recommandations", {"action": "suggest", "u2p": numId, "typ": "Recommendations", "typM": typMedia})]
        # information window entry
        categories.append(("Fenêtre Information", {"action": "feninfo", "u2p": numId, "typM": "movies", "title": title}))
        for cat in categories:
            # NOTE(review): the "Saga"/"MenuFilm" branch references `saga`,
            # which is only defined inside the commented-out block above —
            # the branch is currently unreachable because no "Saga" entry is added.
            if "Saga" in cat[0] and cat[1]["action"] == "MenuFilm":
                lFinale = [saga[0][0], saga[0][2], year, genre, backdrop, popu, numId, saga[0][1]]
            else:
                lFinale = [title, overview, year, genre, backdrop, popu, numId, poster]
            media = Media("menu", *lFinale)
            media.typeMedia = typMedia
            addDirectoryMenu(cat[0], isFolder=True, parameters=cat[1], media=media)
        xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True)
        return


def addDirectoryMenu(name, isFolder=True, parameters={}, media="" ):
    ''' Add a list item to the XBMC UI.'''
    # NOTE(review): mutable default `parameters={}` — only read here, never
    # mutated, so it is benign, but worth confirming callers never mutate it.
    addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P")
    li = xbmcgui.ListItem(label=name)
    updateInfoTagVideo(li,media,False,False,False,False,False)
    if "Saison" in name:
        # season rows get a watched/unwatched management context entry,
        # plus widget properties when rendered outside the addon itself
        commands = []
        commands.append(('[COLOR yellow]Gestion Vus/Non-Vus[/COLOR]', 'RunPlugin(plugin://plugin.video.sendtokodiU2P/?action=vuNonVu&saison=%d&u2p=%s&refresh=1)' %(media.saison, media.numId)))
        li.addContextMenuItems(commands)
        isWidget = xbmc.getInfoLabel('Container.PluginName')
        if "U2P" not in isWidget:
            li.setProperty('widgetEpisodes', 'true')
            li.setProperty('vus', 'RunPlugin(plugin://plugin.video.sendtokodiU2P/?action=vuNonVu&saison=%d&u2p=%s&refresh=1)' %(media.saison, media.numId))
    li.setArt({'icon': media.backdrop, "thumb": media.poster, 'poster':media.poster, 'fanart': media.backdrop })
    url = sys.argv[0] + '?' + urlencode(parameters)
    return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=li, isFolder=isFolder)


def getEpisodesSaison(numId):
    """Return {saison: (color, watched_count, total_count)} for show *numId*.

    Color encodes progress: red = nothing watched, green = all watched,
    orange = partially watched.
    """
    try:
        if __addon__.getSetting("bookonline") != "false":
            vus = widget.responseSite("http://%s/requete.php?name=%s&type=getvu" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name")))
        else:
            vus = widget.getVu("tvshow")
    except:
        vus = []
    # watched entries are "numId-saison-episode" strings; keep this show's
    vus = [("Saison %s" %x.split("-")[1].zfill(2), x.split("-")[2])for x in vus if x.split("-")[0] == str(numId)]
    sql = "SELECT saison, episode FROM tvshowEpisodes WHERE numId={}".format(numId)
    liste = extractMedias(sql=sql)
    notice(liste)
    dictSaisons = {}
    for saison in list(set([x[0] for x in liste])):
        nbTotal = len([x for x in liste if x[0] == saison])
        nbVus = len([x for x in vus if x[0] == saison])
        if nbVus == 0:
            c = "red"
        elif nbVus == nbTotal:
            c = "green"
        else:
            c = "orange"
        dictSaisons[saison] = (c, nbVus, nbTotal)
    return dictSaisons


def detailsTV(params):
    """Build the detail directory for a TV show: trailer, per-season rows with
    watched counts, cast, similar/recommended, history and favourites toggles."""
    #notice(params)
    #typMedia = xbmc.getInfoLabel('ListItem.DBTYPE')
    #if not typMedia:
    #    xbmc.executebuiltin("Dialog.Close(busydialog)")
    #    xbmc.sleep(500)
    #    typMedia = xbmc.getInfoLabel('ListItem.DBTYPE')
    #typMedia = params["typM"]
    typMedia = "tvshow"
    numId = params["u2p"]
    u2p = params["u2p"]
    if u2p and numId != "divers":
        if "saison" in params.keys():
            numSaison = params["saison"].zfill(2)
            saisons = ["Saison %s" %numSaison]
        else:
            sql = "SELECT DISTINCT saison FROM tvshowEpisodes WHERE numId={}".format(numId)
            saisons = extractMedias(sql=sql, unique=1)
        dictSaisonsVU = getEpisodesSaison(u2p)
sql = "SELECT title, overview, year, genre, backdrop, popu, numId, poster, runtime FROM tvshow WHERE numId={}".format(numId) liste = extractMedias(sql=sql) if liste: title, overview, year, genre, backdrop, popu, numId, poster, runtime = liste[0] else: return False try: int(runtime) except: runtime = 0 overview = "%s\nsynopsis: %s \nAnnée: %s\nGenre: %s\nNote: %.2f\nDurée: %d mns" %(title, overview[:150] + "...", year, genre, popu, runtime) xbmcplugin.setPluginCategory(__handle__, "Menu") xbmcplugin.setContent(__handle__, 'episodes') choixsaisons = [("%s [COLOR %s](%d/%d)[/COLOR]" %(x, dictSaisonsVU[x][0], dictSaisonsVU[x][1], dictSaisonsVU[x][2]), {"action": "visuEpisodes", "u2p": numId, "saison": x}) for x in saisons] categories = [("[COLOR red]Bande Annonce[/COLOR]", {"action": "ba", "u2p": numId, "typM": typMedia})] + choixsaisons + \ [("Acteurs", {"action": "affActeurs", "u2p": numId, "typM": typMedia}),\ ("Similaires", {"action": "suggest", "u2p": numId, "typ": "Similaires","typM": typMedia}), \ ("Recommandations", {"action": "suggest", "u2p": numId, "typ": "Recommendations", "typM": typMedia})] #liste lastview if __addon__.getSetting("bookonline") != "false": listeView = widget.responseSite("http://%s/requete.php?name=%s&type=view&media=tv" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"))) listeView = [int(x) for x in listeView] else: listeView = list(widget.extractIdInVu(t="tvshow")) if int(numId) in listeView: categories.append(("Retirer Last/View", {"action": "supView", "u2p": numId, "typM": typMedia})) #liste favs if __addon__.getSetting("bookonline") != "false": listeM = widget.responseSite("http://%s/requete.php?name=%s&type=favs&media=tvshow" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"))) listeM = [int(x) for x in listeM] else: listeM = list(widget.extractFavs(t="tvshow")) if int(numId) in listeM: categories.append(("Retirer fav's-HK", {"action": "fav", "mode": "sup", "u2p": 
numId, "typM": "tvshow"})) else: categories.append(("Ajouter fav's-HK", {"action": "fav", "mode": "ajout", "u2p": numId, "typM": "tvshow"})) mdb = TMDB(__keyTMDB__) dictSaisons = dict(mdb.serieNumIdSaison(u2p).items()) notice(dictSaisons) for cat in categories: lFinale = [title, overview, year, genre, backdrop, popu, numId, poster] if "saison" in cat[1].keys(): numSaison = int(cat[1]["saison"].split(" ")[1]) try: tab = dictSaisons[numSaison] lFinale = [title, tab[2], year, genre, backdrop, popu, numId, tab[1]] #lFinale = [tab[0], tab[2], year, genre, backdrop, popu, numId, tab[1]] except Exception as e: notice(str(e)) media = Media("menu", *lFinale) if "saison" in cat[1].keys(): media.saison = numSaison media.typeMedia = typMedia addDirectoryMenu(cat[0], isFolder=True, parameters=cat[1], media=media) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def suiteSerie(): try: if __addon__.getSetting("bookonline") != "false": listeVus = widget.responseSite("http://%s/requete.php?name=%s&type=getvu" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"))) else: listeVus = widget.getVu("tvshow") except: listeVus = [] dictSerie = {} for vu in listeVus: vu = vu.split("-") episode = int(vu[1].zfill(2) + vu[2].zfill(4)) if vu[0] in dictSerie.keys() and episode > dictSerie[vu[0]]: dictSerie[vu[0]] = episode else: dictSerie[vu[0]] = episode listeSerie = [] for numId, episodeVu in dictSerie.items(): sql = "SELECT ep.saison, ep.episode, (SELECT GROUP_CONCAT(l.link, '*') FROM episodes as l WHERE l.numId=ep.numId AND l.saison=ep.saison AND l.episode=ep.episode) FROM episodes as ep WHERE ep.numId={}".format(numId) episodes = createbdhk.extractMedias(sql=sql) episodes = sorted([(str(x[0]) + str(x[1]).zfill(4), x[2]) for x in episodes]) for (episode, lien) in episodes: if int(episode) > episodeVu: serie = [numId, "", "", lien] sql = "SELECT title, overview, year, backdrop, popu FROM seriesPub WHERE numId={}".format(numId) serie += 
createbdhk.extractMedias(sql=sql)[0] serie += [int(episode[:-4]), int(int(episode[-4:])), 0] listeSerie.append(serie) #notice(serie) break xbmcplugin.setPluginCategory(__handle__, "Episodes") xbmcplugin.setContent(__handle__, 'episodes') for l in listeSerie[::-1]: media = Media("episode", *l) media.typeMedia = "episode" media.numId = int(l[0]) addDirectoryEpisodes("%s ([COLOR white]S%sE%s[/COLOR])" %(media.title, str(media.saison).zfill(2), str(media.episode).zfill(2)), isFolder=False, parameters={"action": "playHK", "lien": media.link, "u2p": media.numId, "episode": media.episode, "saison": media.saison }, media=media) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def normalizeNum(num): s, e = num.split("E") e = "%s%s" %("0" * (4 - len(e)), e) return s + "E" + e def affEpisodes2(params): numId = params["u2p"] saison = params["saison"] typM = "episode" xbmcplugin.setPluginCategory(__handle__, "Episodes") xbmcplugin.setContent(__handle__, 'episodes') sql = "SELECT * FROM tvshowEpisodes WHERE numId={} AND saison='{}'".format(numId, saison) liste = extractMedias(sql=sql) mdb = TMDB(__keyTMDB__) tabEpisodes = mdb.saison(numId, saison.replace("Saison ", "")) try: if __addon__.getSetting("bookonline") != "false": vus = widget.responseSite("http://%s/requete.php?name=%s&type=getvu" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"))) else: vus = widget.getVu("tvshow") except: vus = [] liste = [(x[0], x[1], normalizeNum(x[2]), x[3]) for x in liste] for l in sorted(liste, key=lambda x: x[-2]): ep = "%d-%d-%d" %(int(l[0]), int(l[2].split("E")[0].replace("S", "")), int(l[2].split("E")[1])) if ep in vus: isVu = 1 else: isVu = 0 try: lFinale = list(l) + list([episode for episode in tabEpisodes if int(l[2].split("E")[1]) == episode[-1]][0]) except: lFinale = list(l) + ["Episode", "Pas de synopsis ....", "", "", "", int(saison.replace("Saison ", "")) , int(l[2].split("E")[1])] lFinale.append(isVu) media = Media("episode", *lFinale) 
media.typeMedia = typM #notice(media.vu) media.numId = int(numId) addDirectoryEpisodes("E%d - %s" %(int(l[2].split("E")[1]), media.title ), isFolder=False, parameters={"action": "playHK", "lien": media.link, "u2p": media.numId, "episode": media.episode}, media=media) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) ''' def affEpisodes(numId, saison): typM = "episode" xbmcplugin.setPluginCategory(__handle__, "Episodes") xbmcplugin.setContent(__handle__, 'episodes') sql = "SELECT * FROM tvshowEpisodes WHERE numId={} AND saison='{}'".format(numId, saison) liste = extractMedias(sql=sql) mdb = TMDB(__keyTMDB__) tabEpisodes = mdb.saison(numId, saison.replace("Saison ", "")) for l in liste: try: lFinale = list(l) + list([episode for episode in tabEpisodes if int(l[2].split("E")[1]) == episode[-1]][0]) except: lFinale = list(l) + ["Episode", "Pas de synopsis ....", "", "", "", int(saison.replace("Saison ", "")) , int(l[2].split("E")[1])] #notice(lFinale) media = Media("episode", *lFinale) media.typeMedia = typM media.numId = int(numId) addDirectoryEpisodes("%s" %(media.title), isFolder=False, parameters={"action": "playHK", "lien": media.link, "u2p": media.numId}, media=media) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) ''' def addDirectoryEpisodes(name, isFolder=True, parameters={}, media="" ): ''' Add a list item to the XBMC UI.''' addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") li = xbmcgui.ListItem(label=("%s" %(name))) #, "dbid": media.numId + 500000 #notice(media.episode) updateInfoTagVideo(li,media,False,True,False,False,True) li.setArt({'icon': media.backdrop, "fanart": media.backdrop, 'thumb': media.backdrop}) li.setProperty('IsPlayable', 'true') commands = [] commands.append(('[COLOR yellow]Gestion Vus/Non-Vus[/COLOR]', 'RunPlugin(plugin://plugin.video.sendtokodiU2P/?action=vuNonVu&saison=%d&u2p=%s&refresh=1)' %(media.saison, media.numId))) li.addContextMenuItems(commands, replaceItems=False) isWidget = 
def getVerifLinks(numId, typM="movie"):
    """Return validated (link, release, size_in_bytes) triples for movie *numId*.

    Reads candidate links from the filmsPubLink table, converts human-readable
    sizes to decimal bytes, then keeps only links that pass the host validity
    checks (regular hosters via Crypt.extractLinks, 12-char 'darkino' codes
    via Crypt.validLinkDark).

    NOTE(review): *typM* is accepted for call-site symmetry but unused here.
    """
    cr = Crypt()
    sql = "SELECT link, release, taille FROM filmsPubLink WHERE numId={}".format(numId)
    rows = createbdhk.extractMedias(sql=sql)
    paramstring = []
    for l in rows:
        l = list(l)
        if "Go" in l[2]:
            # decimal gigabytes -> bytes (1 Go = 1e9), matching the display math downstream
            l[2] = int(float(l[2].replace("Go", "")) * 1000000000)
        elif "Mo" in l[2]:
            # consistency fix: getVerifLinksSerie handles 'Mo' sizes but this
            # function did not; 1 Mo = 1e6 bytes (decimal, like 'Go' above)
            l[2] = int(float(l[2].replace("Mo", "")) * 1000000)
        else:
            l[2] = int(float(l[2]))
        paramstring.append(l)
    # 12-character link codes are 'darkino' entries and use a different validity check
    linksDarkino = [x for x in paramstring if len(x[0]) == 12]
    links = [x for x in paramstring if x not in linksDarkino]
    dictLiensfichier = cr.extractLinks([x[0] for x in links])
    links = [x for x in links if dictLiensfichier[x[0]]]
    if linksDarkino:
        linkOk, linkOut = cr.validLinkDark([x[0] for x in linksDarkino])
        linksDarkino = [x for x in linksDarkino if x[0] not in linkOut]
        links += linksDarkino
    return links
orderLiens(dictResos, paramstring) #tabNomLien = ["[COLOR %s]#%d[/COLOR]| %s - %.2fGo" %(colorLiens(dictResos[x.split("#")[0].split("@")[1]][0]), i + 1, dictResos[x.split("#")[0].split("@")[1]][0], (int(dictResos[x.split("#")[0].split("@")[1]][1]) / 1000000000.0)) for i, x in enumerate(paramstring)] tabNomLien = ["[COLOR %s]#%d[/COLOR]| %s - %.2fGo" %(colorLiens(x[1]), i + 1, x[1], (int(x[2]) / 1000000000.0)) for i, x in enumerate(links)] tabRelease = [x[1] for i, x in enumerate(links)] tabLiens = [(x, links[i][0], tabRelease[i]) for i, x in enumerate(tabNomLien)] notice(tabLiens) affLiens(numId, "movie", tabLiens) def affLiens(numId, typM, liste): xbmcplugin.setPluginCategory(__handle__, "Liens") xbmcplugin.setContent(__handle__, 'files') try: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.numId={}".format(numId) movie = extractMedias(sql=sql) except: movie = [] if not movie: sql = "SELECT title, overview, year, poster, numId, genres, popu, backdrop, runtime, id FROM filmsPub WHERE numId={}".format(numId) movie = createbdhk.extractMedias(sql=sql) for l in liste: l = list(l) l += movie[0] media = Media("lien", *l) media.typeMedia = typM if typM == "movie": addDirectoryFilms("%s" %(l[0]), isFolder=False, parameters={"action": "playHK", "lien": media.link, "u2p": media.numId}, media=media) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True, cacheToDisc=True) def orderLiens(dictResos, paramstring): tabLinks = [] tabLinks2 = [] taille = 5000000001 filtre = 0 if __addon__.getSetting("filtliens") != "false": filtre = 1 for k, v in dictResos.items(): tabFiltre = [] link = [x for x in paramstring if k in x] if filtre: notice(v) if re.search(r"1080", v[0], re.I) and (re.search(r"\.multi", v[0], re.I) or re.search(r"\.vf", v[0], re.I) or re.search(r"\.truefrench", v[0], re.I) or re.search(r"\.french", v[0], re.I)) and int(v[1]) < taille: tabLinks.append(list(v) + [k] + link) 
def colorLiens(lien):
    """Map a release name to the color used when rendering the link label.

    Priority order: 2160p/4K (red) > 3D (yellow) > 1080p (fuchsia) >
    720p (seagreen) > 480p (dodgerblue) > anything else (white).
    """
    if "2160" in lien or re.search("4K", lien, re.I):
        c = "red"
    elif re.search("3D", lien, re.I):
        c = "yellow"
    elif "1080" in lien:
        c = "fuchsia"
    elif "720" in lien:
        c = "seagreen"
    elif "480" in lien:
        c = "dodgerblue"
    else:
        c = "white"
    return c


def getVerifLinksSerie(paramstring):
    """Return validated (link, release, size_in_bytes) triples for episode links.

    *paramstring* is a list of raw link codes; rows are looked up in the
    episodes table, sizes converted to decimal bytes, and links filtered
    through the same validity checks as getVerifLinks.
    """
    cr = Crypt()
    sql = "SELECT link, release, taille FROM episodes WHERE link IN ('{}')".format("','".join(paramstring))
    liste = createbdhk.extractMedias(sql=sql)
    paramstring = []
    for l in liste:
        l = list(l)
        if "Go" in l[2]:
            l[2] = int(float(l[2].replace("Go", "")) * 1000000000)
        elif "Mo" in l[2]:
            # bug fix: 1 Mo = 1e6 bytes; the previous multiplier was 100000000
            # (1e8), inflating megabyte sizes 100x versus the Go -> 1e9 rule
            l[2] = int(float(l[2].replace("Mo", "")) * 1000000)
        else:
            l[2] = int(float(l[2]))
        paramstring.append(l)
    # 12-character link codes are 'darkino' entries and use a different validity check
    linksDarkino = [x for x in paramstring if len(x[0]) == 12]
    links = [x for x in paramstring if x not in linksDarkino]
    dictLiensfichier = cr.extractLinks([x[0] for x in links])
    links = [x for x in links if dictLiensfichier[x[0]]]
    if linksDarkino:
        linkOk, linkOut = cr.validLinkDark([x[0] for x in linksDarkino])
        linksDarkino = [x for x in linksDarkino if x[0] not in linkOut]
        links += linksDarkino
    return links
cr.extractReso([(x.split("#")[0].split("@")[0], x.split("#")[0].split("@")[1]) for x in paramstring]) #dictResos = {x.split("#")[0].split("@")[1]: dictResos[x.split("#")[0].split("@")[1]] if dictResos[x.split("#")[0].split("@")[1]] else x.split("#")[1] for x in paramstring} typMedia = xbmc.getInfoLabel('ListItem.DBTYPE') if not typMedia: xbmc.executebuiltin("Dialog.Close(busydialog)") xbmc.sleep(500) typMedia = xbmc.getInfoLabel('ListItem.DBTYPE') #notice("type media " + typMedia) #notice(xbmc.getInfoLabel('ListItem.DBID')) #notice(xbmc.getInfoLabel('ListItem.DBTYPE')) #numId = xbmc.getInfoLabel('ListItem.DBID') if typMedia != "movie": links = getVerifLinksSerie(paramstring) #notice("links") #notice(links) paramstring = links title = xbmc.getInfoLabel('ListItem.TVShowTitle') saison = xbmc.getInfoLabel('ListItem.Season') #notice("title " + xbmc.getInfoLabel('Player.Title')) if xbmc.Player().isPlaying(): infoTag = xbmc.Player().getVideoInfoTag() title = infoTag.getTVShowTitle() saison = infoTag.getSeason() #notice("serie" + title) histoReso = None if title: idDB = xbmc.getInfoLabel('ListItem.DBID') histoReso = gestionBD("get", title, saison) #notice("histo %s" %histoReso) else: liste = gestionBD("last") if liste: idDB, title, saison, reso = liste histoReso = (reso, ) pos = 0 if suite: if histoReso and histoReso[0]: resos = [histoReso[0]] + resos timing = 2 if u2p: timing = 0 numId = u2p try: for reso in resos: #notice(reso) for i, lien in enumerate(paramstring): if reso in lien[1]: pos = i raise StopIteration except StopIteration: pass # ======================================================================================================================================== selected = 0 if len(paramstring) == 1: if type(paramstring[0]) == str: result['url'] = paramstring[0] else: result['url'] = paramstring[0][0] else: dialog = xbmcgui.Dialog() #if u2p and numId != "divers": # tabNomLien = ["Bande Annonce"] #else: # tabNomLien = [] """ tabNomLien = [] paramstring = 
orderLiens(dictResos, paramstring) #notice(paramstring) try: tabNomLien += ["#%d (%s - %.2fGo)" %(i + 1, dictResos[x.split("#")[0].split("@")[1]][0], (int(dictResos[x.split("#")[0].split("@")[1]][1]) / 1000000000.0)) for i, x in enumerate(paramstring)] except: tabNomLien += ["#%d (ind)" %(i + 1) for i, x in enumerate(paramstring)] """ tabNomLien = ["[COLOR %s]#%d[/COLOR]| %s - %.2fGo" %(colorLiens(x[1]), i + 1, x[1], (int(x[2]) / 1000000000.0)) for i, x in enumerate(paramstring)] #if u2p and numId != "divers": # tabNomLien += ["Casting", "Similaires", "Recommendations"] selected = dialog.select("Choix lien", tabNomLien, int(timing) * 1000, pos) if selected != -1: if u2p and numId != "divers": if "Bande Annonce" == tabNomLien[selected]: mdb = TMDB(__keyTMDB__) tabBa = mdb.getNumIdBA(numId, typMedia) if tabBa: selectedBa = dialog.select("Choix B.A", ["%s (%s)" %(x[0], x[1]) for x in tabBa], 0, 0) if selectedBa != -1: keyBa = tabBa[selectedBa][2] xbmc.executebuiltin("RunPlugin(plugin://plugin.video.youtube/?action=play_video&videoid={})".format(keyBa), True) return elif tabNomLien[selected] in ["Similaires", "Recommendations"]: loadSimReco(numId, typMedia, tabNomLien[selected]) return else: result['url'] = paramstring[selected][0] reso = paramstring[selected][1] else: result['url'] = paramstring[selected][0] reso = paramstring[selected][1] else: return if typMedia != "movie": if u2p: gestionBD("update", u2p, title, saisonIn, reso, pos) else: if title: gestionBD("update", idDB, title, saison, reso, pos) # debridage ApikeyAlldeb = getkeyAlldebrid() ApikeyRealdeb = getkeyRealdebrid() #ApikeyUpto = getkeyUpto() Apikey1fichier = getkey1fichier() validKey = False cr = Crypt() if len(result['url'].strip()) == 12: urlLink = cr.urlBase + cr.cryptFile(result['url'].strip(), 0) else: urlLink = cr.url + "/?" 
+ cr.cryptFile(result['url'], 0) result['url'] = urlLink if ApikeyAlldeb: erreurs = ["AUTH_MISSING_AGENT", "AUTH_BAD_AGENT", "AUTH_MISSING_APIKEY", "AUTH_BAD_APIKEY"] urlDedrid, status = cr.resolveLink(result['url'], ApikeyAlldeb) if status in erreurs: validKey = False showInfoNotification("Key Alldebrid Out!") #addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") #addon.setSetting(id="keyalldebrid", value="") else: result['url'] = urlDedrid.strip() validKey = True if ApikeyRealdeb and not validKey: urlDedrid, status = cr.resolveLink(result['url'], ApikeyRealdeb) if status == "err": showInfoNotification("Key Realdebrid Out!") #addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") #addon.setSetting(id="keyrealdebrid", value="") else: result['url'] = urlDedrid.strip() validKey = True if Apikey1fichier and not validKey: urlDedrid, status = cr.resolveLink(result['url'], Apikey1fichier) result['url'] = urlDedrid.strip() #if status == 16: # showInfoNotification("Key Uptobox Out!") #addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") #addon.setSetting(id="keyupto", value="") if result['url'][:-4] in [".mkv", ".mp4"]: title = unquote(result['url'][:-4].split("/")[-1])#, encoding='latin-1', errors='replace') else: title = unquote(result['url'].split("/")[-1])#, encoding='latin-1', errors='replace') try: title = unicode(title, "utf8", "replace") except: pass result["title"] = title #notice(result) return result def delcompte(params): link = params["lien"] ApikeyUpto = getkeyUpto() up = uptobox.Uptobox(ApikeyUpto) up.deleteFile(link) showInfoNotification("Delete Ok: %s" %link) def addCompteUpto(params): modeUp = __addon__.getSetting("modeadd") link = params["lien"] ApikeyUpto = getkeyUpto() cr = cryptage.Crypt() links = link.split("*") if len(links) > 1: choix = [x.split("#")[1] for x in links] dialog = xbmcgui.Dialog() d = dialog.select("Que veux tu ajouter?", choix) if d != -1: link = links[d] else: return else: link = links[0] link = link.split("#")[0] if modeUp == 
"alias": filecode, status = cr.resolveLink(link.split("@")[0], link.split("@")[1], key=ApikeyUpto, addCompte=1) else: filecode, status = cr.resolveLink(link.split("@")[0], link.split("@")[1], key=ApikeyUpto, addCompte=2) showInfoNotification("Upload Ok: %s" %filecode) ''' def loadSimReco(numId, typM, recherche): notice("typ demande " + recherche) dictTyp = {"tvshow": "tv", "movie": "movie"} mdb = TMDB(__keyTMDB__) if recherche == "Similaires": liste = mdb.suggReco(numId, dictTyp[typM], "similar") elif recherche == "Recommendations": liste = mdb.suggReco(numId, dictTyp[typM], "recommendations") if typM == "movie": sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) FROM movie as m \ WHERE m.numId IN ({})".format(",".join([str(x) for x in liste])) else: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu FROM tvshow as m \ WHERE m.numId IN ({})".format(",".join([str(x) for x in liste])) movies = extractMedias(sql=sql) affMovies(typM, movies) ''' def loadSimReco2(params): numId = params["u2p"] recherche = params["typ"] typM = xbmc.getInfoLabel('ListItem.DBTYPE') if not typM: xbmc.executebuiltin("Dialog.Close(busydialog)") xbmc.sleep(500) typM = xbmc.getInfoLabel('ListItem.DBTYPE') if typM == "": typM = params["typM"] #notice("typ demande " + recherche + " typM" +typM) dictTyp = {"tvshow": "tv", "movie": "movie", "episode": "tv"} mdb = TMDB(__keyTMDB__) if recherche == "Similaires": liste = mdb.suggReco(numId, dictTyp[typM], "similar") elif recherche == "Recommendations": liste = mdb.suggReco(numId, dictTyp[typM], "recommendations") if __addon__.getSetting("actifnewpaste") != "false": if typM == "movie": sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genres, m.popu, (SELECT GROUP_CONCAT(l.link, '*') FROM filmsPubLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM filmsPub as m WHERE m.numId IN ({})".format(",".join([str(x) 
def extractFanart(numId):
    """Fetch (logo, clearart, banner) artwork values for *numId*.

    Reads the movieFanart table of the local 'medias.bd' SQLite database;
    returns three empty strings when no row matches.
    """
    connection = sqlite3.connect(__repAddon__ + "medias.bd")
    cursor = connection.cursor()
    cursor.execute("SELECT logo, clearart, banner FROM movieFanart WHERE numId={}".format(numId))
    rows = cursor.fetchall()
    cursor.close()
    connection.close()
    # first matching row already holds the (logo, clearart, banner) triple
    return rows[0] if rows else ("", "", "")
ASC") requete = cur.fetchall() cur.close() cnx.close() return requete def affAlldeb(): choix = [("Magnets", {"action":"magnets", "offset": 0}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', "Afficher les liens magnets"), ("History", {"action":"listeAll", "offset": 0, "typeL": "history"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', "Afficher les liens recents"), ("Sauvegarde", {"action":"listeAll", "offset": 0, "typeL": "links"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', "Afficher les liens sauvés"),] xbmcplugin.setPluginCategory(__handle__, "Menu Alldebrid") addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") isFolder = True for ch in sorted(choix): name, parameters, picture, texte = ch li = xbmcgui.ListItem(label=name) updateMinimalInfoTagVideo(li,name,texte) li.setArt({'thumb': picture, 'icon': addon.getAddonInfo('icon'), 'icon': addon.getAddonInfo('icon'), 'fanart': addon.getAddonInfo('fanart')}) url = sys.argv[0] + '?' 
+ urlencode(parameters) xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=li, isFolder=isFolder) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def affUpto(): choix = [("Recherche", {"action":"rechercheUpto", "offset": 0}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/search.png', "Recherche dans le compte Uptobox\n --- ATTENTION ---\n c'est sur le nom du fichier!!!"), ("News", {"action":"newUpto", "offset": 0}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', "Afficher les derniéres nouveautés du compte Uptobox"), ("Racine", {"action": "loadUpto", "rep": "//", "offset": 0, "typM": "movie"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', "Media a la racine du compte"), ("Mon Historique", {"action": "histoupto"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/Mon historique.png', "Suivi series, films Uptobox"),] xbmcplugin.setPluginCategory(__handle__, "Menu Uptobox") addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") liste = widget.extractRepUpto() if liste: for (nom, rep, typMedia) in liste: if typMedia == "movie": choix.append((nom, {"action": "loadUpto", "rep": rep, "offset": 0, "typM": typMedia}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', typMedia)) else: choix.append((nom, {"action": "loadUptoSerie", "rep": rep, "offset": 0, "typM": typMedia}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', typMedia)) liste = widget.extractRepUptoPublic() if liste: for (nom, numHash, folder, typMedia) in liste: choix.append((nom, {"action": "loadUptoP", "hash": numHash, "folder": folder, "offset": 0, "typM": typMedia}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', typMedia)) isFolder = True for ch in sorted(choix): name, parameters, picture, texte = ch li = xbmcgui.ListItem(label=name) updateMinimalInfoTagVideo(li,name,texte) li.setArt({'thumb': picture, 
'icon': addon.getAddonInfo('icon'), 'icon': addon.getAddonInfo('icon'), 'fanart': addon.getAddonInfo('fanart')}) url = sys.argv[0] + '?' + urlencode(parameters) xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=li, isFolder=isFolder) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def affTmdb(): liste = widget.extractListTMDB() if liste: xbmcplugin.setPluginCategory(__handle__, "Menu TMDB") addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") choix = [] for (nom, typMedia) in liste: if typMedia == "movie": choix.append((nom, {"action":"MenuFilmHK", "famille": 'tmdb', "nom": nom}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', typMedia)) else: choix.append((nom, {"action":"MenuSerieHK", "famille": 'tmdb', "nom": nom}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', typMedia)) isFolder = True for ch in sorted(choix): name, parameters, picture, texte = ch li = xbmcgui.ListItem(label=name) updateMinimalInfoTagVideo(li,name,texte) li.setArt({'thumb': picture, 'icon': addon.getAddonInfo('icon'), 'icon': addon.getAddonInfo('icon'), 'fanart': addon.getAddonInfo('fanart')}) url = sys.argv[0] + '?' 
+ urlencode(parameters) xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=li, isFolder=isFolder) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def affPastebin(): liste = widget.extractListPaste() if liste: xbmcplugin.setPluginCategory(__handle__, "Menu Liste Pastebin") addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") choix = [] for (nom, typMedia) in liste: if typMedia == "film": choix.append((nom, {"action":"MenuFilm", "famille": 'paste', "nom": nom}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', typMedia)) else: choix.append((nom, {"action":"MenuSerie", "famille": 'paste', "nom": nom, "t": typMedia}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', typMedia)) isFolder = True for ch in sorted(choix): name, parameters, picture, texte = ch li = xbmcgui.ListItem(label=name) updateMinimalInfoTagVideo(li,name,texte) li.setArt({'thumb': picture, 'icon': addon.getAddonInfo('icon'), 'icon': addon.getAddonInfo('icon'), 'fanart': addon.getAddonInfo('fanart')}) url = sys.argv[0] + '?' 
+ urlencode(parameters) xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=li, isFolder=isFolder) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def affTrakt(): xbmcplugin.setPluginCategory(__handle__, "Menu Trakt") addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") choix = [("Listes", {"action":"MenuTrakt", "liste": "liste"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', "Listes personnelles"), ("Listes Liked", {"action":"MenuTrakt", "liste": "liked"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', "Listes que tu as aimées et likées"), ("Watchlist Film", {"action":"MenuTrakt", "cat": "wmovie"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', "Tes envies de films a voir"), ("Watchlist Serie", {"action":"MenuTrakt", "cat": "wshow"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', "Tes envies de series a voir"), ("Recommendations Film", {"action":"MenuTrakt", "cat": "rmovie"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', "Films pour toi"), ("Recommendations Serie", {"action":"MenuTrakt", "cat": "rshow"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', "Series pour toi"), ] isFolder = True for ch in sorted(choix): name, parameters, picture, texte = ch li = xbmcgui.ListItem(label=name) updateMinimalInfoTagVideo(li,name,texte) li.setArt({'thumb': picture, 'icon': addon.getAddonInfo('icon'), 'icon': addon.getAddonInfo('icon'), 'fanart': addon.getAddonInfo('fanart')}) url = sys.argv[0] + '?' 
+ urlencode(parameters) xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=li, isFolder=isFolder) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def ventilationHK(): xbmcplugin.setPluginCategory(__handle__, "Menu") addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") choix = [ ("Setting", {"action":"setting"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/setting.png', "Reglages U2P"), ("Setting Profils", {"action":"affProfils"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/profil.png', "Choix Profils"), #("Il est frais mon poisson ....", {"action": "affNewsUpto", "offset": "0"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', "Dernieres news brut....."), ] # bd hk if __addon__.getSetting("actifhk") != "false": choix += [("Films, Docu, concerts....", {"action":"MenuFilm"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/film1.png', "Bibliothéque de films, concerts, spectacles et documentaires"), ("Divers....", {"action":"MenuDivers"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', "Sports, Show TV, FANKAI, etc..."), ("Series & Animes", {"action":"MenuSerie"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/series1.png', "Bibliothéque séries , animation & animes japonais"), ("Audio Book", {"action":"MenuAudio"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/book.png', "Bibliothéque livres audio"), ("Ma Recherche", {"action":"affSearch"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/search.png', "type de recherche dispo"),] # cretion bd avec les pastes pastebin #if __addon__.getSetting("actiflocal") != "false": # choix += [("Pastes Pastebin", {"action":"pastepastebin"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/pastebin.png', "DATABASE via num paste"),] # creation avec repertoires crypté if __addon__.getSetting("actifnewpaste") != "false": choix += 
[("Médiathéque HK3", {"action":"menuRepCrypte"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/upto.png', "DATABASE new systéme"),] # trakt if __addon__.getSetting("traktperso") != "false": userPass = [x[1] for x in widget.usersBookmark() if x[0] == __addon__.getSetting("profiltrakt")] if ( __addon__.getSetting("bookonline") != "false" and userPass and __addon__.getSetting("bookonline_name") == userPass[0]) or __addon__.getSetting("bookonline") == "false": choix.append(("Mon Trakt", {"action":"affTraktPerso"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/trakt.png', "ce qui est lié au compte Trakt perso")) # mon contenu uptobox if __addon__.getSetting("actifupto") != "false": choix.append(("Mon Uptobox", {"action":"affUpto"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/upto.png', "accés à ton compte upto et publique")) # liste tmdb if __addon__.getSetting("actiftmdb") != "false": choix.append(("Mon Movie-DB", {"action":"affTmdb"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/tmdb.png', "listes reconstituées avec TMDB")) # liste pastebin d'apres un num de paste depuis hk if __addon__.getSetting("actifpastebin") != "false": choix.append(("Mon Pastebin", {"action":"affPastebin"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/pastebin.png', "listes reconstituées avec le(s) paste(s)")) # mon alldeb if __addon__.getSetting("actifalldeb") != "false": choix.append(("Mon Alldebrid", {"action":"affAlldeb"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/alldebrid.png', "Accés liens du compte")) #choix.append(("Mes Bandes Annonces", {"action":"affbaext"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', "Accés repo special Bande Annonce")) isFolder = True for ch in sorted(choix): name, parameters, picture, texte = ch li = xbmcgui.ListItem(label=name) updateMinimalInfoTagVideo(li,name,texte) if "HK3" in name: commands = [] 
commands.append(('[COLOR yellow]Details Mediathéque[/COLOR]', 'RunPlugin(plugin://plugin.video.sendtokodiU2P/?action=detailmediatheque)')) li.addContextMenuItems(commands) li.setArt({'thumb': picture, 'icon': addon.getAddonInfo('icon'), 'icon': addon.getAddonInfo('icon'), 'fanart': addon.getAddonInfo('fanart')}) url = sys.argv[0] + '?' + urlencode(parameters) xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=li, isFolder=isFolder) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def traktHKventilation(): trk = TraktHK() user = __addon__.getSetting("usertrakt") if __addon__.getSetting("actifnewpaste") != "false": sqlMovie = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genres, m.popu, (SELECT GROUP_CONCAT(l.link, '*') FROM filmsPubLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM filmsPub as m WHERE m.numId IN ({})" sqlTvshow = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genres, m.popu, m.backdrop, m.runtime, m.id FROM seriesPub as m\ WHERE m.numId IN ({})" fxExtract = createbdhk.extractMedias elif __addon__.getSetting("actifhk") != "false": sqlMovie = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.numId IN ({})" sqlTvshow = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE m.numId IN ({})" fxExtract = extractMedias movies = None if "cat" in __params__.keys(): cat = __params__["cat"] #=========================================================================================================================================== if cat == "wmovie": tabNumId = trk.getUserWatchlist(user, typM="movies") tabNumId = [x for x in tabNumId if x] movies = fxExtract(sql=sqlMovie.format(",".join([str(x) for x in tabNumId]))) typM = "movie" 
#=========================================================================================================================================== elif cat == "wshow": typM = "tvshow" tabNumId = trk.getUserWatchlist(user, typM="shows") tabNumId = [x for x in tabNumId if x] movies = fxExtract(sql=sqlTvshow.format(",".join([str(x) for x in tabNumId]))) #=========================================================================================================================================== elif cat == "rmovie": tabNumId = trk.recommendations(user, typM="movies") tabNumId = [x for x in tabNumId if x] movies = fxExtract(sql=sqlMovie.format(",".join([str(x) for x in tabNumId]))) typM = "movie" #=========================================================================================================================================== elif cat == "rshow": typM = "tvshow" tabNumId = trk.recommendations(user, typM="shows") tabNumId = [x for x in tabNumId if x] movies = fxExtract(sql=sqlTvshow.format(",".join([str(x) for x in tabNumId]))) elif "numliste" in __params__.keys(): numliste = __params__["numliste"] nb = int(__params__["nb"]) listeTR = trk.getListId([numliste, nb]) listeMovies = [liste["movie"]["ids"]["tmdb"] for liste in listeTR if liste["type"] == "movie"] listeMovies = [x for x in listeMovies if x] listeShows = [liste["show"]["ids"]["tmdb"] for liste in listeTR if liste["type"] == "show"] listeShows = [x for x in listeShows if x] if listeMovies and listeShows: dialog = xbmcgui.Dialog() ret = dialog.yesno('Listes', 'Cette liste a 2 types de contenu, lequel afficher?', nolabel="Films", yeslabel="Series") if not ret: typM = "movie" movies = fxExtract(sql=sqlMovie.format(",".join([str(x) for x in listeMovies]))) else: typM = "tvshow" movies = fxExtract(sql=sqlTvshow.format(",".join([str(x) for x in listeShows]))) elif listeMovies: typM = "movie" movies = fxExtract(sql=sqlMovie.format(",".join([str(x) for x in listeMovies]))) elif listeShows: typM = "tvshow" movies = 
fxExtract(sql=sqlTvshow.format(",".join([str(x) for x in listeShows]))) elif "liste" in __params__.keys(): if __params__["liste"] == "liste": listes = trk.extractListsIdUser(user) if __params__["liste"] == "liked": listes = trk.getUserListsLikes(user) choix = [(k, {"action":"MenuTrakt", "numliste":v[0], "nb": v[1]}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', k) for k, v in list(listes.values())[0].items()] addCategorieMedia(choix) if movies: affMovies(typM, movies) def diversHK(): if "groupe" in __params__.keys(): #title, overview, year, poster, link, numId sql = "SELECT title, groupe||' ('||repo||')', '', '', 'divers', '', '', link, '', '' FROM divers WHERE repo='{}' AND groupe='{}' ORDER BY id DESC".format(__params__["repo"].replace("'", "''"), __params__["groupe"].replace("'", "''")) movies = extractMedias(sql=sql) #notice(movies) affMovies("movie", movies) elif "repo" in __params__.keys(): if __params__["repo"] == "adulte": dialog = xbmcgui.Dialog() d = dialog.input('Enter secret code', type=xbmcgui.INPUT_ALPHANUM, option=xbmcgui.ALPHANUM_HIDE_INPUT) if d == "x2015x": ok = True else: ok = False else: ok = True if ok: sql = "SELECT DISTINCT groupe FROM divers WHERE repo='{}' ORDER BY id DESC".format(__params__["repo"].replace("'", "''")) listeRep = extractMedias(sql=sql, unique=1) choix = [(x, {"action":"MenuDivers", "repo":__params__["repo"], "groupe": x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in sorted(listeRep)] addCategorieMedia(choix) else: sql = "SELECT DISTINCT repo FROM divers" listeRep = extractMedias(sql=sql, unique=1) choix = [(x, {"action":"MenuDivers", "repo":x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/%s.png' %x, x) for x in sorted(listeRep)] addCategorieMedia(choix) def mediasHK(params={}): notice("params: " + str(params)) if params: __params__["famille"] = params["famille"] __params__["u2p"] = params["u2p"] __params__["numIdSaga"] = 
params["u2p"] typM = "movie" try: offset = int(__params__["offset"]) except: offset = 0 try: limit = int(__params__["limit"]) except: limit = int(getNbMedias()) orderDefault = getOrderDefault() year = datetime.today().year lastYear = year - 1 if "famille" in __params__.keys(): movies = [] #==================================================================================================================================================================================== if __params__["famille"] in ["tmdb"]: nom = __params__["nom"] listeId = widget.recupTMDB(nom, "movie") listeId = listeId[offset:] tabMovies = [] for n in listeId: if n: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId), m.backdrop, m.runtime, m.id FROM movie as m \ WHERE m.numId={}".format(n) movies = extractMedias(sql=sql) if movies: tabMovies.append(movies[0]) if len(tabMovies) == limit: break __params__["offset"] = offset movies = tabMovies[:] #==================================================================================================================================================================================== elif __params__["famille"] in ["paste"]: nom = __params__["nom"] listePaste = widget.recupPaste(nom, "film") listeId = [] for paste in listePaste: sql = "SELECT numId FROM paste WHERE numPaste='{}' ORDER BY id asc".format(paste) listeId += [x[0] for x in extractMedias(sql=sql)] listeId = listeId[offset:] tabMovies = [] for n in listeId: if n: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId), m.backdrop, m.runtime, m.id FROM movie as m \ WHERE m.numId={}".format(n) movies = extractMedias(sql=sql) if movies: tabMovies.append(movies[0]) if len(tabMovies) == limit: break __params__["offset"] = offset movies = tabMovies[:] 
#==================================================================================================================================================================================== elif __params__["famille"] in ["Last View", "Mon historique"]: if __addon__.getSetting("bookonline") != "false": liste = widget.responseSite("http://%s/requete.php?name=%s&type=view&media=movie" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"))) else: liste = widget.bdHK(extract=1) tabMovies = [] for n in liste: if n: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId), m.backdrop, m.runtime, m.id FROM movie as m \ WHERE m.numId={}".format(n) movies = extractMedias(sql=sql) if movies: tabMovies.append(movies[0]) movies = tabMovies[:] #sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId), m.backdrop, m.runtime, m.id FROM movie as m \ # WHERE m.numId IN ({})".format(",".join([str(x) for x in liste])) #movies = extractMedias(sql=sql) #=============================================================================================================================================================================== elif __params__["famille"] in ["Fav'S HK", "Mes favoris HK"]: if __addon__.getSetting("bookonline") != "false": notice("http://%s/requete.php?name=%s&type=favs&media=movies" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"))) liste = widget.responseSite("http://%s/requete.php?name=%s&type=favs&media=movies" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"))) else: liste = widget.extractFavs("movies") sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId), m.backdrop, m.runtime, m.id FROM movie as m \ WHERE m.numId IN 
({})".format(",".join([str(x) for x in liste])) __params__["favs"] = __addon__.getSetting("bookonline_name") movies = extractMedias(sql=sql) #==================================================================================================================================================================================== elif __params__["famille"] in ["Last In", "Derniers Ajouts"]: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id FROM movie as m"\ + " ORDER BY m.id DESC" + " LIMIT {} OFFSET {}".format(limit, offset) __params__["offset"] = offset movies = extractMedias(sql=sql) #==================================================================================================================================================================================== elif __params__["famille"] in ["#Years/Last In", "Nouveautés par année"]: #sql = "SELECT numId FROM movieFamille WHERE famille IN ('#Documentaires', '#Concerts', '#Spectacles')" sql = "SELECT numId FROM movieFamille WHERE famille IN (SELECT o.nom FROM movieTypeFamille as o WHERE o.typFamille='out')" tabNumId = extractMedias(sql=sql, unique=1) sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.numId NOT IN ({}) AND m.year!='' AND genre NOT LIKE '%%Documentaire%%' AND genre NOT LIKE '%%Animation%%' ORDER BY ".format(",".join([str(x) for x in tabNumId])) + " year DESC, id DESC"\ + " LIMIT {} OFFSET {}".format(limit, offset) __params__["offset"] = offset movies = extractMedias(sql=sql) #==================================================================================================================================================================================== elif __params__["famille"] in ["Nouveautés %d-%d" %(lastYear, year)]: #sql = "SELECT numId FROM 
movieFamille WHERE famille IN ('#Documentaires', '#Concerts', '#Spectacles')" sql = "SELECT numId FROM movieFamille WHERE famille IN (SELECT o.nom FROM movieTypeFamille as o WHERE o.typFamille='out')" tabNumId = extractMedias(sql=sql, unique=1) sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.numId NOT IN ({}) AND m.year!='' AND m.year>={} AND genre NOT LIKE '%%Documentaire%%' AND genre NOT LIKE '%%Animation%%' ORDER BY ".format(",".join([str(x) for x in tabNumId]), lastYear) + " id DESC"\ + " LIMIT {} OFFSET {}".format(limit, offset) __params__["offset"] = offset movies = extractMedias(sql=sql) #==================================================================================================================================================================================== elif __params__["famille"] in ["#Notations", "Les mieux notés"]: #sql = "SELECT numId FROM movieFamille WHERE famille IN ('#Documentaires', '#Concerts', '#Spectacles')" sql = "SELECT numId FROM movieFamille WHERE famille IN (SELECT o.nom FROM movieTypeFamille as o WHERE o.typFamille='out')" tabNumId = extractMedias(sql=sql, unique=1) sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.numId NOT IN ({}) AND m.year!='' AND genre NOT LIKE '%%Documentaire%%' AND genre NOT LIKE '%%Animation%%' AND m.popu>7 AND m.popu<9 AND m.votes>500 ORDER BY ".format(",".join([str(x) for x in tabNumId])) + " m.popu DESC"\ + " LIMIT {} OFFSET {}".format(limit, offset) __params__["offset"] = offset movies = extractMedias(sql=sql) #==================================================================================================================================================================================== elif __params__["famille"] == 
"cast": mdb = TMDB(__keyTMDB__) liste = mdb.person(__params__["u2p"]) sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId), m.backdrop, m.runtime, m.id FROM movie as m \ WHERE m.numId IN ({})".format(",".join([str(x) for x in liste])) movies = extractMedias(sql=sql) #==================================================================================================================================================================================== elif __params__["famille"] == "Liste Aléatoire": sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id FROM movie as m ORDER BY RANDOM() LIMIT {}".format(getNbMedias()) movies = extractMedias(sql=sql) #==================================================================================================================================================================================== elif __params__["famille"] == "Alpha(s)": if "alpha" in __params__.keys(): alpha = __params__["alpha"] else: dialog = xbmcgui.Dialog() alpha = dialog.input("ex: ram => tous les titres qui commencent par 'ram' \n(astuce le _ remplace tous caractéres)", type=xbmcgui.INPUT_ALPHANUM, defaultt="") if len(alpha) > 0: __params__["alpha"] = alpha sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.title LIKE {} ORDER BY title COLLATE NOCASE ASC".format("'" + str(alpha) + "%'") + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) #==================================================================================================================================================================================== elif __params__["famille"] == "Multi-Tri": #sql = "SELECT numId FROM movieFamille WHERE famille 
IN ('#Documentaires', '#Concerts', '#Spectacles')" sql = "SELECT numId FROM movieFamille WHERE famille IN (SELECT o.nom FROM movieTypeFamille as o WHERE o.typFamille='out')" tabNumId = extractMedias(sql=sql, unique=1) dictTri = {"Année puis Date entrée": "year DESC, id DESC", "Date entrée": "id DESC", "Année puis Ordre Alpha": "year DESC, title COLLATE NOCASE ASC", "Ordre Alpha A-Z": "title COLLATE NOCASE ASC",\ "Ordre Alpha Z-A": "title COLLATE NOCASE DESC"} if "tri" in __params__.keys(): tri = int(__params__["tri"]) else: dialog = xbmcgui.Dialog() tri = dialog.select("Selectionner le tri", list(dictTri.keys()), 0, 0) if tri != -1: __params__["tri"] = str(tri) sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.numId NOT IN ({}) AND m.year!='' AND genre NOT LIKE '%%Documentaire%%' ORDER BY ".format(",".join([str(x) for x in tabNumId])) + dictTri[list(dictTri.keys())[tri]]\ + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) #==================================================================================================================================================================================== elif __params__["famille"] in ["Search", "Recherche"]: dialog = xbmcgui.Dialog() d = dialog.input("Recherche (mini 3 lettres)", type=xbmcgui.INPUT_ALPHANUM, defaultt="") if len(d) > 2: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE normalizeTitle(m.title) LIKE normalizeTitle({}) ORDER BY title COLLATE NOCASE ASC".format("'%" + str(d).replace("'", "''") + "%'") movies = extractMedias(sql=sql) 
#==================================================================================================================================================================================== elif __params__["famille"] in ["Groupes Contrib", "Listes des contributeurs"]: sql = "SELECT DISTINCT groupeParent FROM movieGroupe" listeRep = extractMedias(sql=sql, unique=1) choix = [(x, {"action":"MenuFilm", "groupe": x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in sorted(listeRep)] addCategorieMedia(choix) #==================================================================================================================================================================================== elif __params__["famille"] == "Spécial Widget": sql = "SELECT DISTINCT groupeParent FROM movieTrakt" listeRep = extractMedias(sql=sql, unique=1) choix = [(x, {"action":"MenuFilm", "trakt": x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in sorted(listeRep)] addCategorieMedia(choix) #==================================================================================================================================================================================== elif __params__["famille"] == "Mes Widgets": #notice(widget.extractListe()) listeRep = list(widget.extractListe()) choix = [(x, {"action":"MenuFilm", "widget": x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in sorted(listeRep)] addCategorieMedia(choix) #==================================================================================================================================================================================== elif __params__["famille"] == "Listes lecture": listeRep = list(widget.getListesV("movie")) choix = [(x, {"action":"MenuFilm", "listeV": x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in sorted(listeRep)] addCategorieMedia(choix) 
#==================================================================================================================================================================================== elif __params__["famille"] == "Listes Trakt": listeRep = list(widget.getListesT("movie")) listeRep = list(set([x[3] for x in listeRep])) choix = [(x, {"action":"MenuFilm", "listeT": x, "typM": "movie"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in sorted(listeRep)] addCategorieMedia(choix) #==================================================================================================================================================================================== elif __params__["famille"] == "Sagas": sql = "SELECT numId, title, overview, poster, poster FROM saga ORDER BY title" movies = extractMedias(sql=sql) typM = "saga" #==================================================================================================================================================================================== elif __params__["famille"] == "sagaListe": sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.numId IN (SELECT f.numId FROM sagaTitle as f WHERE f.numIdSaga={})".format(__params__["numIdSaga"]) movies = extractMedias(sql=sql) #==================================================================================================================================================================================== elif __params__["famille"] == "Année(s)": if "an" in __params__.keys(): an = [int(x) for x in __params__["an"].split(":")] else: dialog = xbmcgui.Dialog() d = dialog.input("Entrer Année => 2010 / Groupe d'années => 2010:2014", type=xbmcgui.INPUT_ALPHANUM) if d: an = d.split(":") __params__["an"] = ":".join([str(x) for x in an]) if len(an) == 1: sql = "SELECT m.title, m.overview, m.year, m.poster, 
m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.year={}".format(an[0]) + " ORDER BY m.year DESC, m.Title ASC" + " LIMIT {} OFFSET {}".format(limit, offset) else: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.year>={} and m.year<={}".format(an[0], an[1]) + " ORDER BY m.year DESC, m.Title ASC" + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) #==================================================================================================================================================================================== elif __params__["famille"] == "Genre(s)": mdb = TMDB(__keyTMDB__) tabGenre = mdb.getGenre() if "genre" in __params__.keys(): genres = [int(x) for x in __params__["genre"].split("*")] else: dialog = xbmcgui.Dialog() genres = dialog.multiselect("Selectionner le/les genre(s)", tabGenre, preselect=[]) if genres: __params__["genre"] = "*".join([str(x) for x in genres]) genresOk = " or ".join(["m.genre LIKE '%%%s%%'" %tabGenre[x] for x in genres]) sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE " + genresOk + " ORDER BY m.id DESC" + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) #==================================================================================================================================================================================== elif __params__["famille"] == "genres": rechGenre = __params__["typgenre"] if '#' in rechGenre: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM 
movie as m WHERE m.numId IN (SELECT f.numId FROM movieFamille as f WHERE f.famille='{}')".format(rechGenre)\ + orderDefault + " LIMIT {} OFFSET {}".format(limit, offset) else: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.genre LIKE '%%{}%%' ORDER BY m.id DESC".format(rechGenre) + " LIMIT {} OFFSET {}".format( limit, offset) movies = extractMedias(sql=sql) #================================================================================================================================================================================== elif __params__["famille"] == "Favoris": pass #==================================================================================================================================================================================== elif __params__["famille"] in ["Derniers Ajouts Films", "Les plus populaires TMDB/trakt", "#Film Last/In", "#Best TMDB-TRAKT"]: dictCf ={"Derniers Ajouts Films": "#Films Last/In", "Les plus populaires TMDB/trakt": "#Best TMDB-TRAKT", "#Films Last/In": "#Films Last/In", "#Best TMDB-TRAKT": "#Best TMDB-TRAKT"} sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.numId IN (SELECT f.numId FROM movieFamille as f WHERE f.famille='{}')".format(dictCf[__params__["famille"]])\ + orderDefault + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) __params__["offset"] = offset else: #ORDER BY m.title COLLATE NOCASE ASC sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.numId IN (SELECT f.numId FROM movieFamille as f WHERE f.famille='{}')".format(__params__["famille"])\ + 
orderDefault + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) __params__["offset"] = offset #sorted(movies, key=lambda x: x[-1], reverse=True) affMovies(typM, movies, params=__params__) if "groupe" in __params__.keys(): sql = "SELECT groupeFille FROM movieGroupe WHERE groupeParent='{}'".format(__params__["groupe"].replace("'", "''")) listeRep = extractMedias(sql=sql, unique=1) if len(listeRep) == 1 or not listeRep: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId), m.backdrop, m.runtime, m.id FROM movie as m \ WHERE m.numId IN (SELECT d.numId FROM movieGroupeDetail as d WHERE d.groupe='{}')".format(__params__["groupe"].replace("'", "''"))\ + orderDefault + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) __params__["offset"] = offset affMovies(typM, movies, params=__params__) else: choix = [(x, {"action":"MenuFilm", "groupe": x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in sorted(listeRep)] addCategorieMedia(choix) elif "listeT" in __params__.keys(): if "listeTfille" in __params__.keys(): liste = list(widget.getListesT("movie")) liste = [(x[0], x[1], "movie") for x in liste if x[3] == __params__["listeT"] and x[4] == __params__["listeTfille"]][0] trk = TraktHK() tabNumId = trk.extractList(*liste) tabNumId = [x for x in tabNumId if x] sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.numId IN ({})".format(",".join([str(x) for x in tabNumId])) movies = extractMedias(sql=sql) affMovies(typM, movies) else: listeRep = list(widget.getListesT("movie")) listeRep = list(set([x[4] for x in listeRep if x[3] == __params__["listeT"]])) choix = [(x, {"action":"MenuFilm", "listeT": __params__["listeT"], "listeTfille": x}, 
'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in sorted(listeRep)] addCategorieMedia(choix) elif "trakt" in __params__.keys(): sql = "SELECT groupeFille FROM movieTrakt WHERE groupeParent='{}'".format(__params__["trakt"].replace("'", "''")) listeRep = extractMedias(sql=sql, unique=1) if len(listeRep) == 1 or not listeRep: if listeRep: gr = listeRep[0].replace("'", "''") else: gr = __params__["trakt"].replace("'", "''") tabMovies = [] sql = "SELECT d.numId FROM movieTraktDetail as d WHERE d.groupe='{}' ORDER BY d.id ASC".format(gr) listeNumId = extractMedias(sql=sql, unique=1) for n in listeNumId: if n: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId), m.backdrop, m.runtime, m.id FROM movie as m \ WHERE m.numId={}".format(n) movies = extractMedias(sql=sql) if movies: tabMovies.append(movies[0]) __params__["offset"] = offset affMovies(typM, tabMovies, params=__params__) ''' sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId), m.backdrop, m.runtime, m.id FROM movie as m \ WHERE m.numId IN (SELECT d.numId FROM movieTraktDetail as d WHERE d.groupe='{}' ORDER BY d.id ASC)".format(__params__["trakt"].replace("'", "''"))\ + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) __params__["offset"] = offset affMovies(typM, movies, params=__params__) ''' else: choix = [(x, {"action":"MenuFilm", "trakt": x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in sorted(listeRep)] addCategorieMedia(choix) elif "widget" in __params__.keys(): sql = widget.getListe(__params__["widget"]) movies = extractMedias(sql=sql) affMovies(typM, movies) elif "listeV" in __params__.keys(): tabNumId = widget.getListesVdetail(__params__["listeV"], "movie") sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, 
m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.numId IN ({})".format(",".join([str(x) for x in tabNumId]))\ + " LIMIT {} OFFSET {}".format(limit, offset) __params__["offset"] = offset movies = extractMedias(sql=sql) affMovies(typM, movies) elif "typfamille" in __params__.keys(): sql = "SELECT nom FROM movieTypeFamille WHERE typFamille='{}'".format(__params__["typfamille"]) listeRepData = extractMedias(sql=sql, unique=1) choix = [(x, {"action":"MenuFilm", "famille":x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/%s.png' %x, x) for x in listeRepData] addCategorieMedia(choix) else: listeRep = ["Derniers Ajouts", "Derniers Ajouts Films", "Nouveautés par année", "Nouveautés %d-%d" %(lastYear, year), "Les mieux notés", "Les plus populaires TMDB/trakt", "Mon historique", "Mes favoris HK",\ "Listes des contributeurs", "Sagas", "Liste Aléatoire", "Spécial Widget", "Mes Widgets", "Listes lecture", "Listes Trakt"] sql = "SELECT DISTINCT typFamille FROM movieTypeFamille" listeRepData = [x for x in extractMedias(sql=sql, unique=1) if x not in ["filtre", "genre", "out"]] choix = [(x, {"action":"MenuFilm", "famille":x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/%s.png' %x, x) for x in listeRep] choix += [(x, {"action":"MenuFilm", "typfamille":x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/%s.png' %x, x) for x in listeRepData] choix.append(("Filtres", {"action":"filtres", "typM": "movie"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/Filtres.png', "Filtres")) choix.append(("Genres", {"action":"genres", "typM": "movie"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', "Genres")) addCategorieMedia(choix) def seriesHK(): typM = "tvshow" try: offset = int(__params__["offset"]) except: offset = 0 try: limit = int(__params__["limit"]) except: limit = int(getNbMedias()) orderDefault = getOrderDefault() 
xbmcgui.Window(10000).setProperty('nomFenetre', '') if "famille" in __params__.keys(): movies = [] #==================================================================================================================================================================================== if __params__["famille"] in ["tmdb"]: nom = __params__["nom"] listeId = widget.recupTMDB(nom, "tv") listeId = listeId[offset:] tabMovies = [] for n in listeId: if n: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id FROM tvshow as m \ WHERE m.numId={}".format(n) movies = extractMedias(sql=sql) if movies: tabMovies.append(movies[0]) if len(tabMovies) == limit: break __params__["offset"] = offset movies = tabMovies[:] #==================================================================================================================================================================================== elif __params__["famille"] in ["paste"]: nom = __params__["nom"] t = __params__["t"] listePaste = widget.recupPaste(nom, t) listeId = [] for paste in listePaste: sql = "SELECT numId FROM paste WHERE numPaste='{}' ORDER BY id asc".format(paste) listeId += [x[0] for x in extractMedias(sql=sql)] listeId = listeId[offset:] tabMovies = [] for n in listeId: if n: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id FROM tvshow as m \ WHERE m.numId={}".format(n) movies = extractMedias(sql=sql) if movies: tabMovies.append(movies[0]) if len(tabMovies) == limit: break __params__["offset"] = offset movies = tabMovies[:] #=============================================================================================================================================================================== elif __params__["famille"] in ["Last View", "Mon historique"]: if __addon__.getSetting("bookonline") != "false": liste = widget.responseSite("http://%s/requete.php?name=%s&type=view&media=tv" 
%(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"))) else: liste = widget.bdHK(extract=1, typM="tvshow") tabMovies = [] for n in liste: if n: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id FROM tvshow as m \ WHERE m.numId={}".format(n) movies = extractMedias(sql=sql) if movies: tabMovies.append(movies[0]) movies = tabMovies[:] #sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id FROM tvshow as m \ # WHERE m.numId IN ({})".format(",".join([str(x) for x in liste])) #movies = extractMedias(sql=sql) #=============================================================================================================================================================================== elif __params__["famille"] in ["Fav'S HK", "Mes favoris HK"]: if __addon__.getSetting("bookonline") != "false": liste = widget.responseSite("http://%s/requete.php?name=%s&type=favs&media=tvshow" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"))) else: liste = widget.extractFavs("tvshow") sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id FROM tvshow as m \ WHERE m.numId IN ({})".format(",".join([str(x) for x in liste])) __params__["favs"] = __addon__.getSetting("bookonline_name") movies = extractMedias(sql=sql) #==================================================================================================================================================================================== elif __params__["famille"] == "Mes Widgets": #notice(widget.extractListe("serie")) listeRep = list(widget.extractListe("serie")) choix = [(x, {"action":"MenuSerie", "widget": x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in sorted(listeRep)] addCategorieMedia(choix) 
#==================================================================================================================================================================================== elif __params__["famille"] == "Listes lecture": listeRep = list(widget.getListesV("tvshow")) choix = [(x, {"action":"MenuSerie", "listeV": x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in sorted(listeRep)] addCategorieMedia(choix) #=============================================================================================================================================================================== elif __params__["famille"] in ["Last In", "Derniers Ajouts"]: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id FROM tvshow as m ORDER BY m.id DESC"\ + " LIMIT {} OFFSET {}".format(limit, offset) __params__["offset"] = offset movies = extractMedias(sql=sql) #==================================================================================================================================================================================== elif __params__["famille"] in ["#Years/Last In", "Nouveautés par année"]: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE m.genre NOT LIKE '%%Documentaire%%' AND m.genre NOT LIKE '%%Animation%%' AND m.genre NOT LIKE '%%Kids%%' ORDER BY " + " year DESC, id DESC"\ + " LIMIT {} OFFSET {}".format(limit, offset) __params__["offset"] = offset movies = extractMedias(sql=sql) #=============================================================================================================================================================================== elif __params__["famille"] == "Liste Aléatoire": sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id FROM tvshow as m ORDER BY RANDOM() LIMIT 200 " movies = extractMedias(sql=sql) 
#==================================================================================================================================================================================== elif __params__["famille"] == "cast": mdb = TMDB(__keyTMDB__) liste = mdb.person(__params__["u2p"], typM="tvshow") sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id FROM tvshow as m\ WHERE m.numId IN ({})".format(",".join([str(x) for x in liste])) movies = extractMedias(sql=sql) #=============================================================================================================================================================================== elif __params__["famille"] == "Alpha(s)": if "alpha" in __params__.keys(): alpha = __params__["alpha"] else: dialog = xbmcgui.Dialog() alpha = dialog.input("ex: ram => tous les titres qui commencent par 'ram' \n(astuce le _ remplace tous caractéres)", type=xbmcgui.INPUT_ALPHANUM, defaultt="") if len(alpha) > 0: __params__["alpha"] = alpha sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE m.title LIKE {} ORDER BY title COLLATE NOCASE ASC".format("'" + str(alpha) + "%'")\ + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) #=============================================================================================================================================================================== elif __params__["famille"] in ["Search", "Recherche"]: dialog = xbmcgui.Dialog() d = dialog.input("Recherche (mini 3 lettres)", type=xbmcgui.INPUT_ALPHANUM, defaultt="") if len(d) > 2: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE normalizeTitle(m.title) LIKE normalizeTitle({}) ORDER BY title COLLATE NOCASE ASC".format("'%" + str(d).replace("'", "''") + "%'") movies = extractMedias(sql=sql) 
#=============================================================================================================================================================================== elif __params__["famille"] == "Multi-Tri": dialog = xbmcgui.Dialog() dictTri = {"Année puis Date entrée": "year DESC, id DESC", "Date entrée": "id DESC", "Année puis Ordre Alpha": "year DESC, title COLLATE NOCASE ASC", "Ordre Alpha A-Z": "title COLLATE NOCASE ASC",\ "Ordre Alpha Z-A": "title COLLATE NOCASE DESC"} if "tri" in __params__.keys(): tri = int(__params__["tri"]) else: dialog = xbmcgui.Dialog() tri = dialog.select("Selectionner le tri", list(dictTri.keys()), 0, 0) if tri != -1: __params__["tri"] = str(tri) sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE m.genre NOT LIKE '%%Documentaire%%' ORDER BY " + dictTri[list(dictTri.keys())[tri]]\ + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) #=============================================================================================================================================================================== elif __params__["famille"] == "Année(s)": if "an" in __params__.keys(): an = [int(x) for x in __params__["an"].split(":")] else: dialog = xbmcgui.Dialog() d = dialog.input("Entrer Année => 2010 / Groupe d'années => 2010:2014", type=xbmcgui.INPUT_ALPHANUM) if d: an = d.split(":") __params__["an"] = ":".join([str(x) for x in an]) if len(an) == 1: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE m.year={}".format(an[0]) + " LIMIT {} OFFSET {}".format(limit, offset) else: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE m.year>={} and m.year<={}".format(an[0], an[1]) + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) 
#=============================================================================================================================================================================== elif __params__["famille"] == "Genre(s)": mdb = TMDB(__keyTMDB__) tabGenre = mdb.getGenre(typM="tv") if "genre" in __params__.keys(): genres = [int(x) for x in __params__["genre"].split("*")] else: dialog = xbmcgui.Dialog() genres = dialog.multiselect("Selectionner le/les genre(s)", tabGenre, preselect=[]) if genres: __params__["genre"] = "*".join([str(x) for x in genres]) genresOk = " or ".join(["m.genre LIKE '%%%s%%'" %tabGenre[x] for x in genres]) sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE " + genresOk + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) #==================================================================================================================================================================================== elif __params__["famille"] == "genres": rechGenre = __params__["typgenre"] if '#' in rechGenre: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE m.numId IN (SELECT f.numId FROM tvshowFamille as f WHERE f.famille='{}')".format(rechGenre)\ + orderDefault + " LIMIT {} OFFSET {}".format(limit, offset) else: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE m.genre LIKE '%%{}%%' ORDER BY m.id DESC".format(rechGenre) + " LIMIT {} OFFSET {}".format( limit, offset) movies = extractMedias(sql=sql) #=============================================================================================================================================================================== elif __params__["famille"] == "#Documentaires": sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, 
m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE m.genre like '%%Documentaire%%'" \ + orderDefault + " LIMIT {} OFFSET {}".format(limit, offset) __params__["offset"] = offset movies = extractMedias(sql=sql) #=============================================================================================================================================================================== elif __params__["famille"] in ["Groupes Contrib", "Listes des contributeurs"]: sql = "SELECT DISTINCT groupeParent FROM tvshowGroupe" listeRep = extractMedias(sql=sql, unique=1) choix = [(x, {"action":"MenuSerie", "groupe": x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in sorted(listeRep)] addCategorieMedia(choix) #==================================================================================================================================================================================== elif __params__["famille"] == "Listes Trakt": listeRep = list(widget.getListesT("show")) listeRep = list(set([x[3] for x in listeRep])) choix = [(x, {"action":"MenuSerie", "listeT": x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in sorted(listeRep)] addCategorieMedia(choix) #==================================================================================================================================================================================== elif __params__["famille"] == "Spécial Widget": sql = "SELECT DISTINCT groupeParent FROM tvshowTrakt" listeRep = extractMedias(sql=sql, unique=1) choix = [(x, {"action":"MenuSerie", "trakt": x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in sorted(listeRep)] addCategorieMedia(choix) elif __params__["famille"] in ["Derniers Ajouts Series", "Les plus populaires TMDB/trakt", "#Series", "#Best TMDB-TRAKT"]: dictCf ={"Derniers Ajouts Series": "#Series", "Les plus populaires TMDB/trakt": "#Best TMDB-TRAKT", "#Series": 
"#Series", "#Best TMDB-TRAKT": "#Best TMDB-TRAKT"} sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE m.numId IN (SELECT f.numId FROM tvshowFamille as f WHERE f.famille='{}')".format(dictCf[__params__["famille"]])\ + orderDefault + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) __params__["offset"] = offset else: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE m.numId IN (SELECT f.numId FROM tvshowFamille as f WHERE f.famille='{}')".format(__params__["famille"])\ + orderDefault + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) __params__["offset"] = offset affMovies(typM, movies, params=__params__) #=============================================================================================================================================================================== if "groupe" in __params__.keys(): sql = "SELECT groupeFille FROM tvshowGroupe WHERE groupeParent='{}'".format(__params__["groupe"].replace("'", "''")) listeRep = extractMedias(sql=sql, unique=1) if len(listeRep) == 1 or not listeRep: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id FROM tvshow as m \ WHERE m.numId IN (SELECT d.numId FROM tvshowGroupeDetail as d WHERE d.groupe='{}')".format(__params__["groupe"].replace("'", "''"))\ + orderDefault + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) __params__["offset"] = offset affMovies(typM, movies, params=__params__) else: choix = [(x, {"action":"MenuSerie", "groupe": x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in sorted(listeRep)] addCategorieMedia(choix) 
#=============================================================================================================================================================================== elif "listeT" in __params__.keys(): if "listeTfille" in __params__.keys(): liste = list(widget.getListesT("show")) liste = [(x[0], x[1], "show") for x in liste if x[3] == __params__["listeT"] and x[4] == __params__["listeTfille"]][0] trk = TraktHK() tabNumId = trk.extractList(*liste) tabNumId = [x for x in tabNumId if x] sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE m.numId IN ({})".format(",".join([str(x) for x in tabNumId])) movies = extractMedias(sql=sql) affMovies(typM, movies) else: listeRep = list(widget.getListesT("show")) listeRep = list(set([x[4] for x in listeRep if x[3] == __params__["listeT"]])) choix = [(x, {"action":"MenuSerie", "listeT": __params__["listeT"], "listeTfille": x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in sorted(listeRep)] addCategorieMedia(choix) #=============================================================================================================================================================================== elif "listeV" in __params__.keys(): tabNumId = widget.getListesVdetail(__params__["listeV"], "tvshow") sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE m.numId IN ({})".format(",".join([str(x) for x in tabNumId]))\ + " LIMIT {} OFFSET {}".format(limit, offset) __params__["offset"] = offset movies = extractMedias(sql=sql) affMovies(typM, movies) #=============================================================================================================================================================================== elif "trakt" in __params__.keys(): sql = "SELECT groupeFille FROM tvshowTrakt WHERE 
groupeParent='{}'".format(__params__["trakt"].replace("'", "''")) listeRep = extractMedias(sql=sql, unique=1) if len(listeRep) == 1 or not listeRep: if listeRep: gr = listeRep[0].replace("'", "''") else: gr = __params__["trakt"].replace("'", "''") tabMovies = [] sql = "SELECT d.numId FROM tvshowTraktDetail as d WHERE d.groupe='{}' ORDER BY d.id ASC".format(gr) listeNumId = extractMedias(sql=sql, unique=1) for n in listeNumId: if n: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id FROM tvshow as m \ WHERE m.numId={}".format(n) movies = extractMedias(sql=sql) if movies: tabMovies.append(movies[0]) __params__["offset"] = offset affMovies(typM, tabMovies, params=__params__) ''' sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.id FROM tvshow as m \ WHERE m.numId IN (SELECT d.numId FROM tvshowTraktDetail as d WHERE d.groupe='{}')".format(__params__["trakt"].replace("'", "''"))\ + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) __params__["offset"] = offset affMovies(typM, movies, params=__params__) ''' else: choix = [(x, {"action":"MenuSerie", "trakt": x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/%s' %getImageWidget(x), x) for x in sorted(listeRep)] addCategorieMedia(choix) #=============================================================================================================================================================================== elif "widget" in __params__.keys(): sql = widget.getListe(__params__["widget"], "serie") movies = extractMedias(sql=sql) affMovies(typM, movies) #=============================================================================================================================================================================== elif "typfamille" in __params__.keys(): sql = "SELECT nom FROM tvshowTypeFamille WHERE typFamille='{}'".format(__params__["typfamille"]) listeRepData = 
extractMedias(sql=sql, unique=1) choix = [(x, {"action":"MenuSerie", "famille":x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/%s.png' %x, x) for x in listeRepData] addCategorieMedia(choix) #=============================================================================================================================================================================== elif "network" in __params__.keys(): if "namenetwork" in __params__.keys(): sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE m.numId IN (SELECT f.numId FROM tvshowNetworkDetail as f WHERE f.network='{}')".format(__params__["namenetwork"])\ + orderDefault + " LIMIT {} OFFSET {}".format(limit, offset) movies = extractMedias(sql=sql) __params__["offset"] = offset #__params__["content"] = "images" affMovies(typM, movies, params=__params__) else: sql = "SELECT network, poster FROM tvshowNetwork" listeRepData = extractMedias(sql=sql) choix = [(x[0], {"action":"MenuSerie", "network":"1", "namenetwork": x[0]}, "http://image.tmdb.org/t/p/w500%s" %x[1], x[0]) for x in listeRepData] addCategorieMedia(choix) #=============================================================================================================================================================================== else: #sql = "SELECT DISTINCT famille FROM tvshowFamille" #listeRep = extractMedias(sql=sql, unique=1) sql = "SELECT DISTINCT typFamille FROM tvshowTypeFamille" listeRepData = [x for x in extractMedias(sql=sql, unique=1) if x not in ["filtre", "genre", "out"]] listeRep = [] for cat in ["Derniers Ajouts", "Derniers Ajouts Series", "Nouveautés par année", "Les plus populaires TMDB/trakt", "Mon historique", "Mes favoris HK", "Liste Aléatoire",\ "Listes lecture", "Listes Trakt", "Listes des contributeurs", "Spécial Widget", "Mes Widgets"]: listeRep.append(cat) choix = [("On continue....", {"action":"suiteSerie"}, 
'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', "Liste des episodes des series en cours....")] choix += [(x, {"action":"MenuSerie", "famille":x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/%s.png' %x, x) for x in listeRep] choix += [(x, {"action":"MenuSerie", "typfamille":x}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/%s.png' %x, x) for x in listeRepData] choix += [("Diffuseurs", {"action":"MenuSerie", "network":"1"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', "Diffuseur")] choix.append(("Filtres", {"action":"filtres", "typM": "tvshow"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/Filtres.png', "Filtres")) choix.append(("Genres", {"action":"genres", "typM": "tvshow"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', "Genres")) addCategorieMedia(choix) def getImageWidget(x): if "Netflix" in x: im = "netflix.jpg" elif "Prime" in x: im = "primevideo.jpg" elif "CANAL" in x: im = "canalplus.jpg" elif "HBO" in x: im = "hbomax.jpg" elif "Disney" in x: im = "disneyplus.jpg" elif "Apple" in x: im = "appletv.jpg" elif "ARTE" in x: im = "ARTE.jpg" elif "SALTO" in x: im = "salto.jpg" else: im = "groupe.png" return im def addCategorieMedia(choix): dictChoix = {"Derniers Ajouts": "Liste des derniers, tous types, entrés dans pastebin (nouveauté ou pas, et update)", "Derniers Ajouts Films": "Liste ,uniquement ,films des derniers entrés dans pastebin (nouveauté ou pas, et update)",\ "Mon historique": "La liste de médias commencés ou la reprise est possible", "Liste Aléatoire": "Liste de médias sortis au hazard, quand tu ne sais pas quoi regarder",\ "Recherche": "ben recherche ..... (astuce le _ remplace n'importe quel caractére ex search => r_m donnera tous les titres qui contiennent ram, rom , rum etc... 
Interressant pour la recherche simultanées de 'e é è ê'",\ "Groupes Contrib": "Les groupes classés des conributeurs", "Les plus populaires TMDB/trakt": "Les mieux notés ou populaires chez movieDB et trakt",\ "Année(s)": "Recherche par années ex 2010 ou groupe d'années 2010:2013 => tous les medias 2010 2011 2012 2013", "Genre(s)": "vos genres préféres avec le multi choix ex choix sience-fiction et fantatisque => la liste gardera les 2 genres", \ "Alpha(s)": "liste suivant dont le titre commence pas la lettre choisie ou le debut du mot ex ram donnera tous les titres commencant par ram (astuce le _ remplace n'importe quel caractére", "Filtres": "les filtres search, Alphanumérique, genres, Années etc.... Cela est fait sur ensemble des médias", "Nouveautés par année": "Classement medias par ordre d'année ensuite par date entrée dans pastebin (Documentaires, concerts, spectacles, animations exclues)", "Les mieux notés": "Classement notation descroissant > 7 et < 9", "Mes favoris HK": "liste de vos favoris"} content = "" try: if "network" in choix[0][1].keys(): nomF = "Diffuseurs" #content = "files" else: nomF = "Choix Gatégories" except: nomF = "Choix Gatégories" xbmcplugin.setPluginCategory(__handle__, nomF) if content: xbmcplugin.setContent(__handle__, content) addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") isFolder = True for ch in choix: name, parameters, picture, texte = ch if texte in dictChoix.keys(): texte = dictChoix[texte] li = xbmcgui.ListItem(label=name) updateMinimalInfoTagVideo(li,name,texte) if "http" not in picture and not os.path.isfile(xbmcvfs.translatePath(picture)): picture = 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png' li.setArt({'thumb': picture, #'icon': addon.getAddonInfo('icon'), 'fanart': addon.getAddonInfo('fanart')}) url = sys.argv[0] + '?' 
+ urlencode(parameters) xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=li, isFolder=isFolder) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True, cacheToDisc=True) def filtres(params): typM = params["typM"] dictChoix = {"Derniers Ajours": "Liste des derniers entrés dans pastebin (nouveautée ou pas)", "Last View": "La liste de médias commencés ou la reprise est possible", "Liste Aléatoire": "Liste de médias sortis au hazard, quand tu ne sais pas quoi regarder",\ "Search": "ben recherche ..... (astuce le _ remplace n'importe quel caractére ex search => r_m donnera tous les titres qui contiennent ram, rom , rum etc... Interressant pour la recherche simultanées de 'e é è ê'",\ "Groupes Contrib": "Les groupes classés des conributeurs",\ "Année(s)": "Recherche par années ex 2010 ou groupe d'années 2010:2013 => tous les medias 2010 2011 2012 2013", "Genre(s)": "vos genres préféres avec le multi choix ex choix sience-fiction et fantatisque => la liste gardera les 2 genres", \ "Alpha(s)": "liste suivant dont le titre commence pas la lettre choisie ou le debut du mot ex ram donnera tous les titres commencant par ram (astuce le _ remplace n'importe quel caractére"} filtres = ["Année(s)" , "Recherche", "Genre(s)", "Alpha(s)", "Multi-Tri"] xbmcplugin.setPluginCategory(__handle__, "Choix Filtres") addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") isFolder = True if typM == "movie": sql = "SELECT nom FROM movieTypeFamille WHERE typFamille='filtre'" liste = extractMedias(sql=sql) liste = sorted([x[0] for x in liste]) filtres += liste choix = [(x, {"action":"MenuFilm", "famille":x, "offset": 0, "limit": int(getNbMedias())}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in filtres] else: choix = [(x, {"action":"MenuSerie", "famille":x, "offset": 0, "limit": int(getNbMedias())}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in filtres] for ch in sorted(choix): name, 
parameters, picture, texte = ch if texte in dictChoix.keys(): texte = dictChoix[texte] li = xbmcgui.ListItem(label=name) updateMinimalInfoTagVideo(li,name,texte) li.setArt({'thumb': picture, 'icon': addon.getAddonInfo('icon'), 'fanart': addon.getAddonInfo('fanart')}) url = sys.argv[0] + '?' + urlencode(parameters) xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=li, isFolder=isFolder) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def affSearch(): xbmcplugin.setPluginCategory(__handle__, "Recherches...") addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") isFolder = True if __addon__.getSetting("actifnewpaste") != "false": choix = [("Recherche Films", {"action":"mediasHKFilms", "famille": "Search", "offset": 0, "limit": int(getNbMedias())}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/search.png', "ben recherche ....."), ("Recherche Series", {"action":"mediasHKSeries", "famille": "Search", "offset": 0, "limit": int(getNbMedias())}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/search.png', "ben recherche ....."), ("Recherche Globale", {"action": "affGlobalHK2"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/search.png', "ben recherche ....."), ("Recherche Acteurs", {"action": "affSearchCast"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/search.png', "ben recherche ....."), ] else: choix = [("Recherche Films", {"action":"MenuFilm", "famille": "Search", "offset": 0, "limit": int(getNbMedias())}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/search.png', "ben recherche ....."), ("Recherche Series", {"action":"MenuSerie", "famille": "Search", "offset": 0, "limit": int(getNbMedias())}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/search.png', "ben recherche ....."), ("Recherche Globale", {"action":"affGlobal"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/search.png', "ben recherche ....."), ("Recherche 
Acteurs", {"action":"affSearchCast"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/search.png', "ben recherche ....."), ("Recherche Uptobox", {"action":"rechercheUpto", "offset": 0}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/search.png', "Recherche compte uptobox"), ] for ch in sorted(choix): name, parameters, picture, texte = ch li = xbmcgui.ListItem(label=name) updateMinimalInfoTagVideo(li,name,texte) li.setArt({'thumb': picture, 'icon': addon.getAddonInfo('icon'), 'fanart': addon.getAddonInfo('fanart')}) url = sys.argv[0] + '?' + urlencode(parameters) xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=li, isFolder=isFolder) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def affGlobal(): dictTyp = {"movie": "Film", "tvshow": "Serie"} dialog = xbmcgui.Dialog() d = dialog.input("Recherche (mini 3 lettres)", type=xbmcgui.INPUT_ALPHANUM, defaultt="") if len(d) > 2: sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id \ FROM tvshow as m WHERE normalizeTitle(m.title) LIKE normalizeTitle({}) ORDER BY title COLLATE NOCASE ASC".format("'%" + str(d).replace("'", "''") + "%'") tvshows = extractMedias(sql=sql) sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE normalizeTitle(m.title) LIKE normalizeTitle({}) ORDER BY title COLLATE NOCASE ASC".format("'%" + str(d).replace("'", "''") + "%'") movies = extractMedias(sql=sql) xbmcplugin.setContent(__handle__, 'movies') for typM, medias in [("movie", movies), ("tvshow", tvshows)]: try: dictLogos, dictArt, dictLogosTMDB = logos(typM) except: dictLogos, dictArt, dictLogosTMDB = {}, {}, {} i = 0 for i, media in enumerate(medias): media = Media(typM, *media[:-1]) if typM == "movie": urlFanart = "http://assets.fanart.tv/fanart/movie/" else: urlFanart = 
"http://assets.fanart.tv/fanart/tv/" if int(media.numId) in dictLogos.keys(): media.clearlogo = urlFanart + dictLogos[int(media.numId)] else: if int(media.numId) in dictLogosTMDB.keys(): media.clearlogo = "http://image.tmdb.org/t/p/w300" + dictLogosTMDB[int(media.numId)] else: media.clearlogo = "" if int(media.numId) in dictArt.keys(): media.clearart = urlFanart + dictArt[int(media.numId)] else: media.clearart = "" media.title = "%s (%s)" %(media.title, dictTyp[typM]) if typM == "movie": ok = addDirectoryFilms("%s" %(media.title), isFolder=True, parameters={"action": "detailM", "lien": media.link, "u2p": media.numId}, media=media) else: ok = addDirectoryFilms("%s" %(media.title), isFolder=True, parameters={"action": "detailT", "lien": media.link, "u2p": media.numId}, media=media) xbmcplugin.addSortMethod(__handle__, xbmcplugin.SORT_METHOD_UNSORTED) xbmcplugin.addSortMethod(__handle__, xbmcplugin.SORT_METHOD_VIDEO_YEAR) xbmcplugin.addSortMethod(__handle__, xbmcplugin.SORT_METHOD_TITLE) xbmcplugin.addSortMethod(__handle__, xbmcplugin.SORT_METHOD_VIDEO_RATING) #xbmcplugin.addSortMethod(__handle__, xbmcplugin.SORT_METHOD_LABEL_IGNORE_FOLDERS, labelMask="Page Suivante") if i == (int(getNbMedias()) -1): addDirNext(params) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True, cacheToDisc=True) def genres(params): typM = params["typM"] xbmcplugin.setPluginCategory(__handle__, "Choix Genres") addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") isFolder = True if typM == "movie": mdb = TMDB(__keyTMDB__) tabGenre = mdb.getGenre() sql = "SELECT nom FROM movieTypeFamille WHERE typFamille='genre'" liste = extractMedias(sql=sql) tabGenre += sorted([x[0] for x in liste]) choix = [(x, {"action":"MenuFilm", "famille": "genres", "typgenre": x, "offset": 0, "limit": int(getNbMedias())}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in tabGenre] else: mdb = TMDB(__keyTMDB__) tabGenre = mdb.getGenre(typM="tv") sql = "SELECT nom FROM 
tvshowTypeFamille WHERE typFamille='genre'" liste = extractMedias(sql=sql) tabGenre += sorted([x[0] for x in liste]) choix = [(x, {"action":"MenuSerie", "famille": "genres", "typgenre": x, "offset": 0, "limit": int(getNbMedias())}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/groupe.png', x) for x in tabGenre] for ch in sorted(choix): name, parameters, picture, texte = ch li = xbmcgui.ListItem(label=name) updateMinimalInfoTagVideo(li,name,texte) li.setArt({'thumb': picture, 'icon': addon.getAddonInfo('icon'), 'fanart': addon.getAddonInfo('fanart')}) url = sys.argv[0] + '?' + urlencode(parameters) xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=li, isFolder=isFolder) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def affSearchCast(params={}): if "nom" in params.keys(): d = params["nom"].replace("recherche", "") else: dialog = xbmcgui.Dialog() d = dialog.input("Recherche (mini 3 lettres)", type=xbmcgui.INPUT_ALPHANUM, defaultt="") if len(d) > 2: xbmcplugin.setPluginCategory(__handle__, "Acteurs") xbmcplugin.setContent(__handle__, 'files') mdb = TMDB(__keyTMDB__) liste = mdb.searchPerson(d) notice(liste) for l in liste: media = Media("cast", *l) media.typeMedia = "movie" if __addon__.getSetting("actifnewpaste") != "false": addDirectoryFilms("%s" %(media.title), isFolder=True, parameters={"action":"mediasHKFilms", "famille": "cast", "u2p": media.numId}, media=media) else: addDirectoryFilms("%s" %(media.title), isFolder=True, parameters={"action":"MenuFilm", "famille": "cast", "u2p": media.numId}, media=media) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def affCast2(params): numId = params["u2p"] typM = xbmc.getInfoLabel('ListItem.DBTYPE') if not typM: xbmc.executebuiltin("Dialog.Close(busydialog)") xbmc.sleep(500) typM = xbmc.getInfoLabel('ListItem.DBTYPE') if typM == "": typM = params["typM"] xbmcplugin.setPluginCategory(__handle__, "Acteurs / Réalisateur") xbmcplugin.setContent(__handle__, 
'files') mdb = TMDB(__keyTMDB__) if typM == "movie": liste = mdb.castFilm(numId) if __addon__.getSetting("actifnewpaste") != "false": menu = "mediasHKFilms" elif __addon__.getSetting("actifhk") != "false": menu = "MenuFilm" else: liste = mdb.castSerie(numId) if __addon__.getSetting("actifnewpaste") != "false": menu = "mediasHKSeries" elif __addon__.getSetting("actifhk") != "false": menu = "MenuSerie" for l in liste: media = Media("cast", *l) media.typeMedia = typM addDirectoryFilms("%s" %(media.title), isFolder=True, parameters={"action": menu, "famille": "cast", "u2p": media.numId}, media=media) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def addDirNext(params): isFolder = True addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") #notice("id addon " + str(addon.getAddonInfo("id"))) li = xbmcgui.ListItem(label="[COLOR red]Page Suivante[/COLOR]") updateEmptyInfoTag(li) li.setArt({ 'thumb': 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/next.png', 'icon': addon.getAddonInfo('icon'), 'fanart': addon.getAddonInfo('fanart'), }) #commands = [] try: params["offset"] = str(int(params["offset"]) + int(getNbMedias())) except: pass url = sys.argv[0] + '?' 
+ urlencode(params) return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=li, isFolder=isFolder) def audioHKOld(): movies = extractMedias(sql=sql) affMovies("audiobook", movies) def audioHK(): typM = "audioBook" xbmcplugin.setPluginCategory(__handle__, "Livres Audio") xbmcplugin.setContent(__handle__, 'movies') sql = "SELECT auteur, titre, numId, description, poster, link, '' FROM audioBook ORDER BY id DESC"#auteur ASC, titre ASC " movies = extractMedias(sql=sql) for movie in movies: media = Media("audiobook", *movie) media.typeMedia = typM addDirectoryEbook("%s" %(media.title), isFolder=False, parameters={"action": "playHK", "lien": media.link, "u2p": media.numId, "typMedia": media.typeMedia}, media=media) xbmcplugin.addSortMethod(__handle__, xbmcplugin.SORT_METHOD_UNSORTED) xbmcplugin.addSortMethod(__handle__, xbmcplugin.SORT_METHOD_TITLE) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def addDirectoryEbook(name, isFolder=True, parameters={}, media="" ): ''' Add a list item to the XBMC UI.''' addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") li = xbmcgui.ListItem(label=("%s" %(name))) #Suppression du If qui faisait la meme chose... updateInfoTagVideo(li,media,False,False,False,False,False) li.setArt({'icon': media.backdrop, 'thumb': media.poster,}) li.setProperty('IsPlayable', 'true') url = sys.argv[0] + '?' 
+ urlencode(parameters) return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=li, isFolder=isFolder) def logos(typM): if typM == "movie": #clearlogo clearart fanart sql = "SELECT numId, logo, clearart FROM movieFanart" logos = extractMedias(sql=sql) dictLogos = {x[0]: x[1] for x in logos if x[1]} dictArt = {x[0]: x[2] for x in logos if x[2]} #clearlogo tmdb sql = "SELECT numId, logo FROM movieTMDBlogo" logos = extractMedias(sql=sql) dictLogosTMDB = {x[0]: x[1] for x in logos} else: sql = "SELECT numId, logo, clearart FROM tvshowFanart" logos = extractMedias(sql=sql) dictLogos = {x[0]: x[1] for x in logos if x[1]} dictArt = {x[0]: x[2] for x in logos if x[2]} #clearlogo tmdb sql = "SELECT numId, logo FROM tvshowTMDBlogo" logos = extractMedias(sql=sql) dictLogosTMDB = {x[0]: x[1] for x in logos} return dictLogos, dictArt, dictLogosTMDB def affMovies(typM, medias, params=""): if params and "favs" in params.keys(): nom = widget.getProfil(params["favs"]) xbmcplugin.setPluginCategory(__handle__, "Favs: %s" %nom) else: xbmcplugin.setPluginCategory(__handle__, typM) try: if medias[0][4] == "divers": xbmcplugin.setContent(__handle__, 'files') elif typM == "movie": xbmcplugin.setContent(__handle__, 'movies') else: xbmcplugin.setContent(__handle__, 'tvshows') except: xbmcplugin.setContent(__handle__, 'movies') if __addon__.getSetting("actifhk") != "false": dictLogos, dictArt, dictLogosTMDB = logos(typM) else: dictLogos, dictArt, dictLogosTMDB = {}, {}, {} i = 0 for i, media in enumerate(medias): try: media = Media(typM, *media[:-1]) except: media = Media(typM, *media) if typM == "saga": addDirectoryFilms(media.title, isFolder=True, parameters={"action":"MenuFilm", "famille": "sagaListe", "numIdSaga": media.numId}, media=media) else: if media.numId == "divers": media.numId = 0 addDirectoryFilms("%s (%s)" %(media.title, media.year), isFolder=False, parameters={"action": "playHK", "lien": media.link, "u2p": media.numId}, media=media) else: if typM == 
"movie": urlFanart = "http://assets.fanart.tv/fanart/movie/" else: urlFanart = "http://assets.fanart.tv/fanart/tv/" if int(media.numId) in dictLogos.keys(): media.clearlogo = urlFanart + dictLogos[int(media.numId)] else: if int(media.numId) in dictLogosTMDB.keys(): media.clearlogo = "http://image.tmdb.org/t/p/w300" + dictLogosTMDB[int(media.numId)] else: media.clearlogo = "" if int(media.numId) in dictArt.keys(): media.clearart = urlFanart + dictArt[int(media.numId)] else: media.clearart = "" if typM == "movie": ok = addDirectoryFilms("%s" %(media.title), isFolder=True, parameters={"action": "detailM", "lien": media.link, "u2p": media.numId}, media=media) ''' if __addon__.getSetting("newfen") == "false": ok = addDirectoryFilms("%s" %(media.title), isFolder=True, parameters={"action": "detailM", "lien": media.link, "u2p": media.numId}, media=media) else: ok = addDirectoryFilms("%s" %(media.title), isFolder=True, parameters={"action": "visuFenmovie", "lien": media.link, "u2p": media.numId, 'title': media.title}, media=media) ''' elif typM == "audiobook": ok = addDirectoryFilms(media.title, isFolder=True, parameters={"action": "play", "lien": media.link, "u2p": media.numId}, media=media) else: if __addon__.getSetting("actifnewpaste") != "false": ok = addDirectoryFilms("%s" %(media.title), isFolder=True, parameters={"action": "affSaisonUptofoldercrypt", "u2p": media.numId}, media=media) else: ok = addDirectoryFilms("%s" %(media.title), isFolder=True, parameters={"action": "detailT", "lien": media.link, "u2p": media.numId}, media=media) xbmcplugin.addSortMethod(__handle__, xbmcplugin.SORT_METHOD_UNSORTED) xbmcplugin.addSortMethod(__handle__, xbmcplugin.SORT_METHOD_VIDEO_YEAR) xbmcplugin.addSortMethod(__handle__, xbmcplugin.SORT_METHOD_TITLE) xbmcplugin.addSortMethod(__handle__, xbmcplugin.SORT_METHOD_VIDEO_RATING) #xbmcplugin.addSortMethod(__handle__, xbmcplugin.SORT_METHOD_LABEL_IGNORE_FOLDERS, labelMask="Page Suivante") if i >= (int(getNbMedias())) - 1: 
addDirNext(params) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True, cacheToDisc=True) def addDirectoryFilms(name, isFolder=True, parameters={}, media="" ): ''' Add a list item to the XBMC UI.''' addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") li = xbmcgui.ListItem(label=name) updateInfoTagVideo(li,media,True,False,True,False,False) #liz.setPath("plugin://%s/play/%s" % (ADDON.getAddonInfo("id"),urllib.quote(url, safe='')) ) li.setArt({'icon': media.backdrop, 'thumb': media.poster, 'poster': media.poster, #'icon': addon.getAddonInfo('icon'), 'fanart': media.backdrop}) if media.clearlogo : li.setArt({'clearlogo': media.clearlogo}) if media.clearart : li.setArt({'clearart': media.clearart}) commands = [] #commands.append(("Ajout Fav'S HK", 'RunPlugin(plugin://plugin.video.sendtokodiU2P/?action=fav&mode=ajout&u2p=%s&typM=movies)' %media.numId, )) commands.append(('[COLOR yellow]Bande Annonce[/COLOR]', 'RunPlugin(plugin://plugin.video.sendtokodiU2P/?action=ba&u2p=%s&typM=%s)' %(media.numId, media.typeMedia))) if media.typeMedia == "movie": #trk = actifTrakt() #if trk and media.typeMedia == "movie": # commands.append(('[COLOR yellow]Cocher Vu dans Trakt[/COLOR]', 'ActivateWindow(10025,plugin://plugin.video.sendtokodiU2P/?action=vuMovieTrakt&u2p=%s,return)' %media.numId)) commands.append(('[COLOR yellow]Recherche[/COLOR]', 'ActivateWindow(10025,plugin://plugin.video.sendtokodiU2P/?action=MenuFilm&famille=Search,return)')) sch = 'ActivateWindow(10025,plugin://plugin.video.sendtokodiU2P/?action=MenuFilm&famille=Search,return)' else: commands.append(('[COLOR yellow]Recherche[/COLOR]', 'ActivateWindow(10025,plugin://plugin.video.sendtokodiU2P/?action=MenuSerie&famille=Search,return)')) sch = 'ActivateWindow(10025,plugin://plugin.video.sendtokodiU2P/?action=MenuSerie&famille=Search,return)' commands.append(('[COLOR yellow]Gestion[/COLOR]', 'RunPlugin(plugin://plugin.video.sendtokodiU2P/?action=gestionMedia&u2p=%s&typM=%s)'%(media.numId, media.typeMedia))) 
commands.append(('[COLOR yellow]Choix Profil[/COLOR]', 'RunPlugin(plugin://plugin.video.sendtokodiU2P/?action=actifPm)')) commands.append(('[COLOR yellow]Reload Skin[/COLOR]', 'RunPlugin(plugin://plugin.video.sendtokodiU2P/?action=rlk)')) #commands.append(("[COLOR yellow]Refresh[/COLOR]", "Container.Refresh")) li.addContextMenuItems(commands) isWidget = xbmc.getInfoLabel('Container.PluginName') if "U2P" not in isWidget: li.setProperty('widget', 'true') if media.typeMedia == "movie": li.setProperty('widgetmovie', 'true') li.setProperty('lire', 'RunPlugin(plugin://plugin.video.sendtokodiU2P/?action=visuFenmovie&u2p=%s&title=%s&lien=%s)' %(media.numId, media.title, media.link)) #li.setProperty('lire', 'RunPlugin(plugin://plugin.video.sendtokodiU2P/?action=playHK&u2p=%s&typM=%s&lien=%s)' %(media.numId, media.typeMedia, media.link)) li.setProperty('ba', 'RunPlugin(plugin://plugin.video.sendtokodiU2P/?action=ba&u2p=%s&typM=%s)' %(media.numId, media.typeMedia)) li.setProperty('gestion', 'RunPlugin(plugin://plugin.video.sendtokodiU2P/?action=gestionMedia&u2p=%s&typM=%s)'%(media.numId, media.typeMedia)) li.setProperty('search', sch) li.setProperty('profil', 'RunPlugin(plugin://plugin.video.sendtokodiU2P/?action=actifPm)') li.setProperty("reloadSkin", 'RunPlugin(plugin://plugin.video.sendtokodiU2P/?action=rlk)') #li.setProperty("Refresh", "Container.Refresh") li.setProperty('IsPlayable', 'true') url = sys.argv[0] + '?' 
+ urlencode(parameters) return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=li, isFolder=isFolder) def fenMovie(params): #xbmcplugin.setPluginCategory(__handle__, "Choix Pbi2kodi") #xbmcplugin.setContent(__handle__, 'files') title = params["title"] u2p = params["u2p"] try: links = params["lien"] except : links = "" #dialog = xbmcgui.Dialog() #ret = dialog.contextmenu(['Détails Informations HK', 'Lire', "Ajouter Fav's HK", "Retirer LastView"]) #notice(ret) #if ret == 0: window = FenFilmDetail(title=title, numId=u2p, links=links) # Show the created window. window.doModal() del window #elif ret == 1: # affLiens2({"u2p": u2p, "lien": links}) #xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True, cacheToDisc=False) def menuPbi(): xbmcplugin.setPluginCategory(__handle__, "Choix U2Pplay") xbmcplugin.setContent(__handle__, 'files') listeChoix = [("01. Médiathéques", {"action":"movies"}, "pastebin.png", "Mediathéque Pastebin"), ("02. Import DataBase", {"action":"bd"}, "strm.png", "Import DATEBASE"), #("03. Création Listes", {"action":"createL"}, "debrid.png", "Création de liste Widget"), #("04. Suppréssion Listes", {"action":"supL"}, "debrid.png", "Suppréssion de liste Widget"), #("05. Création Listes lecture", {"action":"createLV"}, "debrid.png", "Création de liste lecture perso ou favoris"), #("06. Suppréssion Listes lecture", {"action":"suppLV"}, "debrid.png", "Suppréssion de liste lecture perso ou favoris"), #("07. Création Listes Trakt", {"action":"createLT"}, "debrid.png", "Importation et Création liste trakt"), #("08. Suppréssion Listes/Groupe Trakt", {"action":"suppLT"}, "debrid.png", "Suppréssion liste/groupe trakt"), #("09. Création Listes Paste(s)", {"action":"createLP"}, "liste.png", "Création liste avec index paste(s)"), #("10. Suppréssion Listes Paste(s)", {"action":"suppLP"}, "liste.png", "Suppréssion liste avec index paste(s)"), #("11. 
Création Listes TMDB", {"action":"createLTMDB"}, "liste.png", "Création liste avec numéro TMDB"), #("12. Suppréssion Listes TMDB", {"action":"suppLTMDB"}, "liste.png", "Suppréssion liste TMDB"), #("13. Création Accés Repertoire Uptobox", {"action":"createRUPTO"}, "liste.png", "Création Accés repertoire Uptobox"), #("14. Création Accés Repertoire Publique Uptobox", {"action":"createRUPTOP"}, "liste.png", "Création Accés repertoire Publique Uptobox"), ] # mon iptv if __addon__.getSetting("iptv") != "false" and vIPTV: listeChoix.append(("03. IPTV", {"action":"iptvLoad"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/iptv.png', "iptv")) #strms if __addon__.getSetting("actifStrm") != "false" and vIPTV: listeChoix.append(("04. Import STRMS Maj", {"action":"strms"}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/liste.png', "Création strms via listes perso")) listeChoix.append(("20. Import Config", {"action":"impHK"}, "debrid.png", "Importation d'un config prefaite via rentry")) for choix in listeChoix: addDirectoryItemLocal(choix[0], isFolder=True, parameters=choix[1], picture=choix[2], texte=choix[3]) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def addDirectoryItemLocal(name, isFolder=True, parameters={}, picture="", texte="" ): ''' Add a list item to the XBMC UI.''' addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") li = xbmcgui.ListItem(label=name) updateMinimalInfoTagVideo(li, name, texte) #Overlay ne dispose de method sur le getVideoInfoTag ... #li.setInfo('video', {"title": name, 'plot': texte, 'mediatype': 'video', "overlay": 6}) #'playcount':0, "status": "Continuing" li.setArt({'thumb': 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/%s' %picture, 'icon': addon.getAddonInfo('icon'), #'icon': addon.getAddonInfo('icon'), 'fanart': addon.getAddonInfo('fanart')}) url = sys.argv[0] + '?' 
+ urlencode(parameters) return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=li, isFolder=isFolder) def selectOS(): addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") choixOs = ["WIN (D:\\kodiBase\\)", "LIBREELEC (/storage/downloads/Kodi/)", "ANDROID (/storage/emulated/0/kodiBase/)", "LINUX (/home/(user)/kodiBase/)", "XBOX (U:\\Users\\UserMgr0\\AppData\\Local\\Packages\\XBMCFoundation.Kodi_4n2hpmxwrvr6p\\LocalState\\userdata\\)", 'LOCAL', "Mon repertoire"] dialogOS = xbmcgui.Dialog() selectedOS = dialogOS.select("Choix OS", choixOs) if selectedOS != -1: if selectedOS == len(choixOs) - 1: osVersion = addon.getSetting("osVersion") dialogPaste = xbmcgui.Dialog() d = dialogPaste.input("Repertoire STRM", type=xbmcgui.INPUT_ALPHANUM, defaultt=osVersion) addon.setSetting(id="osVersion", value=d) else: addon.setSetting(id="osVersion", value=choixOs[selectedOS]) def configKeysApi(): addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") dictApi = {"Uptobox": ["keyupto", "Key Api Uptobox"], "Alldebrid": ["keyalldebrid", "Key Api Alldebrid"], "RealDebrid": ["keyrealdebrid", "Key Api RealDebrid"]} choixApi = list(dictApi.keys()) dialogApi = xbmcgui.Dialog() selectedApi = dialogApi.select("Choix Debrideurs", choixApi) if selectedApi != -1: key = addon.getSetting(dictApi[choixApi[selectedApi]][0]) d = dialogApi.input(dictApi[choixApi[selectedApi]][1], type=xbmcgui.INPUT_ALPHANUM, defaultt=key) addon.setSetting(id=dictApi[choixApi[selectedApi]][0], value=d) def makeStrmsOld(clear=0): pDialog = xbmcgui.DialogProgress() pDialog.create('Pbi2kodi', 'Extraction Paste... 
.') addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") osVersion = addon.getSetting("osVersion") if "WIN" in osVersion: repKodiName = "D:\\kodiBase\\" elif "LIBREELEC" in osVersion: repKodiName = "/storage/downloads/Kodi/" elif "LINUX" in osVersion: repKodiName = "/storage/downloads/Kodi/" elif "ANDROID" in osVersion: repKodiName = "/storage/emulated/0/kodiBase/" elif "XBOX" in osVersion: repKodiName = "U:\\Users\\UserMgr0\\AppData\\Local\\Packages\\XBMCFoundation.Kodi_4n2hpmxwrvr6p\\LocalState\\userdata\\" else: repKodiName = osVersion lePaste = addon.getSetting("paste") dictPaste = idPaste(lePaste) for nomRep, tabPaste in dictPaste.items(): notice(nomRep) paramPaste = {"tabIdPbi": tabPaste, "namePbi": 'test', "repKodiName": repKodiName, "clear": clear} pbi = Pastebin(**paramPaste) pbi.makeMovieNFO(pbi.dictFilmsPaste.values(), clear=clear, progress=pDialog, nomRep=nomRep) pDialog.update(0, 'SERIES') pbi.makeSerieNFO(pbi.dictSeriesPaste.values(), clear=clear, progress=pDialog, nomRep=nomRep) pDialog.update(0, 'ANIMES') pbi.makeAnimeNFO(pbi.dictAnimesPaste.values(), clear=clear, progress=pDialog, nomRep=nomRep) pDialog.update(0, 'DIVERS') pbi.makeDiversNFO(pbi.dictDiversPaste.values(), clear=clear, progress=pDialog, nomRep=nomRep) showInfoNotification("strms créés!") return True def makeStrms(clear=0): strmC = Strm(BDMEDIANew) strmC.makeStrms(clear=clear) showInfoNotification("strms créés!") xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.Scan", "id": "1"}') fNewSerie = xbmcvfs.translatePath('special://home/addons/plugin.video.sendtokodiU2P/newserie.txt') if os.path.exists(fNewSerie): os.remove(fNewSerie) return True def idPaste(lePaste): html_parser = HTMLParser() motifAnotepad = r'.*<\s*div\s*class\s*=\s*"\s*plaintext\s*"\s*>(?P<txAnote>.+?)</div>.*' rec = requests.get("https://anotepad.com/note/read/" + lePaste, timeout=3) r = re.match(motifAnotepad, rec.text, re.MULTILINE|re.DOTALL|re.IGNORECASE) tx = r.group("txAnote") tx = 
html_parser.unescape(tx) dictLignes = {x.split("=")[0].strip(): [y.strip() for y in x.split("=")[1].split(",")] for x in tx.splitlines() if x and x[0] != "#"} return dictLignes def editPaste(): addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") paste = addon.getSetting("paste") dialogPaste = xbmcgui.Dialog() d = dialogPaste.input("Num AnotePad Pastes", type=xbmcgui.INPUT_ALPHANUM, defaultt=paste) addon.setSetting(id="paste", value=d) def editNbThumbnails(): addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") paste = addon.getSetting("thumbnails") dialogPaste = xbmcgui.Dialog() d = dialogPaste.input("Nombre d'images THUMBNAILS conservées (0=illimité)", type=xbmcgui.INPUT_ALPHANUM, defaultt=paste) addon.setSetting(id="thumbnails", value=d) def editResos(): addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") paste = addon.getSetting("resos") dialogPaste = xbmcgui.Dialog() d = dialogPaste.input("resos prioritaire & timing", type=xbmcgui.INPUT_ALPHANUM, defaultt=paste) addon.setSetting(id="resos", value=d) def delTag(dataBaseKodi): cnx = sqlite3.connect(dataBaseKodi) cur = cnx.cursor() cur.execute("DELETE FROM tag") cur.execute("DELETE FROM tag_link") cnx.commit() cur.close() cnx.close() def creaGroupe(): showInfoNotification("Création groupes!") addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") osVersion = addon.getSetting("osVersion") lePaste = addon.getSetting("paste") dictPaste = idPaste(lePaste) delTag(__database__) for nomRep, tabPaste in dictPaste.items(): pDialog2 = xbmcgui.DialogProgressBG() pDialog2.create('Pbi2kodi', 'Création Groupes (%s)...' 
%nomRep) #notice(nomRep) paramPaste = {"tabIdPbi": tabPaste, "namePbi": 'test', "repKodiName": "", "clear": 0} pbi = Pastebin(**paramPaste) pbi.UpdateGroupe(pbi.dictGroupeFilms, __database__, progress= pDialog2, gr=nomRep) #pDialog2.update(10, 'SERIES') pbi.UpdateGroupe(pbi.dictGroupeSeries, __database__, mediaType="tvshow", progress= pDialog2, gr=nomRep) pDialog2.close() showInfoNotification("Groupes créés!") def getTimeBookmark(numId, dataBaseKodi, typMedia): cnx = sqlite3.connect(dataBaseKodi) cur = cnx.cursor() if typMedia == "movie": sql = "SELECT timeInSeconds FROM bookmark WHERE idFile=(SELECT m.idFile FROM movie as m WHERE m.idMovie=%s)" %(numId) else: sql = "SELECT timeInSeconds FROM bookmark WHERE idFile=(SELECT m.idFile FROM episode as m WHERE m.idEpisode=%s)" %(numId) cur.execute(sql) seek = [x[0] for x in cur.fetchall() if x] cur.close() cnx.close() if seek: return seek[0] else: return 0 def getSeasonU2P(numId, dataBaseKodi , numEpisode): cnx = sqlite3.connect(dataBaseKodi) sql = "SELECT c18 FROM episode WHERE c19=(SELECT m.c19 FROM episode as m WHERE m.idEpisode=%s)" %(numId) cur = cnx.cursor() cur.execute(sql) tabEpisodes = sorted([x[0] for x in cur.fetchall() if x]) cur.close() cnx.close() return tabEpisodes[int(numEpisode):] def createListItemFromVideo(video): try: url = video['url'] title = video['title'] li = xbmcgui.ListItem(title, path=url) if "episode" in video.keys(): updateMinimalInfoTagVideo(li,title,None, video['episode'],video['season']) else: updateMinimalInfoTagVideo(li,title) except Exception as e: notice("Service.py - createListItemFromVideo::createListItemFromVideo " + str(e)) return li def getIDfile(f): try: cnx = sqlite3.connect(__database__) cur = cnx.cursor() sql = "SELECT idFile FROM files WHERE strFilename=? 
AND dateAdded IS NOT NULL" cur.execute(sql, (f,)) return cur.fetchone()[0] except Exception as e: notice("get infos file " + str(e)) def prepareUpNext(title, numId, saison, episode): #notice(episode) if __addon__.getSetting("actifnewpaste") != "false": link = uptobox.getLinkUpNext(numId, saison, int(episode) + 1) elif __addon__.getSetting("actifhk") != "false": sql = "SELECT link FROM tvshowEpisodes \ WHERE numId={} AND saison='Saison {}' AND episode=='S{}E{}'".format(numId, str(saison).zfill(2), str(saison).zfill(2), str(int(episode) + 1).zfill(2)) link = extractMedias(sql=sql, unique=1) #notice(link) next_info = {} if link: try: mdb = TMDB(__keyTMDB__) tabEpisodes = mdb.saison(numId, saison) #['Épisode 1', 'Maud Bachelet semble vivre une vie parfaite ', '2022-01-10', '/jkV6JVxXIiDujhEyFreyEo5IxUe.jpg', 0.0, 1, 1] if [x for x in tabEpisodes if x[-1] > int(episode)]: next_info["current_episode"] = [dict([ ("episodeid", x[-1]), ("tvshowid", 0), ("title", x[0]), ("art", { 'thumb': "http://image.tmdb.org/t/p/w500%s" %x[3], 'tvshow.clearart': "", 'tvshow.clearlogo': "", 'tvshow.fanart': "", 'tvshow.landscape': "http://image.tmdb.org/t/p/w500%s" %x[3], 'tvshow.poster': '', }), ("season", x[-2]), ("episode", x[-1]), ("showtitle", title), ("plot", x[1]), ("rating", x[-3]), ("firstaired", x[2])]) for x in tabEpisodes if x[-1] == int(episode)][0] next_info["next_episode"] = [dict([ ("episodeid", x[-1]), ("tvshowid", 0), ("title", x[0]), ("art", { 'thumb': "http://image.tmdb.org/t/p/w500%s" %x[3], 'tvshow.clearart': "", 'tvshow.clearlogo': "", 'tvshow.fanart': "", 'tvshow.landscape': "http://image.tmdb.org/t/p/w500%s" %x[3], 'tvshow.poster': '', }), ("season", x[-2]), ("episode", x[-1]), ("showtitle", title), ("plot", x[1]), ("rating", x[-3]), ("firstaired", x[2])]) for x in tabEpisodes if x[-1] == int(episode) + 1][0] #plugin://plugin.video.sendtokodiU2P/?action=playHK&lien=7UM9cgSAc%40yqh47Hp6a6kW%23&u2p=154454 param = {"u2p": numId, 'action': "playHKEpisode", 'lien': 
link[0], "title": title, 'episode': int(episode) + 1, "saison": saison, "typMedia": "episode"} urls = link[0].split("#") url = sys.argv[0] + '?' + urlencode(param) next_info["play_url"] = url #next_info["notification_time"] = 70 #notice(next_info) if next_info["next_episode"]: notice(upnext_signal("plugin.video.sendtokodiU2P", next_info)) except: pass def playEpisode(params): #notice(params) histoReso = gestionBD("getHK", params["u2p"], params["saison"]) if histoReso: resoP = histoReso[0] #notice(resoP) tabreso = ["1080", "720", "2160", "480", "4K", '360'] reso = "1080" for motif in tabreso: ch = r'(\.)(%s)p?\.' %motif r = re.search(ch, resoP, re.I) if r: reso = motif #notice(reso) if xbmc.Player().isPlaying(): tt = 0 for x in range(3): tt = xbmc.Player().getTotalTime() if tt: break time.sleep(0.1) t = xbmc.Player().getTime() if __addon__.getSetting("bookonline") != "false": numEpisode = int(params["episode"]) - 1 widget.pushSite("http://%s/requete.php?name=%s&type=posserie&numid=%s&pos=%.3f&tt=%.3f&saison=%s&episode=%s"\ %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), params["u2p"], t, tt, params["saison"], str(numEpisode))) else: widget.bdHK(numId=params["u2p"], pos=t, tt=tt, typM="episode", saison=int(params["saison"]), episode=int(params["episode"]) - 1) sql = "SELECT link, release FROM episodes WHERE numId={} AND saison={} AND episode={}".format(int(params["u2p"]), int(params["saison"]), int(params["episode"])) liste = createbdhk.extractMedias(sql=sql) params["lien"] = liste[0][0] for link, release in liste: if reso in release: params["lien"] = link break param = {"u2p": params["u2p"], 'action': "playHK", 'lien': params["lien"], 'episode': params["episode"], "saison": params["saison"], "typMedia": "episode"} #param = ["%s=%s" %(k, v) for k, v in param.items()] #param = "&".join(param) #notice(param) #xbmc.executebuiltin("RunPlugin(plugin://plugin.video.sendtokodiU2P/?%s)" %param) playMediaHK(param) def mepInfos(numId): sql = 
"SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m WHERE m.numId={}".format(numId) movies = extractMedias(sql=sql) media = Media("movie", *movies[0]) logo, clearart, banner = extractFanart(numId) urlFanart = "http://assets.fanart.tv/fanart/movie/" li = xbmcgui.ListItem(label=media.title) updateInfoTagVideo(li,media,True,False,True,False,False) #liz.setPath("plugin://%s/play/%s" % (ADDON.getAddonInfo("id"),urllib.quote(url, safe='')) ) li.setArt({'icon': media.backdrop, 'thumb': media.poster, 'poster': media.poster, 'clearlogo': urlFanart, 'fanart': media.backdrop}) return li def testCertification(numId, typMedia): if __addon__.getSetting("bookonline") != "false": try: passwd = __addon__.getSetting("bookonline_name") certificationUser = widget.getCertification(passwd) if certificationUser[0] > 18: return False certificationCorrect = widget.recupCertif(numId, typMedia) if not certificationCorrect: if __addon__.getSetting("actifnewpaste") != "false": if typMedia == "movie": sql = "SELECT certif FROM filmsPub WHERE numId={}".format(numId) else: sql = "SELECT certif FROM seriesPub WHERE numId={}".format(numId) certificationMedia = createbdhk.extractMedias(sql=sql, unique=1) elif __addon__.getSetting("actifhk") != "false": if typMedia == "movie": sql = "SELECT certification FROM movieCertification WHERE numId={}".format(numId) else: sql = "SELECT certification FROM tvshowCertification WHERE numId={}".format(numId) certificationMedia = extractMedias(sql=sql, unique=1) else: certificationMedia = [certificationCorrect] if certificationMedia and isinstance(certificationMedia[0], int): if certificationMedia[0] > certificationUser[0]: showInfoNotification("tu es trop jeune .....") return True else: if certificationUser[1] == 0: if typMedia == "movie": if __addon__.getSetting("actifnewpaste") != "false": sql2 = "SELECT numId FROM filmsRepos WHERE 
famille='concert'" concerts = createbdhk.extractMedias(sql=sql2) concerts = [x[0] for x in concerts] if int(numId) in concerts: return False elif __addon__.getSetting("actifhk") != "false": sql2 = "SELECT numId FROM movieFamille WHERE famille='#Concerts'" concerts = extractMedias(sql=sql2) concerts = [x[0] for x in concerts] if int(numId) in concerts: return False if certificationUser[0] > 11: if __addon__.getSetting("actifnewpaste") != "false": sql2 = "SELECT numId FROM filmsRepos WHERE famille='concert' or famille='spectacle' or famille='docu'" autres = createbdhk.extractMedias(sql=sql2) autres = [x[0] for x in autres] if int(numId) in autres: return False elif __addon__.getSetting("actifhk") != "false": sql2 = "SELECT numId FROM movieFamille WHERE famille='#Sports' or famille='#Spectacles'" autres = extractMedias(sql=sql2) autres = [x[0] for x in autres] if int(numId) in autres: return False showInfoNotification("tu es trop jeune .....") return True return False except: return False def nettHistoDB(bd): cnx2 = sqlite3.connect(bd) cur2 = cnx2.cursor() try: cur2.execute("SELECT (SELECT f.strFilename FROM files as f WHERE b.idFile=f.idFile) FROM bookmark as b") except: pass try: [cur2.execute("DELETE FROM files WHERE strFilename=?", (x[0],)) for x in cur2.fetchall() if "playMediaUptobox&" not in x[0]] except: pass cnx2.commit() cur2.close() cnx2.close() def playMediaHK(params): typMedia = xbmc.getInfoLabel('ListItem.DBTYPE') if not typMedia: xbmc.executebuiltin("Dialog.Close(busydialog)") xbmc.sleep(500) typMedia = xbmc.getInfoLabel('ListItem.DBTYPE') notice(typMedia) numId = params["u2p"] try: typMedia = params["typMedia"] except: pass #======================================== certification ======================================== if testCertification(numId, typMedia): return #====================================== fin certif ============================================ if typMedia not in ["movie", "audioBook"]: if "saison" in params.keys(): try: title = 
params['title'] except: title = "" numEpisode = params['episode'] saison = params["saison"] li = xbmcgui.ListItem() media = MediaSp(**{"title": title, "episode": numEpisode, "season": saison, "numId": numId, "typMedia": typMedia}) updateInfoTagVideo2(li, media) #xbmcgui.ListItem().setInfo('video', {"title": title, "episode": numEpisode, "season": saison}) else: title = xbmc.getInfoLabel('ListItem.TVShowTitle') saison = xbmc.getInfoLabel('ListItem.Season') numEpisode = xbmc.getInfoLabel('ListItem.Episode') if not numEpisode and 'episode' in params.keys(): numEpisode = params['episode'] #prepareUpNext(title, numId, saison, numEpisode) else: saison = 1 result = getParams(params['lien'], u2p=numId, saisonIn=saison) #notice(result) if result and "url" in result.keys(): if typMedia not in ["movie", "audioBook"]: result["episode"] = numEpisode result["season"] = saison else: result["episode"] = "" result["season"] = "" url = str(result['url']) showInfoNotification("playing title " + result['title']) try: result['title'] = xbmc.getInfoLabel('ListItem.TITLE') listIt = createListItemFromVideo(result) if "skin" in params.keys(): li = mepInfos(params["u2p"]) xbmc.Player().play(url, li) else: xbmcplugin.setResolvedUrl(__handle__, True, listitem=listIt) except Exception as e: notice("playMediaHK - Erreur Play " + str(e)) threading.Thread(target=gestionThumbnails).start() count = 0 time.sleep(2) while not xbmc.Player().isPlaying(): count = count + 1 if count >= 20: return else: time.sleep(1) try: trk = actifTrakt() except: trk = None if numId != "divers" and str(numId) != "0": if typMedia == "movie": if __addon__.getSetting("bookonline") != "false": #notice("http://%s/requete.php?name=%s&type=getpos&numid=%s&media=%s" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), params["u2p"], typMedia)) recupPos = widget.responseSite("http://%s/requete.php?name=%s&type=getpos&numid=%s&media=%s" %(__addon__.getSetting("bookonline_site"), 
__addon__.getSetting("bookonline_name"), params["u2p"], typMedia)) if recupPos: seek = float(recupPos[0]) else: seek = 0 else: seek = widget.bdHK(sauve=0, numId=int(numId)) elif typMedia == "audioBook": seek = 0 else: if __addon__.getSetting("bookonline") != "false": recupPos = widget.responseSite("http://%s/requete.php?name=%s&type=getpos&numid=%s&media=%s&saison=%s&episode=%s" \ %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), params["u2p"], typMedia, saison, numEpisode)) if recupPos: seek = float(recupPos[0]) else: seek = 0 else: seek = widget.bdHK(sauve=0, numId=int(numId), typM=typMedia, saison=saison, episode=numEpisode) else: seek = 0 if seek > 0: dialog = xbmcgui.Dialog() resume = dialog.yesno('Play Video', 'Resume last position?') #notice(seek) if resume: notice(xbmc.getInfoLabel('Player.Title')) xbmc.Player().seekTime(int(seek)) else: notice("delete") okUpNext = True tt = xbmc.Player().getTotalTime() #web_pdb.set_trace() notice("typeMedia " + typMedia) # scrobble try: if trk and numId != "divers" and str(numId) != "0": pos = 0.0 if typMedia == "movie": trk.scrobble(title="", year=0, numId=numId, pos=pos, typM="movie", mode="start") else: trk.scrobble(title="", year=0, numId=numId, pos=pos, season=saison, number=numEpisode, typM="show", mode="start") except: pass t = 0 while xbmc.Player().isPlaying(): t = xbmc.Player().getTime() #notice(t) if tt == 0: tt = xbmc.Player().getTotalTime() time.sleep(1) if t > 10 and okUpNext and typMedia not in ["movie", "audioBook"]: try: prepareUpNext(title, numId, saison, numEpisode) okUpNext = False except: pass if t > 180 and numId != "divers" and str(numId) != "0": if typMedia == "movie": if __addon__.getSetting("bookonline") != "false": widget.pushSite("http://%s/requete.php?name=%s&type=posmovie&numid=%s&pos=%.3f&tt=%.3f" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), params["u2p"], t, tt)) else: widget.bdHK(numId=numId, pos=t, tt=tt, 
typM=typMedia) elif typMedia == "episode": if __addon__.getSetting("bookonline") != "false": requete = "http://%s/requete.php?name=%s&type=posserie&numid=%s&pos=%d&tt=%d&saison=%s&episode=%s"\ %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), params["u2p"], t, tt, saison, numEpisode) widget.pushSite(requete) if tt and (float(t) / float(tt) * 100.0) > 90.0: #push on "on continue" widget.gestOC(params["u2p"], "ajout") scraperUPTO.extractEpisodesOnContinue() # p = {"name": __addon__.getSetting("bookonline_name"), "type": "vuepisodes", "numid": params["u2p"], "saison": saison, "episodes": numEpisode,"vu": "1"} requete = "http://%s/requete.php" %__addon__.getSetting("bookonline_site") + '?' + urlencode(p) widget.pushSite(requete) else: widget.bdHK(numId=numId, pos=t, tt=tt, typM=typMedia, saison=saison, episode=numEpisode) #fin scrooble try: if trk and numId != "divers" and str(numId) != "0" and t : try: pos = t / tt * 100.0 except: pos = 10.0 if typMedia == "movie": trk.scrobble(title="", year=0, numId=numId, pos=pos, typM="movie", mode="stop") else: trk.scrobble(title="", year=0, numId=numId, pos=pos, season=saison, number=numEpisode, typM="show", mode="stop") except: pass nettHistoDB(__database__) if os.path.isfile(xbmcvfs.translatePath('special://home/addons/plugin.video.sendtokodiU2P/rskin.txt')) and __addon__.getSetting("rskin"): time.sleep(5) xbmc.executebuiltin('ReloadSkin') time.sleep(0.05) os.remove(xbmcvfs.translatePath('special://home/addons/plugin.video.sendtokodiU2P/rskin.txt')) return def playMediaOld(params): result = getParams(params['lien']) if result and "url" in result.keys(): listIt = createListItemFromVideo(result) xbmcplugin.setResolvedUrl(__handle__, True, listitem=listIt) def playMedia(params): typDB = "non auto" try: lastBD = os.path.normpath(os.path.join(__repAddon__, "lastDB.txt")) if "autonome" in open(lastBD, "r").readline(): typDB = "auto" except: pass typMedia = xbmc.getInfoLabel('ListItem.DBTYPE') if not 
typMedia: xbmc.executebuiltin("Dialog.Close(busydialog)") xbmc.sleep(500) typMedia = xbmc.getInfoLabel('ListItem.DBTYPE') if typDB == "auto": fPlay = sys.argv[0] + sys.argv[2] if typMedia != "movie": idfile= getIDfile(fPlay) else: idfile = xbmc.getInfoLabel('ListItem.DBID') else: idfile = xbmc.getInfoLabel('ListItem.DBID') #notice("idFile " + str(idfile)) #notice(xbmc.getInfoLabel('ListItem.Episode')) #notice(xbmc.getInfoLabel('ListItem.TvShowDBID')) #notice(xbmc.getInfoLabel('ListItem.PercentPlayed')) #notice(xbmc.getInfoLabel('ListItem.EndTimeResume')) #notice("fin listem") result = getParams(params['lien']) if result and "url" in result.keys(): url = str(result['url']) showInfoNotification("playing title " + result['title']) notice("num id " + str(result)) notice("handle " + str(__handle__)) try: listIt = createListItemFromVideo(result) xbmcplugin.setResolvedUrl(__handle__, True, listitem=listIt) #xbmc.Player().play(url, listIt) #xbmc.executebuiltin('PlayMedia(%s)' %url) except Exception as e: notice("playMedia - Erreur Play " + str(e)) threading.Thread(target=gestionThumbnails).start() count = 0 time.sleep(2) while not xbmc.Player().isPlaying(): count = count + 1 if count >= 20: return else: time.sleep(1) try: infoTag = xbmc.Player().getVideoInfoTag() #notice(infoTag.getDbId()) #idfile = infoTag.getDbId() notice("idFile " + str(idfile)) #notice(xbmc.Player().getPlayingFile()) #typMedia = infoTag.getMediaType() # seek = getTimeBookmark(idfile, __database__, typMedia) except Exception as e: xbmc.Player().pause notice(str(e)) seek = 0 if seek > 0: dialog = xbmcgui.Dialog() resume = dialog.yesno('Play Video', 'Resume last position?') if resume: notice(xbmc.getInfoLabel('Player.Title')) xbmc.Player().seekTime(int(seek)) #threading.Thread(target=correctionBookmark, args=(idfile, typMedia)) #importDatabase(debug=0) #tx = testDatabase() #if tx: # showInfoNotification("New DataBase en ligne !!!") #threading.Thread(target=importDatabase) if typDB == "auto": tt = 
def correctionBookmark(idfile, t, tt, typeM):
    """Persist the playback position of a media item in Kodi's video database.

    idfile: movie id when typeM == "movie" (resolved below to the underlying
            idFile of the `files` table), otherwise already a file id.
    t:      current playback position in seconds.
    tt:     total duration in seconds (0 when Kodi could not determine it).
    typeM:  Kodi media type string ("movie", "episode", ...).

    If less than 4% of the media remains, the bookmark row is deleted and the
    file flagged as played; otherwise the bookmark is updated or created.
    Any failure is swallowed and only logged through notice().
    """
    try:
        cnx = sqlite3.connect(__database__)
        cur = cnx.cursor()
        try:
            notice(typeM)
            if typeM == "movie":
                # A movie id must first be mapped to its idFile before
                # touching the bookmark/files tables.
                cur.execute("SELECT idFile FROM movie WHERE idMovie=?", (idfile,))
                idfile = cur.fetchone()[0]
                notice("idefile " + str(idfile))
            try:
                # Remaining percentage of the media.
                delta = (1 - t / tt) * 100
            except (ZeroDivisionError, TypeError):
                # Unknown duration: assume there is enough left to keep a bookmark.
                delta = 5
            if delta < 4:
                # Practically finished: drop the bookmark and mark as watched.
                notice("del media bookmark")
                # Parameterized queries instead of %-interpolated SQL.
                cur.execute("DELETE FROM bookmark WHERE idFile=?", (idfile,))
                cur.execute("UPDATE files SET playCount=1 WHERE idFile=?", (idfile,))
            else:
                cur.execute("SELECT idFile FROM bookmark WHERE idFile=?", (idfile,))
                if cur.fetchone():
                    sql0 = "UPDATE bookmark SET timeInSeconds=? WHERE idFile=?"
                    cur.execute(sql0, (t, idfile,))
                else:
                    sql0 = "REPLACE INTO bookmark (idFile, timeInSeconds, totalTimeInSeconds, thumbNailImage, player, playerState, type) VALUES(?, ?, ?, ?, ?, ?, ?)"
                    cur.execute(sql0, (idfile, t, tt, None, 'VideoPlayer', None, 1,))
            cnx.commit()
        finally:
            # Always release the DB handles, even when something above raised.
            cur.close()
            cnx.close()
    except Exception as e:
        notice("insert bookmark " + str(e))
tx.splitlines()} source = """<source> <name>{}</name> <path pathversion="1">{}</path> <allowsharing>true</allowsharing> </source>\n""" sources = "" for k, v in dictSource.items(): depot = os.path.normpath(v.replace(oldRep, newRep)) if v.replace(oldRep, newRep) not in txSources: sources += source.format(k, depot) with open(xbmcvfs.translatePath("special://home/userdata/sources.xml"), "w") as f: f.write(txSources.format(**{"sources":sources})) cnx2 = sqlite3.connect(__database__) cur2 = cnx2.cursor() cur2.execute("SELECT * FROM bookmark") bookmark = cur2.fetchall() try: cur.executemany("INSERT INTO bookmark VALUES(?, ?, ?, ?, ?, ?, ?, ?)", bookmark) cnx.commit() except: pass cur2.execute("SELECT * FROM files WHERE (lastPlayed OR playCount) AND idfile<400000") histoPlay = cur2.fetchall() histoPlay = [(x[0], x[1], x[2], x[3] if x[3] and x[3] != 'None' else "", x[4] if x[4] and x[4] != 'None' else "", x[5]) for x in histoPlay] #print(histoPlay) for h in histoPlay: cur.execute("UPDATE files set lastPlayed=?, playCount=? WHERE idFile=? 
def get_size(rep):
    """Return the cumulated size, in bytes, of every file under *rep* (recursive)."""
    return sum(
        os.path.getsize(os.path.join(dirpath, name))
        for dirpath, _dirnames, filenames in os.walk(rep)
        for name in filenames
    )
def extractNews(numVersion):
    """Return the strm entries newer than *numVersion* from the bundled u2pBD.bd.

    Gives back an empty list when the database file is absent.
    """
    dbPath = xbmcvfs.translatePath('special://home/addons/plugin.video.sendtokodiU2P/resources/u2pBD.bd')
    if not os.path.isfile(dbPath):
        return []
    cnx = sqlite3.connect(dbPath)
    cur = cnx.cursor()
    cur.execute("SELECT strm FROM strms WHERE version>?", (numVersion,))
    rows = cur.fetchall()
    cur.close()
    cnx.close()
    return [row[0] for row in rows]
def mepAutoStart():
    """Install the 'service.autoexec' helper addon so the update runs when Kodi restarts."""
    targetDir = xbmcvfs.translatePath("special://home/addons/service.autoexec/")
    sourceDir = xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/resources/maj/")
    try:
        os.mkdir(targetDir)
    except:
        # Directory probably exists already: keep going.
        pass
    for name in os.listdir(sourceDir):
        src = xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/resources/maj/%s" %name)
        dst = xbmcvfs.translatePath("special://home/addons/service.autoexec/%s" %name)
        xbmcvfs.copy(src, dst)
    # Enable the freshly copied service addon through JSON-RPC.
    xbmc.executeJSONRPC('{"jsonrpc": "2.0", "id":1, "method": "Addons.SetAddonEnabled", "params": { "addonid": "service.autoexec", "enabled": true }}')
    showInfoNotification("Mise place Maj au Restat Kodi !!")
def testHKdb():
    """Run SQLite's integrity check on the bundled HK media database.

    Returns the string "ok" when the database is sound, otherwise an
    error description produced by SQLite.
    """
    dbPath = xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/medias.bd")
    cnx = sqlite3.connect(dbPath)
    cur = cnx.cursor()
    cur.execute("PRAGMA integrity_check")
    verdict = cur.fetchone()[0]
    cur.close()
    cnx.close()
    return verdict
numRecup = __addon__.getSetting("epg") elif typImport == "ba": numRecup = __addon__.getSetting("numba") fDOWN = 1 else: if not __addon__.getSetting("numhk"): dialog = xbmcgui.Dialog() d = dialog.input("Num DB HK ex:zmdeo", type=xbmcgui.INPUT_ALPHANUM) if d: __addon__.setSetting("numhk", d) else: return if maj: numRecup = __addon__.getSetting("numhk") else: dictDB = {"HK-autonome": __addon__.getSetting("numhk"), "HK-autonome-forced": __addon__.getSetting("numhk"), "skin": __addon__.getSetting("skinhk"), "Epg": __addon__.getSetting("epg")} choixDB = sorted(list(dictDB.keys())) dialogApi = xbmcgui.Dialog() if typImport == "autonome": choixDB = [x for x in choixDB if "HK" in x] selectedDB = dialogApi.select("Choix DATABASE", choixDB) if selectedDB != -1: # debridage numRecup = dictDB[choixDB[selectedDB]] if debug: showInfoNotification("Verif Update") if "forced" in choixDB[selectedDB]: fDOWN = 1 else: return ApikeyAlldeb, ApikeyRealdeb, ApikeyUpto = getkeyAlldebrid(), getkeyRealdebrid(), getkeyUpto() cr = cryptage.Crypt() repStrm, _ = cheminOs() tx = False if numRecup: if ApikeyUpto and not tx: tx = cr.updateBD(repAddon, key=ApikeyUpto, typkey="upto", numRentry=numRecup, forceDownload=fDOWN) if ApikeyAlldeb and not tx: tx = cr.updateBD(repAddon, key=ApikeyAlldeb, typkey="alldeb", numRentry=numRecup, forceDownload=fDOWN) if ApikeyRealdeb and not tx: tx = cr.updateBD(repAddon, key=ApikeyRealdeb, typkey="realdeb", numRentry=numRecup, forceDownload=fDOWN) else: if ApikeyUpto and not tx: tx = cr.updateBD(repAddon, key=ApikeyUpto, typkey="upto", forceDownload=fDOWN) if ApikeyAlldeb and not tx: tx = cr.updateBD(repAddon, key=ApikeyAlldeb, typkey="alldeb", forceDownload=fDOWN) if ApikeyRealdeb and not tx: tx = cr.updateBD(repAddon, key=ApikeyRealdeb, typkey="realdeb", forceDownload=fDOWN) notice("download %0.2f" %(time.time() - a)) a = time.time() if tx: if debug == 0: showInfoNotification("Update DataBase en cours, wait ....") pDialogBD2 = xbmcgui.DialogProgressBG() 
pDialogBD2.create('U2Pplay', 'Update en cours') try: shutil.rmtree(os.path.normpath(os.path.join(repStrm, "xml")), ignore_errors=True) except: pass try: shutil.rmtree(os.path.normpath(os.path.join(repStrm, "xsp")), ignore_errors=True) except: pass if os.path.isfile(os.path.normpath(os.path.join(repAddon, "version.txt"))): shutil.move(os.path.normpath(os.path.join(repAddon, "version.txt")), os.path.normpath(os.path.join(repStrm, "version.txt"))) if os.path.isfile(os.path.normpath(os.path.join(repStrm, "version.txt"))): numVersion = int(open(os.path.normpath(os.path.join(repStrm, "version.txt")), 'r').readline()) else: numVersion = 0 with zipfile.ZipFile(xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/MyVideos119-U2P.zip"), 'r') as zipObject: try: zipObject.extract("MyVideos119-U2P.db", repAddon) dbIn = True except: dbIn = False listOfFileNames = zipObject.namelist() nbFiles = len(listOfFileNames) try: zipObject.extract("u2pBD.bd", xbmcvfs.translatePath('special://home/addons/plugin.video.sendtokodiU2P/resources/')) listeNews = extractNews(numVersion) for i, f in enumerate(listOfFileNames): nbGroupe = int((i / float(nbFiles)) * 100.0) pDialogBD2.update(nbGroupe, 'U2Pplay', message="verif STRMS") if (numVersion == 0 and f[-5:] == ".strm") or f in listeNews: zipObject.extract(f, repStrm) elif f[-4:] in [".xml", ".xsp"]: zipObject.extract(f, repAddon) elif f in ["version.txt"]: zipObject.extract(f, repStrm) else: if f in ["fileSources.txt", "sources.xml", "fichierDB.txt"] or f[-4:] in [".xml", ".xsp"]: zipObject.extract(f, repAddon) except: try: zipObject.extract("epg.bd", xbmcvfs.translatePath('special://home/addons/plugin.video.sendtokodiU2P/')) except: pass try: zipObject.extract("ba.db", xbmcvfs.translatePath('special://home/userdata/addon_data/plugin.video.sendtokodiU2P/')) except: pass try: # extract db HK bdOk = False for nbImport in range(3): zipObject.extract("medias.bd", 
xbmcvfs.translatePath('special://home/addons/plugin.video.sendtokodiU2P/resources/')) time.sleep(0.2) shutil.move(xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/resources/medias.bd"), xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/medias.bd")) time.sleep(0.1) if debug: bdOk = True break else: if testHKdb() == "ok": #notice("nombre import: " + str(nbImport + 1)) bdOk = True break time.sleep(2) if not bdOk: showInfoNotification('Erreur database corrupt, faite un "Import manuel"') except: pass for i, f in enumerate(listOfFileNames): nbGroupe = int((i / float(nbFiles)) * 100.0) pDialogBD2.update(nbGroupe, 'U2Pplay', message="verif STRMS") if f in ["fileSources.txt", "sources.xml", "fichierDB.txt"] or f[-4:] in [".xml", ".xsp", ".zip"]: zipObject.extract(f, repAddon) pDialogBD2.close() if dbIn: a = time.time() showInfoNotification("Mise en place new database !!!") notice("extraction %0.2f" %(time.time() - a)) try: shutil.move(xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/sources.xml"), xbmcvfs.translatePath("special://home/userdata/sources.xml")) except: pass majLink(xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/MyVideos119-U2P.db")) xbmcvfs.delete(xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/MyVideos119-U2P.zip")) shutil.move(xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/MyVideos119-U2P.db"), __database__) #xbmcvfs.delete(xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/MyVideos119-U2P.db")) try: showInfoNotification("Update terminée (%d News)" %len(listeNews)) except: showInfoNotification("Update terminée database-autonome (test)") notice("maj DATABASE %0.2f" %(time.time() - a)) else: xbmcvfs.delete(xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/MyVideos119-U2P.zip")) else: if debug: showInfoNotification("Pas d'update") if debug: showInfoNotification("Durée Update : %d 
def supWidg():
    """Ask the user which widget lists to delete, then remove the selection."""
    noms = list(widget.extractListe("all"))
    if not noms:
        return
    choix = xbmcgui.Dialog().multiselect("Liste(s) à supprimer", noms, preselect=[])
    # multiselect returns None on cancel and [] when nothing is ticked.
    if not choix:
        return
    widget.supListe([noms[i] for i in choix])
    showInfoNotification("Liste(s) choisie(s) éffacée(s)")
liste if x not in [liste[y - 3] for y in selected if y > 2]] listeVu = [liste[y - 3] for y in selected if y > 2] #trakt trk = actifTrakt() if trk: trk.gestionWatchedHistory(numId=[int(params["u2p"])], season=int(params["saison"]), number=[int(x.split("E")[1]) for x in listeVu], typM="show", mode="add") trk.gestionWatchedHistory(numId=[int(params["u2p"])], season=int(params["saison"]), number=[int(x.split("E")[1]) for x in listeNonVu], typM="show", mode="remove") if __addon__.getSetting("bookonline") != "false": episodes = "*".join([str(int(x.split("E")[1])) for x in listeVu]) if episodes: #notice("http://%s/requete.php?name=%s&type=vuepisodes&numid=%s&saison=%s&episodes=%s&vu=1"\ # %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), params["u2p"], params["saison"], episodes)) widget.pushSite("http://%s/requete.php?name=%s&type=vuepisodes&numid=%s&saison=%s&episodes=%s&vu=1"\ %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), params["u2p"], params["saison"], episodes)) time.sleep(0.1) episodes = "*".join([str(int(x.split("E")[1])) for x in listeNonVu]) if episodes: #notice("http://%s/requete.php?name=%s&type=vuepisodes&numid=%s&saison=%s&episodes=%s&vu=0"\ # %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), params["u2p"], params["saison"], episodes)) widget.pushSite("http://%s/requete.php?name=%s&type=vuepisodes&numid=%s&saison=%s&episodes=%s&vu=0"\ %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), params["u2p"], params["saison"], episodes)) else: for l in listeVu: episode = int(l.split("E")[1]) widget.setVu(params["u2p"], int(params["saison"]), episode, 1, typM="tv") for l in listeNonVu: episode = int(l.split("E")[1]) widget.setVu(params["u2p"], int(params["saison"]), episode, 0, typM="tv") if params["refresh"] == "1": xbmc.executebuiltin("Container.Refresh") def createListeV(): dialog = xbmcgui.Dialog() ret = dialog.yesno('Listes', 
def createListeT():
    """Import a Trakt list after asking whether it targets movies or shows."""
    dialog = xbmcgui.Dialog()
    serie = dialog.yesno('Listes', 'Quel type de liste ?', nolabel="Films", yeslabel="Series")
    media = "show" if serie else "movie"
    TraktHK().importListes(media)
supérieur à 4 et inférieur à 9.5 tu inscris: 4:9.5 On met un . a la place de la , 4) Votes c'est le nombre de votant pour obtenir la note "Popularité" vu juste avant c'est un tandem avec "Popularité" on va prendre un exemple Un média peut connu qui recoit 2 votes , 1 à 9.5 et 1 à 10 (c'est plus frequent qu on ne le pense) Il aura une note de 9.75 qui sera tres tres surfaite.... Pour eviter ce probléme , on indique un nombre de votants minimum si on a mis en "Popularité" 4:9.5, on ajoute un nombre de votant minimum example de 500, et la on filtre bien les médias peu regardés donc peu notés 2 exemples #je veux les films notés entre 3 et 9.5 mais aussi les anciens films (tmdb n'existait pas, donc pas forcément été beaucoup noté) Popularité => 3:9.5 Votes => 200 200 me permet de filtrer les nanard.... #je veux les gros blockbusters, bien notés Popularité => 6:9.5 Votes => 10000 la tu as les top.... 5) Langue tu choisis ta liste en fonction de la langue d'origine exemple , les medias japonais, tu choisis "ja", Francais "fr" etc.. Dans "u2pplay-tutos" tu as tout le détail des langues => "lang_Liste.pdf" Voila pour l'explication des Filtres , tu peux les mettre tous ou une partie c'est à ta convenance, tu peux créér et effacer des listes autant de fois que possible. Entraines-toi , c'est un outil trés interressant et tu verras une fois compris, c'est trés trés simple et c'est un belge qui te le dit .... 4éme Fenêtre ORDRE DE TRI 4 choix Alpha = par ordre alphabétique de A à Z Date Added = par ordre d'arrivée ou de modification dans la mediathéque , c'est le dernier entré ou modifié qui sera en 1er Popularity = par ordre de notation du plus grand vers le plus petit Year = Par ordre d'année du média du plus récent au plus vieux on peut mettre plusieurs tris, la priorité et l'ordre insertion des tris en 1 tu choisis year en 2 popularity ordre se fera => tous les medias de 2022 classé par ordre de note ensuite 2021 classé par ordre de note etc ... 
5éme fenêtre Nombre Médias le nombre de médias que va comporter ta liste Il faut qu'il soit toujours inférieur à la pagination (500 par default) ps: si ya des fautes, ca tombe bien ce n'est pas un concours d'orthographe...''') dialog = xbmcgui.Dialog() ret = dialog.yesno('Listes', 'Quel type de liste ?', nolabel="Films", yeslabel="Series") if not ret: media = "film" sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, (SELECT l.link FROM movieLink as l WHERE l.numId=m.numId) , m.backdrop, m.runtime, m.id \ FROM movie as m " tx1 = 'Films uniquement? (sans animation, documentaire, concert et spectacle)' genreMedia = "movie" else: media = "serie" genreMedia = "tv" tx1 = 'Series uniquement? (sans animation, documentaire)' sql = "SELECT m.title, m.overview, m.year, m.poster, m.numId, m.genre, m.popu, m.backdrop, m.runtime, m.id FROM tvshow as m " dictFiltre = {} tabOrder = [] dictOrder = {"Alpha": "Title ASC", "Date Added": "id DESC", "Popularity": "popu DESC", "Year": "year DESC", "Date Release": "dateRelease DESC", "Votes": "votes DESC"} dialog = xbmcgui.Dialog() tc = dialog.yesno('Contenu', tx1) if tc: if media == "film": typContenu = widget.contenuFilm() else: typContenu = widget.contenuSerie() else: typContenu = "" notice(typContenu) ajoutGenre = 1 while ajoutGenre: dialog = xbmcgui.Dialog() choix = ["Genre(s)", "Année(s)", "Popularité", "Votes", "Langue"]#, "Resos", "Acteur", "Réalisateur", "Langue"] selected = dialog.select("Filtres", choix) if selected != -1: filtre = choix[selected] if "Genre" in filtre: dictFiltre[filtre] = widget.genre(__keyTMDB__, genreMedia) elif "Ann" in filtre: dictFiltre[filtre] = widget.year() elif "Popu" in filtre: dictFiltre[filtre] = widget.popu() elif "Votes" in filtre: dictFiltre[filtre] = widget.votes() elif "Langue" in filtre: if genreMedia == "movie": sqlang = "SELECT DISTINCT lang FROM filmsPub" else: sqlang = "SELECT DISTINCT lang FROM seriesPub" liste = createbdhk.extractMedias(sql=sqlang, unique=1) 
dictFiltre[filtre] = widget.langue(sorted(liste)) dialog = xbmcgui.Dialog() ajout = dialog.yesno('Filtres', 'Ajout Filtre supplémentaire ?') if not ajout: while 1: ajoutGenre = 0 choix = list(dictOrder.keys()) dialog = xbmcgui.Dialog() selected = dialog.select("Choix ordre de tri (par defaut desc sauf Alpha)", choix) if selected != -1: if choix[selected] not in tabOrder: tabOrder.append(choix[selected]) dialog = xbmcgui.Dialog() ajout = dialog.yesno('Filtres', 'Ajout ordre supplémentaire ?') if not ajout: #if tabOrder[0] == "Alpha": # sens = "ASC" #else: # sens = "DESC" tri = " ORDER BY {}".format(",".join([dictOrder[x] for x in tabOrder])) dialog = xbmcgui.Dialog() d = dialog.numeric(0, 'Nombre Médias') if int(d) == 0: d = "40000" elif int(d) < 25: d = "25" limit = " LIMIT %s" %d sqladd = "WHERE " + typContenu sqladd += " AND ".join([v for k, v in dictFiltre.items()]) sqladd += tri sqladd += limit sql += sqladd notice(sql) d = dialog.input("Nom de la liste", type=xbmcgui.INPUT_ALPHANUM) if d: if __addon__.getSetting("bookonline") != "false": site = __addon__.getSetting("bookonline_site") name = __addon__.getSetting("bookonline_name") url = "http://{}/requete.php?type=insertlp&name={}&title={}&media={}&sql={}".format(site, name, d, media, quote(sql)) widget.pushSite(url) cur.execute("REPLACE INTO listes (title, sql, type) VALUES (?, ?, ?)", (d, sql, media, )) cnx.commit() showInfoNotification("Création liste: %s ok!!" 
def createRUPTO():
    """Create an Uptobox directory entry ('repertoire') for movies or shows."""
    dialog = xbmcgui.Dialog()
    serie = dialog.yesno('Repertoire', 'Quel type de Repertoire?', nolabel="Films", yeslabel="Series")
    genreMedia = "tvshow" if serie else "movie"
    dialog = xbmcgui.Dialog()
    nomRep = dialog.input("Nom du repertoire Upto (ex: //FilmNews)", type=xbmcgui.INPUT_ALPHANUM)
    if not nomRep:
        return
    nomListe = dialog.input("Nom Affichage HK ", type=xbmcgui.INPUT_ALPHANUM)
    if not nomListe:
        return
    widget.createRepUpto(nomListe, nomRep, genreMedia)
def createListeLP():
    """Build a paste-backed repo list: pick a type, name it, then stack pastes."""
    dialog = xbmcgui.Dialog()
    choix = ["anime", "film", "serie"]
    selected = dialog.select("Choix type Repo", choix)
    if selected == -1:
        return
    typRepo = choix[selected]
    nom = xbmcgui.Dialog().input("Nom de la liste", type=xbmcgui.INPUT_ALPHANUM)
    if not nom:
        return
    sql = "SELECT DISTINCT(numPaste) FROM paste WHERE type='{}'".format(typRepo)
    # Case-insensitive alphabetical order of the available paste numbers.
    restants = sorted([x[0] for x in extractMedias(sql=sql)], key=lambda s: s.lower())
    selectionne = []
    rang = 1
    while True:
        pick = xbmcgui.Dialog().select("Choix Num Paste", restants)
        if pick == -1:
            break
        selectionne.append((nom, restants[pick], rang, typRepo))
        rang += 1
        # A paste can only be added once: remove it from the remaining choices.
        restants.pop(pick)
        if not xbmcgui.Dialog().yesno('Pastes', 'Ajouter un paste?'):
            break
    if selectionne:
        widget.insertPaste(selectionne)
        showInfoNotification("Repo Pastes %s créé" %nom)
showInfoNotification("Import skin à faire en 1er => import Database => skin ") def delView(params): if params["typM"] == "movies": typM = "movie" else: typM = "tvshow2" if __addon__.getSetting("bookonline") != "false": listeView = widget.responseSite("http://%s/requete.php?name=%s&type=view&media=%s" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), typM)) for l in listeView: #notice("id {}, {}".format(l, params["typM"])) #notice("http://%s/requete.php?name=%s&type=supview&media=%s&numid=%s" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), params["typM"], l)) widget.pushSite("http://%s/requete.php?name=%s&type=supview&media=%s&numid=%s" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), params["typM"], l)) time.sleep(0.1) else: listeView = list(widget.extractIdInVu(t=typM)) for l in listeView: widget.supView(params["u2p"], params["typM"]) showInfoNotification("Vider historique ok!!") def supView(params): if __addon__.getSetting("bookonline") != "false": notice("http://%s/requete.php?name=%s&type=supview&media=%s&numid=%s" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), params["typM"], params["u2p"])) widget.pushSite("http://%s/requete.php?name=%s&type=supview&media=%s&numid=%s" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), params["typM"], params["u2p"])) else: widget.supView(params["u2p"], params["typM"]) showInfoNotification("Retrait Last/View ok!!") def choixProfil(menu=0): cnx = sqlite3.connect(xbmcvfs.translatePath('special://home/userdata/addon_data/plugin.video.sendtokodiU2P/bookmark.db')) cur = cnx.cursor() sql = "SELECT nom, pass FROM users" cur.execute(sql) liste = cur.fetchall() cur.close() cnx.close() if not menu: xbmcplugin.setPluginCategory(__handle__, "Choix Users") xbmcplugin.setContent(__handle__, 'files') for choix in liste: addDirectoryItemLocal(choix[0], isFolder=True, 
parameters={"action":"actifP", "passwd": choix[1]}, picture="pastebin.png", texte="Profil à activer, mettre en favori pour un accés direct") xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) else: dialog = xbmcgui.Dialog() profil = dialog.select("Selectionner le profil à activer", [x[0] for x in liste]) if profil != -1: actifProfil({"passwd": liste[profil][1]}, menu=0) def suppProfil(): cnx = sqlite3.connect(xbmcvfs.translatePath('special://home/userdata/addon_data/plugin.video.sendtokodiU2P/bookmark.db')) cur = cnx.cursor() cur.execute("""CREATE TABLE IF NOT EXISTS users( `id` INTEGER PRIMARY KEY, nom TEXT, pass TEXT, UNIQUE (pass)) """) cnx.commit() sql = "SELECT nom, pass FROM users" cur.execute(sql) liste = cur.fetchall() dialogApi = xbmcgui.Dialog() selected = dialogApi.select("Profil à supprimer", [x[0] for x in liste]) if selected != -1: sql = "DELETE FROM users WHERE nom=? AND pass=?" cur.execute(sql, (liste[selected])) cnx.commit() sql = "DELETE FROM certification WHERE pass=?" 
cur.execute(sql, (liste[selected][1],)) cnx.commit() cur.close() cnx.close() def insertBookmarkHK(nom, passwd, certification, sans, debug=1): cnx = sqlite3.connect(xbmcvfs.translatePath('special://home/userdata/addon_data/plugin.video.sendtokodiU2P/bookmark.db')) cur = cnx.cursor() cur.execute("""CREATE TABLE IF NOT EXISTS users( `id` INTEGER PRIMARY KEY, nom TEXT, pass TEXT, UNIQUE (pass)) """) cnx.commit() cur.execute("""CREATE TABLE IF NOT EXISTS certification( pass TEXT, certification INTEGER, sans INTEGER, UNIQUE (pass)) """) cnx.commit() try: sql = "REPLACE INTO users (nom, pass) VALUES (?, ?)" cur.execute(sql, (nom, passwd, )) cnx.commit() sql = "REPLACE INTO certification (pass, certification, sans) VALUES (?, ?, ?)" cur.execute(sql, (passwd, certification, sans)) cnx.commit() """ params = { 'action': 'actifP', 'passwd': passwd } cmd = { 'jsonrpc': '2.0', 'method': 'Favourites.AddFavourite', 'params': { 'title': nom, "thumbnail": xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/resources/png/profil.png"), "type":"window", "window":"videos", "windowparameter": __url__ + '?' + urlencode(params) }, 'id': '1' } xbmc.executeJSONRPC(json.dumps(cmd)) """ if debug: showInfoNotification("User: %s créé!!" 
%nom) except Exception as e: #showInfoNotification(str(e)) showInfoNotification("ECHEC création User!!") cur.close() cnx.close() def importConfigHK(): tabProfil = [] dictCertification = {"Familial":1, "10 ans": 12, "12 ans": 14, "16 ans": 17, "18 ans": 25, "10": 12, "12": 14, "16": 17, "18": 25, "familial":1} dictSans = {"oui": 1, "non": 0, "Oui": 1, "Non": 0} if __addon__.getSetting("rentry"): d = __addon__.getSetting("rentry") numTable, pos, paste = int(d[0]), int(d[1:3]), d[3:] url = "https://rentry.co/%s/raw" %paste dictImport = ast.literal_eval(requests.get(url).content.decode()) for nom, login, certification, autre in dictImport["bookmark"]: #tabProfil.append("%s -- %s -- %s -- %s" %(nom, login, certification, autre)) insertBookmarkHK(nom, login, dictCertification[certification], dictSans[autre], debug=0) try: if dictImport["keyUpto"]: keyUpto = widget.decryptKey(dictImport["keyUpto"], pos, numTable) #showInfoNotification(keyUpto) status, validite = testUptobox(keyUpto) #tabProfil.append("Key Uptobox %s -- %s" % (status, validite)) __addon__.setSetting(id="keyupto", value=keyUpto) except: pass try: if dictImport["keyAlldeb"]: keyAlldeb = widget.decryptKey(dictImport["keyAlldeb"], pos, numTable) #showInfoNotification(keyAlldeb) status, validite = testAlldebrid(keyAlldeb) #tabProfil.append("Key Alldebrid %s -- %s" % (status, validite)) __addon__.setSetting(id="keyalldebrid", value=keyAlldeb) except: pass try: if dictImport["keyRealdeb"]: keyRealdeb = widget.decryptKey(dictImport["keyRealdeb"], pos, numTable) tabProfil.append("Key Realdebrid %s" % keyRealdeb) __addon__.setSetting(id="keyrealdebrid", value=keyRealdeb) except: pass try: if dictImport["keyTMDB"]: keyTMDB = widget.decryptKey(dictImport["TMDB"], pos, numTable) tabProfil.append("Key TMDB %s" % keyTMDB) __addon__.setSetting(id="keyrealdebrid", value=keyTMDB) except: pass try: if dictImport["client_id"]: client_id = widget.decryptKey(dictImport["client_id"], pos, numTable) tabProfil.append("Key Trakt Id 
%s" % client_id) __addon__.setSetting(id="clientid", value=client_id) except: pass try: if dictImport["client_secret"]: client_secret = widget.decryptKey(dictImport["client_secret"], pos, numTable) tabProfil.append("Key Trakt secret %s" % client_secret) __addon__.setSetting(id="clientsecret", value=client_secret) except: pass try: if dictImport["iptv"]: for fournisseur, macs in dictImport["iptv"].items(): fournisseur, nom = fournisseur.split("=") nbCompte = 0 for mac in macs: nbCompte += iptv.importCompte(fournisseur.strip(), mac, nom.strip()) tabProfil.append("fournisseur %s\nNombre de comptes importés: %d" %(fournisseur, nbCompte)) except: pass try: if dictImport["client_secret"]: client_secret = widget.decryptKey(dictImport["client_secret"], pos, numTable) tabProfil.append("Key Trakt secret %s" % client_secret) __addon__.setSetting(id="clientsecret", value=client_secret) except: pass dialog = xbmcgui.Dialog() dialog.textviewer('Config', "\n".join(tabProfil)) def createPass(): tab = "0123456789AZERTYUIOPMLKJHGFDSQWXVCBN?!azertyuiopmlkjhgfdsqwxcvbn" nb = random.randint(10, 20) tx = "" for i in range(nb): tx += tab[random.randint(0, (len(tab) - 1))] return tx def ajoutProfil(initP=0): dialog = xbmcgui.Dialog() d = dialog.input("Mettre votre pseudo:", type=xbmcgui.INPUT_ALPHANUM) if d: nom = d dialog = xbmcgui.Dialog() d = dialog.input("Mettre ton pass (accés à ton bookmark,\nmettre un pass complexe!!!)", createPass(), type=xbmcgui.INPUT_ALPHANUM) if d: passwd = d dictCertification = {"Familial":1, "10 ans": 12, "12 ans": 14, "16 ans": 17, "18 ans": 25} choix = list(dictCertification.keys()) selected = dialog.select("Certification", choix) if selected != -1: certification = dictCertification[choix[selected]] dialog = xbmcgui.Dialog() tc = dialog.yesno('Certification', "Autoriser la lecture des titres sans certification ?") notice("sans: " + str(tc)) if tc: sans = 1 else: sans = 0 insertBookmarkHK(nom, passwd, certification, sans) if initP: dialog = xbmcgui.Dialog() 
tc = dialog.yesno('Profil', "Veux-tu activer Bookmark en ligne ? (favoris etc...)") if tc: __addon__.setSetting(id="bookonline", value="true") actifProfil({"passwd": passwd}, menu=0) else: __addon__.setSetting(id="bookonline", value="false") def affProfils(): liste = widget.usersBookmark() xbmcplugin.setPluginCategory(__handle__, "Profils") addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") choix = [(x[0], {"action":"actifP", "passwd": x[1]}, 'special://home/addons/plugin.video.sendtokodiU2P/resources/png/profil.png', "Click pour activer") for x in liste ] isFolder = True for ch in sorted(choix): name, parameters, picture, texte = ch li = xbmcgui.ListItem(label=name) updateMinimalInfoTagVideo(li,name,texte) li.setArt({'thumb': picture, 'icon': addon.getAddonInfo('icon'), 'icon': addon.getAddonInfo('icon'), 'fanart': addon.getAddonInfo('fanart')}) url = sys.argv[0] + '?' + urlencode(parameters) xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=li, isFolder=isFolder) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def actifTrakt(): trk = None if __addon__.getSetting("traktperso") != "false": if __addon__.getSetting("bookonline") != "false": userPass = [x[1] for x in widget.usersBookmark() if x[0] == __addon__.getSetting("profiltrakt")] if userPass and __addon__.getSetting("bookonline_name") == userPass[0]: trk = TraktHK() else: trk = TraktHK() return trk def gestionoc(params): if params["mode"] == "ajout": widget.gestOC(params["u2p"], "ajout") else: widget.gestOC(params["u2p"], "supp") return def supFavHK(params): if __addon__.getSetting("bookonline") != "false": listeM = widget.responseSite("http://%s/requete.php?name=%s&type=favs&media=%s" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), params["typM"])) listeM = [int(x) for x in listeM] else: listeM = list(widget.extractFavs(t=media)) params["mode"] = "sup" for numId in listeM: params["u2p"] = numId gestionFavHK(params) time.sleep(0.1) 
showInfoNotification("Vider Favoris HK") def gestionFavHK(params): trk = actifTrakt() if params["mode"] == "ajout": typAjout = "add" if __addon__.getSetting("bookonline") != "false": widget.pushSite("http://%s/requete.php?name=%s&type=infavs&media=%s&numid=%s" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), params["typM"], params["u2p"])) else: widget.ajoutFavs(params["u2p"], params["typM"]) else: typAjout = "remove" if __addon__.getSetting("bookonline") != "false": widget.pushSite("http://%s/requete.php?name=%s&type=supfavs&media=%s&numid=%s" %(__addon__.getSetting("bookonline_site"), __addon__.getSetting("bookonline_name"), params["typM"], params["u2p"])) else: widget.supFavs(params["u2p"], params["typM"]) if trk: if params["typM"] == "movies": trk.gestionWatchlist(numId=[int(params["u2p"])], typM="movie", mode=typAjout) else: trk.gestionWatchlist(numId=[int(params["u2p"])], typM="show", mode=typAjout) return def choixRepo(): repReposFilm = xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/xml/movie/") repCatFilm = xbmcvfs.translatePath("special://home/userdata/library/video/movies/") filesRepo = os.listdir(repReposFilm) try: filesCat = os.listdir(repCatFilm) except: showInfoNotification("Installer Library Node Editor") return dialog = xbmcgui.Dialog() repos = dialog.multiselect("Selectionner les repos à installer", [x[:-4].replace("_", " ") for x in filesRepo], preselect=[]) if repos: [xbmcvfs.delete(repCatFilm + x) for x in filesRepo if x in filesCat] for repo in repos: shutil.copy(repReposFilm + filesRepo[repo], repCatFilm + filesRepo[repo]) xbmc.sleep(500) xbmc.executebuiltin('ReloadSkin') def choixliste(): repListes = xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/xsp/") dictRep = {"Listes intélligentes made in %s" %x: [x] for x in os.listdir(repListes)} #dictApi = {"Uptobox": ["keyupto", "Key Api Uptobox"], "Alldebrid": ["keyalldebrid", "Key Api Alldebrid"], "RealDebrid": 
["keyrealdebrid", "Key Api RealDebrid"]} dialogApi = xbmcgui.Dialog() choixRep = list(dictRep.keys()) selectedApi = dialogApi.select("Choix Contrib", choixRep) if selectedApi != -1: contrib = dictRep[choixRep[selectedApi]][0] repReposFilm = xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/xsp/%s/" %contrib) repCatFilm = xbmcvfs.translatePath("special://home/userdata/playlists/video/") filesRepo = os.listdir(repReposFilm) filesCat = os.listdir(repCatFilm) dialog = xbmcgui.Dialog() repos = dialog.multiselect("Selectionner les listes à installer", ["Toutes les listes"] + [x[:-4].replace("_", " ") for x in filesRepo], preselect=[]) if repos: [xbmcvfs.delete(repCatFilm + x) for x in filesRepo if x in filesCat] if 0 in repos: repos = range(len(filesRepo) - 1) for repo in repos: shutil.copy(repReposFilm + filesRepo[repo], repCatFilm + filesRepo[repo]) xbmc.sleep(500) xbmc.executebuiltin('ReloadSkin') def testUptobox(key): url = 'https://%s/api/user/me?token=' %__addon__.getSetting("extupto") + key headers = {'Accept': 'application/json'} try: data = requests.get(url, headers=headers).json() status = data["message"] validite = data["data"]["premium_expire"] except: status = "out" validite = "" return status, validite def testAlldebrid(key): url = 'https://api.alldebrid.com/v4/user?agent=myAppName&apikey=' + key #try: data = requests.get(url).json() notice(data) status = data["status"] validate = data["data"]["user"]["premiumUntil"] if int(validate) > 0: validate = datetime.fromtimestamp(validate) else: validate = "Validation expirée" #except: # status = "out" # validate = False return status, validate def assistant(): # key debrideurs a = 0 while a < 5: ApikeyAlldeb, ApikeyRealdeb, ApikeyUpto = getkeyAlldebrid(), getkeyRealdebrid(), getkeyUpto() if not ApikeyUpto and not ApikeyRealdeb and not ApikeyAlldeb: dialog = xbmcgui.Dialog() resume = dialog.yesno('Config', 'As-tu un numéro de config?') if resume: dialog = xbmcgui.Dialog() d = dialog.input("Num 
config", type=xbmcgui.INPUT_ALPHANUM) if d: __addon__.setSetting("rentry", d) importConfigHK() else: configKeysApi() else: keyOk = False if ApikeyAlldeb: ok, validite = testAlldebrid(ApikeyAlldeb) if ok == "success": keyOk = True showInfoNotification("Key Alldebrid Ok! expire: %s" %validite) else: showInfoNotification("Key Alldebrid out!") __addon__.setSetting(id="keyalldebrid", value="") if ApikeyUpto: ok, validite = testUptobox(ApikeyUpto) if ok == "Success": keyOk = True showInfoNotification("Key Upto ok! expire: %s" %validite) else: showInfoNotification("Key Upto out!") __addon__.setSetting(id="keyupto", value="") if ApikeyRealdeb: keyOk = True if keyOk: break a += 1 if keyOk: if not __addon__.getSetting("numhk"): dialog = xbmcgui.Dialog() d = dialog.input("Num DB HK ex:zmdeo", type=xbmcgui.INPUT_ALPHANUM) if d: __addon__.setSetting("numhk", d) else: return False importDatabase("autonome") return True else: return False def vuMovieTrakt(params): numId = int(params["u2p"]) trk = actifTrakt() trk.gestionWatchedHistory(numId=[numId], typM="movie", mode="add") def correctCertif(params): widget.correctCertif(params["u2p"], params["typM"]) def newUptoPublic(params): folder = __addon__.getSetting("poissFolder") hsh = __addon__.getSetting("poissHash") sql = "SELECT DISTINCT typM FROM repos WHERE repo='poissonnerie' ORDER BY typM" try: tabFiles = uptobox.extractMedias(sql=sql, unique=1) except: tabFiles = [] xbmcplugin.setPluginCategory(__handle__, "Choix U2Pplay") xbmcplugin.setContent(__handle__, 'files') listeChoix = [(x, {"action":"AffCatPoiss", "offset":"0", "typM": x}, "liste.png", "Catégories %s de ma poissonerie !!"%x) for x in tabFiles] listeChoix.append(("Update ou création", {"action":"insertrepo", "repo":"poissonnerie", "folder": folder, "hash": hsh}, "liste.png", "Création ou update de ma poissonnerie!!")) listeChoix.append(("Recherche", {"action":"recherepo", "repo":"poissonnerie"}, "liste.png", "Recherche dans ma poissonnerie!!")) 
listeChoix.append(("Vider la poissonnerie", {"action":"reinitPoissonnerie"}, "liste.png", "Vider la poissonnerie!!")) for choix in listeChoix: addDirectoryItemLocal(choix[0], isFolder=True, parameters=choix[1], picture=choix[2], texte=choix[3]) xbmcplugin.endOfDirectory(handle=__handle__, succeeded=True) def recherRepo(params): dialog = xbmcgui.Dialog() d = dialog.input("Recherche (mini 3 lettres)", type=xbmcgui.INPUT_ALPHANUM, defaultt="") if len(d) > 2: sql = "SELECT DISTINCT nom, lien FROM repos WHERE repo='poissonnerie' AND (normalizeTitle(title) LIKE normalizeTitle({}) OR normalizeTitle(nom) LIKE normalizeTitle({}) ) ORDER BY id ASC"\ .format("'%" + str(d).replace("'", "''") + "%'", "'%" + str(d).replace("'", "''") + "%'") tab = uptobox.extractMedias(sql=sql) medias = uptobox.ventilationType(tab) uptobox.affUptoboxNews("movie", [x[1:] for x in medias]) def newUptoPublic2(params): limit = __addon__.getSetting("nbupto") offset = int(params["offset"]) typM = params["typM"] if typM == "film": sql = "SELECT DISTINCT numId FROM repos WHERE repo='poissonnerie' AND typM='{}' ORDER BY id ASC LIMIT {} OFFSET {}".format(typM, limit, offset) tab = uptobox.extractMedias(sql=sql, unique=1) tabFiles = [(x, "*".join(uptobox.extractMedias(sql="SELECT lien FROM repos WHERE repo='poissonnerie' AND typM='{}' AND numId={}".format(typM, x), unique=1))) for x in tab] medias = uptobox.getFilmsUptoNews(tabFiles) uptobox.affUptoboxNews("movie", [x[1:] for x in medias], params, cr=1) elif typM == "divers": sql = "SELECT DISTINCT nom FROM repos WHERE repo='poissonnerie' AND typM='{}' ORDER BY id ASC LIMIT {} OFFSET {}".format(typM, limit, offset) tab = uptobox.extractMedias(sql=sql, unique=1) tabFiles = [(x, "*".join(uptobox.extractMedias(sql="SELECT lien FROM repos WHERE repo='poissonnerie' AND typM='{}' AND nom='{}'".format(typM, x.replace("'", "''")), unique=1))) for x in tab] medias = [(0, x[0], "", 0, "", 0, "", "", x[1], "", 0, 0) for x in tabFiles] uptobox.affUptoboxNews("movie", 
[x[1:] for x in medias], params, cr=1) elif typM == "serie": sql = "SELECT DISTINCT numId FROM repos WHERE repo='poissonnerie' AND typM='{}' ORDER BY id ASC LIMIT {} OFFSET {}".format(typM, limit, offset) tab = uptobox.extractMedias(sql=sql, unique=1) medias = uptobox.getSeriesUptoNews(tab) uptobox.affUptoboxNewsSerie("movie", [x[1:] for x in medias], params,) def affSaisonUptoPoiss(params): numId = params["u2p"] sql = "SELECT DISTINCT saison FROM repos WHERE repo='poissonnerie' AND numId={} and typM='serie' ORDER BY saison".format(numId) tabFiles = uptobox.extractMedias(sql=sql, unique=1) params["tabsaison"] = "*".join([str(x) for x in tabFiles]) uptobox.loadSaisonsUpto(params) def visuEpisodesUptoPoiss(params): numId = params["u2p"] saison = params["saison"] sql = "SELECT DISTINCT episode FROM repos WHERE repo='poissonnerie' AND typM='serie' AND saison={} AND numId={} ORDER BY episode".format(saison, numId) tab = uptobox.extractMedias(sql=sql, unique=1) tabFiles = [(x, "*".join(uptobox.extractMedias(sql="SELECT lien FROM repos WHERE repo='poissonnerie' AND typM='serie' AND numId={} AND saison={} AND episode={} "\ .format(numId, saison, x), unique=1))) for x in tab] if "release" in params.keys(): uptobox.affEpisodesUptoPoiss(numId, saison, tabFiles, params["release"]) else: uptobox.affEpisodesUptoPoissRelease(numId, saison, tabFiles) def majHkcron(): threading.Thread(name="maj", target=scraperUPTO.majHkNewStart).start() def intmajbann15(): #init addon addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") #Change valeur addon.setSetting(id="intmaj", value="15") addon.setSetting(id="delaimaj", value="0") #recup valeur intmaj intmaj = addon.getSetting("intmaj") #recup delaimaj delaimaj = addon.getSetting("delaimaj") # si vide if intmaj: dialog = xbmcgui.Dialog() d = dialog.input("Intervalle des Maj en minutes: [0|5|15|30|45|60|120|240]", type=xbmcgui.INPUT_ALPHANUM) if d: intmaj = d addon.setSetting(id="intmaj", value=d.strip()) else: return # si delaimaj vide if 
delaimaj: dialog = xbmcgui.Dialog() d = dialog.input("Délai de la 1ère Maj en minutes: Au démarrage de Kodi", type=xbmcgui.INPUT_ALPHANUM) if d: delaimaj = d addon.setSetting(id="delaimaj", value=d.strip()) else: return #notice(intmaj) #notice(delaimaj) showInfoNotification(intmaj + " " + delaimaj) def rskin2(): #init addon addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") #Change valeur addon.setSetting(id="rskin", value="true") def rskin3(): #init addon addon = xbmcaddon.Addon("plugin.video.sendtokodiU2P") #Change valeur addon.setSetting(id="rskin", value="false") def importBDhk3(): cr = Crypt() filecode = __addon__.getSetting("numdatabase") hebergDB = __addon__.getSetting("hebergdb") if len(filecode) == 12: link = cr.urlBase + filecode else: link = cr.url + "/?" + filecode #notice(link) ApikeyAlldeb = getkeyAlldebrid() ApikeyRealdeb = getkeyRealdebrid() Apikey1fichier = getkey1fichier() ApikeyDarkibox = __addon__.getSetting("keydarkibox") if ApikeyAlldeb: linkD, ok = cr.resolveLink(link, ApikeyAlldeb) else: if hebergDB == "1Fichier": linkD, ok = cr.resolveLink(link, Apikey1fichier) else: dictLiens = cr.debridDarkibox(link.split("/")[-1], ApikeyDarkibox) linkD = dictLiens["o"][0] #notice(linkD) r = requests.get(linkD, timeout=3) open(os.path.join(__repAddonData__, "combine.bd"), 'wb').write(r.content) time.sleep(0.2) loadhk3.joinBlocker() showInfoNotification("import et fusion BD Ok...") def gestiondbhk3(): cr = Crypt() Apikey1fichier = getkey1fichier() ApikeyDarkibox = __addon__.getSetting("keydarkibox") hebergDB = __addon__.getSetting("hebergdb") chemin = xbmcvfs.translatePath("special://home/userdata/addon_data/plugin.video.sendtokodiU2P/") bd = "mediasNew.bd" bdSauve = "mediasNewSauve.bd" dialog = xbmcgui.Dialog() ret = dialog.yesno('Gestion', 'Opération ?', nolabel="Sauvegarde", yeslabel="Restauration") if not ret: ret = dialog.yesno('Sauvegarde', 'type ?', nolabel="Local + 1fichier/darkibox", yeslabel="Local") if not ret: #compte 
xbmcvfs.copy(os.path.join(chemin, bd), os.path.join(chemin, bdSauve)) if hebergDB == "1Fichier": url = cr.upload1fichier(os.path.join(chemin, bd), Apikey1fichier) numDB = url.split("?")[1].split("/")[-1] else: numDB = cr.uploadDarkibox(os.path.join(chemin, bd), ApikeyDarkibox) #notice(numDB) __addon__.setSetting(id="numdatabase", value=numDB) showInfoNotification("Sauvegarde Ok...") else: xbmcvfs.copy(os.path.join(chemin, bd), os.path.join(chemin, bdSauve)) showInfoNotification("Sauvegarde Ok...") else: if os.path.isfile(os.path.join(chemin, bdSauve)): xbmcvfs.copy(os.path.join(chemin, bdSauve), os.path.join(chemin, bd)) showInfoNotification("Restauration Ok...") else: showInfoNotification("Restauration Ko , fait sauvegarde avant...") def detailmediatheque(): sql = "SELECT COUNT(*) FROM filmsPub" nbFilms = createbdhk.extractMedias(sql=sql) sql = "SELECT COUNT(*) FROM seriesPub" nbSeries = createbdhk.extractMedias(sql=sql) showInfoNotification("%d type film, %d type serie" %(nbFilms[0][0], nbSeries[0][0])) def router(paramstring): params = dict(parse_qsl(paramstring)) dictActions = { # player 'play': (playMedia, params), 'playHK': (playMediaHK, params), 'playHKEpisode': (playEpisode, params), # u2p local 'os': (selectOS, ""), 'apiConf': (configKeysApi, ""), 'clearStrms': (makeStrms, 1), 'ePaste': (editPaste, ""), 'groupe': (creaGroupe, ""), # config kodi 'thmn': (editNbThumbnails, ""), 'resos': (editResos, ''), 'rlk': (reloadSkin, ""), # database 'bd': (importDatabase, ""), 'bdauto': (importDatabase, "autonome"), 'maj': (majDatabase, ""), 'delDta': (delDATABASE, ""), 'patch': (patchNextUp, ""), 'bdepg': (importDatabase, "epg"), # listes 'choixrepo': (choixRepo, ""), 'choixliste': (choixliste, ""), 'createL': (createWidg, ""), 'supL': (supWidg, ""), 'createLV': (createListeV, ""), 'createLT': (createListeT, ""), 'suppLV': (widget.suppListeTV, ""), 'suppLT': (widget.suppListeT, ""), "affTraktPerso": (affTrakt, ""), "createLP": (createListeLP, ""), "affPastebin": 
(affPastebin, ""), 'suppLP': (widget.suppListeLP, ""), "createLTMDB": (createLTMDB, ""), "affTmdb": (affTmdb, ""), "suppLTMDB": (widget.suppLTMDB, ""), "createRUPTO": (createRUPTO, ""), "createRUPTOP": (createRUPTOP, ""), "suppLUPTO": (widget.suppLUPTO, ""), # HK 'MenuFilm': (mediasHK, ""), 'MenuDivers': (diversHK, ""), 'MenuSerie': (seriesHK, ""), 'detailM': (detailsMedia, params), 'detailT': (detailsTV, params), "afficheLiens": (affLiens2, params), "suggest": (loadSimReco2, params), "affActeurs": (affCast2, params), "ba": (getBa, params), "visuEpisodes": (affEpisodes2, params), "filtres": (filtres, params), 'movies': (ventilationHK, ""), 'supView': (supView, params), 'fav': (gestionFavHK, params), "visuFenmovie": (fenMovie, params), "genres": (genres, params), "impHK": (importConfigHK, ""), "gestionMedia": (gestionMedia, params), 'MenuTrakt': (traktHKventilation, ""), "vuMovieTrakt": (vuMovieTrakt, params), "affSearch": (affSearch, ""), "affGlobal": (affGlobal, ""), "affSearchCast": (affSearchCast, ""), "correctCertif": (correctCertif, params), "affUpto": (affUpto, ""), "loadUpto": (uptobox.loadUpto, params), "loadUptoP": (uptobox.loadUptoP, params), "newUpto": (uptobox.newUpto, params), "playMediaUptobox": (uptobox.playMediaUptobox, params), "rechercheUpto": (uptobox.searchUpto, params), "loadUptoSerie": (uptobox.loadSeriesUpto, params), "affSaisonUpto": (uptobox.loadSaisonsUpto, params), "visuEpisodesUpto": (uptobox.affEpisodesUpto, params), "affAlldeb": (affAlldeb, ""), "magnets": (uptobox.magnets, params), "histoupto": (uptobox.getHistoUpto, __database__), "listeAll": (uptobox.listeAllded, params), "affNewsUpto": (newUptoPublic, params), "addcompte": (addCompteUpto, params), "AffCatPoiss": (newUptoPublic2, params), "affSaisonUptoPoiss": (affSaisonUptoPoiss, params), "visuEpisodesUptoPoiss": (visuEpisodesUptoPoiss, params), "delcompte": (delcompte, params), "affdetailfilmpoiss": (uptobox.detailFilmPoiss, params), "cryptFolder": (scraperUPTO.cryptFolder, ""), 
"affGlobalHK2": (createbdhk.affGlobal, ""), "feninfo": (fenInfo, params), "delView": (delView, params), "supFavHK": (supFavHK, params),'MenuFilmHK': (createbdhk.mediasHKFilms, params), 'MenuSerieHK': (createbdhk.mediasHKSeries, params), #strm 'strms': (makeStrms, ""), "strmSelectWidget" : (configureSTRM,""), 'strmsc': (makeStrms, 1), #profils 'ajoutP': (ajoutProfil, ""), 'choixP': (choixProfil, ""), 'suppP': (suppProfil, ""), 'actifP': (actifProfil, params), 'actifPm': (choixProfil, 1), "affProfils":( affProfils, ""), "assistant": (assistant, ""), #audiobook 'MenuAudio':(audioHK, ""), #skin 'choixskin': (choixSkin, ""), # repo "insertrepo": (scraperUPTO.createRepo, params), "recherepo": (recherRepo, params), "gestionMajRepSerie": (scraperUPTO.updateSeriesRepCR, ""), "gestionMajRep": (uptobox.majRepsFilms, ""), "folderPubDetails": (createbdhk.detailsmenuRepCrypte, params), "folderPub": (uptobox.loadFoldersPub, params),"affFoldercryptDivers": (uptobox.loadFolderCryptDivers, params), "reinitPoissonnerie": (scraperUPTO.reinitPoissonnerie, ''), #pastebin "pastepastebin": (createbdhk.menu, ""), "repopastebin": (createbdhk.createRepo, ""), "affRepoPaste": (createbdhk.affRepo, params), "affSaisonPastebin": (createbdhk.affSaisonPastebin, params), "visuEpisodesPastebin": (createbdhk.visuEpisodesPastebin, params), "folderPastebin": (scraperUPTO.ajoutFoldercr, params), "menuPastebin": (createbdhk.menuPastebin, ""), "menuRepCrypte": (createbdhk.menuRepCrypte, ""), #newHK "mediasHKFilms": (createbdhk.mediasHKFilms, params), "majHkNew": (scraperUPTO.majHkNew, ''), "genresHK": (createbdhk.genresHK, params), "mediasHKSeries": (createbdhk.mediasHKSeries, params), "suiteSerieHK": (createbdhk.suiteSerie, params), "suiteSerieHK2": (createbdhk.suiteSerie2, ""), "lockRepHK": (uptobox.lockRep, params), "tmdbSerie": (createbdhk.tmdbSerie, params), "majhkneww": (scraperUPTO.majHkNew, ''), "findf": (createbdhk.rechercheFilm, params), "findss": (createbdhk.rechercheSerie, params), "findc": 
(affSearchCast, params), "mepautostart": (mepAutoStart, ""), "affbaext": (createbdhk.affbaext, ""), "affbacat": (createbdhk.affbacat, params), "playMediabaext": (uptobox.playMediabaext, params), "affbacattmdb": (createbdhk.affbacattmdb, params), "updateba": (importDatabase, "ba"), "rskin2": (rskin2, ''), "rskin3": (rskin3, ''), "intmajbann15": (intmajbann15, ''), "majhkcron": (majHkcron, ''), "mepautostart2": (mepAutoStart2, ""), #hk3 "loadhk3": (loadhk3.getLinks, ""), "resetBDhkNew":(loadhk3.resetBdFull, ""), "affSaisonUptofoldercrypt": (uptobox.loadSaisonsHK3, params), "visuEpisodesFolderCrypt": (uptobox.affEpisodesHK3, params), "loaddbhk3": (importBDhk3, ""), "suiteSerie": (suiteSerie, ""), 'vuNonVu': (gestionVuSaison, params), "gestiondb": (gestiondbhk3, ""), "loadhk3v": (loadhk3.getLinks, 1), "detailmediatheque": (detailmediatheque, ""), } if vIPTV: dictActionsIPTV = { "iptvLoad": (iptv.menu, ""), "affChaine": (iptv.affChaines, params), "playMediaIptv": (iptv.playMedia, params), "ajoutIPTV": (iptv.ajoutIPTV, ""), "loadF": (iptv.menuFournisseur, params), "activemac": (iptv.activeMac, params), "gestfourn": (iptv.gestfourn, params), "lock": (iptv.lock, params), "affepgChann": (iptv.affepgChann, params), "mapepg": (iptv.mapEpg, params), "gestFuseau": (iptv.gestFuseau, params), "getVod": (iptv.getVodSeries, params), "affVod": (iptv.affVod, params), "gestfournVod": (iptv.gestfournVod, params), "affEpisodes": (iptv.affEpisodes, params), "retireriptv": (iptv.retireriptv, ""), "delDB": (iptv.removeDB, ""), "IPTVbank":(iptv.IPTVbank, ""), "addFavIptv": (iptv.addFavIptv, params), "IPTVfav": (iptv.IPTVfav, ""), "iptvsupfav": (iptv.supfavIptv, params), "iptvdepfav": (iptv.iptvdepfav, params), "iptvreplay": (iptv.replay, params), "loadFX": (iptv.loadX, params), "affChainex": (iptv.affChainesx, params), "fepgx": (iptv.forceMajEpgX, ""), "menus": (iptv.menuStalker, ""), "menux": (iptv.menuXtream, ""), "loadFTV": (iptv.load, params), "searchVod": (iptv.searchVod, params), 
"searchVodf": (iptv.searchVod2, params), "loadXitv": (iptv.loadXitv, params), "loadXvod": (iptv.loadXvod, params), "affVodx": (iptv.affVodx, params)} dictActions.update(dictActionsIPTV) notice(len(dictActions)) if params: fn = params['action'] if fn in dictActions.keys(): argv = dictActions[fn][1] if argv: dictActions[fn][0](argv) else: dictActions[fn][0]() elif fn == 'setting': xbmcaddon.Addon().openSettings() else: raise ValueError('Invalid paramstring: {0}!'.format(paramstring)) else: menuPbi() if __name__ == '__main__': nameExploit = sys.platform __addon__ = xbmcaddon.Addon("plugin.video.sendtokodiU2P") #notice(nameExploit) # Get the plugin url in plugin:// notation. __url__ = sys.argv[0] # Get the plugin handle as an integer number. __handle__ = int(sys.argv[1]) # database video kodi bdKodis = ["MyVideos119.db", "MyVideos121.db", "MyVideos122.db", "MyVideos123.db"] for bdKodi in bdKodis: if os.path.isfile(xbmcvfs.translatePath("special://home/userdata/Database/%s" %bdKodi)): __database__ = xbmcvfs.translatePath("special://home/userdata/Database/%s" %bdKodi) #break #Deprecated xbmc.translatePath. 
Moved to xbmcvfs.translatePath __repAddon__ = xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/") __repAddonData__ = xbmcvfs.translatePath("special://home/userdata/addon_data/plugin.video.sendtokodiU2P") __keyTMDB__ = getkeyTMDB() __params__ = dict(parse_qsl(sys.argv[2][1:])) # # assistant """ if __addon__.getSetting("actifhk") != "false": if not os.path.exists(xbmcvfs.translatePath('special://home/userdata/addon_data/plugin.video.sendtokodiU2P')): os.makedirs(xbmcvfs.translatePath('special://home/userdata/addon_data/plugin.video.sendtokodiU2P')) plusProfil = False if not os.path.isfile(xbmcvfs.translatePath("special://home/addons/plugin.video.sendtokodiU2P/medias.bd")): widget.initBookmark() if assistant(): plusProfil = True else: sys.exit() if plusProfil: liste = widget.usersBookmark() if not liste: ajoutProfil(initP=1) xbmcgui.Dialog().ok("Configuration" , "Config Ok !!\nUn petit merci aux contributeurs est toujours le bienvenu\nBon film....") """ if not os.path.isfile(__repAddon__ + "service.txt"): with open(__repAddon__ + "service.txt", "w") as f: mepAutoStart2() createFav() #notice(pyVersion) #notice(pyVersionM) #xbmc.executebuiltin("InstallFromZip") #notice(sys.version_info) #notice(__url__) #notice(__handle__) router(sys.argv[2][1:]) #Setting most video properties through ListItem.setInfo() is deprecated and might be removed in future Kodi versions. Please use the respective setter in InfoTagVideo.
osmoze06/repo.weebox
repo/plugin.video.sendtokodiU2P/service.py
service.py
py
291,195
python
en
code
2
github-code
6
[ { "api_name": "sys.version_info", "line_number": 54, "usage_type": "attribute" }, { "api_name": "sys.version_info", "line_number": 55, "usage_type": "attribute" }, { "api_name": "xbmcvfs.translatePath", "line_number": 82, "usage_type": "call" }, { "api_name": "xbm...
36621325200
import pygame import sys from moviepy.editor import VideoFileClip from PIL import Image pygame.init() music_background = pygame.mixer.music.load("assets/LostCompanionTomboFry.mp3") pygame.mixer.music.play() pygame.mixer.music.set_volume(0.2) lar = 550 hut = 700 screen = pygame.display.set_mode((lar, hut)) pygame.display.set_caption("Menu") gif_path = "assets/bg.gif" clip = VideoFileClip(gif_path) fps = clip.fps frames = [] for t in range(0, int(clip.duration * fps)): frame = clip.get_frame(t / fps) pil_image = Image.fromarray((frame * 255).astype('uint8')) pil_image = pil_image.resize((lar, hut)) pygame_image = pygame.image.fromstring(pil_image.tobytes(), pil_image.size, pil_image.mode) frames.append(pygame_image) # Carregar recursos do menu antecipadamente fonte = pygame.font.Font(None, 30) texto_play = fonte.render("Play", True, (0, 0, 0)) texto_quit = fonte.render("Quit", True, (0, 0, 0)) Title = fonte.render("Pythongoras-Game", True, (255, 255, 255)) def mostrar_menu(): frame_index = 0 clock = pygame.time.Clock() while True: for evento in pygame.event.get(): if evento.type == pygame.QUIT: pygame.quit() sys.exit() elif evento.type == pygame.MOUSEBUTTONDOWN: if batom_play.collidepoint(evento.pos): pygame.time.delay(100) iniciar_jogo() elif batom_quit.collidepoint(evento.pos): pygame.quit() sys.exit() screen.blit(frames[frame_index], (0, 0)) batom_Title = pygame.Rect(190, 100 + 50, 150, 50) pos_text_Title = Title.get_rect(center=batom_Title.center) screen.blit(Title, pos_text_Title) batom_play = pygame.Rect(lar/2 - 75, hut/2 + 50, 150, 50) pygame.draw.rect(screen, (255, 255, 255), batom_play) pos_text_play = texto_play.get_rect(center=batom_play.center) screen.blit(texto_play, pos_text_play) if batom_play.collidepoint(pygame.mouse.get_pos()): pygame.draw.rect(screen, (200, 200, 200), batom_play) batom_quit = pygame.Rect(lar/2 - 75, hut/2 + 140, 150, 50) pygame.draw.rect(screen, (255, 255, 255), batom_quit) pos_text_quit = 
texto_quit.get_rect(center=batom_quit.center) screen.blit(texto_quit, pos_text_quit) if batom_quit.collidepoint(pygame.mouse.get_pos()): pygame.draw.rect(screen, (200, 200, 200), batom_quit) pygame.display.flip() frame_index = (frame_index + 1) % len(frames) clock.tick(fps) def iniciar_jogo(): print("O jogo começou!") import Chose mostrar_menu()
RuFiripo/Pythongoras-Game
menu.py
menu.py
py
2,712
python
en
code
0
github-code
6
[ { "api_name": "pygame.init", "line_number": 6, "usage_type": "call" }, { "api_name": "pygame.mixer.music.load", "line_number": 8, "usage_type": "call" }, { "api_name": "pygame.mixer", "line_number": 8, "usage_type": "attribute" }, { "api_name": "pygame.mixer.music...
73765860989
import asyncio import collections import contextlib import datetime import functools import io import multiprocessing import multiprocessing.pool import os import signal import tempfile from aiohttp import web import marshmallow from oslo_config import cfg from oslo_log import log LOG = log.getLogger(__name__) CONF = cfg.CONF UploadedFile = collections.namedtuple( "UploadedFile", ("name", "filename", "content_type", "original_filename") ) """Class to hold uploaded field metadata when passed to model's methods .. py:attribute:: name Name of the argument where this file is being sent. .. py:attribute:: filename Complete file path to the temporary file in the filesystem, .. py:attribute:: content_type Content-type of the uploaded file .. py:attribute:: original_filename Filename of the original file being uploaded. """ ReturnedFile = collections.namedtuple( "ReturnedFile", ("name", "filename", "content_type", "original_filename") ) """Class to pass the files returned from predict in a pickable way .. py:attribute:: name Name of the argument where this file is being sent. .. py:attribute:: filename Complete file path to the temporary file in the filesystem, .. py:attribute:: content_type Content-type of the uploaded file .. py:attribute:: original_filename Filename of the original file being uploaded. """ # set defaults to None, mainly for compatibility (vkoz) UploadedFile.__new__.__defaults__ = (None, None, None, None) ReturnedFile.__new__.__defaults__ = (None, None, None, None) class ModelWrapper(object): """Class that will wrap the loaded models before exposing them. Whenever a model is loaded it will be wrapped with this class to create a wrapper object that will handle the calls to the model's methods so as to handle non-existent method exceptions. 
:param name: Model name :param model: Model object :raises HTTPInternalServerError: in case that a model has defined a response schema that is not JSON schema valid (DRAFT 4) """ def __init__(self, name, model_obj, app=None): self.name = name self.model_obj = model_obj self._app = app self._loop = asyncio.get_event_loop() self._workers = CONF.workers self._executor = self._init_executor() if self._app is not None: self._setup_cleanup() schema = getattr(self.model_obj, "schema", None) if isinstance(schema, dict): try: schema = marshmallow.Schema.from_dict( schema, name="ModelPredictionResponse" ) self.has_schema = True except Exception as e: LOG.exception(e) raise web.HTTPInternalServerError( reason=("Model defined schema is invalid, " "check server logs.") ) elif schema is not None: try: if issubclass(schema, marshmallow.Schema): self.has_schema = True except TypeError: raise web.HTTPInternalServerError( reason=("Model defined schema is invalid, " "check server logs.") ) else: self.has_schema = False self.response_schema = schema def _setup_cleanup(self): self._app.on_cleanup.append(self._close_executors) async def _close_executors(self, app): self._executor.shutdown() def _init_executor(self): n = self._workers executor = CancellablePool(max_workers=n) return executor @contextlib.contextmanager def _catch_error(self): name = self.name try: yield except AttributeError: raise web.HTTPNotImplemented( reason=("Not implemented by underlying model (loaded '%s')" % name) ) except NotImplementedError: raise web.HTTPNotImplemented( reason=("Model '%s' does not implement this functionality" % name) ) except Exception as e: LOG.error( "An exception has happened when calling method on " "'%s' model." % name ) LOG.exception(e) if isinstance(e, web.HTTPException): raise e else: raise web.HTTPInternalServerError(reason=e) def validate_response(self, response): """Validate a response against the model's response schema, if set. 
If the wrapped model has defined a ``response`` attribute we will validate the response that :param response: The response that will be validated. :raises exceptions.InternalServerError: in case the reponse cannot be validated. """ if self.has_schema is not True: raise web.HTTPInternalServerError( reason=( "Trying to validate against a schema, but I do not " "have one defined" ) ) try: self.response_schema().load(response) except marshmallow.ValidationError as e: LOG.exception(e) raise web.HTTPInternalServerError( reason="ERROR validating model response, check server logs." ) except Exception as e: LOG.exception(e) raise web.HTTPInternalServerError( reason="Unknown ERROR validating response, check server logs." ) return True def get_metadata(self): """Obtain model's metadata. If the model's metadata cannot be obtained because it is not implemented, we will provide some generic information so that the call does not fail. :returns dict: dictionary containing model's metadata """ try: d = self.model_obj.get_metadata() except (NotImplementedError, AttributeError): d = { "id": "0", "name": self.name, "description": ( "Could not load description from " "underlying model (loaded '%s')" % self.name ), } return d def _run_in_pool(self, func, *args, **kwargs): fn = functools.partial(func, *args, **kwargs) ret = self._loop.create_task(self._executor.apply(fn)) return ret async def warm(self): """Warm (i.e. load, initialize) the underlying model. This method is called automatically when the model is loaded. You should use this method to initialize the model so that it is ready for the first prediction. The model receives no arguments. 
""" try: func = self.model_obj.warm except AttributeError: LOG.debug("Cannot warm (initialize) model '%s'" % self.name) return try: n = self._workers LOG.debug("Warming '%s' model with %s workers" % (self.name, n)) fs = [self._run_in_pool(func) for _ in range(0, n)] await asyncio.gather(*fs) LOG.debug("Model '%s' has been warmed" % self.name) except NotImplementedError: LOG.debug("Cannot warm (initialize) model '%s'" % self.name) @staticmethod def predict_wrap(predict_func, *args, **kwargs): """Wrapper function to allow returning files from predict This wrapper exists because buffer objects are not pickable, thus cannot be returned from the executor. """ ret = predict_func(*args, **kwargs) if isinstance(ret, io.BufferedReader): ret = ReturnedFile(filename=ret.name) return ret def predict(self, *args, **kwargs): """Perform a prediction on wrapped model's ``predict`` method. :raises HTTPNotImplemented: If the method is not implemented in the wrapper model. :raises HTTPInternalServerError: If the call produces an error :raises HTTPException: If the call produces an error, already wrapped as a HTTPException """ for key, val in kwargs.items(): if isinstance(val, web.FileField): fd, name = tempfile.mkstemp() fd = os.fdopen(fd, "w+b") fd.write(val.file.read()) fd.close() aux = UploadedFile( name=val.name, filename=name, content_type=val.content_type, original_filename=val.filename, ) kwargs[key] = aux # FIXME(aloga); cleanup of tmpfile here with self._catch_error(): return self._run_in_pool( self.predict_wrap, self.model_obj.predict, *args, **kwargs ) def train(self, *args, **kwargs): """Perform a training on wrapped model's ``train`` method. :raises HTTPNotImplemented: If the method is not implemented in the wrapper model. 
:raises HTTPInternalServerError: If the call produces an error :raises HTTPException: If the call produces an error, already wrapped as a HTTPException """ with self._catch_error(): return self._run_in_pool(self.model_obj.train, *args, **kwargs) def get_train_args(self): """Add training arguments into the training parser. :param parser: an argparse like object This method will call the wrapped model ``add_train_args``. """ try: args = self.model_obj.get_train_args() except (NotImplementedError, AttributeError): args = {} return args def get_predict_args(self): """Add predict arguments into the predict parser. :param parser: an argparse like object This method will call the wrapped model ``get_predict_args``. """ try: args = self.model_obj.get_predict_args() except (NotImplementedError, AttributeError): args = {} return args class NonDaemonProcess(multiprocessing.context.SpawnProcess): """Processes must use 'spawn' instead of 'fork' (which is the default in Linux) in order to work CUDA [1] or Tensorflow [2]. 
[1] https://pytorch.org/docs/stable/notes/multiprocessing.html #cuda-in-multiprocessing [2] https://github.com/tensorflow/tensorflow/issues/5448 #issuecomment-258934405 """ @property def daemon(self): return False @daemon.setter def daemon(self, value): pass class NonDaemonPool(multiprocessing.pool.Pool): # Based on https://stackoverflow.com/questions/6974695/ def Process(self, *args, **kwds): # noqa proc = super(NonDaemonPool, self).Process(*args, **kwds) proc.__class__ = NonDaemonProcess return proc class CancellablePool(object): def __init__(self, max_workers=None): self._free = {self._new_pool() for _ in range(max_workers)} self._working = set() self._change = asyncio.Event() def _new_pool(self): return NonDaemonPool(1, context=multiprocessing.get_context("spawn")) async def apply(self, fn, *args): """ Like multiprocessing.Pool.apply_async, but: * is an asyncio coroutine * terminates the process if cancelled """ while not self._free: await self._change.wait() self._change.clear() pool = usable_pool = self._free.pop() self._working.add(pool) loop = asyncio.get_event_loop() fut = loop.create_future() def _on_done(obj): ret = {"output": obj, "finish_date": str(datetime.datetime.now())} loop.call_soon_threadsafe(fut.set_result, ret) def _on_err(err): loop.call_soon_threadsafe(fut.set_exception, err) pool.apply_async(fn, args, callback=_on_done, error_callback=_on_err) try: return await fut except asyncio.CancelledError: # This is ugly, but since our pools only have one slot we can # kill the process before termination try: pool._pool[0].kill() except AttributeError: os.kill(pool._pool[0].pid, signal.SIGKILL) pool.terminate() usable_pool = self._new_pool() finally: self._working.remove(pool) self._free.add(usable_pool) self._change.set() def shutdown(self): for p in self._working: p.terminate() self._free.clear()
indigo-dc/DEEPaaS
deepaas/model/v2/wrapper.py
wrapper.py
py
12,808
python
en
code
31
github-code
6
[ { "api_name": "oslo_log.log.getLogger", "line_number": 19, "usage_type": "call" }, { "api_name": "oslo_log.log", "line_number": 19, "usage_type": "name" }, { "api_name": "oslo_config.cfg.CONF", "line_number": 21, "usage_type": "attribute" }, { "api_name": "oslo_co...
8407981184
from abc import ABC, abstractmethod import threading import boto3 import botocore import sys import logging import logging.config from enum import Enum from itertools import cycle from botocore.config import Config from botocore.endpoint import MAX_POOL_CONNECTIONS from collections.abc import Iterable class AWS_SVC_BASE(ABC): ''' Represent an AWS service that contain multiple resources(workers) ''' aws_config = Config( retries=dict( total_max_attempts=25, mode='adaptive' ), max_pool_connections=MAX_POOL_CONNECTIONS, ) def __init__(self, svc_type, session, svc_config): if not isinstance(session, boto3.Session): logging.error('session must be of type boto3.Session') raise(ValueError) if not isinstance(svc_type, AWS_SVC_TYPE): logging.error('svc_type must be of type AWS_SVC_TYPE') raise(ValueError) if not isinstance(svc_config, dict): logging.error('svc_config must be of type AWS_SVC_TYPE') raise(ValueError) self.session = session self.account_id = 0 self.service_type = svc_type self.svc_config = svc_config self.rsc_prefix = svc_config['resource_prefix'] self._key_lock = threading.Lock() self.worker_cycle = cycle(list()) super().__init__() @abstractmethod def get_existing_workers(self): ''' Query the existing workers based on the rsc_prefix ''' # pass @abstractmethod def create_workers(self): ''' Create workers/resources of this service ''' # pass @abstractmethod def delete_workers(self): ''' Delete the workers created by create_workers() function ''' # pass @abstractmethod def _check_existing_identity(self, identiy_arn): ''' Check if identiy_arn exists in AWS ''' # pass def check_existing_user(self, aws_id, target_user, aws_partition = 'aws'): ''' Check if the target_user exists in AWS account aws_id ''' user_arn = 'arn:{}:iam::{}:user/{}'.format(aws_partition, aws_id, target_user) return self._check_existing_identity(user_arn) def check_existing_role(self, aws_id, target_role, aws_partition = 'aws'): ''' Check if the target_role exists in AWS account aws_id ''' 
role_arn = 'arn:{}:iam::{}:role/{}'.format(aws_partition, aws_id, target_role) return self._check_existing_identity(role_arn) def precheck(self): ''' Check if there is at least one resrouce to perform the test ''' # If no object is in the cycle, the default value None will be returned if next(self.worker_cycle, None) is None: return False return True def _get_next_worker(self): with self._key_lock: try: return next(self.worker_cycle) except StopIteration: logging.error('Empty worker cycle') return None def _set_worker_cycle(self, iterable_obj): if not isinstance(iterable_obj, Iterable): logging.error('set_worker_cycle function expects an Iterable input') return self.worker_cycle = cycle(iterable_obj) def _check_boto3_response(self, resp): return 'ResponseMetadata' in resp and resp['ResponseMetadata']['HTTPStatusCode'] >= 200 and resp['ResponseMetadata']['HTTPStatusCode'] < 300 def _enable_logging(self): logging.config.dictConfig({ 'version': 1, 'disable_existing_loggers': True, }) logging.basicConfig(level=logging.DEBUG, format='%(module)s: %(message)s') class AWS_SVC_TYPE(Enum): IAM = 'iam' S3 = 's3' KMS = 'kms' SQS = 'sqs'
prisma-cloud/IAMFinder
aws_svc/aws_service_base.py
aws_service_base.py
py
3,839
python
en
code
102
github-code
6
[ { "api_name": "abc.ABC", "line_number": 14, "usage_type": "name" }, { "api_name": "botocore.config.Config", "line_number": 16, "usage_type": "call" }, { "api_name": "botocore.endpoint.MAX_POOL_CONNECTIONS", "line_number": 21, "usage_type": "name" }, { "api_name": ...
42739931950
import os import pickle import shutil import numpy as np from tqdm import tqdm import time class ModelManager: ''' Model manager is designed to load and save all models No matter what dataset name. ''' path_name = './checkpoints/' @classmethod def __init__(cls, cfg): if not cfg.MODEL.TRAINING and cfg.PATH.MODEL_PATH is not None: cls.path_name = cfg.PATH.MODEL_PATH elif cfg.MODEL.TRAINING and cfg.MODEL.MODEL_NAME: cls.path_name += cfg.MODEL.MODEL_NAME+"-"+ time.strftime("%Y_%m_%d__%H_%M_%S", time.localtime()) +'/' cfg.PATH.MODEL_PATH = cls.path_name else: raise Exception('Model path initialization error, please check your config.py') def save_model(self, model, model_name): ''' Save model to model/ dir :param model: model to be saved :param model_name: model name :return: None ''' if 'pkl' not in model_name: model_name += '.pkl' if not os.path.exists('checkpoints'): os.makedirs('checkpoints') if not os.path.exists( self.path_name): os.makedirs(self.path_name) pickle.dump(model,open(self.path_name+model_name,'wb')) def save_config(self,cfg): ''' Save config to model/ dir as yaml file :param cfg: config :return: None ''' if not os.path.exists(self.path_name): os.makedirs(self.path_name) cfg.PATH.CONFIG_PATH = self.path_name+'config.yaml' with open(self.path_name+'config.yaml','w') as f: f.write(cfg.dump()) def load_model(self, model_name): ''' load model from model/ dir :param model_name: model name :return: model ''' if 'pkl' not in model_name: model_name += '.pkl' if not os.path.exists(self.path_name+model_name): raise Exception('Model not found %s'%(self.path_name+model_name)) return pickle.load(open(self.path_name+model_name,'rb')) def save_test_result(self,test_result): ''' Save test result to model/ dir :param test_result: test result, as txt file :return: None ''' if not os.path.exists(self.path_name): os.makedirs(self.path_name) with open(self.path_name+'test_result.txt','w') as f: for item in test_result: f.write(str(item)+'\n') @staticmethod def 
clean_workspace(): ''' clean model/ dir :return: None ''' if os.path.exists('checkpoints'): shutil.rmtree('checkpoints') def get_time_cost(begin_time, end_time): ''' get the time cost :param begin_time: the start time :param end_time: the end time :return: the time cost ''' time_cost = end_time - begin_time return "%d day %d hour %d minute %.2f second"%(time_cost // 86400, time_cost % 86400 // 3600, time_cost % 3600 // 60, time_cost % 60) def k_neighbors(sim_vector, k): ''' input the similarity matrix, the index of the user, and the k return the k nearest neighbor of the user :param sim_vector: the similarity matrix :param k: the k :return: the k nearest neighbor of the user and the similarity between the user and the neighbor ''' # get the similarity matrix sim_vector = sim_vector # get the k k = k # get the k nearest neighbor of the user neighbor = np.argsort(sim_vector)[-k-1:-1] neighbor_sim = np.sort(sim_vector)[-k-1:-1] # do not include the user itself return neighbor, neighbor_sim def get_score_matrix(train_rating,user_map,movie_map): ''' get the score matrix @param: train_rating, the train rating @param: user_map, the user map @param: movie_map, the movie map @return: score_matrix, the movie popularity, the movie count ''' print("<<<< begin to conduct the score matrix") score_matrix = np.zeros((len(user_map.keys()),len(movie_map.keys()))) movie_popular = np.zeros(len(movie_map.keys())) movie_count = len(movie_map.keys()) tqdm_process = tqdm(total=train_rating.shape[0]) for row in train_rating.itertuples(index=True,name="Pandas"): user = user_map[getattr(row,'userId')] movie = movie_map[getattr(row,'movieId')] rate = getattr(row,'rating') score_matrix[user][movie] = rate movie_popular[movie] += 1 tqdm_process.update(1) tqdm_process.close() print(">>>> end to conduct the score matrix") print("@ score matrix shape:",score_matrix.shape) print('movie_popular shape:',movie_popular.shape) print('movie_count:',movie_count) return score_matrix, movie_popular, 
movie_count def calculate_movie_similarity(train_set,pre_sim_calcul = False): ''' calculate the tfidf of the movies :param train_set: the train set, a tuple of (trainset,user_map,movie_map,movie_type_features) :return: score_matrix, movie_popular, movie_sim, movie_count ''' # get the train set train_rating, user_map, movie_map, movie_type_features = train_set score_matrix, movie_popular, movie_count = get_score_matrix(train_rating,user_map,movie_map) movie_sim= np.zeros((movie_count, movie_count)) if pre_sim_calcul: print("<<<< begin to conduct the movie similarity matrix") begin_time = time.time() # record the start time for i in tqdm(range(movie_count)): movie_sim[i][i] = 1 for j in range(i+1,movie_count): movie_sim[i][j] = cosine_similarity(movie_type_features[i],movie_type_features[j]) movie_sim[j][i] = movie_sim[i][j] end_time = time.time() # record the end time print(">>>> end to conduct the movie similarity matrix") print("@ time cost: %s"%get_time_cost(begin_time,end_time)) else: print("post calculate the similarity during prediction!") return score_matrix, movie_popular, movie_sim, movie_count,user_map,movie_map,movie_type_features def cosine_similarity(list1,list2): ''' calculate the cosine_similarity of list1 and list2 :param list1: the first list :param list2: the second list :return: the cosine_similarity ''' # get the number of common items assert(len(list1) == len(list2)) n = len(list1) assert(n > 0) # calculate the sum of the two lists sum1 = sum(list1*list2) # calculate the square of the two lists den = np.sqrt(sum(list1**2)) * np.sqrt(sum(list2**2)) # calculate the cosine similarity if den == 0: return 0 else: return sum1/den def calculate_user_sim_matrix(train_set,pre_sim_calcul = True): ''' calculate the similarity matrix between users :param train_set: the train set, a tuple of (trainset,user_map,movie_map,movie_type_features) """ :return: the score_matrix, the similarity matrix, movie_popular, movie_count ''' # conduct the score matrix 
print("<<<<<< begin to caculate the similarity matrix, the movie popularity and the movie count") train_rating, user_map, movie_map, movie_type_features = train_set score_matrix, movie_popular, movie_count = get_score_matrix(train_rating,user_map,movie_map) # get the similarity matrix between users user_sim_matrix = np.zeros((score_matrix.shape[0],score_matrix.shape[0])) if pre_sim_calcul: user_sim_matrix = get_user_sim_matrix(score_matrix) else: print("post calculate the similarity during prediction!") print(">>>> end to caculate the similarity matrix.") print('user_sim_matrix shape:',user_sim_matrix.shape) return score_matrix,user_sim_matrix, movie_popular, movie_count,user_map,movie_map,movie_type_features def get_user_sim_matrix(input_matrix): ''' get the similarity matrix between users with pearson similarity :param input_matrix: the input matrix with shape (n_users, n_items) :return: the similarity matrix ''' # get the shape of the input matrix begin_time = time.time() # record the start time print("<<<< begin to get the similarity matrix") input_matrix = np.array(input_matrix) # convert to numpy array print('input score matrix shape:',input_matrix.shape) # get the number of users n_users = input_matrix.shape[0] # calculate the similarity matrix between users with person similarity user_sim_matrix = np.zeros((n_users, n_users)) print('user_sim_matrix shape:',user_sim_matrix.shape) for i in tqdm(range(n_users)): user_sim_matrix[i][i] = 1 for j in range(i+1,n_users): user_sim_matrix[i][j] = pearson_sim(input_matrix[i],input_matrix[j]) user_sim_matrix[j][i] = user_sim_matrix[i][j] print(">>>> end to get the similarity matrix") end_time = time.time() # record the end time print('@ time cost: '+get_time_cost(begin_time, end_time)) return user_sim_matrix def pearson_sim(list1,list2): ''' calculate the pearson similarity between two lists :param list1: the first list :param list2: the second list :return: the pearson similarity ''' # get the number of common items 
assert len(list1) == len(list2) n = len(list1) assert n > 0 # calculate the sum of the two lists avg1 = sum(list1)/n avg2 = sum(list2)/n norm1 = list1 - avg1 norm2 = list2 - avg2 # calculate the sum of the two lists sum1 = sum(norm1*norm2) # calculate the square of the two lists den = np.sqrt(sum(norm1**2)) * np.sqrt(sum(norm2**2)) # calculate the pearson similarity if den == 0: return 0.0 else: return sum1/den def SSE_error(prediction,real_rating): ''' calculate the SSE error :param prediction: the prediction of the user :param real_rating: the real rating of the user :return: the SSE error ''' # get the prediction and the real rating prediction = np.array(prediction) real_rating = np.array(real_rating) # calculate the SSE error SSE = sum((prediction - real_rating)**2) return SSE if __name__ == '__main__': # test the similarity matrix from dataset import Dataset from config import cfg dataset = Dataset(cfg) train_set = dataset.get_trainset() a = pearson_sim(np.array([1,2,3,4,5]),np.array([1,2,3,4,5])) b = pearson_sim(np.array([1,2,3,4,5]),np.array([5,4,3,2,1])) print(a,b) score_matrix,user_sim_matrix, movie_popular, movie_count,user_map,movie_map = calculate_user_sim_matrix(train_set,pre_sim_calcul = False) pickle.dump(user_sim_matrix,open('user_map.pkl','wb')) pickle.dump(movie_map,open('movie_map.pkl','wb')) # print(user_sim_matrix, movie_popular, movie_count) # pickle.dump(train_set, open('./checkpoints\CF-2022_04_11__11_32_40/trainset.pkl', 'wb')) # pickle.dump(score_matrix,open('checkpoints\CF-2022_04_11__11_32_40\score_matrix.pkl','wb')) # test the model_manager # model_manager = ModelManager(cfg) # model_manager.clean_workspace() # model_manager.save_model(user_sim_matrix, 'user_sim_matrix') # model_manager.save_model(movie_popular, 'movie_popular') # model_manager.save_model(movie_count, 'movie_count') # d = model_manager.load_model('score_matrix') # a = model_manager.load_model('user_sim_mat') # b = model_manager.load_model('movie_popular') # c = 
model_manager.load_model('movie_count') # print(a[0:3],b,c,d[0:3]) # test the time cost # begin_time = time.time() # record the start time # time.sleep(3) # end_time = time.time() # record the end time # # print the time cost # print('@ time cost:',get_time_cost(begin_time,end_time))
Jack-Lio/RecommenderSystem
utls.py
utls.py
py
11,868
python
en
code
0
github-code
6
[ { "api_name": "time.strftime", "line_number": 20, "usage_type": "call" }, { "api_name": "time.localtime", "line_number": 20, "usage_type": "call" }, { "api_name": "os.path.exists", "line_number": 33, "usage_type": "call" }, { "api_name": "os.path", "line_numbe...
5762269283
import os scriptPath = os.path.dirname(os.path.abspath(__file__)) projRootPath = os.path.abspath( os.path.join(scriptPath , os.path.join('..', '..'))) import numpy as np # matplotlib for displaying the output import matplotlib.pyplot as plt import seaborn as sns sns.set() from scipy import signal from scipy.io import wavfile # and IPython.display for audio output import IPython.display # Librosa for audio import librosa # And the display module for visualization import librosa.display #### Path to data # Get data files two_up = os.path.abspath(os.path.join('.' ,"../..")) print("Project root path is: ", two_up) dataDirName = "data" rawDataDirName = "converted_wav" className = "violin" # className = "guitar" data_path = os.path.join(projRootPath, dataDirName, rawDataDirName, className) print(data_path) root_paths = [] # Get all files from data_path # r=root, d=directories, f = files (_, d, allFiles) = next(os.walk(data_path)) wavFiles = [f for f in allFiles if f.endswith(".wav")] file = wavFiles[1] sample_rate, samples = wavfile.read(os.path.join(data_path, file)) frequencies, times, spectrogram = signal.spectrogram(samples, sample_rate) # all spectrogram plt.pcolormesh(times, frequencies, spectrogram) plt.imshow(spectrogram) plt.ylabel('Frequency') plt.gca().invert_yaxis() plt.xlabel('Time') plt.show()
mariusdgm/AudioMining
src/visualization/spectrogram.py
spectrogram.py
py
1,360
python
en
code
0
github-code
6
[ { "api_name": "os.path.dirname", "line_number": 3, "usage_type": "call" }, { "api_name": "os.path", "line_number": 3, "usage_type": "attribute" }, { "api_name": "os.path.abspath", "line_number": 3, "usage_type": "call" }, { "api_name": "os.path.abspath", "line...
4714847905
import requests import ast import sys import getopt class XkcdClient(): def api_call(self, url): self.urls = url r = requests.get(url = self.urls) byte_str = r.content dict_str = byte_str.decode("UTF-8") my_data = ast.literal_eval(dict_str) return my_data def get_image(self,img_url): self.img_name = img_url.split('/')[-1] img_data = requests.get(img_url).content with open(self.img_name, 'wb') as handler: handler.write(img_data) # client = XkcdClient() # response = client.api_call('https://xkcd.com/info.0.json') # print(response) if __name__ == '__main__': cmd_line_args = sys.argv[1:] unix_args = 'hn:os' gnu_args = ['help','comicnum=','print','save-image'] oplist, args = getopt.getopt(cmd_line_args,unix_args,gnu_args) print(args) #Extra arguments that are not part of the uni_args or gnu_args print(oplist) #oplist is a list of tuples comic_num = '' client = XkcdClient() url_latest = 'https://xkcd.com/info.0.json' for opt, arg in oplist: print(opt) print(arg) if opt == '-h' or opt == '--help': print('help message') print('Use -n or --comicnum to specify the comic number you want use 0 as argument for latest comic') print('Use -o or --print to get info in text/json format') print('Use -s or --save-image to download image in this directory') elif opt == '-n' or opt == '--comicnum': comic_num = arg if comic_num is '0': #default get the latest comic print('Get the comic number ' + str(arg)) response = client.api_call(url_latest) print(response) else: url_specific = 'http://xkcd.com/'+arg+'/info.0.json' response = client.api_call(url_specific) elif opt == '-o' or opt == '--print': if comic_num: if comic_num is '0': print('print output in format json/text') print(response) else: print('The output in json/text is') print(response) else: print('Set the -n parameter first') elif opt == '-s' or opt == '--save-image': if comic_num: img_url = response['img'] client.get_image(img_url) else: print('Set the -n parameter first')
nishantasarma/XkcdClientApp
client.py
client.py
py
2,527
python
en
code
0
github-code
6
[ { "api_name": "requests.get", "line_number": 13, "usage_type": "call" }, { "api_name": "ast.literal_eval", "line_number": 16, "usage_type": "call" }, { "api_name": "requests.get", "line_number": 21, "usage_type": "call" }, { "api_name": "sys.argv", "line_numbe...
7874667169
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat Jun 22 16:48:54 2019 @author: xiaohaoren """ import json import pickle import numpy as np negative_word = ['悶熱','吵雜','髒','髒亂','加強','改進','缺點'] def Load_All_Info(json_path,pickle_path): with open(json_path,'r') as fp: json_data = json.load(fp) with open(pickle_path, 'rb') as fp: pickle_data = pickle.load(fp) keys = list(json_data.keys()) return json_data,pickle_data,keys def FilteringAndRanking(querys,places,corpus,review_list=None): """ query = ['冷氣','衛生',...] place = ['春山茶水舖','小川拉麵',...] corpus = {'春山茶水舖':{'不錯':(正向次數,評論編號),'五花肉':(正向分數,評論編號),...}} """ scoreboard = {} for i,place in enumerate(places): #N = corpus[place]['__termNum__'] N = corpus[place]['__reviewNum__'] scoreboard[place]=0 if place not in corpus: continue for term in querys: term_score = 0 term_sign = -1 if term in negative_word else 1 if term not in corpus[place]: continue else: keyword_data = corpus[place][term] for rid,p in keyword_data.items(): term_score += (term_sign * p) if review_list is not None: rid = int(rid) review_content = review_list[rid] print('"%s"由於「%s」中的"%s"而加%d分' % (place,review_content,term,term_sign*p)) scoreboard[place] += term_score scoreboard[place] = scoreboard[place]/(N*len(querys)) * 100 return scoreboard if __name__ == "__main__": corpus_path = '../data/place_dict.json' reviewContent_path = '../data/review_list.json' querys = ['乾淨','衛生'] corpus,review_list,places = Load_All_Info(json_path=corpus_path,pickle_path=reviewContent_path) scoreboard = FilteringAndRanking(querys=querys,places=places,corpus=corpus,review_list=review_list)
e841018/DinnerSelector
utils/Filtering.py
Filtering.py
py
2,301
python
en
code
0
github-code
6
[ { "api_name": "json.load", "line_number": 18, "usage_type": "call" }, { "api_name": "pickle.load", "line_number": 20, "usage_type": "call" } ]
35839328750
import argparse
from distutils.util import strtobool
import pathlib

import siml

import convert_raw_data


def main():
    """Convert raw heat-simulation data and preprocess it with existing preprocessors."""
    cli = argparse.ArgumentParser()
    cli.add_argument(
        'settings_yaml', type=pathlib.Path,
        help='YAML file name of settings.')
    cli.add_argument(
        'raw_data_directory', type=pathlib.Path,
        help='Raw data directory')
    cli.add_argument(
        '-p', '--preprocessors-pkl', type=pathlib.Path, default=None,
        help='Preprocessors.pkl file')
    cli.add_argument(
        '-o', '--out-dir', type=pathlib.Path, default=None,
        help='Output directory name')
    cli.add_argument(
        '-f', '--force-renew', type=strtobool, default=0,
        help='If True, overwrite existing data [False]')
    cli.add_argument(
        '-l', '--light', type=strtobool, default=0,
        help='If True, compute minimum required data only [False]')
    cli.add_argument(
        '-n', '--read-npy', type=strtobool, default=1,
        help='If True, read .npy files instead of original files if exists [True]')
    cli.add_argument(
        '-r', '--recursive', type=strtobool, default=1,
        help='If True, process directory recursively [True]')
    cli.add_argument(
        '-e', '--elemental', type=strtobool, default=0,
        help='If True, create also elemental features [False]')
    cli.add_argument(
        '-a', '--convert-answer', type=strtobool, default=1,
        help='If True, convert answer [True]')
    cli.add_argument(
        '-s', '--skip-interim', type=strtobool, default=0,
        help='If True, skip conversion of interim data [False]')
    args = cli.parse_args()

    main_setting = siml.setting.MainSetting.read_settings_yaml(
        args.settings_yaml)
    if not args.convert_answer:
        # Without answers, only mesh and control files are required inputs.
        main_setting.conversion.required_file_names = ['*.msh', '*.cnt']

    main_setting.data.raw = args.raw_data_directory
    if args.out_dir is None:
        # Mirror the raw layout under sibling interim/preprocessed roots.
        args.out_dir = args.raw_data_directory
        raw_root = main_setting.data.raw
        main_setting.data.interim = [siml.prepost.determine_output_directory(
            raw_root, raw_root.parent / 'interim', 'raw')]
        main_setting.data.preprocessed = [
            siml.prepost.determine_output_directory(
                raw_root, raw_root.parent / 'preprocessed', 'raw')]
    else:
        main_setting.data.interim = [args.out_dir / 'interim']
        main_setting.data.preprocessed = [args.out_dir / 'preprocessed']

    if not args.skip_interim:
        # Raw -> interim conversion (skippable when interim already exists).
        conversion_function = convert_raw_data.HeatConversionFuncionCreator(
            create_elemental=args.elemental,
            convert_answer=args.convert_answer,
            light=args.light)
        raw_converter = siml.prepost.RawConverter(
            main_setting,
            conversion_function=conversion_function,
            filter_function=convert_raw_data.filter_function_heat,
            force_renew=args.force_renew,
            recursive=args.recursive,
            to_first_order=True,
            write_ucd=False,
            read_npy=args.read_npy,
            read_res=args.convert_answer)
        raw_converter.convert()

    # Interim -> preprocessed, reusing previously fitted preprocessors if given.
    preprocessor = siml.prepost.Preprocessor(
        main_setting, force_renew=args.force_renew, allow_missing=True)
    preprocessor.convert_interim_data(preprocessor_pkl=args.preprocessors_pkl)
    return


if __name__ == '__main__':
    main()
yellowshippo/isogcn-iclr2021
src/preprocess_raw_data_with_preprocessors.py
preprocess_raw_data_with_preprocessors.py
py
3,638
python
en
code
42
github-code
6
[ { "api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call" }, { "api_name": "pathlib.Path", "line_number": 14, "usage_type": "attribute" }, { "api_name": "pathlib.Path", "line_number": 18, "usage_type": "attribute" }, { "api_name": "pathlib.P...
71175312187
import re
import time
import textwrap
from copy import copy

import torch.nn.functional as F
from training_utils import *

# ANSI escape sequences used to colour/emphasise terminal output.
BASH_FORMATTING = {
    'PURPLE': '\033[95m',
    'CYAN': '\033[96m',
    'DARKCYAN': '\033[36m',
    'BLUE': '\033[94m',
    'GREEN': '\033[92m',
    'YELLOW': '\033[93m',
    'RED': '\033[91m',
    'BOLD': '\033[1m',
    'UNDERLINE': '\033[4m',
    'END': '\033[0m'
}


def bash_format_text(text, *args):
    """Wrap *text* in the ANSI codes named by *args*, terminated by END."""
    prefix = ''.join(BASH_FORMATTING[style] for style in args)
    return prefix + text + BASH_FORMATTING['END']


# maybe add do_sample ?
# randomly select one of two values that evaluate to true or false lols hehehe
def transfer_learning_bot(model, tokenizer, max_length, top_k, top_p):
    """Interactive console loop for a chatbot trained via transfer learning.

    Reads user input from stdin, samples a reply from *model*, prints it,
    and repeats until the user types q/quit/exit.
    """
    first_input = input('User >> ').lower()
    context = copy(first_input)  # kept for parity with the original flow
    input_sentence = tokenizer.encode(first_input,
                                      truncation=True,
                                      max_length=128,
                                      return_tensors='pt')
    while True:
        print(bash_format_text('Typing...', 'YELLOW', 'BOLD'), end='\r')
        # Randomise sampling hyper-parameters each turn for variety.
        uni_temp = round(torch.rand(1).clamp(0.1).item(), 2)
        repeat_penalty = round((torch.rand(1) * 5).clamp(1).item(), 2)
        ngram = int(np.random.choice([2, 3, 4], 1)[0])
        bot_reply = model.generate(input_sentence,
                                   max_length=max_length,
                                   top_k=top_k,
                                   top_p=top_p,
                                   temperature=uni_temp,
                                   repetition_penalty=repeat_penalty,
                                   skip_special_tokens=True,
                                   no_repeat_ngram_size=ngram,
                                   pad_token_id=tokenizer.eos_token_id)
        # length_penalty=length_penalty)
        decoded = tokenizer.decode(bot_reply.squeeze()).replace('<|endoftext|>', '')
        wrapped = textwrap.fill(decoded, width=75)
        print(bash_format_text('Aubrey: {}'.format(wrapped), 'YELLOW', 'BOLD'))
        response = input('User >> ')
        quit_requested = (response == 'q' or response == 'quit' or response == 'exit')
        # The original encodes the final reply even when quitting; keep that.
        input_sentence = tokenizer.encode(response.lower(),
                                          truncation=True,
                                          max_length=128,
                                          return_tensors='pt')
        if quit_requested:
            break
amauriciorr/AubreyBot
chat_utils.py
chat_utils.py
py
2,389
python
en
code
2
github-code
6
[ { "api_name": "copy.copy", "line_number": 35, "usage_type": "call" }, { "api_name": "torch.nn.functional.rand", "line_number": 40, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 40, "usage_type": "name" }, { "api_name": "torch.nn.funct...
33359786284
from unittest import TestCase
import unittest

import requests


class TestLoadTimeSeries(TestCase):
    """Integration tests against the deployed time-series endpoints."""

    def test_load_data_success(self):
        """Uploading the recovered-cases CSV should return HTTP 200."""
        # Context manager closes the handle even if reading fails; the
        # original closed it manually and leaked it on exception.
        with open("tests/routes/time_series_covid19_recovered_global.csv", "rb") as f:
            file = f.read()
        url = 'https://covid-monitor-61.herokuapp.com/time_series/data?type=recovered'
        r = requests.post(url, data=file, headers={"Content-Type": "text/csv"})
        self.assertEqual(r.status_code, 200)

    def test_query_data(self):
        """Querying recovered counts for several locations should return HTTP 200."""
        url = 'https://covid-monitor-61.herokuapp.com/time_series/cases'
        body = {"return_type": "json",
                "start_date": "01/26/20",
                "end_date": "01/28/20",
                "types": ["Recovered"],
                "locations": [
                    {"Country/Region": "Albania"},
                    {"Country/Region": "Canada", "Province/State": "Ontario"},
                    {"Country/Region": "Australia"}
                ]
                }
        r = requests.post(url, json=body, headers={"Content-Type": "application/json"})
        self.assertEqual(r.status_code, 200)


if __name__ == '__main__':
    unittest.main()
shin19991207/CSC301-A2
tests/routes/test_time_series.py
test_time_series.py
py
1,255
python
en
code
0
github-code
6
[ { "api_name": "unittest.TestCase", "line_number": 10, "usage_type": "name" }, { "api_name": "requests.post", "line_number": 15, "usage_type": "call" }, { "api_name": "requests.post", "line_number": 32, "usage_type": "call" }, { "api_name": "unittest.main", "li...
25254340151
import numpy as np
import sys
from vispy import app, visuals, scene

# build visuals: a visual node class for 3D polylines
Plot3D = scene.visuals.create_visual_node(visuals.line.line.LineVisual)

# build canvas
canvas = scene.SceneCanvas(keys='interactive', title='plot3d', show=True)

# Add a ViewBox to let the user zoom/rotate
view = canvas.central_widget.add_view()
view.camera = 'turntable'
view.camera.fov = 45
view.camera.distance = 6

# prepare data: 1000 helix-like curves of N points each; `segments` holds the
# index pairs connecting consecutive points *within* each curve.
x, y, z, segments = [], [], [], []
N = 6000  # points per curve (loop-invariant, hoisted)
for start, i in enumerate(np.linspace(-5, 5, 1000)):
    # NOTE(review): `5+1` below looks like a typo for `5+i` (cf. the y and z
    # ranges) — left unchanged pending confirmation against intent.
    x.append(np.sin(np.linspace(-5-i, 5+1, N)*np.pi))
    y.append(np.cos(np.linspace(-5+i, 5-i, N)*np.pi))
    z.append(np.linspace(-5-i, 5-i, N))
    # BUGFIX: each curve contributes N points, so its first point lives at
    # index N*start; the old `1000 * start` offset connected points across
    # unrelated curves.
    start_idx = N * start
    idxs = np.arange(start_idx, start_idx+N-1)
    idxs = np.stack([idxs, idxs+1], axis=-1)
    segments.append(idxs)
x, y, z = np.concatenate(x), np.concatenate(y), np.concatenate(z)
segments = np.concatenate(segments, axis=0)

# plot all points as one LineVisual with explicit segment connectivity
pos = np.c_[x, y, z]
Plot3D(pos, width=10.0, color=(1.0, 0.0, 0.0, 1.0),
       method='gl', connect=segments, parent=view.scene)

if __name__ == '__main__':
    if sys.flags.interactive != 1:
        app.run()
ptmorris03/Clip3D
lines.py
lines.py
py
1,152
python
en
code
0
github-code
6
[ { "api_name": "vispy.scene.visuals.create_visual_node", "line_number": 8, "usage_type": "call" }, { "api_name": "vispy.scene.visuals", "line_number": 8, "usage_type": "attribute" }, { "api_name": "vispy.scene", "line_number": 8, "usage_type": "name" }, { "api_name...
11932438017
from env import data
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from time import sleep


# Data-entry program: fills in the post-test web form once per record in
# data_set, using selectors and the target URL from the `env.data` config.
def program(data_set):
    # Open the target site in a fresh Chrome session.
    browser = webdriver.Chrome()
    actions = ActionChains(browser)
    browser.get(data['linkActive'])
    print('==== Welcome To Bangef, Automated Post-Test ====');
    try:
        for d in data_set:
            # Wait for each input element to exist, then fill it in.
            # Full name
            WebDriverWait(browser, 10).until(
                EC.presence_of_element_located((By.ID, data['selectorById'][0]))
            ).send_keys(d['namaLengkap'])
            # Email
            WebDriverWait(browser, 10).until(
                EC.presence_of_element_located((By.ID, data['selectorById'][1]))
            ).send_keys(d['email'])
            # Phone number (re-adds the leading zero stripped from the data)
            WebDriverWait(browser, 10).until(
                EC.presence_of_element_located((By.ID, data['selectorById'][2]))
            ).send_keys('0'+d['noTelpon'])
            # Gender (dropdown)
            elementJK = browser.find_element(By.ID, data['selectorById'][3])
            Select(elementJK).select_by_value(d['jk'])
            # Age
            WebDriverWait(browser, 10).until(
                EC.presence_of_element_located((By.ID, data['selectorById'][4]))
            ).send_keys(d['usia'])
            # Occupation (dropdown)
            elementPekerjaan = browser.find_element(By.ID, data['selectorById'][5])
            Select(elementPekerjaan).select_by_value(d['pekerjaan'])
            # Community / organisation
            WebDriverWait(browser, 10).until(
                EC.presence_of_element_located((By.ID, data['selectorById'][6]))
            ).send_keys(d['organisasi'])
            # Education level (dropdown)
            elementPendidikan = browser.find_element(By.ID, data['selectorById'][7])
            Select(elementPendidikan).select_by_value(d['pendidikan'])
            # Province (autocomplete: type then press Enter)
            WebDriverWait(browser, 10).until(
                EC.presence_of_element_located((By.ID, data['selectorById'][8]))
            ).send_keys(d['provinsi'], Keys.RETURN)
            # Home city (autocomplete: type then press Enter)
            elementCities = browser.find_element(By.ID, data['selectorById'][9])
            elementCities.send_keys(d['kotaAsal'], Keys.RETURN)
            # Captcha: the site shows a simple arithmetic challenge; the
            # operator typed by the user ('+' or anything else => '*')
            # decides how the two digits are combined.
            browser.execute_script("arguments[0].scrollIntoView();", elementCities)
            captcha = input('Masukan validasi captcha (sample : 9*9): \n');
            arr = list(captcha)
            if arr[1] == '+' :
                result = int(arr[0]) + int(arr[2])
            else :
                result = int(arr[0]) * int(arr[2])
            browser.find_element(By.ID, data['selectorById'][10]).send_keys(result)
            # Select the radio-button answer for each survey question; the
            # element ids are "<questionId>-<answer>". Scroll each into view
            # before clicking so the click is not intercepted.
            q1 = browser.find_element(By.ID, "1070-"+d['qSatu'])
            browser.execute_script("arguments[0].scrollIntoView();", q1)
            sleep(.5)
            q1.click()
            q2 = browser.find_element(By.ID, "1071-"+d['qDua'])
            browser.execute_script("arguments[0].scrollIntoView();", q2)
            q2.click()
            q3 = browser.find_element(By.ID, "1072-"+d['qTiga'])
            browser.execute_script("arguments[0].scrollIntoView();", q3)
            q3.click()
            q4 = browser.find_element(By.ID, "1073-"+d['qEmpat'])
            actions.move_to_element(q4).click().perform()
            browser.execute_script("arguments[0].scrollIntoView();", q4)
            sleep(.5)
            q4.click()
            q5 = browser.find_element(By.ID, "1076-"+d['qLima'])
            browser.execute_script("arguments[0].scrollIntoView();", q5)
            q5.click()
            # Submit: scroll to the footer first so the button is clickable.
            footer = browser.find_element(By.CSS_SELECTOR, '#__next > div > div.footer.mt-3')
            browser.execute_script("arguments[0].scrollIntoView();", footer)
            sleep(1)
            browser.find_element(By.XPATH, data['selectorByXpath']).click()
            sleep(3)
            # Return to the form page for the next record and wait until the
            # first input is present again.
            browser.get(data['linkActive'])
            WebDriverWait(browser, 10).until(
                EC.presence_of_element_located((By.ID, data['selectorById'][0]))
            )
            print('Data dengan atas nama '+d['namaLengkap']+' berhasil ✔️')
            print('Total Data : '+ str(d['id']) +' Selesai Post Test')
    except Exception as err:
        # Report the last record reached so a rerun can resume from there.
        print('Data Selesai Terakhir : "id": "'+str(d['id'])+'".')
        print(err)
    # Always shut the browser down when the run ends.
    browser.quit()
bangef/pz
python/post-test/module/program.py
program.py
py
4,797
python
en
code
0
github-code
6
[ { "api_name": "selenium.webdriver.Chrome", "line_number": 15, "usage_type": "call" }, { "api_name": "selenium.webdriver", "line_number": 15, "usage_type": "name" }, { "api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 16, "usage_type": "cal...
12153147067
import matplotlib.pyplot as plt
import numpy as np

# One integer score per line in scores.txt.
with open('scores.txt', 'r') as f:
    scores = f.read().splitlines()
scores = list(map(int, scores))

# Summarise every 1000-episode chunk by its mean and maximum.
mean = []
max_list = []
for chunk_start in range(0, len(scores), 1000):
    chunk = scores[chunk_start:chunk_start + 1000]
    mean.append(np.average(chunk))
    max_list.append(max(chunk))

episode = range(len(mean))

# Mean-score curve.
plt.ylabel('episode scores')
plt.xlabel('training episodes')
plt.yticks(np.arange(min(mean), max(mean)+1, 5000))
plt.title('mean scores')
plt.plot(episode, mean)
plt.show()

# Max-score curve.
plt.ylabel('episode scores')
plt.xlabel('training episodes')
plt.yticks(np.arange(min(max_list), max(max_list)+1, 10000))
plt.title('max scores')
plt.plot(episode, max_list)
plt.show()
Mike-Teng/Deep_Learning
lab/lab2/plot.py
plot.py
py
798
python
en
code
0
github-code
6
[ { "api_name": "numpy.average", "line_number": 12, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.ylabel", "line_number": 16, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name" }, { "api_name": "matplotlib.p...
333459228
import argparse
import glob
import os

import h5py
import hdbscan
import numpy as np
from scipy.ndimage import binary_erosion
from skimage.filters import gaussian
from skimage.segmentation import watershed
from sklearn.cluster import MeanShift


def expand_labels_watershed(seg, raw, erosion_iters=4):
    """Grow instance labels into the noise/background region.

    Runs a watershed on a Gaussian-smoothed `raw` image, seeded with the
    existing labels plus an eroded background seed so labels only expand a
    limited distance.
    """
    bg_mask = seg == 0
    # don't need to do anything if we only have background
    if bg_mask.size == int(bg_mask.sum()):
        return seg

    hmap = gaussian(raw, sigma=1.)

    bg_mask = binary_erosion(bg_mask, iterations=erosion_iters)

    seg_new = seg.copy()
    bg_id = int(seg.max()) + 1
    seg_new[bg_mask] = bg_id

    seg_new = watershed(hmap, seg_new)
    # drop the synthetic background seed again
    seg_new[seg_new == bg_id] = 0

    return seg_new


def cluster(emb, clustering_alg, semantic_mask=None):
    """Cluster pixel embeddings with a fitted-on-the-fly sklearn-style algorithm.

    emb: (E, D, H, W) embedding volume; clustering happens over the E-dim
    vectors, optionally restricted to `semantic_mask` (same spatial shape).
    Returns a (D, H, W) label volume; labels start at 1, 0 is background.
    """
    output_shape = emb.shape[1:]
    # reshape (E, D, H, W) -> (E, D * H * W) and transpose -> (D * H * W, E)
    flattened_embeddings = emb.reshape(emb.shape[0], -1).transpose()

    result = np.zeros(flattened_embeddings.shape[0])

    if semantic_mask is not None:
        flattened_mask = semantic_mask.reshape(-1)
        assert flattened_mask.shape[0] == flattened_embeddings.shape[0]
    else:
        flattened_mask = np.ones(flattened_embeddings.shape[0])

    if flattened_mask.sum() == 0:
        # return zeros for empty masks
        return result.reshape(output_shape)

    # cluster only within the foreground mask
    clusters = clustering_alg.fit_predict(flattened_embeddings[flattened_mask == 1])

    # always increase the labels by 1 cause clustering results start from 0
    # and we may loose one object
    result[flattened_mask == 1] = clusters + 1

    return result.reshape(output_shape)


def cluster_hdbscan(emb, min_size, eps, min_samples=None, semantic_mask=None):
    """Cluster embeddings with HDBSCAN."""
    clustering = hdbscan.HDBSCAN(min_cluster_size=min_size, cluster_selection_epsilon=eps,
                                 min_samples=min_samples)
    return cluster(emb, clustering, semantic_mask)


def cluster_ms(emb, bandwidth, semantic_mask=None):
    """Cluster embeddings with MeanShift."""
    clustering = MeanShift(bandwidth=bandwidth, bin_seeding=True)
    return cluster(emb, clustering, semantic_mask)


def run_clustering(emb, clustering, delta_var, min_size, expand_labels, remove_largest, raw=None):
    """Cluster an embedding volume into an instance segmentation.

    BUGFIX: `raw` used to be an accidental module-level free variable that
    only existed when run as a script; it is now an explicit (backward
    compatible) parameter, required when `expand_labels` is True.
    """
    assert clustering in ['ms', 'hdbscan']
    if clustering == 'hdbscan':
        clusters = cluster_hdbscan(emb, min_size, delta_var)
    else:
        clusters = cluster_ms(emb, delta_var)

    # watershed the empty (i.e. noise) region
    if expand_labels:
        if raw is None:
            raise ValueError("'raw' image is required when expand_labels=True")
        clusters = expand_labels_watershed(clusters, raw)

    if remove_largest:
        # assume the most frequent label is background and zero it out
        ids, counts = np.unique(clusters, return_counts=True)
        clusters[ids[np.argmax(counts)] == clusters] = 0

    return clusters


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Segment embryos')
    parser.add_argument('--emb_dir', type=str, help='Path to embedding predictions directory', required=True)
    parser.add_argument('--clustering', type=str, help='Clustering algorithm: ms or hdbscan', required=True)
    parser.add_argument('--seg_ds', type=str, help='Output seg dataset name', required=True)
    parser.add_argument('--delta_var', type=float, help='delta_var param', default=0.5)
    parser.add_argument('--min_size', type=int, help='HDBSCAN min_size param', default=50)
    parser.add_argument('--remove_largest', help='Remove largest instance (BG)', action='store_true')
    parser.add_argument('--expand_labels', help='Expand labels with watershed', action='store_true')
    parser.add_argument('--min_instance_size', type=int, help='Min instance size filtering',
                        required=False, default=None)

    args = parser.parse_args()

    assert os.path.isdir(args.emb_dir)

    for file_path in glob.glob(os.path.join(args.emb_dir, '*predictions.h5')):
        _, filename = os.path.split(file_path)
        # BUGFIX: the original printed a literal placeholder instead of the
        # filename it had just computed.
        print(f'Processing {filename}')
        with h5py.File(file_path, 'r+') as f:
            raw_sequence = f['raw_sequence'][:]
            embedding_sequence = f['embedding_sequence1'][:]
            seg_sequence = []
            i = 0
            for raw, emb in zip(raw_sequence, embedding_sequence):
                i += 1
                print(f'Processing patch {i}')
                seg = run_clustering(emb, args.clustering, args.delta_var, args.min_size,
                                     args.expand_labels, args.remove_largest, raw=raw)
                seg_sequence.append(seg)

            # overwrite any previous segmentation dataset
            if args.seg_ds in f:
                del f[args.seg_ds]
            segments = np.stack(seg_sequence, axis=0)
            f.create_dataset(args.seg_ds, data=segments, compression='gzip')
        print('Done')
kreshuklab/takafumi_embryos_segmentation
utils/cluster.py
cluster.py
py
4,632
python
en
code
0
github-code
6
[ { "api_name": "skimage.filters.gaussian", "line_number": 20, "usage_type": "call" }, { "api_name": "scipy.ndimage.binary_erosion", "line_number": 22, "usage_type": "call" }, { "api_name": "skimage.segmentation.watershed", "line_number": 27, "usage_type": "call" }, { ...
32311173285
#import networkx as nx
#import matplotlib.pyplot as plt
import json
import pprint
from TwitterModule import *
import time
from datetime import datetime

#Set up api and global variables
twitter_api = oauth_login()#twitter api for grabbing data

#dates = [330,331,401,402,403]
# Dates encoded as MDD integers (e.g. 401 == April 1st).
dates = [401,402,403,404,405,406,407]

# For every date, search each query term and dump all retrieved tweets to a
# per-term JSON file; errors go to a per-date error log.
for day in dates:
    print(day)
    names = ['@itsnotdrew','@davidhogg111','@IngrahamAngle','@sleepnumber','@ATT','@Allstate','@esurance','@Bayer','@RocketMortgage','@LibertyMutual','@Arbys','@TripAdvisor','@Nestle','@hulu','@Wayfair','@FoxNews','#BoycottIngramAdverts','#boycottLauraIngraham','#FireIngraham','#FireLauraIngraham']
    errorLogName = 'errorLog' + str(day) + '_4' + '.txt'
    errorLog = open(errorLogName,'w')
    for q in names:
        try:
            # Build the `until` date (exclusive upper bound) from the MDD int;
            # the search then returns tweets from the given day only.
            dateStr = str(day)
            dateDay = dateStr[1:]
            dateDayPlusOne = str(int(dateDay)+1)
            dateMonth = dateStr[0]
            if (dateStr == '331'):
                #dirty code to fix a logic bug when switching months
                dateDayPlusOne = '01'
                dateMonth = '4'
            until = '2018-0' + dateMonth + '-' + dateDayPlusOne
            tweetsDicitonary = {}
            name = q[1:]  # drop the leading @ / # for the filename
            nameFile = name + dateStr +'_4'+ '.json'
            file = open(nameFile,'w')
            '''
            First search call to twitter_api
            Parameters:
            q: is the search term
            result_type: is whether we want recent, popular or mixed tweets. Currently set to recent
            max_results: is how many results we wan to take in a single call. Is currently 10 for testing
            until: specifies the date that all tweets returned form this call should come before
            (so all tweets from this call are from 3/28/2018)
            getMaxID parses the maxID from the appropriate string in the search return metadata
            maxid will then be used to call the next batch of tweets.
            More info on maxid is Available on the search api documentation
            '''
            print(q + 'at ' + str(datetime.now())) #prints twitter user being processed
            response = make_twitter_request(twitter_api.search.tweets,q=q,result_type='recent',count=5, until=until)
            try:
                # Parse max_id out of the "next_results" query string so the
                # follow-up calls page backwards through older tweets.
                next_results = response['search_metadata']['next_results']
                getMaxID = dict([ kv.split('=') for kv in next_results[1:].split("&") ])
                maxid = getMaxID['max_id']
            except:
                next_results = ""
                maxid = 0
                line = "\nretrieval error at " + str(datetime.now()) + " while processing beginning call of " + q
                errorLog.write(line)
            '''
            Parameters in response: most are the same
            -result_type is mixed (testing)
            -max_results is 100 (testing, but really it should be kept like this)
            -max_id field is at the end of the call, allowing each call of the function
            to retrieve older and older tweets
            time.sleep(5): Can only call the search api 180 times in 15 minutes, so ~5 seconds.
            Right now set to one because testing, but should probably be set to 10self.
            Or, we can edit the make_twitter_request function to handle this error for us
            '''
            for i in range(1,101): #top possible tweets 10,000
                #print(i) #testing code
                try:
                    response = make_twitter_request(twitter_api.search.tweets,q=q,result_type='recent',count=100,until=until,max_id=maxid)
                    next_results = response['search_metadata']['next_results']
                    if (next_results == None):
                        break
                    getMaxID = dict([ kv.split('=') for kv in next_results[1:].split("&") ])#to get the nextID
                    maxid = getMaxID['max_id']
                    # print(maxid)
                    time.sleep(5)  # rate limit: 180 search calls / 15 min
                except:
                    line = "\nretrieval error at " + str(datetime.now()) + " while processing " + q + ' at loop number ' + str(i)
                    errorLog.write(line)
                    break
                for tweet in response['statuses']:#add each tweet to a dictionary
                    try:
                        tweetsDicitonary[tweet['id']] = tweet
                    except:
                        line = "\ndicitonary error at " + str(datetime.now()) + " while processing " + str(tweet['id'])
                        errorLog.write(line)
            file.seek(0)
            file.seek(0)
            json.dump(tweetsDicitonary,file)
            file.close()
        except:
            # Best-effort dump of whatever was collected before the failure.
            line = "\nFatal error at " + str(datetime.now()) + " while processing " + q
            errorLog.write(line)
            json.dump(tweetsDicitonary,file)
            file.close()
drewpj/cis400tweetfrequency
searchTweets.py
searchTweets.py
py
4,925
python
en
code
1
github-code
6
[ { "api_name": "datetime.datetime.now", "line_number": 47, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 47, "usage_type": "name" }, { "api_name": "datetime.datetime.now", "line_number": 57, "usage_type": "call" }, { "api_name": "datetim...
26096479620
import os
import pickle

import pandas as pd
import numpy as np

# Load the prepared dataset and extract the pollution target series.
final_df = pd.read_csv("prepared_final_data.csv")
print(final_df)

values = final_df["pollution"].values
print(values)

print(final_df.columns)

"""# Normalized the data"""

from sklearn.preprocessing import MinMaxScaler

# Scale pollution values into [0, 1] before feeding the LSTM.
print(values)
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_dataset = scaler.fit_transform(values.reshape(-1, 1))


def to_supervised(window_size, train):
    """Turn a (time, features) array into supervised (X, Y) pairs.

    X[i] holds `window_size` consecutive rows ending just before row i;
    Y[i] is the first feature of row i (the value to predict).
    """
    X = []
    Y = []
    for i in range(window_size, len(train)):
        X.append(train[i-window_size:i, :])
        Y.append(train[i, 0:1])
    return np.array(X), np.array(Y)


feature, label = to_supervised(window_size=5, train=scaled_dataset)

# Hold out the first year of hourly data (24*365 rows) as the test set.
n_train = 24*365
X_train, X_test = feature[n_train:,], feature[:n_train,]
print('X_train', X_train.shape)
print('X_test', X_test.shape)
Y_train, Y_test = label[n_train:,], label[:n_train,]
print('Y_train', Y_train.shape)
print('Y_test', Y_test.shape)

import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM

# Three stacked LSTM layers with dropout, ending in one regression output.
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units=1))
model.compile(optimizer='adam', loss='mean_squared_error')

from keras.callbacks import EarlyStopping
es_callback = EarlyStopping(monitor='val_loss', patience=3, min_delta=0.01)

# Reuse a previously trained model if one was saved, otherwise train and save.
path = 'air_pollution_forecasting_model'
isdir = os.path.isdir(path)
print(isdir)
if isdir:
    reconstructed_model = keras.models.load_model("air_pollution_forecasting_model")
    model = reconstructed_model
else:
    model.fit(X_train, Y_train, validation_split=0.1, epochs=10, batch_size=32, callbacks=[es_callback])
    model.save("air_pollution_forecasting_model")

# BUGFIX: removed a stray breakpoint() left over from debugging — it dropped
# every non-interactive run of this script into the debugger.
Y_pred = np.round(model.predict(X_test), 2)

from sklearn.metrics import mean_squared_error
mse = mean_squared_error(Y_test, Y_pred)
rmse = np.sqrt(mse)
print(rmse)

# Scaling back to the original scale
d = scaled_dataset[:8760, :]
print('dummy', d.shape)
print('Y_pred', Y_pred.shape)
Y_predicted = np.concatenate((Y_pred, d[:8760, 1:]), axis=1)
print('concat y_pred', Y_pred.shape)
Y_tested = np.concatenate((Y_test, d[:8760, 1:]), axis=1)
print('concat Y_test', Y_test.shape)

Y_predicted = scaler.inverse_transform(Y_predicted)
Y_tested = scaler.inverse_transform(Y_tested)
Y_predicted = Y_predicted[:, 0:1]
Y_tested = Y_tested[:, 0:1]
print('Y_tested', Y_tested.shape)
print('Y_predicted', Y_predicted.shape)

import matplotlib.pyplot as plt
plt.plot(Y_predicted[:100, :], color='green')
plt.plot(Y_tested[:100, :], color='red')
plt.title("Air Pollution Prediction (Multivariate)")
plt.xlabel("Date")
plt.ylabel("Pollution level")
plt.savefig("results.png")

# Persist the fitted scaler so inference can reuse the same normalisation.
# (Context manager fixes the leaked file handle of the original one-liner.)
with open('min_max_scaler.pkl', 'wb') as scaler_file:
    pickle.dump(scaler, scaler_file)
manisha841/Air-Quality-Index-Prediction
train.py
train.py
py
3,028
python
en
code
0
github-code
6
[ { "api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call" }, { "api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 18, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 31, "usage_type": "call" }, { "api_name": "keras.m...
19167044996
""" A collection of neural network code. The first part of the script includes blocks, which are the building blocks of our models. The second part includes the actual Pytorch models. """ import torch import torchvision.transforms as transforms class ConvBlock(torch.nn.Module): """ A ConvBlock represents a convolution. It's not just a convolution however, as some common operations (dropout, activation, batchnorm, 2x2 pooling) can be set and run in the order mentioned. """ def __init__( self, dim, n_out, kernel_size=3, stride=1, padding=1, batchnorm=False, dropout=0, activation=True, ): """ A convolution operation """ super(ConvBlock, self).__init__() n_in = int(dim[0]) self.conv2d = torch.nn.Conv2d( n_in, n_out, kernel_size=kernel_size, stride=stride, padding=padding ) self.batchnorm = torch.nn.BatchNorm2d(n_out) if batchnorm else None self.activation = torch.nn.ReLU(inplace=True) if activation else None self.dropout = torch.nn.Dropout2d(dropout) if dropout else None dim[0] = n_out dim[1:] = 1 + (dim[1:] + padding * 2 - kernel_size) // stride self.n_params = n_out * (n_in * kernel_size * kernel_size + (3 if batchnorm else 1)) print( "Conv2d in %4i out %4i h %4i w %4i k %i s %i params %9i" % (n_in, *dim, kernel_size, stride, self.n_params) ) def forward(self, batch): """ Forward the 4D batch """ out = self.conv2d(batch) if self.activation: out = self.activation(out) if self.batchnorm: out = self.batchnorm(out) if self.dropout: out = self.dropout(out) return out class LinearBlock(torch.nn.Module): """ A LinearBlock represents a fully connected layer. It's not just this, as some common operations (dropout, activation, batchnorm) can be set and run in the order mentioned. 
""" def __init__(self, dim, n_out, batchnorm=False, dropout=0.0, activation=True): """ A fully connected operation """ super(LinearBlock, self).__init__() n_in = int(dim[0]) self.linear = torch.nn.Linear(n_in, n_out) dim[0] = n_out if type(n_out) in (int, float) else n_out[0] self.batchnorm = torch.nn.BatchNorm1d(dim[0]) if batchnorm else None self.dropout = torch.nn.Dropout(dropout) if dropout > 0.0 else None self.activation = torch.nn.ReLU(inplace=True) if activation else None self.n_params = n_out * (n_in + (3 if batchnorm else 1)) print( "Linear in %4i out %4i params %9i" % (n_in, n_out, self.n_params) ) def forward(self, batch): """ Forward the 2D batch """ out = self.linear(batch) if self.activation: out = self.activation(out) if self.batchnorm: out = self.batchnorm(out) if self.dropout: out = self.dropout(out) return out class PoolBlock(torch.nn.Module): """ A PoolBlock is a pooling operation that happens on a matrix, often between convolutional layers, on each channel individually. By default only two are supported: max and avg. """ def __init__(self, dim, pool="max", size=None, stride=None): """ A pooling operation """ super(PoolBlock, self).__init__() stride = size if stride is None else stride if size: dim[1:] //= stride else: size = [int(x) for x in dim[1:]] dim[1:] = 1 if pool == "max": self.pool = torch.nn.MaxPool2d(size, stride=stride, padding=0) elif pool == "avg": self.pool = torch.nn.AvgPool2d(size, stride=stride, padding=0) self.n_params = 0 def forward(self, batch): """ Forward the 4D batch """ out = self.pool(batch) return out class ViewBlock(torch.nn.Module): """ A ViewBlock restructures the shape of our activation maps so they're represented as 1D instead of 3D. 
""" def __init__(self, dim, shape=-1): """ A reshape operation """ super(ViewBlock, self).__init__() self.shape = shape if self.shape == -1: dim[0] = dim[0] * dim[1] * dim[2] dim[-2] = 0 dim[-1] = 0 else: dim[:] = shape self.n_params = 0 print("View d %4i h %4i w %4i" % (*dim,)) def forward(self, batch): """ Forward the 4D batch into a 2D batch """ return batch.view(batch.size(0), self.shape) class Tiny(torch.nn.Module): """ A small and quick model """ def __init__(self, in_dim, n_status, n_out): """ Args: in_dim (list): The input size of each example n_status (int): Number of status inputs to add n_out (int): Number of values to predict """ super(Tiny, self).__init__() self.n_status = n_status dim = in_dim.copy() self.feat = torch.nn.Sequential( ConvBlock(dim, 16), PoolBlock(dim, "max", 2), ConvBlock(dim, 32), PoolBlock(dim, "max", 2), ConvBlock(dim, 48), PoolBlock(dim, "max", 2), ConvBlock(dim, 64), PoolBlock(dim, "max", 2), ) self.view = ViewBlock(dim) dim[0] += n_status self.head = torch.nn.Sequential(LinearBlock(dim, n_out, activation=False)) self.n_params = sum([x.n_params for x in self.feat]) + sum([x.n_params for x in self.head]) print("Tiny params %9i" % self.n_params) def forward(self, batch, status): """ Args: batch (4D tensor): A batch of camera input. status (1D tensor): Status inputs indicating things like speed. """ out = self.feat(batch) out = self.view(out) if self.n_status: out = torch.cat((out, status), 1) out = self.head(out) return out class StarTree(torch.nn.Module): """ A medium-sized model that uses layers with few activation maps to efficiently increase the number of layers, and therefore nonlinearities. 
""" def __init__(self, in_dim, n_status, n_out): """ Args: in_dim (list): The input size of each example n_status (int): Number of status inputs to add n_out (int): Number of values to predict """ super(StarTree, self).__init__() self.n_status = n_status dim = in_dim.copy() self.feat = torch.nn.Sequential( ConvBlock(dim, 64, dropout=0.25), ConvBlock(dim, 16), ConvBlock(dim, 32), PoolBlock(dim, "max", 2), ConvBlock(dim, 24), ConvBlock(dim, 48), PoolBlock(dim, "max", 2), ConvBlock(dim, 32), ConvBlock(dim, 64), PoolBlock(dim, "max", 2), ConvBlock(dim, 40), ConvBlock(dim, 80, dropout=0.25), PoolBlock(dim, "max", 2), ) self.view = ViewBlock(dim) dim[0] += n_status self.head = torch.nn.Sequential( LinearBlock(dim, 50), LinearBlock(dim, n_out, activation=False), ) self.n_params = sum([x.n_params for x in self.feat]) + sum([x.n_params for x in self.head]) print("StarTree params %9i" % self.n_params) def forward(self, batch, status): """ Args: batch (4D tensor): A batch of camera input. status (1D tensor): Status inputs indicating things like speed. 
""" out = self.feat(batch) out = self.view(out) if self.n_status: out = torch.cat((out, status), 1) out = self.head(out) return out def train_epoch(device, model, optimizer, criterion, loader): """ Run the optimzer over all batches in an epoch """ model.train() epoch_loss = 0 batch_index = 0 for batch_index, (examples, statuses, labels) in enumerate(loader): optimizer.zero_grad() guesses = model(examples.to(device), statuses.to(device)) loss = criterion(guesses, labels.to(device)) loss.backward() optimizer.step() epoch_loss += loss.item() return epoch_loss / (batch_index + 1) def test_epoch(device, model, criterion, loader): """ Run the evaluator over all batches in an epoch """ model.eval() epoch_loss = 0 batch_index = 0 with torch.no_grad(): for batch_index, (examples, statuses, labels) in enumerate(loader): guesses = model(examples.to(device), statuses.to(device)) loss = criterion(guesses, labels.to(device)) epoch_loss += loss.item() return epoch_loss / (batch_index + 1) def compose_transforms(transform_config): """ Apply all image transforms """ transform_list = [] for perturb_config in transform_config: if perturb_config["name"] == "colorjitter": transform = transforms.ColorJitter( brightness=perturb_config["brightness"], contrast=perturb_config["contrast"], saturation=perturb_config["saturation"], hue=perturb_config["hue"], ) transform_list.append(transform) transform_list.append(transforms.ToTensor()) return transforms.Compose(transform_list)
notkarol/derplearning
derp/model.py
model.py
py
9,661
python
en
code
40
github-code
6
[ { "api_name": "torch.nn", "line_number": 10, "usage_type": "attribute" }, { "api_name": "torch.nn.Conv2d", "line_number": 31, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 31, "usage_type": "attribute" }, { "api_name": "torch.nn.BatchNorm2d", ...
36562134507
import sys
import json
import time
import numpy as np
import argparse
from operator import itemgetter
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
# FIX: coo_matrix is used by convert_counts_to_pmi2 but was never imported
# explicitly (it only worked if a star-import below happened to provide it).
from scipy.sparse import coo_matrix
from math import sqrt
from math import log

from upper_learning_corpus import LearningCorpus
from sparse_matrix import *
from ranking import *


def convert_counts_to_pmi2(matrix, rowSum, colSum):
    """Convert raw co-occurrence counts to PMI by rewriting COO data in place.

    rowSum/colSum are dicts keyed by *string* indices, matching the row/col
    marginal totals of `matrix`. Returns a CSC matrix of PMI values.
    """
    totalSum = sum(rowSum.values())
    sys.stderr.write('Converting to csc_matrix format... ')
    startTime = time.time()
    matrix = coo_matrix(matrix)
    sys.stderr.write('done. Time taken: '+str(time.time()-startTime)+' secs\n')
    totalEntries = len(matrix.row)
    sys.stderr.write('Num entries: '+str(totalEntries)+'\n')
    numEntries = 1.
    # op_flags=['readwrite'] lets us overwrite each count with its PMI value.
    for r, c, val in zip(np.nditer(matrix.row), np.nditer(matrix.col),
                         np.nditer(matrix.data, op_flags=['readwrite'])):
        pi, pj, pij = (1.*val/rowSum[str(r)], 1.*val/colSum[str(c)], 1.*val/totalSum)
        val[...] = log(pij/(pi*pj))
        if numEntries % 1000000 == 0:
            sys.stderr.write(str(numEntries)+' ')
        numEntries += 1
    sys.stderr.write('done!\n')
    return csc_matrix((matrix.data, (matrix.row, matrix.col)), shape=matrix.shape)


def convert_counts_to_pmi(matrix, rowSum, colSum):
    """Convert raw co-occurrence counts to PMI via DOK iteration.

    For a square (symmetric) matrix only the upper triangle is visited and the
    PMI is mirrored to both (i, j) and (j, i). Returns a CSC matrix.
    """
    totalSum = sum(rowSum.values())
    sys.stderr.write('Converting to dok_matrix format... ')
    startTime = time.time()
    matrix = dok_matrix(matrix)
    sys.stderr.write('done. Time taken: '+str(time.time()-startTime)+' secs\n')
    totalEntries = len(matrix)
    sys.stderr.write('Num entries: '+str(totalEntries)+'\n')
    r, c = matrix.shape
    numEntries = 1.
    # FIX: the original iterated `matrix.iteritems()` (Python-2-only) while
    # assigning into the same dok matrix, which mutates the dict during
    # iteration (the symmetric branch can insert brand-new (j, i) keys).
    # Snapshot the entries first, then write.
    entries = list(matrix.items())
    # symmetric matrix
    if r == c:
        for key, val in entries:
            i, j = key
            i, j = (str(i), str(j))
            if int(i) <= int(j):
                pi, pj, pij = (1.*val/rowSum[i], 1.*val/colSum[j], 1.*val/totalSum)
                pmi = log(pij/(pi*pj))
                matrix[int(i), int(j)] = pmi
                matrix[int(j), int(i)] = pmi
            if numEntries % 1000000 == 0:
                sys.stderr.write(str(numEntries)+' ')
            numEntries += 1
    else:
        for key, val in entries:
            i, j = key
            i, j = (str(i), str(j))
            pi, pj, pij = (1.*val/rowSum[i], 1.*val/colSum[j], 1.*val/totalSum)
            matrix[int(i), int(j)] = log(pij/(pi*pj))
            if numEntries % 1000000 == 0:
                sys.stderr.write(str(numEntries)+' ')
            numEntries += 1
    sys.stderr.write('done!\n')
    return csc_matrix(matrix)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--matrixfile", type=str, default=None, help="Matrix file name")
    parser.add_argument("-d", "--dictfile", type=str, help="Dictionary file name")
    parser.add_argument("-o", "--outputfile", type=str, default=None, help="Output file name")
    args = parser.parse_args()
    outFileName = args.outputfile

    # FIX: the dict file was never closed; read it inside a `with` block.
    # First line: "colCutoff [rowCutoff] windowSize"; then four JSON lines.
    with open(args.dictfile, 'r') as dictFile:
        values = dictFile.readline().strip().split()
        if len(values) == 3:
            colCutoff, rowCutoff, windowSize = values
        else:
            colCutoff, windowSize = values
            rowCutoff = 0.
        vocab = json.loads(dictFile.readline())
        wordFeatures = json.loads(dictFile.readline())
        rowSum = json.loads(dictFile.readline())
        colSum = json.loads(dictFile.readline())

    contextMat = load_sparse_matrix(args.matrixfile)
    sys.stderr.write("windowSize: "+str(windowSize)+" colCutoff: "+str(colCutoff)+" rowCutoff: "+str(rowCutoff)+'\n')
    sys.stderr.write("featLen: "+str(len(wordFeatures))+" vocabLen: "+str(len(vocab))+'\n')
    sys.stderr.write('Read the matrix!\n')

    ''' Convert the matrix here! '''
    contextMat = convert_counts_to_pmi(contextMat, rowSum, colSum)

    sys.stderr.write('Writing the matrix now... ')
    if outFileName is None:
        outFileName = args.dictfile.replace('.dict', '_pmi')
    save_sparse_matrix(outFileName, contextMat)
    sys.stderr.write('done!\n')
mfaruqui/vector-semantics
src/svd/convert_counts_to_pmi.py
convert_counts_to_pmi.py
py
4,114
python
en
code
5
github-code
6
[ { "api_name": "sys.stderr.write", "line_number": 21, "usage_type": "call" }, { "api_name": "sys.stderr", "line_number": 21, "usage_type": "attribute" }, { "api_name": "time.time", "line_number": 22, "usage_type": "call" }, { "api_name": "sys.stderr.write", "li...
25026171656
from flask import abort
from flask_restx import Resource, Namespace, Model, fields, reqparse
from infraestructura.alumnos_repo import AlumnosRepo
from api.cursos_api import modeloCurso
from flask_restx.inputs import date

# Repository backing every endpoint in this namespace.
repo = AlumnosRepo()

nsAlumno = Namespace('Alumnos', description='Administrador de Alumno')

# Payload model without the primary key (used when creating a student).
modeloAlumnoSinID = Model('AlumnoSinID', {
    'nombre': fields.String(),
    'direccion': fields.String(),
    'sexo': fields.String(),
    'edad': fields.Integer(),
    'fecha_baja': fields.Date(),
})

# Full model: the creation payload plus the generated id.
modeloAlumno = modeloAlumnoSinID.clone('Alumno', {
    'id': fields.Integer(),
})

nsAlumno.models[modeloAlumno.name] = modeloAlumno
nsAlumno.models[modeloAlumnoSinID.name] = modeloAlumnoSinID

# Request parser for creating a student; all core fields are mandatory.
nuevoAlumnoParser = reqparse.RequestParser(bundle_errors=True)
nuevoAlumnoParser.add_argument('nombre', type=str, required=True)
nuevoAlumnoParser.add_argument('direccion', type=str, required=True)
nuevoAlumnoParser.add_argument('sexo', type=str, required=True)
nuevoAlumnoParser.add_argument('edad', type=int, required=True)
nuevoAlumnoParser.add_argument('fecha_baja', type=date, required=False)

# Edit parser: same fields plus the mandatory id.
editarAlumnoParser = nuevoAlumnoParser.copy()
editarAlumnoParser.add_argument('id', type=int, required=True)


@nsAlumno.route('/')
class AlumnosResource(Resource):
    @nsAlumno.marshal_list_with(modeloAlumno)
    def get(self):
        # List every student.
        return repo.get_all()

    @nsAlumno.expect(modeloAlumnoSinID)
    @nsAlumno.marshal_with(modeloAlumno)
    def post(self):
        # Create a student from the validated payload.
        data = nuevoAlumnoParser.parse_args()
        alumno = repo.agregar(data)
        if alumno:
            return alumno, 201
        abort(500)


@nsAlumno.route('/<int:id>')
class AlumnoResource(Resource):
    @nsAlumno.marshal_with(modeloAlumno)
    def get(self, id):
        # Fetch one student by id, or 404 if it does not exist.
        alumno = repo.get_by_id(id)
        if alumno:
            return alumno, 200
        abort(404)

    @nsAlumno.expect(modeloAlumno)
    def put(self, id):
        # Update a student from the edit payload.
        data = editarAlumnoParser.parse_args()
        if repo.modificar(id, data):
            return 'Alumno actualizado', 200
        abort(404)


# NOTE(review): the original reuses the class name AlumnoResource for the
# routes below; kept as-is since flask-restx derives endpoint names from it.
@nsAlumno.route('/baja/<int:id>')
class AlumnoResource(Resource):
    def put(self, id):
        # Soft-delete (dar de baja) a student.
        if repo.baja(id):
            return 'Alumno dado de baja', 200
        abort(400)


@nsAlumno.route('/buscar/<int:curso>')
class AlumnoResource(Resource):
    @nsAlumno.marshal_list_with(modeloAlumno)
    def get(self, curso):
        # List the students enrolled in a given course.
        alumnos = repo.get_alumno_curso(curso)
        if alumnos:
            return alumnos, 200
        abort(404)
PepoPalo/Final-Laboratorio-Diciembre2021
Backend/api/alumnos_api.py
alumnos_api.py
py
3,258
python
es
code
1
github-code
6
[ { "api_name": "infraestructura.alumnos_repo.AlumnosRepo", "line_number": 8, "usage_type": "call" }, { "api_name": "flask_restx.Namespace", "line_number": 11, "usage_type": "call" }, { "api_name": "flask_restx.Model", "line_number": 13, "usage_type": "call" }, { "a...
32144899005
import os

import pandas as pd
from sklearn.model_selection import train_test_split


def read_fasta(file_path):
    """Parse a FASTA file into a DataFrame with Header and Sequence columns."""
    sequences = {"Header": [], "Sequence": []}
    current_header = None
    current_sequence = ""
    with open(file_path, "r") as file:
        for line in file:
            line = line.strip()
            if line.startswith(">"):
                # New record: flush the one being built, if any.
                if current_header is not None:
                    sequences["Header"].append(current_header)
                    sequences["Sequence"].append(current_sequence)
                current_header = line[1:]
                current_sequence = ""
            else:
                # Continuation line of the current sequence.
                current_sequence += line
    # Flush the final record.
    if current_header is not None:
        sequences["Header"].append(current_header)
        sequences["Sequence"].append(current_sequence)
    return pd.DataFrame(sequences)


def extract_label(header):
    """Return the label after the first '|' in a FASTA header, or None."""
    parts = header.split("|")
    if len(parts) > 1:
        return parts[1].strip()
    return None


file_path = "data/pharos/pharos.fasta"
fasta_df = read_fasta(file_path)
fasta_df["Label"] = fasta_df["Header"].apply(extract_label)

tclin_df = fasta_df[fasta_df["Label"] == "Tclin"]
tdark_df = fasta_df[fasta_df["Label"] == "Tdark"]

# Balance the classes: sample as many negatives (Tdark) as positives (Tclin).
length_tclin_df = len(tclin_df)
random_tdark_df = tdark_df.sample(n=length_tclin_df, random_state=42)

# Split both classes into train / independent-test sets.
test_size = 0.2
tclin_train, tclin_test = train_test_split(
    tclin_df, test_size=test_size, random_state=42
)
tdark_train, tdark_test = train_test_split(
    random_tdark_df, test_size=test_size, random_state=42
)

train_folder = "data/pharos/fastadata/Train"
test_folder = "data/pharos/fastadata/Independent_Test"
# FIX: the original created these folders three separate times with manual
# exists() checks; makedirs(..., exist_ok=True) does it once, race-free, and
# also creates any missing parent directories.
for folder in (train_folder, test_folder):
    os.makedirs(folder, exist_ok=True)


def extract_header(identifier):
    """Return the portion of a FASTA identifier before the first '|'."""
    return identifier.split("|")[0]


def write_fasta(filename, dataframe):
    """Write the Header/Sequence rows of `dataframe` to `filename` as FASTA."""
    with open(filename, "w") as file:
        for index, row in dataframe.iterrows():
            header = extract_header(row["Header"])
            file.write(f">{header}\n{row['Sequence']}\n")


# Save the four splits to FASTA files in the train and test folders.
write_fasta(os.path.join(train_folder, "positive_train_sequence.fasta"), tclin_train)
write_fasta(os.path.join(test_folder, "positive_test_sequence.fasta"), tclin_test)
write_fasta(os.path.join(train_folder, "negative_train_sequence.fasta"), tdark_train)
write_fasta(os.path.join(test_folder, "negative_test_sequence.fasta"), tdark_test)
txz32102/paper
util/sample.py
sample.py
py
3,139
python
en
code
0
github-code
6
[ { "api_name": "pandas.DataFrame", "line_number": 28, "usage_type": "call" }, { "api_name": "sklearn.model_selection.train_test_split", "line_number": 59, "usage_type": "call" }, { "api_name": "sklearn.model_selection.train_test_split", "line_number": 64, "usage_type": "ca...
23873824195
import cv2 import os # Input folder containing the saved images image_folder = '/Users/tobieabel/Desktop/video_frames/ConcatVideo/' # Output video file path output_video_path = '/Users/tobieabel/Desktop/video_frames/Youtube/v3_a demo.mp4' # Get the list of image files in the input folder image_files = os.listdir(image_folder) image_files.remove('.DS_Store') def file_sort_key(filename): # Extract the numeric portion of the filename number = int(os.path.splitext(filename)[0]) return number # Sort the files chronologically sorted_files = sorted(image_files, key=file_sort_key) print(sorted_files) # Get the dimensions of the first image to initialize the video writer first_image_path = os.path.join(image_folder, sorted_files[0]) first_image = cv2.imread(first_image_path) height, width, _ = first_image.shape #need to be careful of this. I scrapped a video from youtube whose resolution was an odd width 1740, height 988 #and these dimensions didn't work with cv2.VideoWriter so I had to use cv2.resize to change the images to 1920x1080 which is the closest accepatable format # Define the codec and create the video writer fourcc = cv2.VideoWriter_fourcc(*'mp4v') fps = 30 # Adjust as needed video_writer = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height)) # Loop through the image files and write them to the video for i in sorted_files: image_path = os.path.join(image_folder, i) image = cv2.imread(image_path) # Write the image to the video writer video_writer.write(image) # Release the video writer video_writer.release() print(f"Video saved to: {output_video_path}")
tobieabel/demo-v3-People-Counter
Create_video.py
Create_video.py
py
1,633
python
en
code
0
github-code
6
[ { "api_name": "os.listdir", "line_number": 11, "usage_type": "call" }, { "api_name": "os.path.splitext", "line_number": 15, "usage_type": "call" }, { "api_name": "os.path", "line_number": 15, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_num...
1363723921
from typing import Any, Dict, List, Type, TypeVar, Union

from attrs import define as _attrs_define
from attrs import field as _attrs_field

from ..types import UNSET, Unset

T = TypeVar("T", bound="FollowUpPriorityV2ResponseBody")


@_attrs_define
class FollowUpPriorityV2ResponseBody:
    """
    Example:
        {'description': 'A follow-up that requires immediate attention.', 'id': '01GNW4BAQ7XRMFF6FHKNXDFPRW',
            'name': 'Urgent', 'rank': 10}

    Attributes:
        id (str): Unique identifier for the follow-up priority option Example: 01GNW4BAQ7XRMFF6FHKNXDFPRW.
        name (str): Name of the follow-up priority option Example: Urgent.
        rank (int): Rank is used to order the follow-up priority options correctly Example: 10.
        description (Union[Unset, str]): Description of the follow-up priority option Example: A follow-up that
            requires immediate attention..
    """

    id: str
    name: str
    rank: int
    description: Union[Unset, str] = UNSET
    additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        # Unknown keys come first, then the declared fields; the optional
        # description is emitted only when it was actually set.
        field_dict: Dict[str, Any] = dict(self.additional_properties)
        field_dict["id"] = self.id
        field_dict["name"] = self.name
        field_dict["rank"] = self.rank
        if self.description is not UNSET:
            field_dict["description"] = self.description
        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        # Pop the declared fields; whatever remains becomes additional_properties.
        d = src_dict.copy()
        follow_up_priority_v2_response_body = cls(
            id=d.pop("id"),
            name=d.pop("name"),
            rank=d.pop("rank"),
            description=d.pop("description", UNSET),
        )
        follow_up_priority_v2_response_body.additional_properties = d
        return follow_up_priority_v2_response_body

    @property
    def additional_keys(self) -> List[str]:
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
expobrain/python-incidentio-client
incident_io_client/models/follow_up_priority_v2_response_body.py
follow_up_priority_v2_response_body.py
py
2,629
python
en
code
4
github-code
6
[ { "api_name": "typing.TypeVar", "line_number": 8, "usage_type": "call" }, { "api_name": "typing.Union", "line_number": 29, "usage_type": "name" }, { "api_name": "types.Unset", "line_number": 29, "usage_type": "name" }, { "api_name": "types.UNSET", "line_number...
37407208814
from jinja2 import Environment, BaseLoader
from io import BytesIO
import plotly
import base64

# Usage:
#     export = ExportHTML()
#     export.render('testclass.html')


class ExportHTML:
    # Variables substituted into the template on render.
    __template_vars = {'title': 'Hello World', 'body': 'Hello World !!!'}

    # Default HTML template with print-friendly table styling.
    __template_html = '''
    <html>
    <head lang="en">
        <meta charset="UTF-8">
        <title>{{ title }}</title>
        <style>
        table {
            border-collapse: collapse;
            width: 100%;
        }
        th {
            text-align: center;
            background-color: #ffd700;
            color: black;
        }
        tr:nth-child(even) {background-color: #f2f2f2;}
        tr {
            text-align: right;
            page-break-inside: avoid;
        }
        thead { display: table-header-group; }
        tfoot { display: table-row-group; }
        .break-before { page-break-before: always; }
        </style>
    </head>
    <body>
        <h1>Header</h1>
        {{ body }}
        <h2 class="break-before">Next Page</h2>
    </body>
    </html>
    '''

    def encode_graph(self, fig):
        # Serialize a matplotlib figure to PNG and embed it as an inline
        # base64 <img> tag.
        buffer = BytesIO()
        fig.savefig(buffer, format='png', bbox_inches='tight')
        encoded = base64.b64encode(buffer.getvalue()).decode('utf-8')
        return "<img src='data:image/png;base64,{}'>".format(encoded)

    def plotly_img_uri(self, fig, height=300, width=1200,
                       orca_path='C:/Users/Administrator/anaconda3/orca_app/orca.exe'):
        # Render a plotly figure through orca and embed it as an inline
        # base64 <img> tag with explicit dimensions.
        plotly.io.orca.config.executable = orca_path
        img_uri = base64.b64encode(
            plotly.io.to_image(fig, width=width, height=height)
        ).decode('ascii')
        return '<img style="width: {width}; height: {height}" ' \
               'src="data:image/png;base64,{img_uri}" />'.format(
                   width=width, height=height, img_uri=img_uri)

    @property
    def template_vars(self):
        return self.__template_vars

    @template_vars.setter
    def template_vars(self, var_dict):
        self.__template_vars = var_dict

    @property
    def template_html(self):
        return self.__template_html

    @template_html.setter
    def template_html(self, htmlString):
        self.__template_html = htmlString

    def render(self, output_file):
        # Compile the template from its string form, substitute the current
        # variables, and write the resulting HTML to disk.
        template = Environment(loader=BaseLoader()).from_string(self.template_html)
        html_out = template.render(self.template_vars)
        with open(output_file, "w") as fh:
            fh.write(html_out)
etq-quant/etqbankloan
Lib/etiqalib/export_html.py
export_html.py
py
2,768
python
en
code
0
github-code
6
[ { "api_name": "io.BytesIO", "line_number": 55, "usage_type": "call" }, { "api_name": "base64.b64encode", "line_number": 57, "usage_type": "call" }, { "api_name": "plotly.io", "line_number": 63, "usage_type": "attribute" }, { "api_name": "base64.b64encode", "li...
12948066350
import matplotlib.pyplot as plt tiempo = [0,1,2,3,4,5] sensor = [4,5,6,8,9, 10] plt.plot(tiempo,sensor,'--,r') plt.title('Grafico sensor contra el tiempo') plt.xlabel('Tiempo(s)') plt.ylabel('Voltaje(v)') plt.savefig('sensor.png') plt.show() # Nota: se le puede poner el simbolo para que se grafique('--'), si no se pone nada se grafica como una linea recta # DICCIONARIO diccionario = {} diccionario['NombresEstudiantes'] = ['Andrea', 'Nicolle', 'Isabel', 'Santiago'] diccionario['EdadEstudiantes'] = [18,20,19,15] diccionario['Peso'] = [60,55,70,78] print(diccionario) print(diccionario['NombresEstudiantes'][-1],diccionario['EdadEstudiantes'][-1],diccionario['Peso'][-1])
vero-obando/Programacion
Clases/Graficos/curvas.py
curvas.py
py
679
python
es
code
0
github-code
6
[ { "api_name": "matplotlib.pyplot.plot", "line_number": 4, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 4, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.title", "line_number": 5, "usage_type": "call" }, { "api_name": "matplot...
19809320159
import time import constants as cons import matplotlib.pyplot as plt from preprocessing.images_reader import ImagesReader start_time = time.time() print('reading images...') reader = ImagesReader(cons.PREPROCESSED_DATASET_DIR) train_images = reader.read_train_images() classes = [None] * len(train_images) samples = [None] * len(train_images) for i, image_class in enumerate(train_images): classes[i] = image_class samples[i] = len(train_images[image_class]) plt.plot(classes, samples) plt.show() end_time = time.time() print('done in {:.2f}s'.format(end_time - start_time))
sachokFoX/caltech_256
code/run_data_distribution_analysis.py
run_data_distribution_analysis.py
py
589
python
en
code
0
github-code
6
[ { "api_name": "time.time", "line_number": 6, "usage_type": "call" }, { "api_name": "preprocessing.images_reader.ImagesReader", "line_number": 9, "usage_type": "call" }, { "api_name": "constants.PREPROCESSED_DATASET_DIR", "line_number": 9, "usage_type": "attribute" }, ...
25965027391
import json
import os
from contextlib import suppress
from math import sqrt
from typing import Tuple

import numpy as np
import pandas as pd
from openpyxl import load_workbook, styles, utils
from PIL import Image


def to_excel(
    image: Image, path: str, lower_image_size_by: int = 10, **spreadsheet_kwargs
) -> None:
    """
    - Added on release 0.0.1;
    - Coded originally on https://github.com/Eric-Mendes/image2excel

    Saves an image as a `.xlsx` file by coloring its cells each pixel's color.

    ## Parameters
    * :param image: Your image opened using the `PIL.Image` module;
    * :param path: The path that you want to save your output file.
    Example: `/home/user/Documents/my_image.xlsx`;
    * :param lower_image_size_by: A factor that the function will divide
    your image's dimensions by. Defaults to `10`. Lowering the dimensions
    matters: a big image takes long to process and the spreadsheet takes
    long to open;
    * :param **spreadsheet_kwargs: See below.

    ## Spreadsheet Kwargs
    Optional parameters to tweak the spreadsheet's appearance.
    * :param row_height (`float`): the rows' height. Defaults to `15`;
    * :param column_width (`float`): the columns' width. Defaults to `2.3`;
      * The defaults aim to make the cells squared, but being hardcoded
      they may need tweaking on your device;
    * :param delete_cell_value (`bool`): whether to keep or not the text
    corresponding to that color. Defaults to `True`;
    * :param zoom_scale (`int`): how much to zoom in or out on the
    spreadsheet. Defaults to `20`.

    ## Return
    * :return: `None`, but outputs a `.xlsx` file on the given `path`.
    """
    image = image.convert("RGB")

    # Resizing image
    image = image.resize(
        (image.size[0] // lower_image_size_by, image.size[1] // lower_image_size_by)
    )

    # OpenPyxl colors work in a weird way: they want an RRGGBB hex string.
    image_colors_processed = [
        ["%02x%02x%02x" % tuple(item) for item in row]
        for row in np.array(image).tolist()
    ]

    df = pd.DataFrame(image_colors_processed)
    image_name = os.path.splitext(os.path.split(path)[1])[0]

    # Saving a DataFrame where each cell has a text corresponding to the RGB
    # color its background should be
    df.to_excel(path, index=False, header=False)

    # Loading the excel file, painting each cell with its color and saving the
    # updates
    wb = load_workbook(path)

    ws = wb.active
    ws.title = image_name

    for row in range(1, df.shape[0] + 1):
        for col in range(1, df.shape[1] + 1):
            cell = ws.cell(row=row, column=col)

            # Makes cells squared
            ws.row_dimensions[row].height = spreadsheet_kwargs.get("row_height", 15)
            ws.column_dimensions[
                utils.get_column_letter(col)
            ].width = spreadsheet_kwargs.get("column_width", 2.3)

            # Painting the cell
            cell.fill = styles.PatternFill(
                start_color=cell.value, end_color=cell.value, fill_type="solid"
            )
            if spreadsheet_kwargs.get("delete_cell_value", True):
                cell.value = None  # Deletes the text from the cell

    # Saves spreadsheet already zoomed in or out
    ws.sheet_view.zoomScale = spreadsheet_kwargs.get("zoom_scale", 20)
    wb.save(path)


def _datapack_format(minecraft_version: str) -> int:
    """Map a Minecraft client version string ('x.y.z') to its datapack
    `pack_format` number.

    Fixes over the original inline chain:
    * versions are compared as integer tuples, not lexicographic strings
      (string compare made e.g. '1.9.0' sort above '1.18.2');
    * the original used bitwise `&` between string comparisons, which raised
      `TypeError` for every version from 1.15.0 to 1.18.1;
    * gap versions (e.g. 1.14.5) no longer leave the result unbound — each
      format applies from its first version up to the next threshold.

    :raises ValueError: for malformed strings or versions below 1.13.0
    (datapacks don't exist before 1.13.0).
    """
    try:
        version = tuple(int(part) for part in minecraft_version.split("."))
    except ValueError:
        version = ()
    if len(version) != 3 or version < (1, 13, 0):
        # Message preserved from the original implementation.
        raise ValueError("This versions is incompatible with datapacks (below 1.13.0) or the version is writen wrong (correct: x.xx.x | wrong: x.x, x.xx)")
    # First client version of each pack_format, newest first.
    for first_version, pack_format in (
        ((1, 18, 2), 9),
        ((1, 18, 0), 8),
        ((1, 17, 0), 7),
        ((1, 16, 2), 6),
        ((1, 15, 0), 5),
    ):
        if version >= first_version:
            return pack_format
    return 4  # 1.13.0 - 1.14.x


def to_minecraft(
    image: Image,
    path: str,
    lower_image_size_by: int = 10,
    player_pos: Tuple[int, int, int] = (0, 0, 0),
    minecraft_version: str = '1.18.2',
) -> None:
    """
    - Added on release 0.0.1;
    - Coded originally on https://github.com/Eric-Mendes/pixel-art-map

    Saves an image as a minecraft datapack that when loaded into your world
    will build a pixel art of it on the player's position.

    ## Parameters
    * :param image: Your image opened using the `PIL.Image` module;
    * :param path: The path that you want to save your datapack.
    Example: `/home/user/Documents/my_image_datapack`;
    * :param lower_image_size_by: A factor that the function will divide
    your image's dimensions by. Defaults to `10`;
    * :param player_pos: The player's (x, y, z) position. Defaults to `(0, 0, 0)`;
    * :param minecraft_version: The minecraft client version (x.xx.x).
    Default is `1.18.2`.

    ## Return
    * :return: `None`, but outputs a datapack on the given `path`.

    ## Raises
    * `ValueError` for versions below 1.13.0 (no datapack support) or
    malformed version strings.
    """
    image = image.convert("RGB")

    # Makes the commands that the datapack will run when loaded.
    # Emits one `fill` command per run of equal blocks along a column,
    # so consecutive identical pixels collapse into a single command.
    def script(df, **kwargs):
        player_pos = [
            kwargs.get("player_x", 0),
            kwargs.get("player_y", 0),
            kwargs.get("player_z", 0),
        ]
        z = (df != df.shift()).cumsum()
        zri = z.reset_index()
        ix_name = z.index.name
        co_name = z.columns.name
        for i in z:
            v = zri.groupby(i)[ix_name].agg(["first", "last"])
            s = {co_name: i}
            e = {co_name: i}
            for _, r in v.iterrows():
                s[ix_name] = r["first"]
                e[ix_name] = r["last"]
                material = df.loc[r["first"], i]
                yield f'fill {s["x"] + player_pos[0]} {0 + player_pos[1]} {s["z"] + player_pos[2]} {e["x"] + player_pos[0]} {0 + player_pos[1]} {e["z"] + player_pos[2]} {material.split(",")[0].strip()}'

    # The blocks and the colors they have when looked at via map; each pixel
    # is mapped to the closest entry. NOTE: every "blocks" value must be a
    # tuple — the original had single-element entries without a trailing
    # comma (plain strings), which made the join below iterate characters
    # and emit garbage like "minecraft:l, minecraft:a, ...".
    blocks = [
        { "rgb": (127, 178, 56), "blocks": ("grass_block", "slime_block"), },
        { "rgb": (247, 233, 163), "blocks": ("sand", "birch_planks", "birch_log[axis=y]", "stripped_birch_log[axis=x]", "birch_wood", "stripped_birch_wood", "birch_sign", "birch_pressure_plate", "birch_trapdoor", "birch_stairs", "birch_slab", "birch_fence_gate", "birch_fence", "birch_door", "sandstone", "glowstone", "end_stone", "end_stone_brick_slab", "end_stone_brick_stairs", "end_stone_brick_wall", "bone_block", "turtle_egg", "scaffolding", "candle"), },
        { "rgb": (199, 199, 199), "blocks": ("mushroom_stem", "cobweb", "white_bed[part=head]", "white_candle"), },
        { "rgb": (255, 0, 0), "blocks": ("redstone_block", "tnt", "lava", "fire"), },
        { "rgb": (160, 160, 255), "blocks": ("ice", "frosted_ice", "packed_ice", "blue_ice"), },
        { "rgb": (167, 167, 167), "blocks": ("iron_block", "iron_door", "brewing_stand", "heavy_weighted_pressure_plate", "iron_trapdoor", "lantern", "anvil", "grindstone", "soul_lantern", "lodestone"), },
        { "rgb": (0, 124, 0), "blocks": ("oak_sapling", "spruce_sapling", "birch_sapling", "jungle_sapling", "acacia_sapling", "dark_oak_sapling", "dandelion", "poppy", "blue_orchid", "allium", "azure_bluet", "red_tulip", "orange_tulip", "white_tulip", "pink_tulip", "oxeye_daisy", "cornflower", "lily_of_the_valley", "wither_rose", "sunflower", "lilac", "rose_bush", "peony", "wheat[age=7]", "sugar_cane[age=9]", "pumpkin_stem[age=7]", "melon_stem[age=7]", "lily_pad", "cocoa[age=2]", "carrots[age=7]", "potatoes[age=7]", "beetroots[age=7]", "sweet_berry_bush[age=3]", "grass", "fern", "vine", "oak_leaves", "spruce_leaves", "birch_leaves", "jungle_leaves", "acacia_leaves", "dark_oak_leaves", "azalea_leaves", "flowering_azalea_leaves", "cactus[age=9]", "bamboo[age=1]", "cave_vines", "spore_blossom", "flowering_azalea", "big_dripleaf", "small_dripleaf"), },
        { "rgb": (255, 255, 255), "blocks": ("snow", "snow_block", "white_bed[part=foot]", "white_wool", "white_stained_glass", "white_carpet", "white_shulker_box", "white_glazed_terracotta", "white_concrete", "white_concrete_powder", "powder_snow"), },
        { "rgb": (164, 168, 184), "blocks": ("clay", "infested_chiseled_stone_bricks", "infested_cobblestone", "infested_cracked_stone_bricks", "infested_mossy_stone_bricks", "infested_stone", "infested_stone_bricks"), },
        { "rgb": (151, 109, 77), "blocks": ("coarse_dirt", "dirt", "farmland", "dirt_path", "granite_slab", "granite_stairs", "granite_wall", "polished_granite_slab", "polished_granite_stairs", "jungle_planks", "jungle_log[axis=y]", "stripped_jungle_log[axis=x]", "jungle_wood", "stripped_jungle_wood", "jungle_sign", "jungle_pressure_plate", "jungle_trapdoor", "jungle_stairs", "jungle_slab", "jungle_fence_gate", "jungle_fence", "jungle_door", "jukebox", "brown_mushroom_block", "rooted_dirt", "hanging_roots"), },
        { "rgb": (112, 112, 112), "blocks": ("stone", "stone_slab", "stone_stairs", "andesite_slab", "andesite_stairs", "andesite_wall", "polished_andesite_slab", "polished_andesite_stairs", "cobblestone_slab", "cobblestone_stairs", "cobblestone_wall", "bedrock", "gold_ore", "iron_ore", "coal_ore", "lapis_lazuli_ore", "dispenser", "mossy_cobblestone_slab", "mossy_cobblestone_stairs", "mossy_cobblestone_wall", "spawner", "diamond_ore", "furnace", "stone_pressure_plate", "redstone_ore", "stone_bricks", "emerald_ore", "ender_chest", "dropper", "smooth_stone_slab", "observer", "smoker", "blast_furnace", "stonecutter", "sticky_piston", "piston", "piston_head", "gravel", "acacia_log[axis=z]", "cauldron", "hopper", "copper_ore"), },
        { "rgb": (64, 64, 255), "blocks": ("water", "kelp", "seagrass", "bubble_column"), },
        { "rgb": (143, 119, 72), "blocks": ("oak_planks", "oak_log[axis=y]", "stripped_oak_log[axis=x]", "oak_wood", "stripped_oak_wood", "oak_sign", "oak_pressure_plate", "oak_trapdoor", "oak_stairs", "oak_slab", "oak_fence_gate", "oak_fence", "oak_door", "note_block", "bookshelf", "chest", "crafting_table", "trapped_chest", "daylight_detector", "loom", "barrel", "cartography_table", "fletching_table", "lectern", "smithing_table", "composter", "bamboo_sapling", "dead_bush", "petrified_oak_slab", "beehive", "white_banner"), },
        { "rgb": (255, 252, 245), "blocks": ("quartz_block", "diorite_stairs", "diorite_slab", "diorite_wall", "polished_diorite_stairs", "polished_diorite_slab", "birch_log[axis=x]", "sea_lantern", "target"), },
        { "rgb": (216, 127, 51), "blocks": ("acacia_planks", "acacia_log[axis=y]", "stripped_acacia_log[axis=x]", "acacia_wood", "stripped_acacia_wood", "acacia_sign", "acacia_pressure_plate", "acacia_trapdoor", "acacia_stairs", "acacia_slab", "acacia_fence_gate", "acacia_fence", "acacia_door", "red_sand", "orange_wool", "orange_carpet", "orange_shulker_box", "orange_bed[part=foot]", "orange_stained_glass", "orange_glazed_terracotta", "orange_concrete", "orange_concrete_powder", "orange_candle", "pumpkin", "carved_pumpkin", "jack_o_lantern", "terracotta", "red_sandstone", "honey_block", "honeycomb_block", "copper_block", "lightning_rod", "raw_copper_block"), },
        { "rgb": (178, 76, 216), "blocks": ("magenta_wool", "magenta_carpet", "magenta_shulker_box", "magenta_bed[part=foot]", "magenta_stained_glass", "magenta_glazed_terracotta", "magenta_concrete", "magenta_concrete_powder", "magenta_candle", "purpur_block"), },
        { "rgb": (102, 153, 216), "blocks": ("light_blue_wool", "light_blue_carpet", "light_blue_shulker_box", "light_blue_bed[part=foot]", "light_blue_stained_glass", "light_blue_glazed_terracotta", "light_blue_concrete", "light_blue_concrete_powder", "light_blue_candle", "soul_fire"), },
        { "rgb": (229, 229, 51), "blocks": ("sponge", "wet_sponge", "yellow_wool", "yellow_carpet", "yellow_shulker_box", "yellow_bed[part=foot]", "yellow_stained_glass", "yellow_glazed_terracotta", "yellow_concrete", "yellow_concrete_powder", "yellow_candle", "hay_bale", "horn_coral_block[waterlogged=true]", "bee_nest"), },
        { "rgb": (127, 204, 25), "blocks": ("lime_wool", "lime_carpet", "lime_shulker_box", "lime_bed[part=foot]", "lime_stained_glass", "lime_glazed_terracotta", "lime_concrete", "lime_concrete_powder", "lime_candle", "melon"), },
        { "rgb": (242, 127, 165), "blocks": ("pink_wool", "pink_carpet", "pink_shulker_box", "pink_bed[part=foot]", "pink_stained_glass", "pink_glazed_terracotta", "pink_concrete", "pink_concrete_powder", "pink_candle", "brain_coral_block[waterlogged=true]"), },
        { "rgb": (76, 76, 76), "blocks": ("acacia_wood", "gray_wool", "gray_carpet", "gray_shulker_box", "gray_bed[part=foot]", "gray_stained_glass", "gray_glazed_terracotta", "gray_concrete", "gray_concrete_powder", "gray_candle", "dead_coral_block", "tinted_glass"), },
        { "rgb": (153, 153, 153), "blocks": ("light_gray_wool", "light_gray_carpet", "light_gray_shulker_box", "light_gray_bed[part=foot]", "light_gray_stained_glass", "light_gray_glazed_terracotta", "light_gray_concrete", "light_gray_concrete_powder", "light_gray_candle", "structure_block", "jigsaw"), },
        { "rgb": (76, 127, 153), "blocks": ("cyan_wool", "cyan_carpet", "cyan_shulker_box", "cyan_bed[part=foot]", "cyan_stained_glass", "cyan_glazed_terracotta", "cyan_concrete", "cyan_concrete_powder", "cyan_candle", "prismarine_slab", "prismarine_stairs", "prismarine_wall", "warped_roots", "warped_fungus", "twisting_vines", "nether_sprouts", "sculk_sensor"), },
        { "rgb": (127, 63, 178), "blocks": ("shulker_box", "purple_wool", "purple_carpet", "purple_shulker_box", "purple_bed[part=foot]", "purple_stained_glass", "purple_glazed_terracotta", "purple_concrete", "purple_concrete_powder", "purple_candle", "mycelium", "chorus_plant", "chorus_flower", "repeating_command_block", "bubble_coral_block", "amethyst_block", "budding_amethyst", "amethyst_cluster"), },
        { "rgb": (51, 76, 178), "blocks": ("blue_wool", "blue_carpet", "blue_shulker_box", "blue_bed[part=foot]", "blue_stained_glass", "blue_glazed_terracotta", "blue_concrete", "blue_concrete_powder", "blue_candle", "tube_coral_block"), },
        { "rgb": (102, 76, 51), "blocks": ("dark_oak_planks", "dark_oak_log[axis=y]", "stripped_dark_oak_log[axis=x]", "dark_oak_wood", "stripped_dark_oak_wood", "dark_oak_sign", "dark_oak_pressure_plate", "dark_oak_trapdoor", "dark_oak_stairs", "dark_oak_slab", "dark_oak_fence_gate", "dark_oak_fence", "dark_oak_door", "spruce_log[axis=x]", "brown_wool", "brown_carpet", "brown_shulker_box", "brown_bed[part=foot]", "brown_stained_glass", "brown_glazed_terracotta", "brown_concrete", "brown_concrete_powder", "brown_candle", "soul_sand", "command_block", "brown_mushroom", "soul_soil"), },
        { "rgb": (102, 127, 51), "blocks": ("green_wool", "green_carpet", "green_shulker_box", "green_bed[part=foot]", "green_stained_glass", "green_glazed_terracotta", "green_concrete", "green_concrete_powder", "green_candle", "end_portal_frame", "chain_command_block", "sea_pickle", "moss_carpet", "moss_block", "dried_kelp_block"), },
        { "rgb": (153, 51, 51), "blocks": ("red_wool", "red_carpet", "red_shulker_box", "red_bed[part=foot]", "red_stained_glass", "red_glazed_terracotta", "red_concrete", "red_concrete_powder", "red_candle", "brick_slab", "brick_stairs", "brick_wall", "red_mushroom_block", "nether_wart", "enchanting_table", "nether_wart_block", "fire_coral_block", "red_mushroom", "shroomlight"), },
        { "rgb": (25, 25, 25), "blocks": ("black_wool", "black_carpet", "black_shulker_box", "black_bed[part=foot]", "black_stained_glass", "black_glazed_terracotta", "black_concrete", "black_concrete_powder", "black_candle", "obsidian", "end_portal", "dragon_egg", "coal_block", "end_gateway", "basalt", "polished_basalt", "smooth_basalt", "netherite_block", "crying_obsidian", "respawn_anchor", "blackstone", "gilded_blackstone"), },
        { "rgb": (250, 238, 77), "blocks": ("gold_block", "light_weighted_pressure_plate", "bell", "raw_gold_block"), },
        { "rgb": (92, 219, 213), "blocks": ("diamond_block", "beacon", "prismarine_brick_slab", "prismarine_brick_stairs", "dark_prismarine_slab", "dark_prismarine_stairs", "conduit"), },
        { "rgb": (74, 128, 255), "blocks": ("lapis_lazuli_block",), },
        { "rgb": (0, 217, 58), "blocks": ("emerald_block",), },
        { "rgb": (129, 86, 49), "blocks": ("podzol", "spruce_planks", "spruce_log[axis=y]", "stripped_spruce_log[axis=x]", "spruce_wood", "stripped_spruce_wood", "spruce_sign", "spruce_pressure_plate", "spruce_trapdoor", "spruce_stairs", "spruce_slab", "spruce_fence_gate", "spruce_fence", "spruce_door", "oak_log[axis=x]", "jungle_log[axis=x]", "campfire", "soul_campfire"), },
        { "rgb": (112, 2, 0), "blocks": ("netherrack", "nether_brick_fence", "nether_brick_slab", "nether_brick_stairs", "nether_brick_wall", "nether_brick_chiseled", "nether_brick_cracked", "nether_gold_ore", "nether_quartz_ore", "magma_block", "red_nether_brick_slab", "red_nether_brick_stairs", "red_nether_brick_wall", "crimson_roots", "crimson_fungus", "weeping_vines"), },
        { "rgb": (209, 177, 161), "blocks": ("white_terracotta", "calcite"), },
        { "rgb": (159, 82, 36), "blocks": ("orange_terracotta",), },
        { "rgb": (149, 87, 108), "blocks": ("magenta_terracotta",), },
        { "rgb": (112, 108, 138), "blocks": ("light_blue_terracotta",), },
        { "rgb": (186, 133, 36), "blocks": ("yellow_terracotta",), },
        { "rgb": (103, 117, 53), "blocks": ("lime_terracotta",), },
        { "rgb": (160, 77, 78), "blocks": ("pink_terracotta",), },
        { "rgb": (57, 41, 35), "blocks": ("gray_terracotta", "tuff"), },
        { "rgb": (135, 107, 98), "blocks": ("light_gray_terracotta", "exposed_copper"), },
        { "rgb": (87, 92, 92), "blocks": ("cyan_terracotta",), },
        { "rgb": (122, 73, 88), "blocks": ("purple_terracotta", "purple_shulker_box"), },
        { "rgb": (76, 62, 92), "blocks": ("blue_terracotta",), },
        { "rgb": (76, 50, 35), "blocks": ("brown_terracotta", "pointed_dripstone", "dripstone_block"), },
        { "rgb": (76, 82, 42), "blocks": ("green_terracotta",), },
        { "rgb": (142, 60, 46), "blocks": ("red_terracotta",), },
        { "rgb": (37, 22, 16), "blocks": ("black_terracotta",), },
        { "rgb": (189, 48, 49), "blocks": ("crimson_nylium",), },
        { "rgb": (148, 63, 97), "blocks": ("crimson_planks", "crimson_log[axis=y]", "stripped_crimson_log[axis=x]", "crimson_wood", "stripped_crimson_wood", "crimson_sign", "crimson_pressure_plate", "crimson_trapdoor", "crimson_stairs", "crimson_slab", "crimson_fence_gate", "crimson_fence", "crimson_door"), },
        { "rgb": (92, 25, 29), "blocks": ("crimson_hyphae", "stripped_crimson_hyphae"), },
        { "rgb": (22, 126, 134), "blocks": ("warped_nylium", "oxidized_copper"), },
        { "rgb": (58, 142, 140), "blocks": ("warped_planks", "warped_log[axis=y]", "stripped_warped_log[axis=x]", "warped_wood", "stripped_warped_wood", "warped_sign", "warped_pressure_plate", "warped_trapdoor", "warped_stairs", "warped_slab", "warped_fence_gate", "warped_fence", "warped_door", "weathered_copper"), },
        { "rgb": (86, 44, 62), "blocks": ("warped_hyphae", "stripped_warped_hyphae"), },
        { "rgb": (20, 180, 133), "blocks": ("warped_wart_block",), },
        { "rgb": (100, 100, 100), "blocks": ("deepslate",), },
        { "rgb": (216, 175, 147), "blocks": ("raw_iron_block",), },
        { "rgb": (127, 167, 150), "blocks": ("glow_lichen",), },
    ]

    def to_minecraft_color(pxl):
        color = None
        min_distance = None
        for item in blocks:
            # Calculates the "distance" between two RGB colors as if they
            # were points in a 3-dimensional space.
            # The closer they are, the more they look like each other.
            euclidean_distance = sqrt(sum([pow(p - c, 2) for p, c in zip(item["rgb"], pxl)]))

            if min_distance is None or euclidean_distance < min_distance:
                min_distance = euclidean_distance
                color = ", ".join("minecraft:" + block for block in item["blocks"])
        return color

    # Validate the version before touching the filesystem so a bad version
    # no longer leaves half-created datapack directories behind.
    datapack_version = _datapack_format(minecraft_version)

    # Resizing the image and mapping each pixel's color to a minecraft color
    image = image.resize(
        (image.size[0] // lower_image_size_by, image.size[1] // lower_image_size_by)
    )
    image_colors_processed = [
        [to_minecraft_color(pixel) for pixel in row] for row in np.array(image)
    ]

    # Getting the name that the image should have via the given path
    image_name = os.path.splitext(os.path.split(path)[1])[0]

    df = pd.DataFrame(image_colors_processed)

    # Creates - in an error proof manner - the folder structure of the datapack
    with suppress(FileExistsError):
        os.makedirs(f"{path}/data/minecraft/tags/functions")
        os.makedirs(f"{path}/data/pixelart-map/functions")

    pack_mcmeta = {
        "pack": {
            "pack_format": datapack_version,
            "description": f"This datapack will generate the image ({image_name}) in your world",
        }
    }
    load_json = {"values": ["pixelart-map:load"]}
    tick_json = {"values": ["pixelart-map:tick"]}

    with open(f"{path}/pack.mcmeta", "w") as file:
        file.write(json.dumps(pack_mcmeta, indent=4))

    with open(f"{path}/data/minecraft/tags/functions/load.json", "w") as file:
        file.write(json.dumps(load_json, indent=4))

    with open(f"{path}/data/minecraft/tags/functions/tick.json", "w") as file:
        file.write(json.dumps(tick_json, indent=4))

    with open(f"{path}/data/pixelart-map/functions/tick.mcfunction", "w") as file:
        file.write("")

    # Making the commands that when ran will build the image's pixel art.
    # This part's had a huge contribution from this thread:
    # https://stackoverflow.com/questions/70512775/how-to-group-elements-in-dataframe-by-row/70546452#70546452
    df = df.rename_axis(index="z", columns="x")

    # Runs are collapsed along both axes; the shorter command list wins.
    a = list(
        script(
            df,
            player_x=player_pos[0],
            player_y=player_pos[1],
            player_z=player_pos[2],
        )
    )
    b = list(
        script(
            df.T,
            player_x=player_pos[0],
            player_y=player_pos[1],
            player_z=player_pos[2],
        )
    )

    res = min([a, b], key=len)
    with open(f"{path}/data/pixelart-map/functions/load.mcfunction", "w") as file:
        file.write("\n".join(res))
Henrique-CSS/unexpected-isaves
src/unexpected_isaves/save_image.py
save_image.py
py
24,926
python
en
code
null
github-code
6
[ { "api_name": "PIL.Image", "line_number": 14, "usage_type": "name" }, { "api_name": "numpy.array", "line_number": 53, "usage_type": "call" }, { "api_name": "pandas.DataFrame", "line_number": 56, "usage_type": "call" }, { "api_name": "os.path.splitext", "line_n...
40709996191
# coding=utf-8
# gACNN: attention-gated CNN for 7-class facial expression recognition.
# A VGG16 backbone feeds a global branch (GG_*) and 24 per-landmark patch
# branches (PG_*); each branch is scaled by a learned sigmoid attention
# weight before the features are concatenated and classified.
from __future__ import absolute_import, division, print_function
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from util.custom_dataset import FaceLandmarksDataset, Rescale, ToTensor
import torchvision.models as models
from torchvision import transforms
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from torchvision import transforms, utils
import torchvision


class View(nn.Module):
    """Reshape layer usable inside ``nn.Sequential`` (wraps ``Tensor.view``)."""

    def __init__(self, shape):
        super(View, self).__init__()
        self.shape = shape  # target shape, e.g. (-1, 128)

    def forward(self, x):
        return x.view(*self.shape)


class InnerSum(nn.Module):
    """Per-row squared L2 norm: returns sum(x*x) over the last dimension.

    NOTE(review): declared on ACNN as ``self.inner`` but never called in
    ``forward`` — appears to be dead code kept from an earlier revision.
    """

    def __init__(self):
        super(InnerSum, self).__init__()

    def forward(self, x):
        y = torch.zeros_like(x)
        # Element-wise square, one batch row at a time.
        for i in range(x.size(0)):
            y[i] = x[i].mul(x[i])
        # Sum over the last axis for either 2-D or 3-D input.
        if len(y.shape) == 3:
            return y.sum(2)
        else:
            return y.sum(1)


class ACNN(nn.Module):
    """Attention CNN over a pretrained VGG16 feature extractor.

    Input: image batch (assumed (B, 3, 224, 224) — VGG16 default; TODO
    confirm) plus 24 facial-landmark RoI boxes per image.
    Output: (B, 7) class logits.
    """

    def __init__(self):
        super(ACNN, self).__init__()
        self.inner = InnerSum()
        # First 28 layers of pretrained VGG16 features; split into a
        # shared trunk (VGG16, layers 0-20) and the global branch tail.
        self.pretrain = models.vgg16(pretrained=True).features[:28]
        self.VGG16 = self.pretrain[:21]
        # Patch-branch conv stack (applied to each 6x6 landmark crop).
        self.PG_base = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, padding=1),
                                     nn.BatchNorm2d(512),
                                     nn.ReLU(),
                                     nn.Conv2d(512, 512, kernel_size=3, padding=1),
                                     nn.BatchNorm2d(512),
                                     nn.ReLU(),)
        # Patch attention head: produces one sigmoid gate per crop.
        self.PG_attention = nn.Sequential(nn.MaxPool2d(2, stride=2),
                                          nn.Conv2d(512, 128, kernel_size=3, padding=1),
                                          nn.BatchNorm2d(128),
                                          nn.ReLU(),
                                          nn.AdaptiveAvgPool2d((1, 1)),
                                          View((-1, 128)),
                                          nn.Linear(128, 64),
                                          nn.ReLU(),
                                          nn.Linear(64, 1),
                                          nn.Sigmoid())
        self.GG_base = self.pretrain[21:]
        # Global attention head (same structure as the patch head).
        self.GG_attention = nn.Sequential(nn.MaxPool2d(2, stride=2),
                                          nn.Conv2d(512, 128, kernel_size=3, padding=1),
                                          nn.BatchNorm2d(128),
                                          nn.ReLU(),
                                          nn.AdaptiveAvgPool2d((1, 1)),
                                          View((-1, 128)),
                                          nn.Linear(128, 64),
                                          nn.ReLU(),
                                          nn.Linear(64, 1),
                                          nn.Sigmoid())
        # NOTE(review): the list comprehensions repeat the *same* module
        # object 24 times, so all 24 patch branches share one set of
        # weights — confirm this sharing is intentional.
        self.PG24_base = nn.ModuleList([self.PG_base for _ in range(24)])
        self.PG24_alpha = nn.ModuleList([self.PG_attention for _ in range(24)])
        # Reflection padding before cropping so 6x6 windows near the
        # border stay inside the feature map.
        self.pad = nn.ReflectionPad2d(6)
        # self.crop = batch_slice(40, 40, 6, 6)
        self.crop = torchvision.ops.roi_pool
        self.PG_fc = nn.Linear(512*6*6, 64)      # per-patch embedding
        self.GG_fc = nn.Linear(512*14*14, 512)   # global embedding
        self.fc1 = nn.Linear(2048, 1024)         # 512 global + 24*64 patch = 2048
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(1024, 7)            # 7 expression classes

    # def crop_layer(self, img: '(B, C, H, W)', landmarks: '(B, 24, 2)'):
    #     # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    #     pad = nn.ReflectionPad2d(6)  # padding for cropping
    #     img = pad(img)  # (B, 512, 36, 36)
    #     total_crop = torch.zeros((img.size(0), landmarks.size(1), 512, 6, 6), device=self.device)
    #
    #     for i in range(landmarks.size(0)):  # Batch
    #         # crop_per_batch = []
    #         for patch in range(landmarks.size(1)):  # 24 landmarks
    #             total_crop[i, patch, :, :, :] = img[i, :, (int(landmarks[i, patch, 0]) - 3): (int(
    #                 landmarks[i, patch, 0]) + 3),
    #                                             (int(landmarks[i, patch, 1]) - 3): (int(
    #                                                 landmarks[i, patch, 1]) + 3)]  # crop_img: (512, 6, 6)
    #
    #     total_crop = total_crop.permute(1, 0, 2, 3, 4)  # output: (24, B, 512, 6, 6)
    #     return total_crop

    def _branch24(self, crop_img):
        """Run each of the 24 landmark crops through its (shared) patch
        branch and gate the 64-d embedding by its attention weight.

        crop_img: iterable of 24 tensors, each assumed (B, 512, 6, 6).
        Returns a list of 24 tensors of shape (B, 64).
        """
        PG_out = []
        for x, base, alpha in zip(crop_img, self.PG24_base, self.PG24_alpha):
            PG_conv2 = base(x)
            PG_reshape = PG_conv2.view(-1, 512*6*6)
            PG_reshape = self.PG_fc(PG_reshape)
            # Scale the embedding by the scalar attention gate in [0, 1].
            PG_per = PG_reshape * alpha(PG_conv2).view(x.size(0), 1)
            PG_out.append(PG_per)
        return PG_out

    def forward(self, img, landmarks):
        # landmarks are RoI boxes (see roi_select below): (B*24, 5) rows
        # of (batch_index, x1, y1, x2, y2) — TODO confirm against caller.
        img_feature = self.VGG16(img)  # (B, 512, 28, 28)
        img_pad = self.pad(img_feature)
        # landmarks = landmarks.long()
        crop_img = self.crop(img_pad, landmarks, output_size=(6, 6))
        # NOTE(review): roi_pool returns boxes in batch-major order
        # (B*24, 512, 6, 6); view(24, -1, ...) assumes landmark-major
        # grouping — verify the reshape matches the box ordering.
        crop_img = crop_img.view(24, -1, 512, 6, 6)
        GG_conv2 = self.GG_base(img_feature)
        GG_reshape = GG_conv2.view(-1, 512*14*14)
        GG_reshape = self.GG_fc(GG_reshape)
        # Gated global embedding: (B, 512).
        GG_out = GG_reshape * self.GG_attention(GG_conv2).view(img_feature.size(0), 1)
        # crop_img = self.crop_layer(img_feature, landmarks)
        PG_out = self._branch24(crop_img)
        PG_total = torch.cat(PG_out, dim=1)            # (B, 24*64)
        total_out = torch.cat([GG_out, PG_total], dim=1)  # (B, 2048)
        out = self.fc1(total_out)
        out = F.relu(self.dropout(out))
        out = self.fc2(out)
        return out


def landmark_resize(landmarks:'(B, 24, 2)')->'(B*24, 4)':
    """Flatten (B, 24, 2) landmarks to rows of (batch_idx, point_idx, x, y).

    NOTE(review): ``np.insert`` on a torch tensor returns a numpy array,
    so callers receive an ndarray, not a tensor — confirm downstream use.
    """
    bs = landmarks.size(0)
    batch = list(range(bs))
    # Column of batch indices, one per (batch, landmark) pair.
    batch = np.array(batch * 24).reshape(24, -1).T
    # Column of landmark indices 0..23 repeated per batch row.
    point = np.array(list(range(24)) * bs).reshape(bs, -1)
    insert_point = np.insert(landmarks, 0, point, 2)
    insert_batch = np.insert(insert_point, 0, batch, 2)
    new_landmark = insert_batch.reshape(-1, 4)
    return new_landmark


def data_normal(origin_data, size):
    """Map pixel coordinates in [0, size) to grid_sample range (-1, 1)."""
    size = size / 2
    norm_data = origin_data.true_divide(size) - 1
    return norm_data


def grid_field(landmarks, cropsize=6):
    """Build normalized sampling grids (for ``grid_sample``) centered on
    each landmark: returns (batch indices, (B*24, cropsize, cropsize, 2)).
    """
    # landmarks: (B, 24, 2)
    total_crop = []
    landmarks = landmark_resize(landmarks)  # (B*24, 4)
    lm_batch = landmarks[:, 0].long()
    # Window extents of size ``cropsize`` centered on each landmark.
    landmarks_x_l = landmarks[:, 2] - (cropsize / 2)
    landmarks_x_r = landmarks[:, 2] + (cropsize / 2)
    landmarks_y_l = landmarks[:, 3] - (cropsize / 2)
    landmarks_y_r = landmarks[:, 3] + (cropsize / 2)
    for i in range(landmarks.size(0)):
        new_h = torch.linspace(landmarks_x_l[i], landmarks_x_r[i] - 1, cropsize).view(-1, 1).repeat(1, cropsize)
        new_w = torch.linspace(landmarks_y_l[i], landmarks_y_r[i] - 1, cropsize).repeat(cropsize, 1)
        grid = torch.cat((new_w.unsqueeze(2), new_h.unsqueeze(2)), dim=2)
        grid = grid.unsqueeze(0)
        # Normalize to (-1, 1) against the 28x28 feature map.
        grid = data_normal(grid, size=28)
        total_crop.append(grid)
    total_crop = torch.cat(total_crop, dim=0)
    return lm_batch, total_crop


def roi_select(landmarks: '(B, 4, 2)') -> '(B*24, 5)':
    """Convert landmarks to roi_pool boxes (batch_idx, x1, y1, x2, y2),
    each box a 6x6 window centered on the landmark."""
    landmarks = landmark_resize(landmarks)
    landmarks_right = landmarks[:, 2:] + 3
    landmarks_left = landmarks[:, 2:] - 3
    landmarks = torch.cat([landmarks[:, 0].view(-1, 1), landmarks_left, landmarks_right], dim=1)
    return landmarks


# if __name__ == '__main__':
#     model = ACNN()
#     shuffle = False
#     device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
#     model.to(device)
#     train_set = FaceLandmarksDataset(csv_file='train_acnn.csv', root_dir='original/',
#                                      transform=ToTensor())
#     test_set = FaceLandmarksDataset(csv_file='test_acnn.csv', root_dir='original/',
#                                     transform=ToTensor())
#     train_loader = DataLoader(dataset=train_set, shuffle=shuffle, batch_size=4, num_workers=0,
#                               pin_memory=True)
#     test_loader = DataLoader(dataset=test_set, shuffle=shuffle, batch_size=4, num_workers=8,
#                              pin_memory=True)
#     for step, batch in enumerate(train_loader):
#         imgs, landmarks, targets = batch['image'], batch['landmarks'] / 8. + 6, batch['label']
#         landmarks = roi_select(landmarks)
#
#         imgs, landmarks, targets = imgs.to(device), landmarks.to(device), targets.to(device)
#         logits = model(imgs, landmarks)
#         print(logits.size())
#         break
hanluyt/gACNN_pytorch
model_roi.py
model_roi.py
py
7,947
python
en
code
2
github-code
6
[ { "api_name": "torch.nn.Module", "line_number": 15, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 15, "usage_type": "name" }, { "api_name": "torch.nn.Module", "line_number": 23, "usage_type": "attribute" }, { "api_name": "torch.nn", "li...
29262983646
import sys
import socket
import platform
import psutil
import wmi
import urllib.request
from PyQt5.QtWidgets import QApplication, QMainWindow, QVBoxLayout, QPushButton, QTextEdit, QWidget
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont, QColor


class App(QMainWindow):
    """Small diagnostics GUI: buttons that append network / system / BIOS
    information to a read-out text area. Windows-only (uses WMI)."""

    def __init__(self, app):
        super().__init__()
        self.app = app  # owning QApplication, kept for lifetime
        self.initUI()

    def initUI(self):
        """Build the window: one QTextEdit output pane plus five buttons,
        each wired to one of the info-gathering methods below."""
        self.setWindowTitle('App')
        self.setGeometry(200, 200, 800, 600)

        central_widget = QWidget(self)
        self.setCentralWidget(central_widget)

        layout = QVBoxLayout()

        # Shared output pane that every button appends into.
        self.text_output = QTextEdit(self)
        self.text_output.setFont(QFont("Arial", 12))
        layout.addWidget(self.text_output)

        button_ipv4_info = QPushButton('Get My IPv4', self)
        button_proxy_info = QPushButton('Check Proxy Info', self)
        button_system_info = QPushButton('Retrieve System Info', self)
        button_bios_info = QPushButton('Fetch BIOS Info', self)
        button_hostname_info = QPushButton('Get Hostname', self)

        button_ipv4_info.setFont(QFont("Arial", 10))
        button_proxy_info.setFont(QFont("Arial", 10))
        button_system_info.setFont(QFont("Arial", 10))
        button_bios_info.setFont(QFont("Arial", 10))
        button_hostname_info.setFont(QFont("Arial", 10))

        # One pastel background per button for visual distinction.
        button_ipv4_info.setStyleSheet("background-color: lightblue;")
        button_proxy_info.setStyleSheet("background-color: lightgreen;")
        button_system_info.setStyleSheet("background-color: lightcoral;")
        button_bios_info.setStyleSheet("background-color: lightsalmon;")
        button_hostname_info.setStyleSheet("background-color: lightyellow;")

        layout.addWidget(button_ipv4_info)
        layout.addWidget(button_proxy_info)
        layout.addWidget(button_system_info)
        layout.addWidget(button_bios_info)
        layout.addWidget(button_hostname_info)

        central_widget.setLayout(layout)

        button_ipv4_info.clicked.connect(self.fetch_ipv4_info)
        button_proxy_info.clicked.connect(self.check_proxy_info)
        button_system_info.clicked.connect(self.retrieve_system_info)
        button_bios_info.clicked.connect(self.fetch_bios_info)
        button_hostname_info.clicked.connect(self.get_host_name)

    def fetch_ipv4_info(self):
        """Append the host's IPv4 address and a rough interface guess."""
        hostname = socket.gethostname()
        ip = socket.gethostbyname(hostname)
        # NOTE(review): gethostbyaddr does a reverse-DNS lookup and returns
        # (hostname, aliases, addresses) — it does NOT indicate whether the
        # address is static; the "Static:" label below is misleading.
        is_static = socket.gethostbyaddr(ip)
        # NOTE(review): platform.platform() describes the OS, not the active
        # network interface — this check likely never matches; verify.
        interface = None
        if "Wi-Fi" in platform.platform():
            interface = "Wi-Fi"
        elif "Ethernet" in platform.platform():
            interface = "Ethernet"
        result = f"IPv4 Address: {ip}\nStatic: {is_static}\nNetwork Interface: {interface}"
        self.text_output.append(result)

    def check_proxy_info(self):
        """Report whether an HTTP request through the system proxy settings
        succeeds (blocks the UI for up to 5 seconds)."""
        proxy_handler = urllib.request.ProxyHandler()
        opener = urllib.request.build_opener(proxy_handler)
        try:
            opener.open("http://www.google.com", timeout=5)
            is_proxy_enabled = True
        except Exception:
            # Any network failure is treated as "proxy disabled".
            is_proxy_enabled = False
        proxy_status = "Proxy is enabled" if is_proxy_enabled else "Proxy is disabled"
        self.text_output.append(proxy_status)

    def retrieve_system_info(self):
        """Append OS version/architecture, physical core count and RAM (GB)."""
        os_version = platform.platform()
        os_architecture = platform.architecture()
        num_cores = psutil.cpu_count(logical=False)  # physical cores only
        ram = round(psutil.virtual_memory().total / (1024 ** 3), 2)
        result = f"Operating System Version: {os_version}\nArchitecture: {os_architecture}\nCPU Cores: {num_cores}\nRAM: {ram} GB"
        self.text_output.append(result)

    def fetch_bios_info(self):
        """Append BIOS manufacturer/version/date via WMI (Windows only)."""
        c = wmi.WMI()
        bios = c.Win32_BIOS()[0]
        result = f"BIOS Manufacturer: {bios.Manufacturer}\nBIOS Version: {bios.Version}\nBIOS Release Date: {bios.ReleaseDate}"
        self.text_output.append(result)

    def get_host_name(self):
        """Append the local machine's hostname."""
        hostname = socket.gethostname()
        self.text_output.append(f"Hostname: {hostname}")


if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = App(app)
    window.show()
    sys.exit(app.exec_())
miko7ajradziw1llowicz/Zadanie-3-python
main.py
main.py
py
4,160
python
en
code
0
github-code
6
[ { "api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 11, "usage_type": "name" }, { "api_name": "PyQt5.QtWidgets.QWidget", "line_number": 21, "usage_type": "call" }, { "api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 24, "usage_type": "call" }, { "a...
10881296356
import unittest from pathlib import Path from tests import CdsTestMixin from . import CDSCase class DumpTest(CdsTestMixin, unittest.TestCase): def test_trace_json(self): with CDSCase(self, self.NAME_LIST, self.TEST_ARCHIVE) as cds: cds.run_trace('import json') cds.verify_files(check_archive=False) cds.verify(lambda _: self.assertIn( 'json', [line.strip() for line in Path(self.NAME_LIST).read_text().split('\n')])) def test_dump_archive_from_list_json(self): with open(self.NAME_LIST, 'w') as f: print('json', file=f) with CDSCase(self, self.NAME_LIST, self.TEST_ARCHIVE, clear_list=False) as cds: cds.run_dump()
alibaba/code-data-share-for-python
tests/test_cds/test_dump.py
test_dump.py
py
745
python
en
code
38
github-code
6
[ { "api_name": "tests.CdsTestMixin", "line_number": 8, "usage_type": "name" }, { "api_name": "unittest.TestCase", "line_number": 8, "usage_type": "attribute" }, { "api_name": "pathlib.Path", "line_number": 16, "usage_type": "call" } ]
70541333949
import os import re import spotipy from moviepy.editor import * from urllib.parse import quote from urllib import request as rq from youtube_dl import YoutubeDL from spotipy.oauth2 import SpotifyClientCredentials ## fix to skip use for PYTHONPATH sys.path.append(os.getcwd()) sys.path.append(os.path.join(os.getcwd(),"..","common")) from common.common import controller_common common = controller_common() class controller_spotify: def __init__(self,client_api,token_api,user): self.__CLIENT_ID = client_api self.__CLIENT_SECRET = token_api self.__USER_ID = user self.auth_manager = SpotifyClientCredentials( client_id=self.__CLIENT_ID, client_secret=self.__CLIENT_SECRET ) self.sp = spotipy.Spotify(auth_manager=self.auth_manager) def get_ydl_opts(self, path): return { "format": "bestaudio/best", "outtmpl": f"{path}/%(id)s.%(ext)s", "ignoreerrors": True, "postprocessors": [ { "key": "FFmpegExtractAudio", "preferredcodec": "mp3", "preferredquality": "320", } ], } def get_user_playlists(self): return [ {"value": pl.get("uri"), "name": pl.get("name")} for pl in self.sp.user_playlists(self.__USER_ID).get("items") ] def normalize_str(self, string): return string.translate(str.maketrans('\\/:*?"<>|', "__ ")) def get_playlist_details(self, pl_uri): offset = 0 fields = "items.track.track_number,items.track.name,items.track.artists.name,items.track.album.name,items.track.album.release_date,total,items.track.album.images" pl_name = self.sp.playlist(pl_uri)["name"] pl_items = self.sp.playlist_items( pl_uri, offset=offset, fields=fields, additional_types=["track"], )["items"] pl_tracks = [] while len(pl_items) > 0: for item in pl_items: if item["track"]: track_name = self.normalize_str(item["track"]["name"]) artist_name = self.normalize_str( item["track"]["artists"][0]["name"] ) pl_tracks.append( { "uri": quote( f'{track_name.replace(" ", "+")}+{artist_name.replace(" ", "+")}' ), "file_name": f"{artist_name} - {track_name}", "track_name": track_name, "artist_name": artist_name, 
"album_name": self.normalize_str( item["track"]["album"]["name"] ), "album_date": item["track"]["album"]["release_date"], "track_number": item["track"]["track_number"], "album_art": item["track"]["album"]["images"][0]["url"], } ) offset = offset + len(pl_items) pl_items = self.sp.playlist_items( pl_uri, offset=offset, fields=fields, additional_types=["track"], )["items"] return {"pl_name": pl_name, "pl_tracks": pl_tracks} def check_existing_tracks(self, playlist, path): existing_tracks = os.listdir(path) tracks = [ track for track in playlist["pl_tracks"] if f"{track['file_name']}.mp3" not in existing_tracks ] return tracks def download_tracks(self, pl_uri): count = 0 items = list() pl_details = self.get_playlist_details(pl_uri) path = common.create_download_directory(pl_details["pl_name"]) tracks = self.check_existing_tracks(pl_details, path) print( f"\n\033[1m\033[33m[info] Downloading {len(tracks)} tracks from {pl_details['pl_name']}\033[0m" ) with YoutubeDL(self.get_ydl_opts(path)) as ydl: for track in tracks: html = rq.urlopen( f"https://www.youtube.com/results?search_query={track['uri']}" ) video_ids = re.findall(r"watch\?v=(\S{11})", html.read().decode()) if video_ids: url = "https://www.youtube.com/watch?v=" + video_ids[0] print ( f"Add [{count}] - {url}" ) count = count + 1 items.append(url) res = common.thread_pool(items,path,"download") if res: common.converterto_mp3(pl_details["pl_name"])
alejan2x/FuckDownload
spotify/spotify.py
spotify.py
py
4,850
python
en
code
0
github-code
6
[ { "api_name": "os.getcwd", "line_number": 11, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 12, "usage_type": "call" }, { "api_name": "os.path", "line_number": 12, "usage_type": "attribute" }, { "api_name": "os.getcwd", "line_number": 12...
26656448918
#the code partial borrowed from # "Neural Network-based Reconstruction in Compressed Sensing #MRI Without Fully-sampled Training Data" import torch import torch.nn as nn import numpy as np import torch.nn.functional as F import util_torch as util_torch def absval(arr): """ Takes absolute value of last dimension, if complex. Input dims: (N, l, w, 2) Output dims: (N, l, w) """ # Expects input of size (N, l, w, 2) assert arr.shape[-1] == 2 return torch.norm(arr, dim=3) def scale(y, y_zf): """Scales inputs for numerical stability""" flat_yzf = torch.flatten(absval(y_zf), start_dim=1, end_dim=2) max_val_per_batch, _ = torch.max(flat_yzf, dim=1, keepdim=True) y = y / max_val_per_batch.view(len(y), 1, 1, 1) y_zf = y_zf / max_val_per_batch.view(len(y), 1, 1, 1) return y, y_zf class Upsample(nn.Module): """Upsamples input multi-channel image""" def __init__(self, scale_factor, mode, align_corners): super(Upsample, self).__init__() self.scale_factor = scale_factor self.mode = mode self.align_corners = align_corners def forward(self, x): return F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners) class ResBlock(nn.Module): '''5-layer CNN with residual output''' def __init__(self, n_ch_in=2, n_ch_out=2, nf=64, ks=3): super(ResBlock, self).__init__() self.n_ch_out = n_ch_out self.conv1 = nn.Conv2d(n_ch_in, nf, ks, padding = ks//2) self.conv2 = nn.Conv2d(nf, nf, ks, padding = ks//2) self.conv3 = nn.Conv2d(nf, nf, ks, padding = ks//2) self.conv4 = nn.Conv2d(nf, nf, ks, padding = ks//2) self.conv5 = nn.Conv2d(nf, n_ch_out, ks, padding = ks//2) self.relu = nn.ReLU(inplace=True) def forward(self, x): conv1_out = self.conv1(x) conv1_out = self.relu(conv1_out) conv2_out = self.conv2(conv1_out) conv2_out = self.relu(conv2_out) conv3_out = self.conv3(conv2_out) conv3_out = self.relu(conv3_out) conv4_out = self.conv4(conv3_out) conv4_out = self.relu(conv4_out) conv5_out = self.conv5(conv4_out) x_res = x[:,:self.n_ch_out,:,:] + conv5_out 
return x_res class Net(nn.Module): def __init__(self, K, lmbda, device, n_hidden=64): super(Net, self).__init__() #self.mask = mask self.lmbda = lmbda self.resblocks = nn.ModuleList() self.device = device for i in range(K): resblock = ResBlock(n_ch_in=2, nf=n_hidden) self.resblocks.append(resblock) self.block_final = ResBlock(n_ch_in=2, nf=n_hidden) def forward(self, ksp_input, sensemap, window = 1, mask = None): if mask is None: mask=torch.not_equal(ksp_input, 0) dtype=torch.complex64 mask = mask.type(dtype) x = util_torch.transpose_model(ksp_input * window, sensemap) x = util_torch.complex_to_channels(x)#;print(x.shape);quit() #ksp_input, x = scale(ksp_input, x) for i in range(len(self.resblocks)): # z-minimization x = x.permute(0, 3, 1, 2) z = self.resblocks[i](x) z = z.permute(0, 2, 3, 1) z = util_torch.channels_to_complex(z) # x-minimization #z_ksp = utils.fft(z) z_ksp = util_torch.model_forward(z, sensemap) #x_ksp = losslayer.data_consistency(z_ksp, y, self.mask, self.lmbda) x_ksp = (1 - mask) * z_ksp + mask * (self.lmbda*z_ksp + ksp_input) / (1 + self.lmbda) #x = utils.ifft(x_ksp) x = util_torch.transpose_model(x_ksp, sensemap) x = util_torch.complex_to_channels(x) x = x.permute(0, 3, 1, 2) x = self.block_final(x) return x
ikjalata/MRIunsup
model.py
model.py
py
3,925
python
en
code
0
github-code
6
[ { "api_name": "torch.norm", "line_number": 18, "usage_type": "call" }, { "api_name": "torch.flatten", "line_number": 22, "usage_type": "call" }, { "api_name": "torch.max", "line_number": 23, "usage_type": "call" }, { "api_name": "torch.nn.Module", "line_number...
23341249880
import json as js import csv import sys import jinja2 import os from datetime import datetime # import smtplib # read customers file to get information about customers def get_customers(customers_file, error): TITLE = [] FIRST_NAME = [] LAST_NAME = [] EMAIL = [] with open(customers_file, mode='r') as csv_file: customers = csv.DictReader(csv_file, delimiter=',') errorData = [] for customer in customers: if customer["EMAIL"] != '': TITLE.append(customer["TITLE"]) FIRST_NAME.append(customer["FIRST_NAME"]) LAST_NAME.append(customer["LAST_NAME"]) EMAIL.append(customer["EMAIL"]) else: errorData.append([customer["TITLE"], customer["FIRST_NAME"], customer["LAST_NAME"], customer["EMAIL"]]) with open(error, mode='w', newline='') as f: errorCustomer = csv.writer(f) errorCustomer.writerow(['TITLE','FIRST_NAME','LAST_NAME','EMAIL']) for customer in errorData: errorCustomer.writerow(customer) return TITLE, FIRST_NAME, LAST_NAME, EMAIL def read_template(email_template_file): with open(email_template_file, mode='r') as email_template: template = js.load(email_template) return template # Can use CLI Python Library to parse agv from CMD such as argparse, getopt,... 
def main(email_template, customers, path_output_emails, error): # how to use smtp send email # s = smtplib.SMTP(host='host_address', port=port) # s.starttls() # s.login(MY_ADDRESS, PASSWORD) TITLE, FIRST_NAME, LAST_NAME, EMAIL = get_customers(customers, error) template = read_template(email_template) if os.path.isdir(path_output_emails): os.chdir(path_output_emails) else: os.mkdir(path_output_emails) os.chdir(path_output_emails) now = datetime.now() env = jinja2.Environment() outputJsonFile = open("output.json", "w") resultData = [] for title, first_name, last_name, email in zip(TITLE, FIRST_NAME, LAST_NAME, EMAIL): data = {} body_template = env.from_string(template["body"]) data["from"] = template["from"] data["to"] = email data["subject"] = template["subject"] data["mineType"] = template["mineType"] data["body"] = body_template.render(TITLE=title, FIRST_NAME=first_name, LAST_NAME=last_name, TODAY=now.strftime('%d %b %Y')) resultData.append(data) # s.send_message(data) # del data output = js.dumps(resultData, indent=4) outputJsonFile.write(output) outputJsonFile.close() if __name__ == '__main__': main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
thanhthien272/sendEmailPython
send_email.py
send_email.py
py
2,750
python
en
code
0
github-code
6
[ { "api_name": "csv.DictReader", "line_number": 17, "usage_type": "call" }, { "api_name": "csv.writer", "line_number": 28, "usage_type": "call" }, { "api_name": "json.load", "line_number": 37, "usage_type": "call" }, { "api_name": "os.path.isdir", "line_number"...
31975334850
import numpy as np import scipy.ndimage import scipy.misc import glob import torch from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F import torch.optim as optim def loadPsf(psftype, fileformat): path='/gdata/zhoutk/Deconv/'+psftype files=glob.glob(path+'/'+'*'+fileformat) length=len(files) if length==0: print(path+'/') print('invalid psf file path') return im0=scipy.misc.imread(files[0]) shape=im0.shape psf=np.zeros((length, shape[0], shape[1])) files.sort() for i, file in enumerate(files): #print(file) psf[i,:,:]=scipy.misc.imread(file) #print(type(psf)) #print(psf.shape) return psf def convolvePsf3D(volumn, psf, psnr): ##normalize with its largest content psf=psf/np.sum(psf) #change from int8 to float64 volumn=volumn.astype('float64') #convolve psf with volumn #print(volumn.shape) #print(psf.shape) print('max_volumn: ', np.max(volumn)) #volumn=scipy.ndimage.zoom(volumn, 2.0) if torch.cuda.is_available(): psf=psf[:99, :, :] for i in range(len(psf.shape)): psf=np.flip(psf, i) psf=torch.from_numpy(psf.copy()).unsqueeze(0).unsqueeze(0).type(torch.FloatTensor).cuda() psf=Variable(psf, requires_grad=False) volumn=torch.from_numpy(volumn.copy()).unsqueeze(0).unsqueeze(0).type(torch.FloatTensor).cuda() volumn=Variable(volumn, requires_grad=False) output=F.conv3d(volumn, psf, padding=(49, 12, 12)) output=output.squeeze().cpu().data.numpy() else: output=scipy.ndimage.filters.convolve(volumn, psf, mode='constant') print('convolve output shape: ', output.shape) print('max_output: ', np.max(output)) #noise level --- gaussian noise sigma=np.max(output)/np.power(10, psnr/20) print('gaussian noise level:', sigma) noise=np.random.normal(0, sigma, output.shape) #add noise to the output output=np.clip(output+noise, 0, np.max(output)) #output=output[0:101,0:101,0:101] return output
rickyim/DeconvNet
source/PSFConv.py
PSFConv.py
py
2,123
python
en
code
0
github-code
6
[ { "api_name": "glob.glob", "line_number": 15, "usage_type": "call" }, { "api_name": "scipy.ndimage.misc.imread", "line_number": 21, "usage_type": "call" }, { "api_name": "scipy.ndimage.misc", "line_number": 21, "usage_type": "attribute" }, { "api_name": "scipy.ndi...
28118192230
# -*- coding:utf-8 -*- from PySide2.QtCore import Signal from PySide2.QtWidgets import QDialog from core.options import MultiOption from ui.base.constants import ITEM_SEPARATORS from ui.base.ui_add_items import Ui_AddItemsDialog # noinspection PyTypeChecker from utils import warn, splitItems, isEmpty # noinspection PyTypeChecker class AddItemsDialog(QDialog, Ui_AddItemsDialog): ADD_EXCLUDE_MODULES = 0 ADD_HIDDEN_IMPORTS = 1 COLLECT_ALL_SUBMODULES = 3 COLLECT_ALL_DATA = 4 COLLECT_ALL_BINARIES = 5 COLLECT_ALL = 6 COPY_METADATA = 7 DEEP_COPY_METADATA = 8 DEFAULT_ITEMS_SEP = ";" itemsAdded = Signal(MultiOption, list) def __init__(self, parent): super().__init__(parent) self._action = -1 self._option = None self.setupUi() def setupUi(self, _=None): super(AddItemsDialog, self).setupUi(self) self.multiItemSeparatorCombo.addItems(ITEM_SEPARATORS.keys()) self.addButton.clicked.connect(self.onAddItem) def onAddItem(self): content = self.itemsEdit.toPlainText().replace("\n", "").replace("\r", "").strip() if isEmpty(content): warn(self, self.tr(u"Warning"), self.tr("Items cannot be empty!")) return content = content.replace(";", ";").replace(",", ",") sepKey = self.multiItemSeparatorCombo.currentText() items = splitItems(content, sepKey, self.DEFAULT_ITEMS_SEP) self.itemsAdded.emit(self._option, items) self.accept() def display(self, action, option): self._action = action self._option = option self.updateTitle() self.show() def updateTitle(self): if self._action == self.ADD_EXCLUDE_MODULES: self.setWindowTitle(self.tr("Add Exclude Modules")) elif self._action == self.ADD_HIDDEN_IMPORTS: self.setWindowTitle(self.tr("Add Hidden Imports")) elif self._action == self.COLLECT_ALL_SUBMODULES: self.setWindowTitle(self.tr("Collect all submodules from:")) elif self._action == self.COLLECT_ALL_DATA: self.setWindowTitle(self.tr("Collect all data from:")) elif self._action == self.COLLECT_ALL_BINARIES: self.setWindowTitle(self.tr("Collect all binaries from:")) elif self._action == 
self.COLLECT_ALL: self.setWindowTitle(self.tr("Collect all(submodules,data, bin...) from:")) elif self._action == self.COPY_METADATA: self.setWindowTitle(self.tr("Copy metadata for:")) elif self._action == self.DEEP_COPY_METADATA: self.setWindowTitle(self.tr("Copy metadata for(recursively):")) else: raise ValueError("unknown action") def hideEvent(self, event): super().hideEvent(event) self._action = -1 self._option = None self.setWindowTitle("") self.itemsEdit.setText("")
zimolab/PyInstallerGUI
ui/add_items_ui.py
add_items_ui.py
py
2,937
python
en
code
10
github-code
6
[ { "api_name": "PySide2.QtWidgets.QDialog", "line_number": 12, "usage_type": "name" }, { "api_name": "ui.base.ui_add_items.Ui_AddItemsDialog", "line_number": 12, "usage_type": "name" }, { "api_name": "PySide2.QtCore.Signal", "line_number": 24, "usage_type": "call" }, {...
26735943730
import numpy as np from .utils import LinearAnnealer,ExponentialAnnealer import tqdm import torch import torch.nn as nn import wandb from progress.bar import Bar from array2gif import write_gif import copy from .utils import set_seed from .utils import save_rewards_meanvar_plot,get_logger,MLP,ReplayMemory import logging import time from torch.distributions.categorical import Categorical # With spinning up help ;) class VPG: def __init__(self, env, config): for k, v in config.items(): setattr(self, k, v) print(config) self.env = env self.config = copy.deepcopy(config) self.reset(self.seed) def reset(self, seed): self.seed = seed set_seed(self.seed) self.env.seed(self.seed) self.env.action_space.seed(self.seed) obs_size = self.env.observation_space.n if 'n' in self.env.observation_space.__dict__ else self.env.observation_space._shape[0] self.policy = MLP(self.nUnits, obs_size,self.env.action_space.n).to(self.device) self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self.lr) def get_policy(self,s): return Categorical(logits=self.policy(s)) def get_action(self,s): return self.get_policy(s).sample() def update_net(self,ep_mem): def r_to_go(rewards): return torch.cumsum(rewards.flip(dims=[0]),dim=0).flip(dims=[0]) s = torch.stack([exp.s for exp in ep_mem.memory]) a = torch.stack([exp.a for exp in ep_mem.memory]) r = torch.stack([exp.r for exp in ep_mem.memory]) r = r_to_go(r) self.optimizer.zero_grad() ep_loss = -(self.get_policy(s).log_prob(a) * r).mean() ep_loss.backward() self.optimizer.step() return ep_loss def train(self): bar = Bar('{}'.format('Training'), max=self.nepisodes) self.logger = get_logger("VPG",self.env.spec.name) episode_rewards = [] eval_rewards = [] n_experience = 0 last_eval_mean = 0 last_eval_std = 0 step = 0 for ep in (range(self.nepisodes)): self.policy.train() replaymem = ReplayMemory(10000,1) state = self.env.reset(seed=self.seed) ep_reward = 0 for t in range(1,self.max_steps): action = 
self.get_action(torch.tensor(state).unsqueeze(0).float()) new_state, reward, done, info = self.env.step(action.item()) ep_reward += reward replaymem.add_exp(torch.tensor(state).unsqueeze(0).float(),action,reward,torch.tensor(new_state).unsqueeze(0).float(),int(done)) state = new_state step += 1 if done: break self.update_net(replaymem) if self.num_eval_episodes > 0 and ((ep % self.eval_freq )==0): temp_eval_rewards = [] for _ in range(self.num_eval_episodes): temp_eval_rewards.append(self.evaluate()) last_eval_mean = np.mean(temp_eval_rewards) last_eval_std = np.std(temp_eval_rewards) eval_rewards.append(temp_eval_rewards) if self.use_wandb: wandb.log({"episode_reward": ep_reward,'eval_reward_mean':last_eval_mean,'eval_reward_std':last_eval_std}) episode_rewards.append(ep_reward) ep_info = ('Episode '+str(ep)+' reward: ' + str(ep_reward) + ' Mean r over last 20 episodes :' + str(np.mean(episode_rewards[-20:]).item())+' last eval mean,std ' +str(last_eval_mean)+' '+str(last_eval_std)) if "cart" in self.env.spec.name.lower() and np.mean(episode_rewards[-20:]).item() > 480: print("Solved cartpole exiting early") bar.finish() self.logger.info(ep_info) return eval_rewards, np.mean(episode_rewards[-30:]).item() self.logger.info( ep_info) Bar.suffix = ep_info bar.next() bar.finish() return eval_rewards, np.mean(episode_rewards[-30:]).item() def show_results(self): self.evaluate(save_gif=True) def evaluate(self,save_gif = False): self.policy.eval() state = self.env.reset(seed=self.seed) total_reward = 0 frames = [] for t in range(1,self.max_steps): action = self.get_action(torch.tensor(state).unsqueeze(0).float()) new_state, reward, done, info = self.env.step(action.item()) if save_gif: img = self.env.render(mode="rgb_array") frames.append(img) total_reward += reward state = new_state if done : break if save_gif: write_gif([np.transpose(f, axes=[2,0, 1]) for f in frames], 'gifs/vpg_'+self.env.spec.name+'.gif', fps=30) if self.use_wandb: wandb.log({"loss": total_reward}) 
return total_reward
gauthierboeshertz/reel
algos/plearners/vpg.py
vpg.py
py
5,225
python
en
code
0
github-code
6
[ { "api_name": "copy.deepcopy", "line_number": 26, "usage_type": "call" }, { "api_name": "utils.set_seed", "line_number": 32, "usage_type": "call" }, { "api_name": "utils.MLP", "line_number": 36, "usage_type": "call" }, { "api_name": "torch.optim.Adam", "line_n...
15257337134
# -*- coding: utf-8 -*- """ Created on Sat Nov 10 17:30:55 2018 @author: Wioletta """ import cv2 from localbinarypatterns import LocalBinaryPatterns img = cv2.imread('yaleB01_P00A+000E+00.pgm') cv2.imshow('Image',img) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) desc = LocalBinaryPatterns(24, 8) hist = desc.describe(gray) cv2.imshow('Histogram', hist)
wiolettakolasa/IO
test.py
test.py
py
360
python
en
code
0
github-code
6
[ { "api_name": "cv2.imread", "line_number": 11, "usage_type": "call" }, { "api_name": "cv2.imshow", "line_number": 13, "usage_type": "call" }, { "api_name": "cv2.cvtColor", "line_number": 14, "usage_type": "call" }, { "api_name": "cv2.COLOR_BGR2GRAY", "line_num...
22043825261
from flask import Response import json from presentation.contracts import HttpController, HttpRequest def adapt_route(flask_request, controller: HttpController): request = HttpRequest( params=flask_request.args, body=flask_request.json ) data = controller.handle(request) return Response( json.dumps(data.body), status=data.status, mimetype='application/json' ) try: request = HttpRequest( params=flask_request.args, body=flask_request.json ) data = controller.handle(request) return Response( json.dumps(data.body), status=data.status, mimetype='application/json' ) except Exception as e: return Response( json.dumps({"error": "Internal server error"}), status=500, mimetype='application/json' )
panda-coder/py-clean-flask
src/main/adapters/flask_route_adapter.py
flask_route_adapter.py
py
924
python
en
code
1
github-code
6
[ { "api_name": "presentation.contracts.HttpController", "line_number": 6, "usage_type": "name" }, { "api_name": "presentation.contracts.HttpRequest", "line_number": 7, "usage_type": "call" }, { "api_name": "flask.Response", "line_number": 12, "usage_type": "call" }, { ...
71174455228
# -*- coding: utf-8 -*- import time, functools def metric(fn): def decorator(func): @functools.wraps(func) def wrapper(*args,**kw): print(fn) if fn.__str__()==fn else print('no metric args') start_time=time.time() return (func(*args,**kw),print('%s executed in %s ms' % (func.__name__, time.time()-start_time)))[0] return wrapper return decorator if fn.__str__()==fn else decorator(fn) @metric def fast(x, y): time.sleep(0.0012) return x + y; @metric('test') def slow(x, y, z): time.sleep(0.1234) return x * y * z; f = fast(11, 22) s = slow(11, 22, 33) if f != 33: print('测试失败!') elif s != 7986: print('测试失败!') else: print('测试成功!')
kfusac/LearnPython
LiaoxuefengPython/5_FunctionalProgramming/decorator.py
decorator.py
py
756
python
en
code
0
github-code
6
[ { "api_name": "time.time", "line_number": 9, "usage_type": "call" }, { "api_name": "time.time", "line_number": 10, "usage_type": "call" }, { "api_name": "functools.wraps", "line_number": 6, "usage_type": "call" }, { "api_name": "time.sleep", "line_number": 16,...
72052703228
import sys, json from urllib.request import urlopen from collections import OrderedDict list_host = 'http://localhost:5000' list_url = list_host + '/api/3/action/organization_list' get_url = list_host + '/api/3/action/organization_show' contents = urlopen(list_url) org_list = json.load(contents)['result'] for org_name in org_list: org_url = get_url + "?id=" + org_name print("=== Loading " +org_name + " from " + org_url) org_content = urlopen(org_url) org_obj = json.load(org_content)['result'] org = OrderedDict() for key in ('name', 'title', 'description', 'site', 'email', 'region', 'identifier'): if key in org_obj and org_obj[key]: org[key] = org_obj[key] org_filename = "orgs/"+org_name+".json" with open(org_filename,"w+") as f: f.write(json.dumps(org, indent=4)) print("=== Saved in "+org_filename+"\n")
italia/public-opendata-sources
export_orgs.py
export_orgs.py
py
888
python
en
code
17
github-code
6
[ { "api_name": "urllib.request.urlopen", "line_number": 9, "usage_type": "call" }, { "api_name": "json.load", "line_number": 10, "usage_type": "call" }, { "api_name": "urllib.request.urlopen", "line_number": 17, "usage_type": "call" }, { "api_name": "json.load", ...
20538743319
# https://leetcode.com/problems/rotting-oranges/ """ Time complexity:- O(N) Space Complexity:- O(N) """ """ Intuition: The algorithm uses Breadth-First Search (BFS) to simulate the rotting process, starting from initially rotten oranges. The queue (q) is used to keep track of the rotten oranges and their coordinates. The process continues until either all fresh oranges are rotten or there are no more rotten oranges. The time variable keeps track of the minutes passed during the rotting process. If there are still fresh oranges after the simulation, it means some oranges cannot be rotten, and -1 is returned. The algorithm follows a simple and intuitive approach of simulating the rotting process through BFS traversal. """ import collections from typing import List class Solution: def orangesRotting(self, grid: List[List[int]]) -> int: q = collections.deque() # Using deque for efficient pop and append operations fresh = 0 # Counter for fresh oranges time = 0 # Variable to track time (minutes) # Iterate through the grid to identify fresh and rotten oranges for r in range(len(grid)): for c in range(len(grid[0])): if grid[r][c] == 1: fresh += 1 if grid[r][c] == 2: q.append((r, c)) # Add coordinates of rotten oranges to the queue # Directions to check neighboring cells (up, down, left, right) directions = [[0, 1], [0, -1], [1, 0], [-1, 0]] # BFS traversal to simulate rotting process while fresh > 0 and q: length = len(q) for i in range(length): r, c = q.popleft() # Pop the front of the queue for dr, dc in directions: row, col = r + dr, c + dc # Check if the neighboring cell is in bounds and contains a fresh orange if ( row in range(len(grid)) and col in range(len(grid[0])) and grid[row][col] == 1 ): grid[row][col] = 2 # Mark the orange as rotten q.append((row, col)) # Add the coordinates to the queue fresh -= 1 # Decrease the count of fresh oranges time += 1 # Increment time after processing each minute # Return the time required if all fresh oranges are rotten, otherwise 
return -1 return time if fresh == 0 else -1
Amit258012/100daysofcode
Day92/rotten_oranges.py
rotten_oranges.py
py
2,532
python
en
code
0
github-code
6
[ { "api_name": "typing.List", "line_number": 22, "usage_type": "name" }, { "api_name": "collections.deque", "line_number": 23, "usage_type": "call" } ]
28383267446
import copy
import functools
import os
import random
import torch
import torch.nn.functional as F
import blobfile as bf
import torchvision.utils as vutils
import numpy as np
import torch as th
import torch.distributed as dist
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.optim import Adam
from ..models.unet.fp16_util import zero_grad
from tqdm import tqdm
from ..utils import dist_util
import matplotlib.pyplot as plt
from .plotters import ImPlotter
from .config_getters import get_model


class IPFStepBase(th.nn.Module):
    """Base trainer for one IPF (iterative proportional fitting) half-step.

    Wraps a model with an Adam optimizer, EMA parameter copies, DDP (when
    CUDA is available), checkpoint save/resume via blobfile, and periodic
    sample plotting through the backward diffusion.
    """

    def __init__(
        self,
        model,
        forward_diffusion,
        backward_diffusion,
        data_loader,
        prior_loader,
        cache_data_loader = None,
        args = None,
        forward_model = None,
        cache_loader = False,
        resume_checkpoint = 0,
        checkpoint_directory = './',
        plot_directory = './',
    ):
        super().__init__()
        # Per-rank deterministic seeding (rank-dependent so ranks differ).
        self.set_seed(dist.get_rank()+0)

        ema_rate = args.ema_rate
        save_interval=args.save_interval
        lr_anneal_steps = 0  # annealing disabled by default
        self.args = args
        self.model = model
        self.forward_diffusion = forward_diffusion
        self.backward_diffusion = backward_diffusion
        self.forward_model = forward_model
        self.prior_loader = prior_loader
        self.data_loader = data_loader
        self.cache_data_loader = cache_data_loader
        self.num_steps = self.args.nit
        self.num_iter = self.args.num_iter
        self.lr_anneal_steps = lr_anneal_steps
        self.batch_size = self.args.batch_size
        self.cache_loader = cache_loader
        self.cache_refresh = self.args.cache_refresh_stride
        self.lr = self.args.lr
        # True when the dataset is labelled (conditional sampling).
        self.classes = self.args.num_data_classes > 0
        self.weight_decay = self.args.weight_decay
        # ema_rate may be a single float or a comma-separated string.
        self.ema_rate = (
            [ema_rate]
            if isinstance(ema_rate, float)
            else [float(x) for x in ema_rate.split(",")]
        )
        self.save_interval = save_interval
        self.checkpoint_dir = checkpoint_directory
        self.plot_dir = plot_directory
        self.plotter = ImPlotter(im_dir=self.plot_dir, plot_level=1)
        self.step = 0
        self.resume_step = resume_checkpoint
        self.resume_checkpoint = resume_checkpoint
        # Effective batch across all ranks.
        self.global_batch = self.batch_size * dist.get_world_size()
        self.model_params = list(self.model.parameters())
        # No mixed-precision master copy here: master == model params.
        self.master_params = self.model_params
        self.sync_cuda = th.cuda.is_available()
        self._load_and_sync_parameters()

        # Optimizers
        self.opt = Adam(self.master_params, lr=self.lr)
        if self.resume_step:
            self._load_optimizer_state()
            # Model was resumed, either due to a restart or a checkpoint
            # being specified at the command line.
            self.ema_params = [
                self._load_ema_parameters(rate) for rate in self.ema_rate
            ]
        else:
            # Fresh start: EMA copies initialised from the current weights.
            self.ema_params = [
                copy.deepcopy(self.master_params) for _ in range(len(self.ema_rate))
            ]

        if th.cuda.is_available():
            self.use_ddp = True
            self.ddp_model = DDP(
                self.model,
                device_ids=[dist_util.dev()],
                output_device=dist_util.dev(),
                broadcast_buffers=False,
                bucket_cap_mb=128,
                find_unused_parameters=False,
            )
        else:
            # NOTE(review): reconstructed from flattened source - the original
            # indentation of these two statements is ambiguous; confirm
            # use_ddp/ddp_model placement against the upstream repository.
            if dist.get_world_size() > 1:
                self.use_ddp = False
            self.ddp_model = self.model

    def _load_and_sync_parameters(self):
        # Load weights on rank 0 (if a checkpoint exists), then broadcast.
        resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint

        if resume_checkpoint:
            self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
            if dist.get_rank() == 0:
                self.model.load_state_dict(
                    dist_util.load_state_dict(
                        resume_checkpoint, map_location=dist_util.dev()
                    )
                )

        dist_util.sync_params(self.model.parameters())

    def _load_ema_parameters(self, rate):
        # Default to a copy of the current master params; replace with the
        # checkpointed EMA weights when an EMA checkpoint is found.
        ema_params = copy.deepcopy(self.master_params)

        main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
        ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
        if ema_checkpoint:
            if dist.get_rank() == 0:
                state_dict = dist_util.load_state_dict(
                    ema_checkpoint, map_location=dist_util.dev()
                )
                ema_params = self._state_dict_to_master_params(state_dict)

        dist_util.sync_params(ema_params)
        return ema_params

    def _load_optimizer_state(self):
        # Optimizer state lives next to the model checkpoint as optNNNNNN.pt.
        main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
        opt_checkpoint = bf.join(
            bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
        )
        if bf.exists(opt_checkpoint):
            state_dict = dist_util.load_state_dict(
                opt_checkpoint, map_location=dist_util.dev()
            )
            self.opt.load_state_dict(state_dict)

    def optimize_step(self):
        # Apply one optimizer step, then refresh every EMA copy.
        #self._anneal_lr()
        # if self.args.grad_clipping:
        #     clipping_param = self.args.grad_clip
        #     total_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), clipping_param)
        self.opt.step()
        for rate, params in zip(self.ema_rate, self.ema_params):
            update_ema(params, self.master_params, rate=rate)

    def _anneal_lr(self):
        # Linear LR decay toward zero over lr_anneal_steps; no-op when 0.
        if not self.lr_anneal_steps:
            return
        frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
        lr = self.lr * (1 - frac_done)
        for param_group in self.opt.param_groups:
            param_group["lr"] = lr

    def save(self):
        """Plot samples from each EMA model (rank 0) and checkpoint everything."""
        if dist.get_rank() == 0:
            # Fixed seed so the plotted batch is comparable across saves.
            self.set_seed(0)
            init_samples, labels = next(self.prior_loader)
            init_samples = init_samples.to(dist_util.dev())
            labels = labels.to(dist_util.dev()) if labels is not None else None
            sample_model = get_model(self.args)
            for rate, params in zip(self.ema_rate, self.ema_params):
                state_dict = self._master_params_to_state_dict(params)
                sample_model.load_state_dict(state_dict)
                sample_model = sample_model.to(dist_util.dev())
                x_tot_plot = self.backward_diffusion.sample(init_samples, labels, t_batch=None, net=sample_model)
                filename = 'ema{0}_step{1}.png'.format(rate, self.step)
                self.plotter.plot(init_samples, x_tot_plot, filename)
            sample_model = None
            torch.cuda.empty_cache()

            # init_samples, labels = next(self.data_loader)
            # init_samples = init_samples.to(dist_util.dev())
            # labels = labels.to(dist_util.dev()) if labels is not None else None
            # x_tot_plot = self.forward_diffusion.sample(init_samples, labels, t_batch=None, net=self.forward_model)
            # filename = 'sample{0}_step{1}.png'.format(rate, self.step)
            # self.plotter.plot(init_samples, x_tot_plot, filename)

        def save_checkpoint(rate, params):
            # rate 0/falsy -> raw model weights; otherwise EMA weights.
            state_dict = self._master_params_to_state_dict(params)
            if dist.get_rank() == 0:
                if not rate:
                    filename = f"model{(self.step+self.resume_step):06d}.pt"
                else:
                    filename = f"ema_{rate}_{(self.step+self.resume_step):06d}.pt"
                with bf.BlobFile(bf.join(self.checkpoint_dir, filename), "wb") as f:
                    th.save(state_dict, f)

        save_checkpoint(0, self.master_params)
        for rate, params in zip(self.ema_rate, self.ema_params):
            save_checkpoint(rate, params)

        if dist.get_rank() == 0:
            with bf.BlobFile(
                bf.join(self.checkpoint_dir, f"opt{(self.step+self.resume_step):06d}.pt"),
                "wb",
            ) as f:
                th.save(self.opt.state_dict(), f)

        # All ranks wait until rank 0 has finished writing.
        dist.barrier()

    def _master_params_to_state_dict(self, master_params):
        # Re-pack the flat master-parameter list into a model state dict,
        # relying on named_parameters() order matching master_params order.
        state_dict = self.model.state_dict()
        for i, (name, _value) in enumerate(self.model.named_parameters()):
            assert name in state_dict
            state_dict[name] = master_params[i]
        return state_dict

    def _state_dict_to_master_params(self, state_dict):
        # Inverse of _master_params_to_state_dict.
        params = [state_dict[name] for name, _ in self.model.named_parameters()]
        return params

    def log_step(self):
        # Hook for subclasses; intentionally a no-op here.
        return

    def get_blob_logdir(self):
        # Directory used for plots/log artifacts.
        return self.plot_dir

    def set_seed(self, seed=0):
        # Seed every RNG the training loop touches.
        torch.manual_seed(seed)
        random.seed(seed)
        np.random.seed(seed)
        torch.cuda.manual_seed_all(seed)


def parse_resume_step_from_filename(filename):
    """
    Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
    checkpoint's number of steps.
    """
    split = filename.split("model")
    if len(split) < 2:
        return 0
    split1 = split[-1].split(".")[0]
    try:
        return int(split1)
    except ValueError:
        return 0


def find_resume_checkpoint():
    # On your infrastructure, you may want to override this to automatically
    # discover the latest checkpoint on your blob storage, etc.
    return None


def find_ema_checkpoint(main_checkpoint, step, rate):
    # EMA checkpoints sit next to the main checkpoint as ema_<rate>_NNNNNN.pt.
    if main_checkpoint is None:
        return None
    filename = f"ema_{rate}_{(step):06d}.pt"
    path = bf.join(bf.dirname(main_checkpoint), filename)
    if bf.exists(path):
        return path
    return None


def update_ema(target_params, source_params, rate=0.99):
    """
    Update target parameters to be closer to those of source parameters using
    an exponential moving average.

    :param target_params: the target parameter sequence.
    :param source_params: the source parameter sequence.
    :param rate: the EMA rate (closer to 1 means slower).
    """
    for targ, src in zip(target_params, source_params):
        targ.detach().mul_(rate).add_(src, alpha=1 - rate)
JTT94/schrodinger_bridge
bridge/trainer/ipf_base.py
ipf_base.py
py
10,216
python
en
code
0
github-code
6
[ { "api_name": "torch.nn", "line_number": 22, "usage_type": "attribute" }, { "api_name": "torch.distributed.get_rank", "line_number": 41, "usage_type": "call" }, { "api_name": "torch.distributed", "line_number": 41, "usage_type": "name" }, { "api_name": "plotters.I...
30728237360
import fileinput import sys from collections import deque, defaultdict, Counter from functools import lru_cache from itertools import permutations, combinations, combinations_with_replacement, product sys.setrecursionlimit(10000000) dd = defaultdict(lambda: 0) dx = [0, 0, -1, 1] # NSWE dy = [-1, 1, 0, 0] # NSWE p1 = 6 p2 = 10 def part1(p1, p2): die = 1 score1 = score2 = 0 turn = True rolled = 0 while (score1 < 1000 and score2 < 1000): d1 = die die = die % 100 + 1 d2 = die die = die % 100 + 1 d3 = die die = die % 100 + 1 rolled += 3 roll = d1+d2+d3 if turn: p1 = (p1 - 1 + roll) % 10 + 1 score1 += p1 else: p2 = (p2 - 1 + roll) % 10 + 1 score2 += p2 turn = not turn return min(score1, score2) * rolled @lru_cache(maxsize=None) def old_dp(p1, p2, score1, score2, turn): if score1 >= 21: return Counter({"p1": 1}) elif score2 >= 21: return Counter({"p2": 1}) s = Counter() for i, j, k in product([1, 2, 3], repeat=3): if turn: z = (p1 - 1 + i+j+k) % 10 + 1 s += old_dp(z, p2, score1 + z, score2, not turn) else: z = (p2 - 1 + i+j+k) % 10 + 1 s += old_dp(p1, z, score1, score2+z, not turn) return s dices = [] for i, j, k in product([1, 2, 3], repeat=3): dices.append(i+j+k) dices = Counter(dices) @lru_cache(maxsize=None) def dp(p1, p2, score1, score2): if score1 >= 21: return (1,0) elif score2 >= 21: return (0,1) a = b = 0 for k,times in dices.items(): z = (p1 - 1 + k) % 10 + 1 x,y = dp(p2, z, score2, score1 + z) a+=times * x b+=times * y return (b,a) # print(part1(p1, p2), max(old_dp(p1, p2, 0, 0, True))) print(part1(p1, p2), max(dp(p1, p2, 0, 0)))
mdaw323/alg
adventofcode2021/21.py
21.py
py
1,904
python
en
code
0
github-code
6
[ { "api_name": "sys.setrecursionlimit", "line_number": 6, "usage_type": "call" }, { "api_name": "collections.defaultdict", "line_number": 7, "usage_type": "call" }, { "api_name": "collections.Counter", "line_number": 44, "usage_type": "call" }, { "api_name": "colle...
26396707826
# Establish the Python Logger
"""Convenience wrapper around the stdlib ``logging`` module.

Provides start/stop bookkeeping for a named "solution" run, optional spoken
output via ``talking_code``, and a combined print/speak/log helper
(:func:`pvlog`).
"""
import logging  # built in python library that does not need to be installed
import time
from datetime import datetime
import os
import talking_code as tc

# Module-level switches controlling whether log lines are spoken aloud.
speaking_log = False    # speak every pvlog line
speaking_steps = False  # speak only pvlog lines containing "Step"


def set_speaking_log(on_off_setting=False):
    """Turn spoken output of every pvlog line on or off."""
    global speaking_log
    speaking_log = on_off_setting


def get_speaking_log():
    """Return whether every pvlog line is spoken."""
    return speaking_log


def set_speaking_steps(on_off_setting=False):
    """Turn spoken output of 'Step' pvlog lines on or off."""
    global speaking_steps
    speaking_steps = on_off_setting


def get_speaking_steps():
    """Return whether 'Step' pvlog lines are spoken."""
    return speaking_steps


def talk(speech):
    """Speak *speech* aloud via the talking_code helper."""
    tc.say(speech)


def set_start_time():
    """Return the current wall-clock time (seconds since the epoch).

    Note: the original module defined this function twice with identical
    bodies; the duplicate definition has been removed.
    """
    return time.time()


def create_logger_start(solution_name, start_time):
    """Configure the root logger to write ``<solution_name>.log`` (truncating
    any previous log) and record the process start.

    Returns the ``logging`` module so callers can keep a handle on it.
    """
    logging.basicConfig(level=logging.INFO,
                        filename=solution_name + '.log', filemode='w',
                        format='%(asctime)s - %(levelname)s - %(message)s')
    # NOTE(review): the [:-3] slice trims '.%f' from the *format string*, so
    # the stamp has whole-second resolution (probably intended milliseconds).
    process_start_time_stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'[:-3])
    logging.info(f'START {solution_name} ' + ('=' * 45))
    logging.info(f'START {solution_name} Start Time = {process_start_time_stamp}')
    logging.info(f'{solution_name} Step 0 - Initialize the configuration file parser')
    return logging


def create_logger_Start(solution_name, start_time):
    """Alias of :func:`create_logger_start` (the original module carried both
    spellings with identical bodies); kept for existing callers."""
    return create_logger_start(solution_name, start_time)


def append_log_file(solution_name):
    """Append the current run's log to ``<solution_name>_history.log``,
    delimited by '>' / '<' marker lines. Returns the appended log text."""
    log_filename = solution_name + '.log'
    historical_log_filename = solution_name + '_history.log'
    with open(log_filename) as log_file:
        log_content = log_file.read()
    with open(historical_log_filename, 'a') as historical_log_file:
        print(120 * ' ', file=historical_log_file)
        print(120 * '>', file=historical_log_file)
        print(log_content, file=historical_log_file)
        print(120 * '<', file=historical_log_file)
        print(120 * ' ', file=historical_log_file)
    return log_content


def calculate_process_performance(solution_name, process_start_time):
    """Log how long the process took, bucketed into coarse duration bands.

    Returns a status string. NOTE(review): on every normal path this is the
    initial 'Duration Classification Error' text - it is only re-assigned in
    the ``duration == 3.0`` fall-through branch; preserved as-is in case
    callers match on it.
    """
    stop_time = time.time()  # establish the stop time of the overall process.
    process_duration = stop_time - process_start_time
    process_stop_time_stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'[:-3])
    logging.info(f'PERFORMANCE {solution_name} The total process duration was:{process_duration:.2f}')
    logging.info(f'PERFORMANCE {solution_name} Stop Time = {process_stop_time_stamp}')
    status = f'END {solution_name} Duration Classification Error - Process Duration UNKNOWN'
    if process_duration > 600.0:
        logging.info(f'PERFORMANCE {solution_name} LONG process duration greater than 10 Minutes:{process_duration:.2f}')
        logging.info(f'PERFORMANCE {solution_name} Performance optimization is required')
    elif process_duration > 120.0:
        logging.info(f'PERFORMANCE {solution_name} Medium process duration greater than 3 minutes:{process_duration:.2f}')
        logging.info(f'PERFORMANCE {solution_name} Performance optimization is optional')
    elif process_duration > 3.0:
        logging.info(f'PERFORMANCE {solution_name} Low process duration less than 3 minutes:{process_duration:.2f}')
        logging.info(f'PERFORMANCE {solution_name} Performance optimization is optional')
    elif process_duration < 3.0:
        logging.info(f'PERFORMANCE {solution_name} Short process duration less than 3 Seconds:{process_duration:.2f}')
        logging.info(f'PERFORMANCE {solution_name} Performance optimization is not reccomended')
    else:
        status = f'PERFORMANCE {solution_name} Duration Classification Error - Process Duration UNKNOWN'
    logging.info(f'END {solution_name} ' + ('=' * 45))
    return status


def pvlog(log_level, log_string):
    """Print *log_string*, optionally speak it, and log it at *log_level*.

    Unknown levels are printed/spoken but not logged (original behavior).
    """
    global speaking_log
    global speaking_steps
    print(log_string)
    if speaking_log:
        tc.say(log_string)
    if speaking_steps and log_string.find("Step") > -1:
        tc.say(log_string)
    # 'warn' dispatches to logging.warning: logging.warn is a deprecated alias.
    log_fn = {
        'debug': logging.debug,
        'info': logging.info,
        'warn': logging.warning,
        'error': logging.error,
        'critical': logging.critical,
    }.get(log_level)
    if log_fn is not None:
        log_fn(log_string)
JoeEberle/kids_ABC_book
quick_logger.py
quick_logger.py
py
5,079
python
en
code
1
github-code
6
[ { "api_name": "talking_code.say", "line_number": 25, "usage_type": "call" }, { "api_name": "time.time", "line_number": 29, "usage_type": "call" }, { "api_name": "logging.basicConfig", "line_number": 33, "usage_type": "call" }, { "api_name": "logging.INFO", "li...
16616067005
from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.chrome.service import Service from webdriver_manager.chrome import ChromeDriverManager from selenium.common.exceptions import NoSuchElementException import logging def has_booking_started(url: str) -> bool: options = webdriver.ChromeOptions() options.add_argument("--headless=new") options.add_argument('--no-sandbox') options.add_argument('--disable-dev-shm-usage') options.add_experimental_option('excludeSwitches', ['enable-logging']) driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=options) driver.implicitly_wait(0.5) driver.get(url) try: if driver.find_elements(By.XPATH, '//button[@data-name="get-tickets"]'): logging.info("Ticket booking has started") return True else: logging.info("Ticket booking hasn't started yet") return False except NoSuchElementException: logging.info("Ticket booking hasn't started yet") return False finally: driver.close()
CreatorSky/cineplex-notifier
utils/selenium_utils.py
selenium_utils.py
py
1,128
python
en
code
0
github-code
6
[ { "api_name": "selenium.webdriver.ChromeOptions", "line_number": 10, "usage_type": "call" }, { "api_name": "selenium.webdriver", "line_number": 10, "usage_type": "name" }, { "api_name": "selenium.webdriver.Chrome", "line_number": 15, "usage_type": "call" }, { "api...
70929713788
# -*- coding: utf-8 -*-
"""
Created on Wed Sep  6 11:55:47 2023

@author: Gilberto

Straight-line (equal-principal) amortization schedule generator: the same
principal amount is repaid every period, with interest accrued on the
declining balance under a configurable day-count basis.
"""

import pandas as pd
from datetime import datetime, timedelta


class StraightLineAmortization:
    def __init__(self, settlement_date, maturity_date, first_payment_date, notional_amount, rate, basis_numerator, basis_denominator, amortization_years, payment_frequency):
        # Dates may be "m/d/Y" strings or datetime objects.
        self.settlement_date = datetime.strptime(settlement_date, "%m/%d/%Y") if isinstance(settlement_date, str) else settlement_date
        self.maturity_date = datetime.strptime(maturity_date, "%m/%d/%Y") if isinstance(maturity_date, str) else maturity_date
        self.first_payment_date = datetime.strptime(first_payment_date, "%m/%d/%Y") if isinstance(first_payment_date, str) else first_payment_date
        self.notional_amount = notional_amount
        self.rate = rate/100  # rate is supplied in percent
        self.basis_numerator = basis_numerator      # "ACT" or 30/xx convention
        self.basis_denominator = basis_denominator  # 360 or 365
        self.amortization_years = amortization_years

        # Add the payment_frequency variable ("1M", "3M" or "6M")
        self.payment_frequency = payment_frequency

        # Number of periods follows from the payment frequency.
        if self.payment_frequency == "1M":
            self.num_periods = self.amortization_years * 12
        elif self.payment_frequency == "3M":
            self.num_periods = self.amortization_years * 4
        elif self.payment_frequency == "6M":
            self.num_periods = self.amortization_years * 2

        # Equal principal repaid each period (straight-line amortization).
        self.period_principal_payment = self.notional_amount / self.num_periods

    def compute_days(self, start_date, end_date):
        # Day count used for interest accrual under the configured basis.
        if self.basis_numerator == "ACT":
            days = (end_date - start_date).days
        else:
            days = 30  # assuming each month has 30 days
        if self.basis_denominator == 360:
            return days
        else:
            # NOTE(review): scales the day count onto a 360 basis when the
            # denominator is 365 - confirm this matches the intended
            # day-count convention.
            return days / 365.0 * 360.0

    def get_next_dates(self, current_date):
        # The stub period runs from settlement to the explicit first payment date.
        if current_date == self.settlement_date:
            return self.first_payment_date, self.first_payment_date

        # Months to advance per the payment frequency.
        if self.payment_frequency == "1M":
            months_increment = 1
        elif self.payment_frequency == "3M":
            months_increment = 3
        elif self.payment_frequency == "6M":
            months_increment = 6

        next_month = (current_date.month + months_increment - 1) % 12 + 1
        next_year = current_date.year + (current_date.month - 1 + months_increment) // 12
        # NOTE(review): .replace(day=...) raises ValueError when the target
        # month is shorter than first_payment_date.day (e.g. day 31 in a
        # 30-day month) - confirm payment days are always <= 28.
        period_end_date = current_date.replace(year=next_year, month=next_month, day=self.first_payment_date.day)
        payment_date = period_end_date

        # If it's a weekend, move to the next business day for payment date
        while payment_date.weekday() >= 5:
            payment_date += timedelta(days=1)

        return period_end_date, payment_date

    def generate_schedule(self):
        """Build the full schedule as a pandas DataFrame, one row per period."""
        data = []
        current_date = self.settlement_date
        payment_number = 1
        notional_amount = self.notional_amount

        while current_date < self.maturity_date and payment_number <= self.num_periods:
            period_start_date = current_date
            period_end_date, payment_date = self.get_next_dates(current_date)
            days_in_period = self.compute_days(period_start_date, period_end_date)
            actual_days_in_period = (period_end_date - period_start_date).days

            # Interest accrues on the balance outstanding during the period.
            interest_for_period = (notional_amount * self.rate * days_in_period) / self.basis_denominator
            period_payment = round(interest_for_period + self.period_principal_payment,2)

            notional_amount -= self.period_principal_payment

            # 'Outstanding Balance' is reported *before* this period's
            # principal repayment (hence the add-back).
            data.append([period_start_date, period_end_date, payment_date, payment_number,
                         notional_amount + self.period_principal_payment, period_payment,
                         self.period_principal_payment, actual_days_in_period])

            current_date = period_end_date  # Start next period the same day as the previous period's end date
            payment_number += 1

        df = pd.DataFrame(data, columns=['Period Start Date', 'Period End Date', 'Payment Date', 'Payment Number',
                                         'Outstanding Balance', 'Period Payment', 'Principal Payment',
                                         'Actual Days in Period'])
        return df


# Usage
sla = StraightLineAmortization("8/1/2022", "8/1/2032", "9/1/2022", 600000, 7.03, "ACT", 360, 25, "3M")
amortization_schedule = sla.generate_schedule()
print(amortization_schedule)
gdelacruzv/Amortization_calculator
straightline_v2.py
straightline_v2.py
py
4,582
python
en
code
0
github-code
6
[ { "api_name": "datetime.datetime.strptime", "line_number": 14, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 14, "usage_type": "name" }, { "api_name": "datetime.datetime.strptime", "line_number": 15, "usage_type": "call" }, { "api_name"...
29188042884
""" Problem Statement A zero-indexed array A of length N contains all integers from 0 to N-1. Find and return the longest length of set S, where S[i] = {A[i], A[A[i]], A[A[A[i]]], ... } subjected to the rule below. Suppose the first element in S starts with the selection of element A[i] of index = i, the next element in S should be A[A[i]], and then A[A[A[i]]]… By that analogy, we stop adding right before a duplicate element occurs in S. Example: Input: A = [5,4,0,3,1,6,2] Output: 4 Explanation: A[0] = 5, A[1] = 4, A[2] = 0, A[3] = 3, A[4] = 1, A[5] = 6, A[6] = 2. One of the longest S[K]: S[0] = {A[0], A[5], A[6], A[2]} = {5, 6, 2, 0} """ from typing import List def was_visited(value): return value < 0 def was_not_visited(value): return value >= 0 class Solution: @classmethod def array_nesting(cls, nums: List[int]) -> int: result = 0 for index, _ in enumerate(nums): if was_visited(nums[index]): continue else: next_pos = nums[index] count = 0 while was_not_visited(nums[next_pos]): actual_pos = next_pos next_pos = nums[next_pos] count += 1 nums[actual_pos] = -1 result = count if count > result else result return result
walterjgsp/algorithms
Python/problems/problem_0008_array_nesting.py
problem_0008_array_nesting.py
py
1,388
python
en
code
6
github-code
6
[ { "api_name": "typing.List", "line_number": 35, "usage_type": "name" } ]
10976102669
import numpy as np import matplotlib.pyplot as plt def linear_LS(xi, yi): a = np.empty(2) n = xi.shape[0] c0 = np.sum(xi) c1 = np.sum(xi ** 2) c2 = np.sum(yi) c3 = np.sum(xi * yi) a[0] = (c0*c3 - c1*c2) / (c0*c0 - n*c1) a[1] = (c0*c2 - n*c3) / (c0*c0 - n*c1) return a def parabolic_LS(xi, yi): n = xi.shape[0] x1 = np.sum(xi) x2 = np.sum(xi ** 2) x3 = np.sum(xi ** 3) x4 = np.sum(xi ** 4) y1 = np.sum(yi) x1y1 = np.sum(xi * yi) x2y1 = np.sum(xi**2 * yi) A_mat = np.array([[n, x1, x2], [x1, x2, x3], [x2, x3, x4]]) b_vec = np.array([y1, x1y1, x2y1]) a_vec = np.linalg.solve(A_mat, b_vec) return a_vec def sketch(x, y, flag): if flag == 1: name = "linear" else: name = "parabolic" plt.plot(x, y, label=name) plt.xlabel("x") plt.ylabel("T") plt.grid() plt.legend() def scatter(x, y): plt.scatter(x, y, label="data", color="black") plt.xlabel("x") plt.ylabel("T") plt.grid() plt.legend() def main(): distance = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]) temperature = np.array([14.6, 18.5, 36.6, 30.8, 59.2, 60.1, 62.2, 79.4, 99.9]) scatter(distance, temperature) x_plot = np.linspace(0, 10, 100) coef = linear_LS(distance, temperature) y_plot = coef[0] + coef[1]*x_plot print(coef) sketch(x_plot, y_plot, 1) x_plot = np.linspace(0, 10, 100) coef = parabolic_LS(distance, temperature) y_plot = coef[0] + coef[1]*x_plot + coef[2]*x_plot**2 print(coef) sketch(x_plot, y_plot, 2) plt.show() if __name__ == '__main__': main() input("请按任意键以继续......")
LiBingbin/Computational_Physics
PythonProject/hw04/hw04_t2.py
hw04_t2.py
py
1,781
python
en
code
0
github-code
6
[ { "api_name": "numpy.empty", "line_number": 6, "usage_type": "call" }, { "api_name": "numpy.sum", "line_number": 8, "usage_type": "call" }, { "api_name": "numpy.sum", "line_number": 9, "usage_type": "call" }, { "api_name": "numpy.sum", "line_number": 10, "...
73730402429
"""Lambda that configures a Minecraft EC2 server after boot via SSM:
starts the game server, records the NIC name, and installs/configures the
CloudWatch agent when it is not already running."""
import boto3
import logging
import os
import json
import time
from datetime import datetime
from jsonpath_ng.ext import parse

import helpers

logger = logging.getLogger()
logger.setLevel(logging.INFO)

utl = helpers.Utils()
dyn = helpers.Dyn()
ssm = boto3.client('ssm')
ec2 = boto3.client('ec2')

appValue = os.getenv('TAG_APP_VALUE')
appName = os.getenv('APP_NAME')


def getNicInformation(instance):
    """Record the instance's primary NIC name in SSM Parameter Store.

    Runs a shell one-liner on the instance that greps ifconfig for the
    UP,BROADCAST interface and writes its name to
    /amplify/minecraftserverdashboard/<instance>/nic.
    """
    logger.info(instance + "- getNicInformation")
    ssm_rsp = ssm.send_command(
        InstanceIds=[instance],
        DocumentName='AWS-RunShellScript',
        TimeoutSeconds=30,
        Parameters={
            'commands': [
                "NIC=$(ifconfig -a | grep UP,BROADCAST | awk '{print substr($1, 1, length($1)-1)}');aws ssm put-parameter --name '/amplify/minecraftserverdashboard/" + instance + "/nic' --type 'String' --value $NIC"
            ]
        },
    )
    resp = checkExecutionLoop(instance, ssm_rsp["Command"]["CommandId"])
    logger.info(resp)


def minecraftInit(instance):
    """Launch the Minecraft start script registered for *instance*.

    Reads runCommand/workingDir from the instance's DynamoDB record.
    Returns False when the record or the launch settings are missing;
    returns None after dispatching the command (original behavior).
    """
    logger.info(instance + " - minecraftInit")
    instanceInfo = dyn.GetInstanceAttr(instance)
    logger.info(instanceInfo)
    if instanceInfo['code'] != 200:
        logger.warning("Instance data does not exist")
        return False

    if 'runCommand' in instanceInfo['msg'] and 'workingDir' in instanceInfo['msg']:
        script = os.path.join(instanceInfo['msg']['workingDir'], instanceInfo['msg']['runCommand'])
        ssm_rsp = ssm.send_command(
            InstanceIds=[instance],
            DocumentName='AWS-RunShellScript',
            TimeoutSeconds=30,
            Parameters={
                'commands': [script],
                'workingDirectory': [instanceInfo['msg']['workingDir']],
            },
        )
        logger.info(ssm_rsp)
    else:
        logger.warning("RunCommand or Working Directories are not defined")
        return False


def cwAgentStatusCheck(instance):
    """Check the CloudWatch agent on *instance*; (re)configure it if running.

    Returns a {"code": ..., "msg": ...} dict. Code 200 means the agent is
    running and has been pointed at the shared SSM configuration.
    """
    logger.info(instance + " - cwAgentStatusCheck")
    ssmAgentStatus = ssmExecCommands(instance, "AmazonCloudWatch-ManageAgent", {"action": ["status"], "mode": ["ec2"]})

    # checkExecutionLoop can return "Failed"/"Cancelled" (str) or None instead
    # of a details dict; indexing those with ["Status"] used to raise.
    if not isinstance(ssmAgentStatus, dict):
        return {"code": 500, "msg": "Failed"}

    # Checking Agent Status if Success. Failed messages occurs when the CloudWatch Agent is not installed.
    if ssmAgentStatus["Status"] == "Success":
        agentDetails = ""
        jpexpr = parse("$.pluginsDetails[?(@.Name[:] == 'ControlCloudWatchAgentLinux')].Output")
        for i in jpexpr.find(ssmAgentStatus):
            agentDetails = i.value

        if len(agentDetails) > 5:
            agentDetailsJson = json.loads(agentDetails)
            if agentDetailsJson["status"] == "running":
                logger.info("Agent is already running. Version :" + agentDetailsJson["version"])
                # AmazonCloudWatch Agent configuration
                logger.info("Configuring agent")
                ssmAgentConfig = ssmExecCommands(instance, "AmazonCloudWatch-ManageAgent", {"action": ["configure"], "mode": ["ec2"], "optionalConfigurationLocation": ["/amplify/minecraftserverdashboard/amazoncloudwatch-linux"], "optionalConfigurationSource": ["ssm"], "optionalRestart": ["yes"]})
                logger.info(ssmAgentConfig)
                return {"code": 200, "msg": "Agent is already running. Version :" + agentDetailsJson["version"]}
            else:
                logger.info("Agent Status: " + agentDetailsJson["status"] + " - configuration Status: " + agentDetailsJson["configstatus"])
                return {"code": 400, "msg": "Agent Status: " + agentDetailsJson["status"] + " - configuration Status: " + agentDetailsJson["configstatus"]}
        else:
            # BUGFIX: this branch previously logged agentDetailsJson, which is
            # never bound when no usable plugin output exists (NameError).
            logger.warning(agentDetails)
            return {"code": 500, "msg": "Detailed information not available"}
    else:
        return {"code": 500, "msg": "Failed"}


def cwAgentInstall(instance):
    """Install the CloudWatch agent package and apply the shared SSM config."""
    ssmInstallAgent = ssmExecCommands(instance, "AWS-ConfigureAWSPackage", {"action": ["Install"], "name": ["AmazonCloudWatchAgent"]})

    # Guard against the non-dict failure returns of checkExecutionLoop.
    if isinstance(ssmInstallAgent, dict) and ssmInstallAgent["Status"] == "Success":
        # AmazonCloudWatch Agent installation output
        jpexpr = parse("$.pluginsDetails[?(@.Name[:] == 'configurePackage')].Output")
        for i in jpexpr.find(ssmInstallAgent):
            agentDetails = i.value
            logger.info(agentDetails)

        # AmazonCloudWatch Agent configuration
        logger.info("Configuring agent")
        ssmAgentConfig = ssmExecCommands(instance, "AmazonCloudWatch-ManageAgent", {"action": ["configure"], "mode": ["ec2"], "optionalConfigurationLocation": ["/amplify/minecraftserverdashboard/amazoncloudwatch-linux"], "optionalConfigurationSource": ["ssm"], "optionalRestart": ["yes"]})
        logger.info(ssmAgentConfig)


def scriptExec(instance):
    """Fetch and run the adding_cron.sh helper script from GitHub."""
    ssmRunScript = ssmExecCommands(instance, "AWS-RunRemoteScript", {"sourceType": ["GitHub"], "sourceInfo": ["{\"owner\":\"arturlr\", \"repository\": \"minecraft-server-dashboard\", \"path\": \"scripts/adding_cron.sh\", \"getOptions\": \"branch:dev\" }"], "commandLine": ["bash adding_cron.sh"]})
    logger.info(ssmRunScript)


def sendCommand(instance, param, docName):
    """Dispatch an SSM document to *instance*; return its id and status."""
    ssm_rsp = ssm.send_command(
        InstanceIds=[instance],
        DocumentName=docName,
        TimeoutSeconds=30,
        Parameters=param
    )
    return {
        "CommandId": ssm_rsp["Command"]["CommandId"],
        "Status": ssm_rsp["Command"]["Status"]
    }


def listCommand(instance, commandId):
    """Return the current status of a previously dispatched command."""
    ssm_rsp = ssm.list_commands(
        CommandId=commandId,
        InstanceId=instance,
    )
    logger.info("listCommand " + instance + " - " + ssm_rsp["Commands"][0]["Status"])
    return {"Status": ssm_rsp["Commands"][0]["Status"]}


def getCommandDetails(instance, commandId):
    """Return status plus per-plugin output for a finished command.

    Returns None when the invocation carries no CommandPlugins details
    (original behavior - callers must tolerate it).
    """
    ssm_rsp = ssm.list_command_invocations(
        CommandId=commandId,
        InstanceId=instance,
        Details=True
    )
    if 'CommandPlugins' in ssm_rsp["CommandInvocations"][0]:
        pluginsDetails = ssm_rsp["CommandInvocations"][0]["CommandPlugins"]
        logger.info("getCommandDetails " + instance + " - " + ssm_rsp["CommandInvocations"][0]["Status"])
        return {
            "Status": ssm_rsp["CommandInvocations"][0]["Status"],
            "pluginsDetails": pluginsDetails
        }


def checkExecutionLoop(instanceId, commandId, sleepTime=5):
    """Poll a command until Success/Failed, cancelling after ~6 polls.

    Returns the details dict on success, "Failed" on failure, or
    "Cancelled" after the timeout cancels the command.
    """
    loopCount = 0
    while True:
        checkStatusCommand = listCommand(instanceId, commandId)
        logger.info(instanceId + " - " + commandId + " - " + checkStatusCommand["Status"])
        if checkStatusCommand["Status"] == "Success":
            getStatusDetails = getCommandDetails(instanceId, commandId)
            return getStatusDetails
        elif checkStatusCommand["Status"] == "Failed":
            return "Failed"
        elif loopCount > 5:
            logger.error("Timeout - Cancelling the Command")
            logger.error(checkStatusCommand)
            ssm.cancel_command(
                CommandId=commandId,
                InstanceIds=[instanceId]
            )
            return "Cancelled"
        else:
            loopCount = loopCount + 1
            time.sleep(sleepTime)


def ssmExecCommands(instanceId, docName, params):
    """Dispatch an SSM document and block until it completes (or times out)."""
    logger.info("ssmExecCommands " + instanceId + " - " + docName)
    command = sendCommand(instanceId, params, docName)
    response = checkExecutionLoop(instanceId, command["CommandId"])
    return response


def handler(event, context):
    """Lambda entry point: initialise the server, then ensure the CW agent."""
    try:
        instanceId = event["instanceId"]
        # Execute minecraft initialization
        minecraftInit(instanceId)
        # Record the NIC name in Parameter Store
        getNicInformation(instanceId)
        ## CloudWatch Agent Steps
        cwAgentStatus = cwAgentStatusCheck(instanceId)
        if cwAgentStatus['code'] != 200:
            cwAgentInstall(instanceId)
            scriptExec(instanceId)
            return {"code": 200, "msg": "CW Agent installed and Script executed"}
        else:
            return cwAgentStatus

    except Exception as e:
        logger.error('Something went wrong: ' + str(e))
        return {"code": 500, "msg": str(e)}
arturlr/minecraft-server-dashboard
lambdas/configServer/index.py
index.py
py
8,475
python
en
code
2
github-code
6
[ { "api_name": "logging.getLogger", "line_number": 10, "usage_type": "call" }, { "api_name": "logging.INFO", "line_number": 11, "usage_type": "attribute" }, { "api_name": "helpers.Utils", "line_number": 13, "usage_type": "call" }, { "api_name": "helpers.Dyn", "...
46057888236
# -*- coding: utf-8 -*- import logging import datetime import pytz __all__ = ['timezones'] logger = logging.getLogger('django') def is_standard_time(time_zone, date_time): try: dst_delta = time_zone.dst(date_time, is_dst=False) except TypeError: dst_delta = time_zone.dst(date_time) return dst_delta == datetime.timedelta(0) def utc_offset(time_zone, fixed_dt=None): tz = pytz.timezone(time_zone) now = fixed_dt or datetime.datetime.now() for __ in range(72): if is_standard_time(time_zone=tz, date_time=now): break now += datetime.timedelta(days=30) else: logger.warning( 'Standard Time not found for %s, will use DST.' % time_zone) return tz.localize(now, is_dst=False).strftime('%z') def offset_to_int(offset): assert offset[0] in ('-', '+') sign, hour, minutes = offset[0], offset[1:3], offset[3:5] utc_offset_int = int(hour) + int(minutes) / 100 if sign == '-': utc_offset_int *= -1 return utc_offset_int def timezones_by_offset(): return sorted( ((utc_offset(tz), tz) for tz in pytz.common_timezones), key=lambda x: (offset_to_int(x[0]), x[1])) def timezone_format(time_zone, offset): zone_parts = time_zone.split('/') zone = zone_parts[0] if len(zone_parts) > 1: zone_label = ', '.join(zone_parts[1:]).replace('_', ' ') else: zone_label = zone return zone, '(UTC{}) {}'.format(offset, zone_label) def timezones(): """ Result format:: [ ("Africa", [ ("Africa/Abidjan", "(UTC...) Abidjan"), ("Africa/Accra", "(UTC...) Accra"), #... ]), ("America", [ ("America/Argentina/Buenos_Aires", "(UTC...) Argentina, Buenos Aires"), #... ]), #... ] """ timezones_cache = {} for offset, time_zone in timezones_by_offset(): zone, pretty_time_zone = timezone_format(time_zone, offset) (timezones_cache .setdefault(zone, []) .append((time_zone, pretty_time_zone))) return sorted( timezones_cache.items(), key=lambda x: x[0])
nitely/Spirit
spirit/core/utils/timezone.py
timezone.py
py
2,280
python
en
code
1,153
github-code
6
[ { "api_name": "logging.getLogger", "line_number": 11, "usage_type": "call" }, { "api_name": "datetime.timedelta", "line_number": 20, "usage_type": "call" }, { "api_name": "pytz.timezone", "line_number": 24, "usage_type": "call" }, { "api_name": "datetime.datetime....
74519748667
"""Screen-automation combat helpers: detect lootable items, bury bones,
and pick/attack targets via pyautogui clicks.

NOTE(review): reconstructed from a whitespace-collapsed source; nesting of a
few statements is a best-effort reading and should be confirmed against the
original file.
"""
import pyautogui
from random import random
import pyscreenshot as ImageGrab
import math
import cv2 as cv
from utilities import inventory as inv
from utilities.core import get_bounding_rect_from_pts


def get_pickup_rects(o_img, cnts):
    """Return bounding rect(s) of item(s) to pick up, derived from contours.

    Contour centroids are joined into near-horizontal runs; a large jump in
    distance between consecutive centroids starts a new item. Detected rects
    are also drawn onto *o_img* for debugging.
    """
    # returns bounding rect(s) of item(s) to pickup
    items = []
    line_pts = []
    for c in cnts:
        # compute the center of each contour
        M = cv.moments(c)
        if M["m00"] != 0:  # skip degenerate contours (zero area -> div by zero)
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            line_pts.append((cX, cY))
            # cv.rectangle(output,(minx,miny),(maxx,maxy),(0,255,255),2)
    maxslope = 0
    item_split_indices = [0]
    # Walk consecutive centroid pairs; near-flat segments (|slope| <= 0.05)
    # are treated as part of the same item outline.
    for x in range(1, len(line_pts) - 2):
        x1, y1 = (line_pts[x - 1])
        x2, y2 = (line_pts[x])
        if (x2 - x1) != 0:  # vertical pairs are skipped entirely
            m = (y2 - y1) / (x2 - x1)
            if m > maxslope:
                maxslope = m
            if (m <= 0.05) and (m >= -0.05):
                cv.line(o_img, line_pts[x - 1], line_pts[x], (0, 255, 255), 2)
            else:
                # split lines based on slope and dist
                # print(item_split_indices)
                # print(math.dist((x1,y1),(x2,y2)))
                if (math.dist((x1, y1), (x2, y2))) >= 40 and x != 1:
                    item_split_indices.append(x)
    if len(item_split_indices) == 1:
        # No splits found: all centroids (minus the first) form one item.
        rect = get_bounding_rect_from_pts(line_pts[1:len(line_pts)])
        if rect not in items:
            items.append(rect)
    elif (len(item_split_indices) > 1):
        # One rect per split segment.
        for i in range(0, len(item_split_indices) - 1):
            if i == 0:
                rect = get_bounding_rect_from_pts(line_pts[1:item_split_indices[i + 1]])
            elif (i + 1 <= len(item_split_indices) + 1):
                # NOTE(review): this condition is always true for in-range i;
                # presumably meant as a bounds guard — confirm intent.
                rect = get_bounding_rect_from_pts(line_pts[item_split_indices[i]:item_split_indices[i + 1]])
            if rect not in items:
                items.append(rect)
    for item in items:
        # Draw detected pickup rects onto the debug image.
        cv.rectangle(o_img, item[0], item[1], (0, 0, 255), 1)
    return items


def bury_bones(min_bone_count):
    """Click every bone/big-bone stack in the inventory to bury them.

    Only acts when the combined count exceeds *min_bone_count*. The offsets
    625/485 look like the inventory panel's screen position — TODO confirm.
    """
    inv_img, bones = inv.get_item_rects(inv.get_icon(526))
    inv_img2, big_bones = inv.get_item_rects(inv.get_icon(532))
    if len(bones) + len(big_bones) > min_bone_count:
        for rect in bones:
            rect[0]  # NOTE(review): no-op expression statement — has no effect
            # Midpoint of the rect, translated from inventory-image coords
            # to screen coords.
            pt = ((rect[0][0] + 625 + rect[1][0] + 625) / 2, (rect[0][1] + 485 + rect[1][1] + 485) / 2)
            pyautogui.moveTo(pt)
            pyautogui.click(clicks=1, interval=random() * 3, button='left')
        for rect in big_bones:
            rect[0]  # NOTE(review): no-op expression statement — has no effect
            pt = ((rect[0][0] + 625 + rect[1][0] + 625) / 2, (rect[0][1] + 485 + rect[1][1] + 485) / 2)
            pyautogui.moveTo(pt)
            pyautogui.click(clicks=1, interval=random() * 3, button='left')


def start(targets, items, players, target_mask, selected):
    """Main combat tick: OCR the combat panel, mark other players' targets,
    occasionally bury bones, then pick up an item or click a target.

    NOTE(review): relies on names not defined in this module —
    ``pytesseract``, ``marked_enemy_names``, ``similar`` and ``rects`` —
    presumably globals from a sibling module; verify before reuse.
    """
    # OCR the enemy-name area of the combat panel.
    combat_img = ImageGrab.grab(bbox=[10, 50, 140, 69])
    enemy_name = pytesseract.image_to_string(combat_img)
    # Fuzzy-match the OCR result against the list of marked enemy names.
    for name in marked_enemy_names:
        if enemy_name != None:
            if similar(enemy_name, name) > 0.7:
                enemy_name = name
    min_dist = 3000
    other_players_target = None
    for r in targets:
        for p in players:
            # Distance from target rect origin to player point.
            dist = math.sqrt(math.pow(r[0] - p[0][0], 2) + math.pow(r[1] - p[0][1], 2))
            if p in items:
                # NOTE(review): mutating `players` while iterating it —
                # can skip entries; confirm this is intentional.
                players.remove(p)
            if dist < 200:
                if r in targets:
                    # NOTE(review): min_dist is never updated, so this test
                    # is effectively `dist < 3000` — confirm intent.
                    if (dist < min_dist):
                        if r[2] > 30:
                            # A player is close to a large target: assume it
                            # is being fought by someone else.
                            other_players_target = r
                            targets.remove(r)
                            continue
        if r[2] > 10 and r in targets:
            # Highlight remaining candidate targets on the mask.
            cv.rectangle(target_mask, (r[0], r[1]), (r[0] + r[2], r[1] + r[3]), (0, 255, 255), 2)
    if other_players_target is not None:
        if len(players) >= 1:
            if players[0] not in items:
                # Draw a line from the contested target to the first player.
                cv.line(target_mask, (other_players_target[0], other_players_target[1]),
                        (players[0][0][0], players[0][0][1]), (0, 255, 255), 2)
        # Mark the contested target in a distinct colour.
        cv.rectangle(target_mask, (other_players_target[0], other_players_target[1]), (
            other_players_target[0] + other_players_target[2],
            other_players_target[1] + other_players_target[3]), (0, 40, 255), 2)
    # ~5% chance per tick to bury any bones in the inventory.
    if random() > 0.95:
        bury_bones(0)
    if enemy_name not in marked_enemy_names:
        if len(items) >= 1:
            # item pickup
            item = items[0]
            min = item[0]  # NOTE(review): shadows builtin `min`
            max = item[1]  # NOTE(review): shadows builtin `max`
            pyautogui.moveTo((min[0] + max[0]) / 2, ((min[1] + max[1]) / 2) + 10)
            pyautogui.click(clicks=2, interval=random(), button='left')
        elif len(rects[4]) > 0:
            # need combat panel screengrab and imagetotext
            # if in combat dont move cursor or click
            # if selected != targets[0]:
            selected = targets[0]
            # Click the centre of the first remaining target.
            pyautogui.moveTo((selected[0] + selected[2] / 2), (selected[1] + selected[3] / 2))
            pyautogui.click(button='left', clicks=1, interval=random())
009988b/2007scape-bot-functions
skills/combat.py
combat.py
py
5,029
python
en
code
0
github-code
6
[ { "api_name": "cv2.moments", "line_number": 15, "usage_type": "call" }, { "api_name": "cv2.line", "line_number": 31, "usage_type": "call" }, { "api_name": "math.dist", "line_number": 36, "usage_type": "call" }, { "api_name": "utilities.core.get_bounding_rect_from_...
35613748044
from collections import OrderedDict
# from datetime import datetime
from django.conf import settings
from django.db import models
from django.utils import timezone
from jsonfield import JSONField

# Create your models here.


class fhir_Consent(models.Model):
    """ Store User:application consent in fhir format """
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    application = models.ForeignKey(settings.OAUTH2_PROVIDER_APPLICATION_MODEL)
    consent = JSONField(load_kwargs={'object_pairs_hook': OrderedDict})
    created = models.DateTimeField(blank=True, null=True)
    revoked = models.DateTimeField(blank=True, null=True)
    valid_until = models.DateTimeField(blank=True, null=True)
    key = models.TextField(max_length=250, blank=True, null=True)

    def save(self, *args, **kwargs):
        """On save: stamp `created` on first save, rebuild the lookup
        `key`, and auto-revoke a consent whose `valid_until` has passed.
        """
        if not self.id:
            # First save: record the creation timestamp.
            self.created = timezone.now()

        # Rebuild the composite key: "<user>:<app>[<created stamp>]".
        pieces = [self.user.username, ":", self.application.name, "["]
        pieces.append(self.created.strftime('%Y-%m-%dT%H:%M.%S'))
        pieces.append("]")
        self.key = "".join(pieces)

        # An expired, not-yet-revoked consent is revoked as of its expiry.
        if self.valid_until and self.valid_until <= timezone.now():
            if not self.revoked:
                self.revoked = self.valid_until

        return super(fhir_Consent, self).save(*args, **kwargs)

    def revoke_consent(self, confirm=False, *args, **kwargs):
        """Revoke this consent now; a no-op unless `confirm` is True."""
        if confirm is True:
            if not self.revoked:
                self.revoked = timezone.now()
            return super(fhir_Consent, self).save(*args, **kwargs)

    def status(self):
        """Return "REVOKED" or "VALID" based on the `revoked` timestamp."""
        return "REVOKED" if self.revoked else "VALID"

    def granted(self):
        """Return False only when the consent was created and then revoked."""
        return not (self.created and self.revoked)

    def __str__(self):
        owner = '%s %s (%s)' % (self.user.first_name,
                                self.user.last_name,
                                self.user.username)
        return "%s:%s" % (owner, self.application.name)

    def __unicode__(self):
        owner = '%s %s (%s)' % (self.user.first_name,
                                self.user.last_name,
                                self.user.username)
        return "%s:%s" % (owner, self.application.name)
shihuaxing/hhs_oauth_server
apps/fhir/fhir_consent/models.py
models.py
py
2,578
python
en
code
null
github-code
6
[ { "api_name": "django.db.models.Model", "line_number": 13, "usage_type": "attribute" }, { "api_name": "django.db.models", "line_number": 13, "usage_type": "name" }, { "api_name": "django.db.models.ForeignKey", "line_number": 16, "usage_type": "call" }, { "api_name...
30827675825
import os import sys import json import logging from time import time from PyQt5.Qt import PYQT_VERSION_STR from PyQt5.QtCore import ( QT_VERSION_STR, QStandardPaths, QSysInfo, QLocale, QLibraryInfo, QTranslator ) from novelwriter.error import logException, formatException from novelwriter.common import splitVersionNumber, formatTimeStamp, NWConfigParser from novelwriter.constants import nwFiles, nwUnicode logger = logging.getLogger(__name__) class Config: LANG_NW = 1 LANG_PROJ = 2 def __init__(self): # Set Application Variables self.appName = "novelWriter" self.appHandle = self.appName.lower() # Config Error Handling self.hasError = False # True if the config class encountered an error self.errData = [] # List of error messages # Set Paths self.cmdOpen = None # Path from command line for project to be opened on launch self.confPath = None # Folder where the config is saved self.confFile = None # The config file name self.dataPath = None # Folder where app data is stored self.lastPath = None # The last user-selected folder (browse dialogs) self.appPath = None # The full path to the novelwriter package folder self.appRoot = None # The full path to the novelwriter root folder self.appIcon = None # The full path to the novelwriter icon file self.assetPath = None # The full path to the novelwriter/assets folder self.pdfDocs = None # The location of the PDF manual, if it exists # Runtime Settings and Variables self.confChanged = False # True whenever the config has chenged, false after save # General self.guiTheme = "" # GUI theme self.guiSyntax = "" # Syntax theme self.guiIcons = "" # Icon theme self.guiFont = "" # Defaults to system default font self.guiFontSize = 11 # Is overridden if system default is loaded self.guiScale = 1.0 # Set automatically by Theme class self.lastNotes = "0x0" # The latest release notes that have been shown self.setDefaultGuiTheme() self.setDefaultSyntaxTheme() self.setDefaultIconTheme() # Localisation self.qLocal = QLocale.system() 
self.guiLang = self.qLocal.name() self.qtLangPath = QLibraryInfo.location(QLibraryInfo.TranslationsPath) self.nwLangPath = None self.qtTrans = {} # Sizes self.winGeometry = [1200, 650] self.prefGeometry = [700, 615] self.treeColWidth = [200, 50, 30] self.novelColWidth = [200, 50] self.projColWidth = [200, 60, 140] self.mainPanePos = [300, 800] self.docPanePos = [400, 400] self.viewPanePos = [500, 150] self.outlnPanePos = [500, 150] self.isFullScreen = False # Features self.hideVScroll = False # Hide vertical scroll bars on main widgets self.hideHScroll = False # Hide horizontal scroll bars on main widgets self.emphLabels = True # Add emphasis to H1 and H2 item labels # Project self.autoSaveProj = 60 # Interval for auto-saving project in seconds self.autoSaveDoc = 30 # Interval for auto-saving document in seconds # Text Editor self.textFont = None # Editor font self.textSize = 12 # Editor font size self.textWidth = 600 # Editor text width self.textMargin = 40 # Editor/viewer text margin self.tabWidth = 40 # Editor tabulator width self.focusWidth = 800 # Focus Mode text width self.hideFocusFooter = False # Hide document footer in Focus Mode self.showFullPath = True # Show full document path in editor header self.autoSelect = True # Auto-select word when applying format with no selection self.doJustify = False # Justify text self.showTabsNSpaces = False # Show tabs and spaces in edior self.showLineEndings = False # Show line endings in editor self.showMultiSpaces = True # Highlight multiple spaces in the text self.doReplace = True # Enable auto-replace as you type self.doReplaceSQuote = True # Smart single quotes self.doReplaceDQuote = True # Smart double quotes self.doReplaceDash = True # Replace multiple hyphens with dashes self.doReplaceDots = True # Replace three dots with ellipsis self.scrollPastEnd = 25 # Number of lines to scroll past end of document self.autoScroll = False # Typewriter-like scrolling self.autoScrollPos = 30 # Start point for typewriter-like 
scrolling self.wordCountTimer = 5.0 # Interval for word count update in seconds self.bigDocLimit = 800 # Size threshold for heavy editor features in kilobytes self.incNotesWCount = True # The status bar word count includes notes self.highlightQuotes = True # Highlight text in quotes self.allowOpenSQuote = False # Allow open-ended single quotes self.allowOpenDQuote = True # Allow open-ended double quotes self.highlightEmph = True # Add colour to text emphasis self.stopWhenIdle = True # Stop the status bar clock when the user is idle self.userIdleTime = 300 # Time of inactivity to consider user idle # User-Selected Symbols self.fmtApostrophe = nwUnicode.U_RSQUO self.fmtSingleQuotes = [nwUnicode.U_LSQUO, nwUnicode.U_RSQUO] self.fmtDoubleQuotes = [nwUnicode.U_LDQUO, nwUnicode.U_RDQUO] self.fmtPadBefore = "" self.fmtPadAfter = "" self.fmtPadThin = False # Spell Checking self.spellLanguage = None # Search Bar Switches self.searchCase = False self.searchWord = False self.searchRegEx = False self.searchLoop = False self.searchNextFile = False self.searchMatchCap = False # Backup self.backupPath = "" self.backupOnClose = False self.askBeforeBackup = True # State self.showRefPanel = True # The reference panel for the viewer is visible self.viewComments = True # Comments are shown in the viewer self.viewSynopsis = True # Synopsis is shown in the viewer # Check Qt5 Versions verQt = splitVersionNumber(QT_VERSION_STR) self.verQtString = QT_VERSION_STR self.verQtMajor = verQt[0] self.verQtMinor = verQt[1] self.verQtPatch = verQt[2] self.verQtValue = verQt[3] verQt = splitVersionNumber(PYQT_VERSION_STR) self.verPyQtString = PYQT_VERSION_STR self.verPyQtMajor = verQt[0] self.verPyQtMinor = verQt[1] self.verPyQtPatch = verQt[2] self.verPyQtValue = verQt[3] # Check Python Version self.verPyString = sys.version.split()[0] self.verPyMajor = sys.version_info[0] self.verPyMinor = sys.version_info[1] self.verPyPatch = sys.version_info[2] self.verPyHexVal = sys.hexversion # Check OS Type 
self.osType = sys.platform self.osLinux = False self.osWindows = False self.osDarwin = False self.osUnknown = False if self.osType.startswith("linux"): self.osLinux = True elif self.osType.startswith("darwin"): self.osDarwin = True elif self.osType.startswith("win32"): self.osWindows = True elif self.osType.startswith("cygwin"): self.osWindows = True else: self.osUnknown = True # Other System Info self.hostName = "Unknown" self.kernelVer = "Unknown" # Packages self.hasEnchant = False # The pyenchant package # Recent Cache self.recentProj = {} return ## # Methods ## def pxInt(self, theSize): """Used to scale fixed gui sizes by the screen scale factor. This function returns an int, which is always rounded down. """ return int(theSize*self.guiScale) def rpxInt(self, theSize): """Used to un-scale fixed gui sizes by the screen scale factor. This function returns an int, which is always rounded down. """ return int(theSize/self.guiScale) ## # Config Actions ## def initConfig(self, confPath=None, dataPath=None): """Initialise the config class. The manual setting of confPath and dataPath is mainly intended for the test suite. 
""" logger.debug("Initialising Config ...") if confPath is None: confRoot = QStandardPaths.writableLocation(QStandardPaths.ConfigLocation) self.confPath = os.path.join(os.path.abspath(confRoot), self.appHandle) else: logger.info("Setting config from alternative path: %s", confPath) self.confPath = confPath if dataPath is None: if self.verQtValue >= 50400: dataRoot = QStandardPaths.writableLocation(QStandardPaths.AppDataLocation) else: dataRoot = QStandardPaths.writableLocation(QStandardPaths.DataLocation) self.dataPath = os.path.join(os.path.abspath(dataRoot), self.appHandle) else: logger.info("Setting data path from alternative path: %s", dataPath) self.dataPath = dataPath logger.verbose("Config path: %s", self.confPath) logger.verbose("Data path: %s", self.dataPath) # Check Data Path Subdirs dataDirs = ["syntax", "themes"] for dataDir in dataDirs: dirPath = os.path.join(self.dataPath, dataDir) if not os.path.isdir(dirPath): try: os.mkdir(dirPath) logger.info("Created folder: %s", dirPath) except Exception: logger.error("Could not create folder: %s", dirPath) logException() self.confFile = self.appHandle+".conf" self.lastPath = os.path.expanduser("~") self.appPath = getattr(sys, "_MEIPASS", os.path.abspath(os.path.dirname(__file__))) self.appRoot = os.path.abspath(os.path.join(self.appPath, os.path.pardir)) if os.path.isfile(self.appRoot): # novelWriter is packaged as a single file, so the app and # root paths are the same, and equal to the folder that # contains the single executable. self.appRoot = os.path.dirname(self.appRoot) self.appPath = self.appRoot # Assets self.assetPath = os.path.join(self.appPath, "assets") self.appIcon = os.path.join(self.assetPath, "icons", "novelwriter.svg") # Internationalisation self.nwLangPath = os.path.join(self.assetPath, "i18n") logger.debug("Assets: %s", self.assetPath) logger.verbose("App path: %s", self.appPath) logger.verbose("Last path: %s", self.lastPath) # If the config folder does not exist, create it. 
# This assumes that the os config folder itself exists. if not os.path.isdir(self.confPath): try: os.mkdir(self.confPath) except Exception as exc: logger.error("Could not create folder: %s", self.confPath) logException() self.hasError = True self.errData.append("Could not create folder: %s" % self.confPath) self.errData.append(formatException(exc)) self.confPath = None # Check if config file exists if self.confPath is not None: if os.path.isfile(os.path.join(self.confPath, self.confFile)): # If it exists, load it self.loadConfig() else: # If it does not exist, save a copy of the default values self.saveConfig() # If the data folder does not exist, create it. # This assumes that the os data folder itself exists. if self.dataPath is not None: if not os.path.isdir(self.dataPath): try: os.mkdir(self.dataPath) except Exception as exc: logger.error("Could not create folder: %s", self.dataPath) logException() self.hasError = True self.errData.append("Could not create folder: %s" % self.dataPath) self.errData.append(formatException(exc)) self.dataPath = None # Host and Kernel if self.verQtValue >= 50600: self.hostName = QSysInfo.machineHostName() self.kernelVer = QSysInfo.kernelVersion() # Load recent projects cache self.loadRecentCache() # Check the availability of optional packages self._checkOptionalPackages() if self.spellLanguage is None: self.spellLanguage = "en" # Look for a PDF version of the manual pdfDocs = os.path.join(self.assetPath, "manual.pdf") if os.path.isfile(pdfDocs): logger.debug("Found manual: %s", pdfDocs) self.pdfDocs = pdfDocs logger.debug("Config initialisation complete") return True def initLocalisation(self, nwApp): """Initialise the localisation of the GUI. 
""" self.qLocal = QLocale(self.guiLang) QLocale.setDefault(self.qLocal) self.qtTrans = {} langList = [ (self.qtLangPath, "qtbase"), # Qt 5.x (self.nwLangPath, "qtbase"), # Alternative Qt 5.x (self.nwLangPath, "nw"), # novelWriter ] for lngPath, lngBase in langList: for lngCode in self.qLocal.uiLanguages(): qTrans = QTranslator() lngFile = "%s_%s" % (lngBase, lngCode.replace("-", "_")) if lngFile not in self.qtTrans: if qTrans.load(lngFile, lngPath): logger.debug("Loaded: %s", os.path.join(lngPath, lngFile)) nwApp.installTranslator(qTrans) self.qtTrans[lngFile] = qTrans return def listLanguages(self, lngSet): """List localisation files in the i18n folder. The default GUI language 'en_GB' is British English. """ if lngSet == self.LANG_NW: fPre = "nw_" fExt = ".qm" langList = {"en_GB": QLocale("en_GB").nativeLanguageName().title()} elif lngSet == self.LANG_PROJ: fPre = "project_" fExt = ".json" langList = {"en_GB": QLocale("en_GB").nativeLanguageName().title()} else: return [] for qmFile in os.listdir(self.nwLangPath): if not os.path.isfile(os.path.join(self.nwLangPath, qmFile)): continue if not qmFile.startswith(fPre) or not qmFile.endswith(fExt): continue qmLang = qmFile[len(fPre):-len(fExt)] qmName = QLocale(qmLang).nativeLanguageName().title() if qmLang and qmName and qmLang != "en_GB": langList[qmLang] = qmName return sorted(langList.items(), key=lambda x: x[0]) def loadConfig(self): """Load preferences from file and replace default settings. 
""" logger.debug("Loading config file") if self.confPath is None: return False theConf = NWConfigParser() cnfPath = os.path.join(self.confPath, self.confFile) try: with open(cnfPath, mode="r", encoding="utf-8") as inFile: theConf.read_file(inFile) except Exception as exc: logger.error("Could not load config file") logException() self.hasError = True self.errData.append("Could not load config file") self.errData.append(formatException(exc)) return False # Main cnfSec = "Main" self.guiTheme = theConf.rdStr(cnfSec, "theme", self.guiTheme) self.guiSyntax = theConf.rdStr(cnfSec, "syntax", self.guiSyntax) self.guiIcons = theConf.rdStr(cnfSec, "icons", self.guiIcons) self.guiFont = theConf.rdStr(cnfSec, "guifont", self.guiFont) self.guiFontSize = theConf.rdInt(cnfSec, "guifontsize", self.guiFontSize) self.lastNotes = theConf.rdStr(cnfSec, "lastnotes", self.lastNotes) self.guiLang = theConf.rdStr(cnfSec, "guilang", self.guiLang) self.hideVScroll = theConf.rdBool(cnfSec, "hidevscroll", self.hideVScroll) self.hideHScroll = theConf.rdBool(cnfSec, "hidehscroll", self.hideHScroll) # Sizes cnfSec = "Sizes" self.winGeometry = theConf.rdIntList(cnfSec, "geometry", self.winGeometry) self.prefGeometry = theConf.rdIntList(cnfSec, "preferences", self.prefGeometry) self.treeColWidth = theConf.rdIntList(cnfSec, "treecols", self.treeColWidth) self.novelColWidth = theConf.rdIntList(cnfSec, "novelcols", self.novelColWidth) self.projColWidth = theConf.rdIntList(cnfSec, "projcols", self.projColWidth) self.mainPanePos = theConf.rdIntList(cnfSec, "mainpane", self.mainPanePos) self.docPanePos = theConf.rdIntList(cnfSec, "docpane", self.docPanePos) self.viewPanePos = theConf.rdIntList(cnfSec, "viewpane", self.viewPanePos) self.outlnPanePos = theConf.rdIntList(cnfSec, "outlinepane", self.outlnPanePos) self.isFullScreen = theConf.rdBool(cnfSec, "fullscreen", self.isFullScreen) # Project cnfSec = "Project" self.autoSaveProj = theConf.rdInt(cnfSec, "autosaveproject", self.autoSaveProj) 
self.autoSaveDoc = theConf.rdInt(cnfSec, "autosavedoc", self.autoSaveDoc) self.emphLabels = theConf.rdBool(cnfSec, "emphlabels", self.emphLabels) # Editor cnfSec = "Editor" self.textFont = theConf.rdStr(cnfSec, "textfont", self.textFont) self.textSize = theConf.rdInt(cnfSec, "textsize", self.textSize) self.textWidth = theConf.rdInt(cnfSec, "width", self.textWidth) self.textMargin = theConf.rdInt(cnfSec, "margin", self.textMargin) self.tabWidth = theConf.rdInt(cnfSec, "tabwidth", self.tabWidth) self.focusWidth = theConf.rdInt(cnfSec, "focuswidth", self.focusWidth) self.hideFocusFooter = theConf.rdBool(cnfSec, "hidefocusfooter", self.hideFocusFooter) self.doJustify = theConf.rdBool(cnfSec, "justify", self.doJustify) self.autoSelect = theConf.rdBool(cnfSec, "autoselect", self.autoSelect) self.doReplace = theConf.rdBool(cnfSec, "autoreplace", self.doReplace) self.doReplaceSQuote = theConf.rdBool(cnfSec, "repsquotes", self.doReplaceSQuote) self.doReplaceDQuote = theConf.rdBool(cnfSec, "repdquotes", self.doReplaceDQuote) self.doReplaceDash = theConf.rdBool(cnfSec, "repdash", self.doReplaceDash) self.doReplaceDots = theConf.rdBool(cnfSec, "repdots", self.doReplaceDots) self.scrollPastEnd = theConf.rdInt(cnfSec, "scrollpastend", self.scrollPastEnd) self.autoScroll = theConf.rdBool(cnfSec, "autoscroll", self.autoScroll) self.autoScrollPos = theConf.rdInt(cnfSec, "autoscrollpos", self.autoScrollPos) self.fmtSingleQuotes = theConf.rdStrList(cnfSec, "fmtsinglequote", self.fmtSingleQuotes) self.fmtDoubleQuotes = theConf.rdStrList(cnfSec, "fmtdoublequote", self.fmtDoubleQuotes) self.fmtPadBefore = theConf.rdStr(cnfSec, "fmtpadbefore", self.fmtPadBefore) self.fmtPadAfter = theConf.rdStr(cnfSec, "fmtpadafter", self.fmtPadAfter) self.fmtPadThin = theConf.rdBool(cnfSec, "fmtpadthin", self.fmtPadThin) self.spellLanguage = theConf.rdStr(cnfSec, "spellcheck", self.spellLanguage) self.showTabsNSpaces = theConf.rdBool(cnfSec, "showtabsnspaces", self.showTabsNSpaces) self.showLineEndings 
= theConf.rdBool(cnfSec, "showlineendings", self.showLineEndings) self.showMultiSpaces = theConf.rdBool(cnfSec, "showmultispaces", self.showMultiSpaces) self.wordCountTimer = theConf.rdFlt(cnfSec, "wordcounttimer", self.wordCountTimer) self.bigDocLimit = theConf.rdInt(cnfSec, "bigdoclimit", self.bigDocLimit) self.incNotesWCount = theConf.rdBool(cnfSec, "incnoteswcount", self.incNotesWCount) self.showFullPath = theConf.rdBool(cnfSec, "showfullpath", self.showFullPath) self.highlightQuotes = theConf.rdBool(cnfSec, "highlightquotes", self.highlightQuotes) self.allowOpenSQuote = theConf.rdBool(cnfSec, "allowopensquote", self.allowOpenSQuote) self.allowOpenDQuote = theConf.rdBool(cnfSec, "allowopendquote", self.allowOpenDQuote) self.highlightEmph = theConf.rdBool(cnfSec, "highlightemph", self.highlightEmph) self.stopWhenIdle = theConf.rdBool(cnfSec, "stopwhenidle", self.stopWhenIdle) self.userIdleTime = theConf.rdInt(cnfSec, "useridletime", self.userIdleTime) # Backup cnfSec = "Backup" self.backupPath = theConf.rdStr(cnfSec, "backuppath", self.backupPath) self.backupOnClose = theConf.rdBool(cnfSec, "backuponclose", self.backupOnClose) self.askBeforeBackup = theConf.rdBool(cnfSec, "askbeforebackup", self.askBeforeBackup) # State cnfSec = "State" self.showRefPanel = theConf.rdBool(cnfSec, "showrefpanel", self.showRefPanel) self.viewComments = theConf.rdBool(cnfSec, "viewcomments", self.viewComments) self.viewSynopsis = theConf.rdBool(cnfSec, "viewsynopsis", self.viewSynopsis) self.searchCase = theConf.rdBool(cnfSec, "searchcase", self.searchCase) self.searchWord = theConf.rdBool(cnfSec, "searchword", self.searchWord) self.searchRegEx = theConf.rdBool(cnfSec, "searchregex", self.searchRegEx) self.searchLoop = theConf.rdBool(cnfSec, "searchloop", self.searchLoop) self.searchNextFile = theConf.rdBool(cnfSec, "searchnextfile", self.searchNextFile) self.searchMatchCap = theConf.rdBool(cnfSec, "searchmatchcap", self.searchMatchCap) # Path cnfSec = "Path" self.lastPath = 
theConf.rdStr(cnfSec, "lastpath", self.lastPath) # Check Certain Values for None self.spellLanguage = self._checkNone(self.spellLanguage) # If we're using straight quotes, disable auto-replace if self.fmtSingleQuotes == ["'", "'"] and self.doReplaceSQuote: logger.info("Using straight single quotes, so disabling auto-replace") self.doReplaceSQuote = False if self.fmtDoubleQuotes == ['"', '"'] and self.doReplaceDQuote: logger.info("Using straight double quotes, so disabling auto-replace") self.doReplaceDQuote = False # Check deprecated settings if self.guiIcons in ("typicons_colour_dark", "typicons_grey_dark"): self.guiIcons = "typicons_dark" elif self.guiIcons in ("typicons_colour_light", "typicons_grey_light"): self.guiIcons = "typicons_light" return True def saveConfig(self): """Save the current preferences to file. """ logger.debug("Saving config file") if self.confPath is None: return False theConf = NWConfigParser() theConf["Main"] = { "timestamp": formatTimeStamp(time()), "theme": str(self.guiTheme), "syntax": str(self.guiSyntax), "icons": str(self.guiIcons), "guifont": str(self.guiFont), "guifontsize": str(self.guiFontSize), "lastnotes": str(self.lastNotes), "guilang": str(self.guiLang), "hidevscroll": str(self.hideVScroll), "hidehscroll": str(self.hideHScroll), } theConf["Sizes"] = { "geometry": self._packList(self.winGeometry), "preferences": self._packList(self.prefGeometry), "treecols": self._packList(self.treeColWidth), "novelcols": self._packList(self.novelColWidth), "projcols": self._packList(self.projColWidth), "mainpane": self._packList(self.mainPanePos), "docpane": self._packList(self.docPanePos), "viewpane": self._packList(self.viewPanePos), "outlinepane": self._packList(self.outlnPanePos), "fullscreen": str(self.isFullScreen), } theConf["Project"] = { "autosaveproject": str(self.autoSaveProj), "autosavedoc": str(self.autoSaveDoc), "emphlabels": str(self.emphLabels), } theConf["Editor"] = { "textfont": str(self.textFont), "textsize": 
str(self.textSize), "width": str(self.textWidth), "margin": str(self.textMargin), "tabwidth": str(self.tabWidth), "focuswidth": str(self.focusWidth), "hidefocusfooter": str(self.hideFocusFooter), "justify": str(self.doJustify), "autoselect": str(self.autoSelect), "autoreplace": str(self.doReplace), "repsquotes": str(self.doReplaceSQuote), "repdquotes": str(self.doReplaceDQuote), "repdash": str(self.doReplaceDash), "repdots": str(self.doReplaceDots), "scrollpastend": str(self.scrollPastEnd), "autoscroll": str(self.autoScroll), "autoscrollpos": str(self.autoScrollPos), "fmtsinglequote": self._packList(self.fmtSingleQuotes), "fmtdoublequote": self._packList(self.fmtDoubleQuotes), "fmtpadbefore": str(self.fmtPadBefore), "fmtpadafter": str(self.fmtPadAfter), "fmtpadthin": str(self.fmtPadThin), "spellcheck": str(self.spellLanguage), "showtabsnspaces": str(self.showTabsNSpaces), "showlineendings": str(self.showLineEndings), "showmultispaces": str(self.showMultiSpaces), "wordcounttimer": str(self.wordCountTimer), "bigdoclimit": str(self.bigDocLimit), "incnoteswcount": str(self.incNotesWCount), "showfullpath": str(self.showFullPath), "highlightquotes": str(self.highlightQuotes), "allowopensquote": str(self.allowOpenSQuote), "allowopendquote": str(self.allowOpenDQuote), "highlightemph": str(self.highlightEmph), "stopwhenidle": str(self.stopWhenIdle), "useridletime": str(self.userIdleTime), } theConf["Backup"] = { "backuppath": str(self.backupPath), "backuponclose": str(self.backupOnClose), "askbeforebackup": str(self.askBeforeBackup), } theConf["State"] = { "showrefpanel": str(self.showRefPanel), "viewcomments": str(self.viewComments), "viewsynopsis": str(self.viewSynopsis), "searchcase": str(self.searchCase), "searchword": str(self.searchWord), "searchregex": str(self.searchRegEx), "searchloop": str(self.searchLoop), "searchnextfile": str(self.searchNextFile), "searchmatchcap": str(self.searchMatchCap), } theConf["Path"] = { "lastpath": str(self.lastPath), } # Write config 
file cnfPath = os.path.join(self.confPath, self.confFile) try: with open(cnfPath, mode="w", encoding="utf-8") as outFile: theConf.write(outFile) self.confChanged = False except Exception as exc: logger.error("Could not save config file") logException() self.hasError = True self.errData.append("Could not save config file") self.errData.append(formatException(exc)) return False return True def loadRecentCache(self): """Load the cache file for recent projects. """ if self.dataPath is None: return False self.recentProj = {} cacheFile = os.path.join(self.dataPath, nwFiles.RECENT_FILE) if not os.path.isfile(cacheFile): return True try: with open(cacheFile, mode="r", encoding="utf-8") as inFile: theData = json.load(inFile) for projPath, theEntry in theData.items(): self.recentProj[projPath] = { "title": theEntry.get("title", ""), "time": theEntry.get("time", 0), "words": theEntry.get("words", 0), } except Exception as exc: self.hasError = True self.errData.append("Could not load recent project cache") self.errData.append(formatException(exc)) return False return True def saveRecentCache(self): """Save the cache dictionary of recent projects. """ if self.dataPath is None: return False cacheFile = os.path.join(self.dataPath, nwFiles.RECENT_FILE) cacheTemp = os.path.join(self.dataPath, nwFiles.RECENT_FILE+"~") try: with open(cacheTemp, mode="w+", encoding="utf-8") as outFile: json.dump(self.recentProj, outFile, indent=2) except Exception as exc: self.hasError = True self.errData.append("Could not save recent project cache") self.errData.append(formatException(exc)) return False if os.path.isfile(cacheFile): os.unlink(cacheFile) os.rename(cacheTemp, cacheFile) return True def updateRecentCache(self, projPath, projTitle, wordCount, saveTime): """Add or update recent cache information on a given project. 
""" self.recentProj[os.path.abspath(projPath)] = { "title": projTitle, "time": int(saveTime), "words": int(wordCount), } return True def removeFromRecentCache(self, thePath): """Trying to remove a path from the recent projects cache. """ if thePath in self.recentProj: del self.recentProj[thePath] logger.verbose("Removed recent: %s", thePath) self.saveRecentCache() else: logger.error("Unknown recent: %s", thePath) return False return True ## # Setters ## def setConfPath(self, newPath): """Set the path and filename to the config file. """ if newPath is None: return True if not os.path.isfile(newPath): logger.error("File not found, using default config path instead") return False self.confPath = os.path.dirname(newPath) self.confFile = os.path.basename(newPath) return True def setDataPath(self, newPath): """Set the data path. """ if newPath is None: return True if not os.path.isdir(newPath): logger.error("Path not found, using default data path instead") return False self.dataPath = os.path.abspath(newPath) return True def setLastPath(self, lastPath): """Set the last used path (by the user). """ if lastPath is None or lastPath == "": self.lastPath = "" else: self.lastPath = os.path.dirname(lastPath) return True def setWinSize(self, newWidth, newHeight): """Set the size of the main window, but only if the change is larger than 5 pixels. The OS window manager will sometimes adjust it a bit, and we don't want the main window to shrink or grow each time the app is opened. """ newWidth = int(newWidth/self.guiScale) newHeight = int(newHeight/self.guiScale) if abs(self.winGeometry[0] - newWidth) > 5: self.winGeometry[0] = newWidth self.confChanged = True if abs(self.winGeometry[1] - newHeight) > 5: self.winGeometry[1] = newHeight self.confChanged = True return True def setPreferencesSize(self, newWidth, newHeight): """Sat the size of the Preferences dialog window. 
""" self.prefGeometry[0] = int(newWidth/self.guiScale) self.prefGeometry[1] = int(newHeight/self.guiScale) self.confChanged = True return True def setTreeColWidths(self, colWidths): """Set the column widths of the main project tree. """ self.treeColWidth = [int(x/self.guiScale) for x in colWidths] self.confChanged = True return True def setNovelColWidths(self, colWidths): """Set the column widths of the novel tree. """ self.novelColWidth = [int(x/self.guiScale) for x in colWidths] self.confChanged = True return True def setProjColWidths(self, colWidths): """Set the column widths of the Load Project dialog. """ self.projColWidth = [int(x/self.guiScale) for x in colWidths] self.confChanged = True return True def setMainPanePos(self, panePos): """Set the position of the main GUI splitter. """ self.mainPanePos = [int(x/self.guiScale) for x in panePos] self.confChanged = True return True def setDocPanePos(self, panePos): """Set the position of the main editor/viewer splitter. """ self.docPanePos = [int(x/self.guiScale) for x in panePos] self.confChanged = True return True def setViewPanePos(self, panePos): """Set the position of the viewer meta data splitter. """ self.viewPanePos = [int(x/self.guiScale) for x in panePos] self.confChanged = True return True def setOutlinePanePos(self, panePos): """Set the position of the outline details splitter. """ self.outlnPanePos = [int(x/self.guiScale) for x in panePos] self.confChanged = True return True def setShowRefPanel(self, checkState): """Set the visibility state of the reference panel. """ self.showRefPanel = checkState self.confChanged = True return self.showRefPanel def setViewComments(self, viewState): """Set the visibility state of comments in the viewer. """ self.viewComments = viewState self.confChanged = True return self.viewComments def setViewSynopsis(self, viewState): """Set the visibility state of synopsis comments in the viewer. 
""" self.viewSynopsis = viewState self.confChanged = True return self.viewSynopsis ## # Default Setters ## def setDefaultGuiTheme(self): """Reset the GUI theme to default value. """ self.guiTheme = "default" def setDefaultSyntaxTheme(self): """Reset the syntax theme to default value. """ self.guiSyntax = "default_light" def setDefaultIconTheme(self): """Reset the icon theme to default value. """ self.guiIcons = "typicons_light" ## # Getters ## def getWinSize(self): return [int(x*self.guiScale) for x in self.winGeometry] def getPreferencesSize(self): return [int(x*self.guiScale) for x in self.prefGeometry] def getTreeColWidths(self): return [int(x*self.guiScale) for x in self.treeColWidth] def getNovelColWidths(self): return [int(x*self.guiScale) for x in self.novelColWidth] def getProjColWidths(self): return [int(x*self.guiScale) for x in self.projColWidth] def getMainPanePos(self): return [int(x*self.guiScale) for x in self.mainPanePos] def getDocPanePos(self): return [int(x*self.guiScale) for x in self.docPanePos] def getViewPanePos(self): return [int(x*self.guiScale) for x in self.viewPanePos] def getOutlinePanePos(self): return [int(x*self.guiScale) for x in self.outlnPanePos] def getTextWidth(self, focusMode=False): if focusMode: return self.pxInt(max(self.focusWidth, 200)) else: return self.pxInt(max(self.textWidth, 200)) def getTextMargin(self): return self.pxInt(max(self.textMargin, 0)) def getTabWidth(self): return self.pxInt(max(self.tabWidth, 0)) def getErrData(self): """Compile and return error messages from the initialisation of the Config class, and clear the error buffer. """ errMessage = "<br>".join(self.errData) self.hasError = False self.errData = [] return errMessage ## # Internal Functions ## def _packList(self, inData): """Pack a list of items into a comma-separated string for saving to the config file. 
""" return ", ".join([str(inVal) for inVal in inData]) def _checkNone(self, checkVal): """Return a NoneType if the value corresponds to None, otherwise return the value unchanged. """ if checkVal is None: return None if isinstance(checkVal, str): if checkVal.lower() == "none": return None return checkVal def _checkOptionalPackages(self): """Cheks if we have the optional packages used by some features. """ try: import enchant # noqa: F401 self.hasEnchant = True logger.debug("Checking package 'pyenchant': OK") except Exception: self.hasEnchant = False logger.debug("Checking package 'pyenchant': Missing") return # END Class Config
vaelue/novelWriter
novelwriter/config.py
config.py
py
38,609
python
en
code
null
github-code
6
[ { "api_name": "logging.getLogger", "line_number": 18, "usage_type": "call" }, { "api_name": "PyQt5.QtCore.QLocale.system", "line_number": 65, "usage_type": "call" }, { "api_name": "PyQt5.QtCore.QLocale", "line_number": 65, "usage_type": "name" }, { "api_name": "Py...
32681042902
import numpy as np

from .BaseNB import BaseNaiveBayes


# Gaussian naive Bayes: each feature's class-conditional likelihood is
# modelled as a univariate Gaussian estimated from the training data.
class GaussianNaiveBayes(BaseNaiveBayes):

    def fit(self, X: np.ndarray, y: np.ndarray) -> "GaussianNaiveBayes":
        """Estimate class priors and per-class Gaussian parameters.

        X: train dataset, shape = (n_samples, n_features)
        y: target, shape = (n_samples, )
        Returns self so calls can be chained (fit(...).predict(...)).
        """
        # Class priors P(y=c) = count(c) / n_samples.  np.unique with
        # return_counts replaces the old Python-level counting loop.
        self.classes_, class_counts = np.unique(y, return_counts=True)
        self._y_prior_proba = class_counts / y.size

        # Per-class mean (theta) and variance (sigma) of every feature.
        n_classes = self.classes_.size
        n_features = X.shape[1]
        self._theta = np.zeros((n_classes, n_features))
        self._sigma = np.zeros((n_classes, n_features))
        for i, c in enumerate(self.classes_):
            X_c = X[y == c]
            self._theta[i, :] = np.mean(X_c, axis=0)
            self._sigma[i, :] = np.var(X_c, axis=0)
        return self

    def _joint_log_likelihood(self, X: np.ndarray) -> np.ndarray:
        """Return log P(y=c) + log P(X | y=c) for every class c.

        Output shape: (n_samples, n_classes).  Working in log space
        avoids floating-point underflow for small probabilities.
        NOTE(review): a zero-variance feature would divide by zero here;
        callers should ensure features vary within each class.
        """
        jll = []
        for i in range(self.classes_.size):
            log_prior = np.log(self._y_prior_proba[i])
            # Log of the Gaussian pdf: normalisation term, then the
            # quadratic exponent summed over features.
            log_likelihood = - 0.5 * np.sum(np.log(2. * np.pi * self._sigma[i, :]))
            log_likelihood -= 0.5 * np.sum(
                ((X - self._theta[i, :]) ** 2) / (self._sigma[i, :]), axis=1)
            jll.append(log_prior + log_likelihood)
        # Transpose so rows correspond to samples and columns to classes.
        return np.array(jll).T

    def __repr__(self):
        return "<GaussianNaiveBayes>"


if __name__ == "__main__":
    # Smoke test on the iris data set (the unused hand-written `test`
    # array and the unused sklearn GaussianNB import were removed).
    from sklearn.datasets import load_iris
    from sklearn.metrics import accuracy_score
    from sklearn.model_selection import train_test_split

    iris = load_iris()
    X = iris.data
    y = iris.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    gnb = GaussianNaiveBayes().fit(X_train, y_train)
    log_proba = gnb.predict_log_proba(X_test)
    proba = gnb.predict_proba(X_test)
    y_pred = gnb.predict(X_test)
    print(accuracy_score(y_test, y_pred))
HuipengXu/Statistical-learning-method
naive_bayes/GaussianNB.py
GaussianNB.py
py
2,327
python
en
code
7
github-code
6
[ { "api_name": "BaseNB.BaseNaiveBayes", "line_number": 5, "usage_type": "name" }, { "api_name": "numpy.ndarray", "line_number": 8, "usage_type": "attribute" }, { "api_name": "numpy.unique", "line_number": 15, "usage_type": "call" }, { "api_name": "numpy.size", ...
6264519181
import matplotlib.pyplot as plt


class Drawer():
    """Visualises face-recognition results: a gallery of test portraits
    labelled with predicted vs. true names, plus the top eigenfaces."""

    def __init__(self, y_pred, y_test, target_names, X_test, eigenfaces):
        self.y_pred = y_pred            # predicted class indices
        self.y_test = y_test            # true class indices
        self.target_names = target_names  # class index -> person name
        self.X_test = X_test            # flattened test images
        self.eigenfaces = eigenfaces    # flattened eigenface images

    def plot_gallery(self, images, titles, h, w, n_row=3, n_col=4):
        """Helper function to plot a gallery of portraits.

        h, w: pixel height/width used to reshape each flat image row.
        """
        plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
        plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
        for i in range(n_row * n_col):
            plt.subplot(n_row, n_col, i + 1)
            plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
            plt.title(titles[i], size=12)
            # Hide axis ticks: only the image and its title matter.
            plt.xticks(())
            plt.yticks(())

    def show(self, h, w):
        """Plot the prediction gallery and the eigenface gallery, then
        display both figures."""
        # One title per test sample: predicted vs. true surname.
        # (zip replaces the old index loop over y_pred.shape[0].)
        prediction_titles = []
        for pred, true in zip(self.y_pred, self.y_test):
            pred_name = self.target_names[pred].rsplit(' ', 1)[-1]
            true_name = self.target_names[true].rsplit(' ', 1)[-1]
            prediction_titles.append('predicted: %s\ntrue: %s' % (pred_name, true_name))
        self.plot_gallery(self.X_test, prediction_titles, h, w)

        # plot the gallery of the most significative eigenfaces
        eigenface_titles = ["eigenface %d" % i for i in range(self.eigenfaces.shape[0])]
        self.plot_gallery(self.eigenfaces, eigenface_titles, h, w)

        plt.show()
bartekskrabacz/python_project
src/python_project/Drawer.py
Drawer.py
py
1,774
python
en
code
0
github-code
6
[ { "api_name": "matplotlib.pyplot.figure", "line_number": 14, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 15, "usage_type": "call" }, { "api_...
23995078592
from collections import deque
from typing import Dict, List, Optional, Any

import faiss
from langchain.chains import LLMChain
from langchain.chains.base import Chain
from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
from langchain.llms import OpenAI, BaseLLM
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.vectorstores.base import VectorStore
from pydantic import BaseModel, Field
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.docstore import InMemoryDocstore

# Define your embedding model
embeddings_model = OpenAIEmbeddings()
# Initialize the vectorstore as empty: a flat L2 FAISS index sized for
# OpenAI's 1536-dimensional embeddings, backed by an in-memory docstore.
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})

# Initialize our LLM
llm = OpenAI(temperature=0)


class TaskCreationChain(LLMChain):
    """Chain that generates follow-up tasks from an execution result."""

    @classmethod
    def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
        """Build the chain with the task-creation prompt."""
        task_creation_template = (
            "You are a task creation AI that uses the result of an execution agent"
            " to create new tasks with the following objective: {objective},"
            " The last completed task has the result: {result}."
            " This result was based on this task description: {task_description}."
            " These are incomplete tasks: {incomplete_tasks}."
            " Based on the result, create new tasks to be completed"
            " by the AI system that do not overlap with incomplete tasks."
            " Return the tasks as an array."
        )
        prompt = PromptTemplate(
            template=task_creation_template,
            input_variables=[
                "result",
                "task_description",
                "incomplete_tasks",
                "objective",
            ],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)


class TaskPrioritizationChain(LLMChain):
    """Chain that cleans up and re-orders the outstanding task list."""

    @classmethod
    def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
        """Build the chain with the task-prioritization prompt."""
        task_prioritization_template = (
            "You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing"
            " the following tasks: {task_names}."
            " Consider the ultimate objective of your team: {objective}."
            " Do not remove any tasks. Return the result as a numbered list, like:"
            " #. First task"
            " #. Second task"
            " Start the task list with number {next_task_id}."
        )
        prompt = PromptTemplate(
            template=task_prioritization_template,
            input_variables=["task_names", "next_task_id", "objective"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)


class ExecutionChain(AgentExecutor):
    """Agent executor with Search + conversation-summary tools.

    NOTE(review): BabyAGI.from_llm below builds its own executor from
    the module-level tools instead of calling this factory; this class
    is kept for API compatibility.
    """

    @classmethod
    def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> AgentExecutor:
        """Build a zero-shot agent executor with shared memory."""
        template = """This is a conversation between a human and a bot:

{chat_history}

Write a summary of the conversation for {input}:
"""
        prompt = PromptTemplate(template=template, input_variables=["chat_history", "input"])
        memory = ConversationBufferMemory(memory_key="chat_history")
        # Read-only view so the summary chain cannot mutate the memory.
        read_memory = ReadOnlySharedMemory(memory=memory)
        summary_chain = LLMChain(memory=read_memory, prompt=prompt, llm=llm, verbose=True)
        search = GoogleSearchAPIWrapper()
        tools = [
            Tool(
                name="Search",
                func=search.run,
                description="useful for when you need to answer questions about current events"
            ),
            Tool(
                name="Summary",
                func=summary_chain.run,
                description="useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary."
            )
        ]
        prefix = """
        You are an AI who performs one task based on the following objective: {objective}.
        Take into account these previously completed tasks: {context}.
        """
        suffix = """
        Your task: {task}.
        Response: {agent_scratchpad}
        """
        prompt = ZeroShotAgent.create_prompt(
            tools,
            prefix=prefix,
            suffix=suffix,
            input_variables=["objective", "context", "task", "agent_scratchpad"],
        )
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=verbose)
        return cls.from_agent_and_tools(agent=agent, tools=tools, verbose=verbose, memory=memory)


# Module-level tools used by BabyAGI.from_llm: web search plus a
# brainstorming ("TODO") chain.
todo_template = """
You are an expert in walking through your thoughts. You are able to take the main objective, as well as the tasks, and create step by step observations.
Here is the objective: {objective}
"""
todo_prompt = PromptTemplate(template=todo_template, input_variables=["objective"])
todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt)
search = GoogleSearchAPIWrapper()
tools = [
    Tool(
        name="Search",
        func=search.run,
        description="useful for when you need to answer questions about current events",
    ),
    Tool(
        name="TODO",
        func=todo_chain.run,
        description="useful for when you need to come up brainstorming for the current task at hand. Input: an objective to create a todo list for as well as the task. Output: a rational thought process behind the objective and task, enough to help you craft a perfect response.",
    ),
]

prefix = """You are an AI who performs one task based on the following objective: {objective}.
Take into account these previously completed tasks: {context}."""
suffix = """Question: {task}
{agent_scratchpad}"""
zeroshot_prompt = ZeroShotAgent.create_prompt(
    tools,
    prefix=prefix,
    suffix=suffix,
    input_variables=["objective", "task", "context", "agent_scratchpad"],
)


def get_next_task(
    task_creation_chain: LLMChain,
    result: str,
    task_description: str,
    task_list: List[str],
    objective: str,
) -> List[Dict]:
    """Ask the task-creation chain for new tasks.

    `result` is the raw string returned by the execution agent (the old
    `Dict` annotation was wrong: callers pass execute_task's string).
    Returns a list of {"task_name": ...} dicts, blank lines dropped.
    """
    incomplete_tasks = ", ".join(task_list)
    response = task_creation_chain.run(
        result=result,
        task_description=task_description,
        incomplete_tasks=incomplete_tasks,
        objective=objective,
    )
    new_tasks = response.split("\n")
    return [{"task_name": task_name} for task_name in new_tasks if task_name.strip()]


def prioritize_tasks(
    task_prioritization_chain: LLMChain,
    this_task_id: int,
    task_list: List[Dict],
    objective: str,
) -> List[Dict]:
    """Re-order the outstanding tasks via the prioritization chain.

    Parses the model's numbered-list output back into
    {"task_id": ..., "task_name": ...} dicts, skipping malformed lines.
    """
    task_names = [t["task_name"] for t in task_list]
    next_task_id = int(this_task_id) + 1
    response = task_prioritization_chain.run(
        task_names=task_names, next_task_id=next_task_id, objective=objective
    )
    new_tasks = response.split("\n")
    prioritized_task_list = []
    for task_string in new_tasks:
        if not task_string.strip():
            continue
        # Expected format: "<id>. <task name>"
        task_parts = task_string.strip().split(".", 1)
        if len(task_parts) == 2:
            task_id = task_parts[0].strip()
            task_name = task_parts[1].strip()
            prioritized_task_list.append({"task_id": task_id, "task_name": task_name})
    return prioritized_task_list


def _get_top_tasks(vectorstore, query: str, k: int) -> List[str]:
    """Get the top k previously-completed task names most similar to
    `query`, ordered by descending similarity score."""
    results = vectorstore.similarity_search_with_score(query, k=k)
    if not results:
        return []
    sorted_results, _ = zip(*sorted(results, key=lambda x: x[1], reverse=True))
    return [str(item.metadata["task"]) for item in sorted_results]


def execute_task(
    vectorstore, execution_chain: LLMChain, objective: str, task: str, k: int = 5
) -> str:
    """Execute a single task, giving the agent the k most relevant
    previously-completed tasks as context."""
    context = _get_top_tasks(vectorstore, query=objective, k=k)
    return execution_chain.run(objective=objective, context=context, task=task)


class BabyAGI(Chain, BaseModel):
    """Controller model for the BabyAGI agent.

    Repeatedly: pop a task, execute it, store the result in the vector
    store, create follow-up tasks, and reprioritize the queue.
    """

    task_list: deque = Field(default_factory=deque)
    task_creation_chain: TaskCreationChain = Field(...)
    task_prioritization_chain: TaskPrioritizationChain = Field(...)
    execution_chain: ExecutionChain = Field(...)
    task_id_counter: int = Field(1)
    vectorstore: VectorStore = Field(init=False)
    max_iterations: Optional[int] = None

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def add_task(self, task: Dict):
        self.task_list.append(task)

    def print_task_list(self):
        print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
        for t in self.task_list:
            print(str(t["task_id"]) + ": " + t["task_name"])

    def print_next_task(self, task: Dict):
        print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
        print(str(task["task_id"]) + ": " + task["task_name"])

    def print_task_result(self, result: str):
        print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
        print(result)

    @property
    def input_keys(self) -> List[str]:
        return ["objective"]

    @property
    def output_keys(self) -> List[str]:
        return []

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Run the agent loop until the task list is exhausted or
        max_iterations is reached."""
        objective = inputs["objective"]
        first_task = inputs.get("first_task", "Make a todo list")
        self.add_task({"task_id": 1, "task_name": first_task})
        num_iters = 0
        while True:
            if self.task_list:
                self.print_task_list()

                # Step 1: Pull the first task
                task = self.task_list.popleft()
                self.print_next_task(task)

                # Step 2: Execute the task
                result = execute_task(
                    self.vectorstore, self.execution_chain, objective, task["task_name"]
                )
                this_task_id = int(task["task_id"])
                self.print_task_result(result)

                # Step 3: Store the result in the vector store
                # (FAISS here, not Pinecone as the old comment claimed)
                result_id = f"result_{task['task_id']}"
                self.vectorstore.add_texts(
                    texts=[result],
                    metadatas=[{"task": task["task_name"]}],
                    ids=[result_id],
                )

                # Step 4: Create new tasks and reprioritize task list
                new_tasks = get_next_task(
                    self.task_creation_chain,
                    result,
                    task["task_name"],
                    [t["task_name"] for t in self.task_list],
                    objective,
                )
                for new_task in new_tasks:
                    self.task_id_counter += 1
                    new_task.update({"task_id": self.task_id_counter})
                    self.add_task(new_task)
                self.task_list = deque(
                    prioritize_tasks(
                        self.task_prioritization_chain,
                        this_task_id,
                        list(self.task_list),
                        objective,
                    )
                )
            num_iters += 1
            if self.max_iterations is not None and num_iters == self.max_iterations:
                print(
                    "\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
                )
                break
        return {}

    @classmethod
    def from_llm(
        cls, llm: BaseLLM, vectorstore: VectorStore, verbose: bool = False, **kwargs
    ) -> "BabyAGI":
        """Initialize the BabyAGI Controller."""
        task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
        task_prioritization_chain = TaskPrioritizationChain.from_llm(llm, verbose=verbose)
        llm_chain = LLMChain(llm=llm, prompt=zeroshot_prompt)
        tool_names = [tool.name for tool in tools]
        agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
        agent_executor = AgentExecutor.from_agent_and_tools(
            agent=agent, tools=tools, verbose=True
        )
        return cls(
            task_creation_chain=task_creation_chain,
            task_prioritization_chain=task_prioritization_chain,
            execution_chain=agent_executor,
            vectorstore=vectorstore,
            **kwargs,
        )


# Logging of LLMChains
verbose = False
# If None, will keep on going forever
max_iterations: Optional[int] = None
baby_agi = BabyAGI.from_llm(
    llm=llm, vectorstore=vectorstore, verbose=verbose, max_iterations=max_iterations
)

baby_agi({"objective": 'Write a cohesive and articulated story about a man named Jack who conquers the world.'})
satpat2590/somelangchainfun
main.py
main.py
py
13,333
python
en
code
2
github-code
6
[ { "api_name": "langchain.embeddings.OpenAIEmbeddings", "line_number": 22, "usage_type": "call" }, { "api_name": "faiss.IndexFlatL2", "line_number": 26, "usage_type": "call" }, { "api_name": "langchain.vectorstores.FAISS", "line_number": 27, "usage_type": "call" }, { ...
31734423723
import sqlite3


class sqlcommands:
    """Thin wrapper around a SQLite table of clock-in/clock-out entries.

    Each row stores an in-stamp (DateIn/TimeIn) and an out-stamp
    (DateOut/TimeOut).  All caller-supplied values go through
    parameterized queries (the old code interpolated them with
    f-strings, which was SQL-injectable); only the table name is
    interpolated, and it is validated in __init__ because identifiers
    cannot be bound as parameters.
    """

    def __init__(self, table):
        # Validate the table name since it must be spliced into SQL text.
        if not table.isidentifier():
            raise ValueError("invalid table name: %r" % table)
        self.db = './~newsqlcommands.sqlt3'
        self.conn = None
        self.curs = None
        self.bOpen = False
        self.fields = None       # TimeIn of the most recently touched row
        self.table_name = table

    def Open(self):
        """Open the database; returns True on first open, None if the
        connection was already open (preserves historical behaviour)."""
        if self.bOpen is False:
            self.conn = sqlite3.connect(self.db)
            self.curs = self.conn.cursor()
            self.bOpen = True
            return True

    def CreateTable(self):
        """Create the tracking table if it does not already exist."""
        sqlCommand = f"""CREATE TABLE if not EXISTS {self.table_name} (
        EntryNumber INTEGER PRIMARY KEY,
        TimeIn TEXT,
        DateIn TEXT,
        TimeOut TEXT,
        DateOut TEXT);"""
        self.curs.execute(sqlCommand)

    def NewIn(self, field):
        """Insert a new clock-in row stamped with today's local date."""
        if self.bOpen:
            self.curs.execute(
                f"INSERT INTO {self.table_name} (DateIn, TimeIn) "
                "VALUES (date('now', 'localtime'), ?);", (field,))
            self.conn.commit()
            self.fields = field
            return True
        return False

    def NewOut(self, field):
        """Set the clock-out stamp on the row opened by the last NewIn."""
        if self.bOpen:
            self.curs.execute(
                f"UPDATE {self.table_name} "
                "SET DateOut = date('now', 'localtime'), TimeOut = ? "
                "WHERE TimeIn = ?;", (field, self.fields))
            self.conn.commit()
            return True
        return False

    def FillMissing(self, time_out, date_out, ifempty):
        """Backfill DateOut/TimeOut on the row whose TimeIn == ifempty."""
        if self.bOpen:
            self.curs.execute(
                f"UPDATE {self.table_name} SET DateOut = ?, TimeOut = ? "
                "WHERE TimeIn = ?;", (date_out, time_out, ifempty))
            self.conn.commit()
            return True
        return False

    def GetAll(self):
        """Return every (TimeIn, TimeOut) pair."""
        self.curs.execute(f"SELECT TimeIn, TimeOut FROM {self.table_name};")
        return self.curs.fetchall()

    def GetLast(self):
        """Return the TimeIn of the most recently inserted row, stripped
        of formatting characters, and cache it in self.fields for NewOut.
        """
        self.curs.execute(
            f"SELECT TimeIn FROM {self.table_name} "
            f"WHERE ROWID IN ( SELECT max( ROWID ) FROM {self.table_name});")
        nochar = "!@,(){}[]' "
        naked = ""
        self.fields = self.curs.fetchone()
        for each in self.fields:
            each = str(each)
            for char in nochar:
                each = each.replace(char, "")
            naked = naked + each
        self.fields = naked
        return self.fields

    def GetSome(self, count):
        """Return up to 5 full rows ordered by DateIn, skipping `count`."""
        self.curs.execute(
            f"SELECT * FROM {self.table_name} ORDER BY DateIn LIMIT 5 OFFSET ?;",
            (count,))
        return self.curs.fetchall()

    def GetPrimary(self):
        """Return all TimeIn values."""
        self.curs.execute(f"SELECT TimeIn FROM {self.table_name};")
        return self.curs.fetchall()

    def CountDateIn(self):
        """Return a 1-tuple holding the count of non-NULL DateIn values."""
        self.curs.execute(f"SELECT COUNT(DateIn) FROM {self.table_name};")
        return self.curs.fetchone()

    def DelTable(self):
        """Drop the tracking table entirely."""
        if self.bOpen:
            self.curs.execute(f"DROP TABLE IF EXISTS {self.table_name};")
            return True
        return False

    def GetOne(self, TimeIn, inout):
        """Look up one entry by its TimeIn or TimeOut value.

        Returns (matched time value, matched date value, TimeIn of any
        row not yet clocked out), or None when the DB is closed.
        """
        if self.bOpen:
            # Map the caller's column selector onto fixed column names so
            # no identifier is ever taken from input.
            if inout == "TimeIn":
                time_col, date_col = "TimeIn", "DateIn"
            else:
                time_col, date_col = "TimeOut", "DateOut"
            self.curs.execute(
                f"Select {date_col} from {self.table_name} where {time_col} = ?;",
                (TimeIn,))
            Lalist = self.curs.fetchone()
            self.curs.execute(
                f"Select {time_col} from {self.table_name} where {time_col} = ?;",
                (TimeIn,))
            Lelist = self.curs.fetchone()
            self.curs.execute(
                f"SELECT TimeIn FROM {self.table_name} WHERE TimeOut is null;")
            saved = self.curs.fetchone()
            return Lelist, Lalist, saved
        return None

    def DelEm(self, TimeIns):
        """Delete every row whose TimeIn appears in the TimeIns sequence.

        Uses a parameterized IN (...) list; the old string-built OR chain
        compared the TEXT column against unquoted numbers and so matched
        nothing for text values.
        """
        if self.bOpen:
            if len(TimeIns) != 0:
                placeholders = ", ".join("?" for _ in TimeIns)
                self.curs.execute(
                    f"DELETE from {self.table_name} "
                    f"WHERE TimeIn IN ({placeholders});", tuple(TimeIns))
                self.conn.commit()
                return True
        return False

    def UpdateOne(self, new, time, oldday, oldtime, inout):
        """Rewrite the date/time pair of one entry.

        `inout` selects the TimeIn/DateIn or TimeOut/DateOut pair.
        Fix: the old code stored the new date as '{new} ' with a stray
        trailing space; the value is now stored exactly as given.
        """
        if self.bOpen:
            if inout == "TimeIn":
                time_col, date_col = "TimeIn", "DateIn"
            else:
                time_col, date_col = "TimeOut", "DateOut"
            self.curs.execute(
                f"UPDATE {self.table_name} SET {date_col} = ?, {time_col} = ? "
                f"WHERE {date_col} = ? and {time_col} = ?;",
                (new, time, oldday, oldtime))
            self.conn.commit()
            return True
        return False

    def DelOne(self, TimeIn):
        """Delete a single row by its TimeIn value."""
        if self.bOpen:
            self.curs.execute(
                f"DELETE from {self.table_name} where TimeIn = ?;", (TimeIn,))
            # Commit so the deletion survives even if End() is never
            # called (DelEm already committed; DelOne previously didn't).
            self.conn.commit()
            return True
        return False

    def End(self):
        """Commit outstanding work and mark the connection closed.

        NOTE(review): the sqlite3 connection object itself is not
        close()d here, matching the original behaviour.
        """
        if self.bOpen:
            self.conn.commit()
            self.bOpen = False
            return True
Loondas/PythonGameTimer
GameTimer/cgi-bin/newsqlcommands.py
newsqlcommands.py
py
4,718
python
en
code
0
github-code
6
[ { "api_name": "sqlite3.connect", "line_number": 8, "usage_type": "call" } ]
34215736207
import logging
import os
import gzip
import filetype
import multiprocessing as mp

import pandas as pd

from moonstone.normalization.reads.read_downsize import DownsizePair

logger = logging.getLogger(__name__)


def pair_up(seq_files_info):
    """Match forward/reverse read files that share the same header.

    seq_files_info maps filename -> [header, '1'|'2', n_reads, format].
    Returns a flat list [fwd1, rev1, fwd2, rev2, ...].
    """
    paired_list = []
    for key in seq_files_info:
        if key in paired_list:
            continue  # any one pair should generate ONLY one loop
        query = seq_files_info[key][0]  # header of this sequence file
        file_counter = 0       # confirm that we find two files
        fwd_reads_file = None  # reset the file results
        rev_reads_file = None
        for key_a in seq_files_info:  # loop again to find the matching header
            if query in seq_files_info[key_a][0]:
                # Headers match: it is either the forward or reverse file.
                if seq_files_info[key_a][1] == '1':
                    fwd_reads_file = key_a
                    file_counter += 1
                if seq_files_info[key_a][1] == '2':
                    rev_reads_file = key_a
                    file_counter += 1
        if fwd_reads_file and rev_reads_file:
            logger.info('\nForward file = %s\nReverse file = %s'
                        % (fwd_reads_file, rev_reads_file))
            paired_list.append(fwd_reads_file)
            paired_list.append(rev_reads_file)
    logger.info(f'List of Paired Reads Files:\n{paired_list}')
    return paired_list


def plot_reads(file_info_dict):
    """Build (and return) a DataFrame of read counts indexed by filename.

    NOTE(review): despite the log message, nothing is plotted here; the
    caller is expected to plot the returned DataFrame.
    """
    logger.info('Generating plot of number of reads')
    files: list = []
    reads: list = []
    for key, info in file_info_dict.items():
        files.append(key)
        reads.append(info[2])
    df = pd.DataFrame(index=files, data=reads, columns=['reads'])
    return df


class DownsizeDir:
    """Used to downsize all reads in a given directory to the same number of reads.
    Reads are downsized by random selection of raw reads generating a subset
    from which alpha diversity can be calculated.
    Note that removal of data, while useful for diversity assessment, is no longer
    considered good practice. https://doi.org/10.1371/journal.pcbi.1003531
    """

    def __init__(self, n=1000, processes=1, seed=62375, in_dir='./', out_dir=''):
        """Set up target read count, worker count, and input/output dirs.

        When out_dir is empty a 'downsized/' directory is created inside
        in_dir.  The worker count is capped at the number of CPUs.
        """
        logger.info(f'Starting instance of {__class__.__name__} in {__name__}.')
        self.in_dir = in_dir
        self.downsize_to = n
        self.seed = seed

        if out_dir:
            self.out_dir = out_dir
        else:
            self.out_dir = in_dir + 'downsized/'
            logger.info('No output directory specified.\nCreating default: %s ' % self.out_dir)

        if not os.path.exists(self.out_dir):
            os.mkdir(self.out_dir)
        else:
            logger.info('Looks like %s exists.' % self.out_dir)

        if processes > mp.cpu_count():
            logger.warning('Number of requested processes [%i] is greater that the number of system CPUs [%i]'
                           % (processes, mp.cpu_count()))
            self.processes = mp.cpu_count()
            logger.info('Number of processes set to maximum number of detected CPUs [%i].' % self.processes)
        else:
            self.processes = processes
            logger.info('Number of processes set to %i ' % self.processes)

    def detect_seq_reads(self):
        """The provided directory might contain files that are not sequence reads.
        This module attempts to identify ONLY files with sequence data."""
        logger.info(f'Detecting sequence files in {self.in_dir}')
        seq_files = [f for f in os.listdir(self.in_dir) if os.path.isfile(self.in_dir + f)]
        logger.info(f'List of Sequencing Files Found:\n{seq_files}')
        return seq_files

    def read_info(self, seq_file):
        """Gather information on the number of reads for one file.

        Returns {file: [header, '1'|'2', n_reads, format]}, e.g.
        {'forward.fastq': ['@A00709:...', '1', 100257, 'Uncompressed/FASTQ']}.
        Files are now read in a single pass under a context manager (the
        old code opened each file twice and leaked the counting handle).
        NOTE(review): detected non-gzip types still return None, as before.
        """
        seq_files_info = {}
        path = self.in_dir + seq_file
        detect_type = filetype.guess(path)
        if detect_type:
            if detect_type.mime == 'application/gzip':
                logger.info('Detected gzipped file for %s' % seq_file)
                with gzip.open(path, 'r') as fh:
                    first_line = fh.readline().decode()
                    # +1 re-counts the header line already consumed;
                    # FASTQ records are 4 lines each.
                    read_num = (1 + sum(1 for _ in fh)) // 4
                header = first_line.split(' ')[0]
                pair = first_line.split(' ')[1][0]
                seq_files_info[seq_file] = [header, pair, read_num, detect_type.mime]
                return seq_files_info
        if not detect_type:
            logger.info('Assuming uncompressed fastq file for %s' % seq_file)
            with open(path, 'r') as fh:
                first_line = fh.readline()
                read_num = (1 + sum(1 for _ in fh)) // 4
            header = first_line.split(' ')[0]
            pair = first_line.split(' ')[1][0]
            seq_files_info[seq_file] = [header, pair, read_num, 'Uncompressed/FASTQ']
            return seq_files_info

    def down_dir_pair(self):
        """Pair up every reads file in in_dir and downsize each pair."""
        files_to_downsize = self.detect_seq_reads()
        # Fix: use the module logger (the old code called logging.info
        # on the root logger here).
        logger.info('Found %i files.' % len(files_to_downsize))

        # Gather per-file info in parallel; the workers each return a
        # one-item dict, merged into a single dict below.
        with mp.Pool(processes=self.processes) as pool:
            results = pool.map(self.read_info, files_to_downsize, chunksize=1)
        file_info_dict = {k: v for result in results for k, v in result.items()}

        list_to_downsize = pair_up(file_info_dict)

        worker_parameters = []
        for k in range(len(list_to_downsize) // 2):  # one instance per pair
            worker_parameters.append({'raw_file_f': list_to_downsize[k * 2],
                                      'raw_file_r': list_to_downsize[k * 2 + 1],
                                      'read_info': file_info_dict[list_to_downsize[k * 2]],
                                      'in_dir': self.in_dir,
                                      'out_dir': self.out_dir,
                                      'n': self.downsize_to
                                      })

        with mp.Pool(processes=self.processes) as pool:
            check = pool.map(self.instantiate, worker_parameters, chunksize=1)  # noqa

        logger.info('Done!')

    def instantiate(self, wp):
        """Build a DownsizePair from a parameter dict and run it."""
        logger.info('Instantiating with parameters: %s' % wp)
        instance = DownsizePair(**wp)
        instance.downsize_pair()
motleystate/moonstone
moonstone/normalization/reads/downsize_dir.py
downsize_dir.py
py
7,336
python
en
code
0
github-code
6
[ { "api_name": "logging.getLogger", "line_number": 9, "usage_type": "call" }, { "api_name": "pandas.DataFrame", "line_number": 50, "usage_type": "call" }, { "api_name": "os.path.exists", "line_number": 71, "usage_type": "call" }, { "api_name": "os.path", "line_...
18625152569
import os
import json

import requests
from bs4 import BeautifulSoup
from datetime import datetime as dt
import pandas as pd

from __common__ import user_choice


def __load_json__(file):
    """Load a JSON file, returning {} if it is missing or unreadable."""
    try:
        with open(file) as rf:
            data = json.load(rf)
        return data
    except (OSError, json.JSONDecodeError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and genuine programming errors.
        return {}


def __get_links__(url, base_url):
    """Scrape a NEM directory listing and return {link_text: full_url}."""
    r = requests.get(url)
    # Name the parser explicitly: results no longer depend on which
    # optional parsers happen to be installed, and bs4's
    # "no parser specified" warning is silenced.
    soup = BeautifulSoup(r.text, "html.parser")
    link_dict = {}
    ignore = ['[To Parent Directory]', 'DUPLICATE']
    for link in soup.findAll('a'):
        if link.text in ignore:
            continue
        link_dict[link.text] = base_url + link.get('href')
    return link_dict


def __gen_file_name__(data_keys, resource_type):
    """Derive the tracker CSV name from the data file names.

    Prints a warning and returns None when the keys imply more than one
    file type.
    """
    # Archive names carry just a timestamp in the last '_' field;
    # Current names carry timestamp + version in the last two.
    if resource_type == 'Archive':
        i = -1
    else:
        i = -2
    name = set(map(lambda x: '-'.join(x.split('_')[:i]).lower(), data_keys))
    name = list(name)
    if len(name) > 1:
        print("More than one file type detected!")
        return None
    if resource_type == 'Archive':
        name = '-'.join([name[0], resource_type.lower()])
    else:
        name = name[0]
    return name + '.csv'


def __gen_tracker_df__(data_files, data_keys, resource_type):
    """Build a fresh tracker DataFrame for a scraped set of data files."""
    if resource_type == 'Archive':
        timestamp = pd.to_datetime(list(map(lambda x: x.split('_')[-1][:-4],
                                            data_keys)))
        version = resource_type
    else:
        timestamp = pd.to_datetime(list(map(lambda x: x.split('_')[-2],
                                            data_keys)))
        version = list(map(lambda x: 'V' + x.split('_')[-1][:-4], data_keys))
    data_dict = {
        'TIMESTAMP': timestamp,
        'VERSION': version,
        'DOWNLOADED': False,
        # Sentinel date meaning "never downloaded".
        'DOWNLOAD_DATE': pd.to_datetime('19000101'),
        'URL': list(data_files.values())
    }
    return pd.DataFrame(data_dict)


def __merge_trackers__(old_df, new_df):
    """
    Add anything from new_df that is not in old_df already
    """
    merge_cols = ['TIMESTAMP', 'VERSION']
    old_temp = old_df.copy()[merge_cols]
    new_temp = new_df.copy()[merge_cols]
    merge_df = pd.merge(old_temp, new_temp, on=merge_cols,
                        how='outer', indicator=True)

    # rows no longer in CURRENT NEM reports
    old_rows = merge_df[merge_df["_merge"] == "left_only"]
    old_rows = old_rows[merge_cols].merge(old_df, on=merge_cols)
    # keep only old rows that have been downloaded
    # else they need to extracted from ARCHIVE or older records
    old_rows = old_rows[old_rows.DOWNLOADED]

    # for common rows, must keep old_df records
    com_rows = merge_df[merge_df["_merge"] == "both"]
    com_rows = com_rows[merge_cols].merge(old_df, on=merge_cols)

    # for new rows, use latest records
    new_rows = merge_df[merge_df["_merge"] == "right_only"]
    new_rows = new_rows[merge_cols].merge(new_df, on=merge_cols)

    return pd.concat([old_rows, com_rows, new_rows], ignore_index=True)


class NEM_tracker:
    """
    Class object for tracking various resources on NEM website.
    Can potentially be extended to other websites too.
    """

    def __init__(self, data_dir, base_url="http://nemweb.com.au"):
        """Load (or initialise) the resource registry under data_dir."""
        self.data_dir = data_dir
        self.base_url = base_url
        self.resource_path = os.path.join(data_dir, 'resources.json')
        self.resources = __load_json__(self.resource_path)
        self.selected_resource = None
        self.tracker_dir = os.path.join(data_dir, 'resource_trackers')
        if not os.path.isdir(self.tracker_dir):
            os.makedirs(self.tracker_dir)

    def resources_report(self):
        """Print each tracked resource and its last update time."""
        for k, v in self.resources.items():
            print('\n%s\nLast update: %s' % (k, v['last_update']))

    def update_resource(self, resource):
        """Re-scrape one resource and merge results into its tracker CSV."""
        url = self.base_url + resource
        data_files = __get_links__(url, self.base_url)
        data_keys = list(data_files.keys())
        resource_type = resource.split('/')[2]
        file_name = __gen_file_name__(data_keys, resource_type)
        self.resources[resource] = {
            'url': url,
            'type': resource_type,
            'tracker_file': file_name,
            'last_update': dt.now().strftime('%Y-%m-%d-%H:%M:%S')
        }
        # Persist the registry immediately so a later crash cannot lose
        # the updated metadata.
        with open(self.resource_path, 'w') as wf:
            json.dump(self.resources, wf)
        new_df = __gen_tracker_df__(data_files, data_keys, resource_type)
        file_path = os.path.join(self.tracker_dir, file_name)
        if file_name in os.listdir(self.tracker_dir):
            old_df = pd.read_csv(file_path, parse_dates=[
                'TIMESTAMP', 'DOWNLOAD_DATE'
            ])
            tracker_df = __merge_trackers__(old_df, new_df)
        else:
            tracker_df = new_df
        tracker_df.to_csv(file_path, index=False)

    def add_resources(self, resources):
        """
        For adding new resources. Resources must be specified as
        the relative URL path from the domain to the directory
        where all relevant data files are stored.
        E.g. /Reports/Current/Next_Day_Intermittent_DS/
        """
        if type(resources) is not list:
            resources = [resources]
        for resource in resources:
            self.update_resource(resource)

    def bulk_update(self):
        """
        For updating existing resources.
        """
        for resource in self.resources.keys():
            self.update_resource(resource)

    def select_resource(self, resource=None):
        """
        Select a resource for further processing.
        """
        d = {}
        if resource is None:
            # Interactive pick when no resource name is supplied.
            res_list = self.resources.keys()
            name = user_choice(res_list)
        else:
            name = resource
        d['name'] = name
        tracker_file = self.resources[name]['tracker_file']
        d['tracker_path'] = os.path.join(self.tracker_dir, tracker_file)
        d['resource_dir'] = os.path.join(self.data_dir, tracker_file[:-4])
        self.selected_resource = d
robbie-manolache/energy-market-analysis
nemtel/tracker.py
tracker.py
py
6,567
python
en
code
0
github-code
6
[ { "api_name": "json.load", "line_number": 14, "usage_type": "call" }, { "api_name": "requests.get", "line_number": 22, "usage_type": "call" }, { "api_name": "bs4.BeautifulSoup", "line_number": 23, "usage_type": "call" }, { "api_name": "pandas.to_datetime", "li...
26120509391
import os
import sys
import csv
from collections import Counter, defaultdict

import pandas as pd
from statsmodels.stats.inter_rater import aggregate_raters, fleiss_kappa
# from pptx import Presentation

# configure Django so we can use models from the annotate app
sys.path.append('/home/nejl/Dropbox/projects/tator/repo/tator')
os.environ['DJANGO_SETTINGS_MODULE'] = 'tagit.settings'

import django
django.setup()

from django.contrib.auth.models import User
from annotate.models import Query, Annotation, UserResponse

from templates import slide_template

# TODO: need to add sampling method that samples equally from dividing the
# probability mass of the distribution into thirds: top most frequent, middle,
# and bottom.

# TODO: fix analysis to have a global collection filter and then make sure adding
# annotations to queries in a different collection does not change the results


def split_data_frame_by_prob(df, column, nbins):
    """Split *df* into *nbins* DataFrames of roughly equal probability mass,
    measured by the counts in *column* (rows sorted most-frequent first)."""
    df = df.sort_values(column, ascending=False)
    values = df[column]
    bin_probability = 1 / nbins
    total = sum(values)
    cutoffs = []
    cumulative_total = 0
    next_bin_probability = bin_probability
    for i, count in enumerate(values):
        cumulative_total += count
        if cumulative_total / total < next_bin_probability:
            continue
        cutoffs.append(i)
        next_bin_probability += bin_probability
    start = 0
    new_dfs = []
    while cutoffs:
        cutoff = cutoffs.pop(0)
        if len(cutoffs) == 0:
            # last item; get the rest
            new_dfs.append(df[start:])
        else:
            new_dfs.append(df[start:cutoff])
            start = cutoff
    return new_dfs


def load_queries(path):
    """Read the semicolon-delimited query dump at *path* into a DataFrame."""
    with open(path) as csvfile:
        df = pd.read_csv(csvfile, delimiter=';')
    return df


def clean_queries(df):
    """Returns the input DataFrame of queries cleaned"""
    # filter out queries with length less than 2 characters long
    df = df[df['querystring'].str.len() > 1]
    return df


def split_num(num, splits):
    """Returns the number 'num' divided into a list of numbers of
    size 'splits'"""
    splits = ([int(num / splits) + 1] * (num % splits)
              + [int(num / splits)] * (splits - num % splits))
    assert sum(splits) == num
    return splits


def import_queries(path, collection, sample='first', limit=None,
                   allow_dupes=False):
    """Load queries from *path*, optionally sample them, and create Query
    rows in *collection*.

    sample: 'first' | 'random' | 'proportional' (count-weighted) |
            'split' (count-weighted within equal probability-mass thirds).
    """
    df = load_queries(path)
    df = clean_queries(df)
    if not allow_dupes:
        # remove existing queries from candidate queries to sample
        existing = [query.text for query in Query.objects.all()]
        df = df[~df['querystring'].isin(existing)]
    if limit is not None:
        if sample == 'first':
            df = df[:limit]
        elif sample == 'random':
            df = df.sample(limit)
        elif sample == 'proportional':
            df = df.sample(limit, weights='countqstring')
        elif sample == 'split':
            split_size = 3
            splits = split_data_frame_by_prob(df, 'countqstring', split_size)
            sizes = split_num(limit, split_size)
            sub_samples = []
            for size, split_df in zip(sizes, splits):
                sub_samples.append(split_df.sample(size, weights='countqstring'))
            df = pd.concat(sub_samples)
            assert len(df) == limit
        else:
            print('Unknown sampling method')
            return
    for i, values in enumerate(df.values.tolist()):
        text, count = values
        Query.objects.create(text=text, count=count, collection=collection)
    print("Added {} queries to the database.\n".format(i + 1))
    print(df.describe())


def pretty_print_counter(counter, reverse=False):
    """Render a Counter as 'key: value' lines sorted by key."""
    lines = []
    for key, value in sorted(counter.items(), reverse=reverse):
        lines.append("{}: {}".format(key, value))
    return "\n".join(lines)


def get_user_results(username, collection=None):
    """Return a per-annotator summary (skips + answer distributions)."""
    lines = ["*** Annotator: {} ***".format(username)]
    lines.append("===================================\n")
    responses = UserResponse.objects.filter(user__username=username)
    if collection is not None:
        responses = responses.filter(query__collection=collection)
    annotations = [r for r in responses if r.annotation]
    skipped = [r for r in responses if r.skipped]
    lines.append("{} Skipped Queries:\n".format(len(skipped)))
    for response in skipped:
        line = ' "{}"\n --- "{}"'.format(response.query.text,
                                         response.skipped.description)
        lines.append(line)
    lines.append("\n{} Annotations:\n".format(len(annotations)))
    lines.append(Annotation._meta.get_field('is_geo').verbose_name)
    q1 = Counter(r.annotation.is_geo for r in annotations)
    lines.append(pretty_print_counter(q1, reverse=True))
    lines.append("")
    lines.append(Annotation._meta.get_field('loc_type').verbose_name)
    q2 = Counter(r.annotation.loc_type for r in annotations)
    lines.append(pretty_print_counter(q2, reverse=True))
    lines.append("")
    lines.append(Annotation._meta.get_field('query_type').verbose_name)
    q3 = Counter(r.annotation.query_type for r in annotations)
    lines.append(pretty_print_counter(q3))
    return "\n".join(lines)


def do_iaa_pairs(user_pairs, questions=(1, 2, 3), collection=None,
                 level='fine'):
    """Compute Fleiss' kappa for each question over each pair of users."""
    results = defaultdict(list)
    for question in questions:
        for users in user_pairs:
            kappa = get_iaa(question, users=users, collection=collection,
                            level=level)
            results[question].append(kappa)
    return results


def print_iaa_pairs(results, user_pairs):
    """Tabulate the output of do_iaa_pairs."""
    print('    ' + '  '.join(', '.join(user) for user in user_pairs))
    for question, kappas in results.items():
        ks = ''.join("{:0<5.3} ".format(k) for k in kappas)
        print("Q{}: {}".format(question, ks))


def get_iaa(question_num, queries=None, users=None, collection=None,
            level='fine'):
    """Fleiss' kappa for one question over the selected annotations."""
    data = get_annotations(question_num, queries=queries, users=users,
                           level=level, collection=collection)
    # n_cat = Annotation.get_num_categories(question_num)
    results = aggregate_raters(data, n_cat=None)
    kappa = fleiss_kappa(results[0])
    return kappa


def get_annotations(question_num, queries=None, users=None, level='fine',
                    collection=None):
    """Collect per-query answer lists for *question_num*.

    BUGFIX: the original immediately rebound the *queries* parameter to the
    full queryset and then filtered it against itself — the caller-supplied
    restriction was silently ignored and the filter was a no-op.
    """
    assert level in ('fine', 'coarse')
    queryset = Query.objects.exclude(responses__skipped__isnull=False).distinct()
    if collection is not None:
        queryset = queryset.filter(collection=collection)
    if queries is not None:
        # restrict to the caller-supplied queries (pks or Query objects)
        queryset = queryset.filter(pk__in=queries)
    data = []
    for query in queryset:
        # get all non-skipped results
        responses = query.responses.exclude(skipped__isnull=False)
        if users is not None:
            # restrict annotations to supplied users
            responses = responses.filter(user__username__in=users)
        results = [r.annotation.get_question(question_num) for r in responses]
        if question_num in (2, 3) and level == 'coarse':
            # use coarse grained agreement (first character of the code)
            results = [r[0] for r in results]
        if results:
            data.append(results)
    return data


def show_agreement(question_num, users, collection=None, skip_agree=True):
    """Return a table of per-query answers, optionally hiding unanimous ones."""
    lines = []
    queries = Query.objects.exclude(responses__skipped__isnull=False).distinct()
    if collection is not None:
        queries = queries.filter(collection=collection)
    queries = sorted(queries, key=lambda x: x.pk)
    users.sort()
    col_width = max(len(u) for u in users) + 2
    lines.append("".join("{u:{width}}".format(u=u, width=col_width)
                         for u in users))
    agree = 0
    disagree = 0
    for query in queries:
        responses = query.responses.order_by('user__username')
        answers = [r.annotation.get_question(question_num) for r in responses]
        if skip_agree and len(set(answers)) <= 1:
            # all annotators agree, skip
            agree += 1
            continue
        disagree += 1
        line = "".join("{a:<{width}}".format(a=a, width=col_width)
                       for a in answers) + query.text
        lines.append(line)
    start = [
        "Question {}:".format(question_num),
        "Number all agree: {}".format(agree),
        "Number with some disagreement: {}".format(disagree),
        ""
    ]
    return "\n".join(start + lines)


def get_results(users):
    """Build a table (header + rows) of every non-skipped query's answers."""
    queries = Query.objects.exclude(responses__skipped__isnull=False).distinct()
    queries = sorted(queries, key=lambda x: x.pk)
    users.sort()
    rest_cols = ["Q{}_{}".format(num, user)
                 for user in users for num in (1, 2, 3)]
    header = ['id', 'query'] + rest_cols
    rows = [header]
    for query in queries:
        row = [query.pk, query.text]
        responses = query.responses.order_by('user__username')
        for response in responses:
            row.append(response.annotation.get_question(1))
            row.append(response.annotation.get_question(2))
            row.append(response.annotation.get_question(3))
        rows.append(row)
    return rows


def export_results_csv(users, outfile='annotations.csv'):
    """Dump get_results(users) to *outfile* as CSV."""
    results = get_results(users)
    with open(outfile, 'w', encoding='utf8', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerows(results)


def make_slides_latex(users, csv=None, outfile='slides/slides.tex'):
    """Write one LaTeX slide per query; *csv* optionally points at a saved
    annotations CSV instead of querying the database.

    BUGFIX: the *csv* parameter shadowed the csv module, so csv.reader()
    raised AttributeError whenever a path was supplied; the module is now
    imported under a local alias (parameter name kept for compatibility).
    """
    import csv as csv_module
    if csv is None:
        results = get_results(users)
    else:
        with open(csv, encoding='utf8') as csvfile:
            results = list(csv_module.reader(csvfile))
    lines = []
    header = results[0]
    for i, query in enumerate(results[1:]):
        row1 = r"Q1 & {} & {} & {}\\".format(query[2], query[5], query[8])
        row2 = r"Q2 & {} & {} & {}\\".format(query[3], query[6], query[9])
        row3 = r"Q3 & {} & {} & {}\\".format(query[4], query[7], query[10])
        rows = "\n".join([row1, row2, row3])
        title = "Query {}".format(i + 1)
        slide = slide_template.format(title=title, query=query[1], rows=rows)
        lines.append(slide)
    with open(outfile, 'w', encoding='utf8') as texfile:
        texfile.write('\n'.join(lines))


def make_slides_pptx(users, csv=None):
    """Not finished. Used latex instead.

    BUGFIX: same csv-module shadowing as make_slides_latex.
    NOTE(review): Presentation is only available if the pptx import at the
    top of the file is uncommented — calling this as-is raises NameError.
    """
    import csv as csv_module
    if csv is None:
        results = get_results(users)
    else:
        with open(csv, encoding='utf8') as csvfile:
            results = list(csv_module.reader(csvfile))
    header = results[0]
    prs = Presentation()
    slide_layout = prs.slide_layouts[1]
    for i, query in enumerate(results[1:]):
        slide = prs.slides.add_slide(slide_layout)
        slide.shapes.title.text = 'Query {}'.format(i + 1)
        body_shape = slide.shapes.placeholders[1]
        tf = body_shape.text_frame
        p = tf.paragraphs[0]
        p.text = query[1]
        p.level = 0
    prs.save('test.pptx')
ned2/tator
notebooks/utils.py
utils.py
py
11,139
python
en
code
0
github-code
6
[ { "api_name": "sys.path.append", "line_number": 11, "usage_type": "call" }, { "api_name": "sys.path", "line_number": 11, "usage_type": "attribute" }, { "api_name": "os.environ", "line_number": 12, "usage_type": "attribute" }, { "api_name": "django.setup", "lin...
2694410426
import os
import sys
import signal
import threading
import multiprocessing
import atexit
import time
from ctypes import c_bool

from .module import StateIO
from .config import Config


class Controller:
    """Loads module classes from config, instantiates them, and runs the
    main tick loop, dispatching each module as main-loop / thread / process
    according to its configured RunType."""

    def __init__(self, config: Config) -> None:
        """Parse config/CLI args, set up shared memory, import and
        instantiate all configured modules."""
        args, self.cfg = config.load()
        self.pidfile_path = '/tmp/controller.pids'  # should be config ?
        # make sure self.on_exist is always called
        atexit.register(self.on_shutdown)
        self.procmem = {"StateIO": multiprocessing.Array(StateIO, 1)}
        self.threads = {}
        self.processes = {}
        # parse config and import modules classes
        self.modules_classes = {}
        self.args_import(args)
        # initialize all modules
        self.modules_instances = [None] * 99
        self.modules_init()

    def _module_init(self, mtype):
        """Instantiate one configured module class and slot it into
        modules_instances at its configured RunPriority."""
        mclass = self.modules_classes[mtype]["ModuleName"]
        args = self.modules_classes[mtype]["Attributes"]
        init_order = self.modules_classes[mtype]["RunPriority"]
        in_list = self.modules_classes[mtype]["InputMem"]
        out_list = self.modules_classes[mtype]["OutputMem"]
        # initialize BaseModule class
        module = mclass(args, self.procmem, in_list, out_list)
        module.INIT_NAME = mtype
        module.INIT_ORDER = init_order
        module.SHUTDOWN = multiprocessing.Value(c_bool, False)
        # TODO tick_delta should be here with info msg if it is here
        run_type = self.modules_classes[mtype]["RunType"]
        if run_type == 0:
            module.IS_MAIN = True
        elif run_type == 1:
            module.IS_THREAD = True
        elif run_type == 2:
            module.IS_PROCESS = True
        else:
            print("fatal error: incorect RunType config at:\n"
                  " module: {0}".format(module.INIT_NAME))
            sys.exit(1)
        if self.modules_instances[init_order]:
            # two modules configured with the same RunPriority
            _name = self.modules_instances[init_order].INIT_NAME
            print("fatal error: equal RunPriority config at modules:\n"
                  "module{0}\nmodule: {1}".format(_name, module.INIT_NAME))
            sys.exit(1)
        self.modules_instances.pop(init_order)
        self.modules_instances.insert(init_order, module)
        print(" {0} initilized".format(module.INIT_NAME))

    def modules_init(self):
        """Initialize every imported module class (async ones on daemon
        threads), then compact modules_instances and renumber INIT_ORDER."""
        for key in self.modules_classes:
            async_init = self.modules_classes[key]["AsyncInit"]
            if async_init:
                thread = threading.Thread(
                    target=self._module_init,
                    args=(key,)
                )
                thread.daemon = True
                thread.start()
            else:
                self._module_init(key)
        self.modules_instances = [
            inst for inst in self.modules_instances if inst is not None
        ]
        # renumber so INIT_ORDER matches the compacted list index
        for n, instance in enumerate(self.modules_instances):
            instance.INIT_ORDER = n

    def start(self):
        """Main loop: start each module in its configured run mode and tick
        main-loop modules until the shared shutdown flag is set."""
        self.kill_processes()
        # FIXME make sure only one instance is running
        # save main pid
        with open(self.pidfile_path, "w") as pidfile:
            pid0 = str(os.getpid())
            pidfile.write("%s\n" % (pid0))
        print(" controller started with PID(s):", pid0)
        tmp = []
        while not self.procmem["StateIO"][0].shutdown:
            # TODO test if class_import works here (hot reload), should be
            # async call when updating instances
            # TODO add nice arg to processes
            # FIXME on_start is out of order
            for instance in self.modules_instances:
                # print("tick:",instance.INIT_NAME)
                shutdown = instance.SHUTDOWN.value
                if not instance.IS_RUNNING:
                    if instance.IS_PROCESS and not shutdown:
                        instance.IS_RUNNING = True
                        proc = multiprocessing.Process(target=instance._tick)
                        proc.name = instance.INIT_NAME
                        self.processes[proc.name] = proc
                        proc.start()
                        # TODO should save here process pids
                    elif instance.IS_THREAD and not shutdown:
                        instance.IS_RUNNING = True
                        thread = threading.Thread(target=instance._tick)
                        thread.name = instance.INIT_NAME
                        thread.daemon = True
                        self.threads[thread.name] = thread
                        thread.start()
                    elif not instance.IS_MAIN and not shutdown:
                        instance.IS_RUNNING = True
                        instance._start()
                if instance.IS_MAIN and not shutdown:
                    try:
                        instance.on_tick()
                    except Exception as e:
                        # TODO error reporing format
                        print(instance.INIT_NAME)
                        print(e)
                if shutdown:
                    tmp.append(instance.INIT_ORDER)
            # FIXME process is still be visible in top with 0 mem, when it
            # is shutdown
            # remove shut-down modules, highest index first so earlier
            # INIT_ORDER values stay valid while removing
            tmp.reverse()
            for n in tmp:
                name = self.modules_instances[n].INIT_NAME
                if self.modules_instances[n].IS_PROCESS:
                    self.processes[name].kill()
                    self.processes[name].join(timeout=0.001)
                    self.processes[name].close()
                    del self.processes[name]
                elif self.modules_instances[n].IS_THREAD:
                    del self.threads[name]
                elif self.modules_instances[n].IS_MAIN:
                    self.modules_instances[n]._stop()
                self.modules_instances.remove(self.modules_instances[n])
                print(" {0} is removed".format(name))
            tmp.clear()
            # t_instances = len(self.modules_instances) + 1
            # t_threads = len(self.threads)
            # t_processes = len(self.processes)
            # t_main = t_instances - t_threads - t_processes
            # print(" main process instence(s): {0}\n"
            #       " thread instance(s): {1}\n"
            #       " subprocess instance(s): {2}\n"
            #       " total running instances: {3}"
            #       .format(t_main, t_threads, t_processes, t_instances))
            time.sleep(1)

    def on_shutdown(self):
        """
        Called each time application is exiting throught atexit
        """
        # FIXME will error on joystick
        # TODO make sure threads are exited correctly
        # TODO wait for processes to exit, check for zombie processes
        pass

    def shutdown(self):
        """Raise the shared shutdown flag; start() will exit on next pass."""
        self.procmem["StateIO"][0].shutdown = True

    def kill_processes(self):
        """Read the pidfile and SIGINT any still-running controller,
        newest pid first, until each has exited."""
        pid = None
        if os.path.exists(self.pidfile_path):
            with open(self.pidfile_path, "r") as pidfile:
                pids = pidfile.readlines()
            for _pid in pids[::-1]:
                pid = _pid[:-1]
                while True:
                    if os.path.exists("/proc/" + pid):
                        print("Attempting to shutdown existing controller:", pid)
                        # FIXME hcitool will hangd when sending SIGINT
                        os.kill(int(pid), signal.SIGINT)
                        continue
                    break

    def class_import(self, mtype, arg, mname=None):
        """Resolve one config entry to a module class, register it under a
        composite key, and create any shared-memory arrays it declares."""
        # TODO check if configs class(s) is already imported in config
        try:
            print(mtype, arg, mname)
            class_cfg = self.cfg[mtype][mname][arg]
            module_name = class_cfg["ModuleName"]
            cls_name = mtype[:3] + ":" + arg[:3] + ":" + mname + ":" + module_name
        except KeyError:
            print("import error: unknown module class name:", arg)
            sys.exit(1)
        self.modules_classes[cls_name] = class_cfg
        try:
            if module_name != 'BaseModule':
                folder_name = mtype[:-1]
                module_path = 'modules.' + module_name + '.' + folder_name
                class_name = folder_name.capitalize()
                module = __import__(module_path, fromlist=[class_name])
                self.modules_classes[cls_name]["ModuleName"] = \
                    getattr(module, class_name)
            else:
                # used for testing
                module_path = 'controller'
                module = __import__(module_path, fromlist=['BaseModule'])
                self.modules_classes[cls_name]["ModuleName"] = \
                    getattr(module, 'BaseModule')
        except AttributeError:
            print("import error: failed to import class:", cls_name,
                  'modules.' + module_name + '.' + mtype[:-1])
            sys.exit(1)
        try:
            for m in self.modules_classes[cls_name]['InputMem']:
                if m not in self.procmem:
                    module = __import__('modules.structures', fromlist=[m])
                    mem = getattr(module, m)
                    self.procmem[m] = multiprocessing.Array(mem, 1)
                    print(" shared memory {0} intialized".format(m))
        except AttributeError:
            print("import error: failed to import input shared memory class:", m)
            sys.exit(1)
        try:
            for m in self.modules_classes[cls_name]['OutputMem']:
                if m not in self.procmem:
                    module = __import__('modules.structures', fromlist=[m])
                    mem = getattr(module, m)
                    # mem = getattr(self.imported, m)
                    self.procmem[m] = multiprocessing.Array(mem, 1)
                    print(" shared memory {0} intialized".format(m))
        except AttributeError:
            print("import error: failed to import output shared memory class:", m)
            sys.exit(1)

    def _parse_module_args(self, const):
        """Split an 'interface:controller' CLI value; 'default' maps to
        ('default', 'default')."""
        if const != 'default':
            args = const.split(":")
            return args[0], args[1]
        else:
            return const, const

    def args_import(self, args):
        """Translate CLI flags into class_import calls (or stop/exit)."""
        if len(sys.argv) == 1:
            # TODO add default args, when no argements provided
            print("default args are not implemented!")
            sys.exit(1)
        if args.stop:
            self.kill_processes()
            sys.exit(0)
        # reset sys args to avoid interference with other modules
        sys.argv = [sys.argv[0]]
        if args.hardware_only:
            self.class_import("interfaces", args.hardware_only, "hardware")
            return
        if args.display_only:
            interface, controller = self._parse_module_args(args.display_only)
            self.class_import("interfaces", interface, "display")
            self.class_import("controllers", controller, "display")
            return
        if args.sound_only:
            interface, controller = self._parse_module_args(args.sound_only)
            self.class_import("interfaces", interface, "speaker")
            self.class_import("controllers", controller, "speaker")
            return
        if not args.no_hardware:
            self.class_import("interfaces", 'default', "hardware")
        # must below hardware, to support no-hardware flag
        if args.actuators_only:
            interface, controller = self._parse_module_args(args.actuators_only)
            self.class_import("interfaces", interface, "actuators")
            self.class_import("controllers", controller, "actuators")
            return
        if not args.actuators_only:
            self.class_import("interfaces", 'default', "actuators")
            self.class_import("controllers", 'default', "actuators")
        if not args.no_display:
            self.class_import("interfaces", 'default', "display")
            self.class_import("controllers", 'default', "display")
        if not args.no_sound:
            self.class_import("interfaces", 'default', "speaker")
            self.class_import("controllers", 'default', "speaker")
        if args.keyboard:
            self.class_import("interfaces", args.keyboard, "keyboard")
        if args.joystick:
            self.class_import("interfaces", args.joystick, "joystick")
bitula/minipupper-dev
controller/controller.py
controller.py
py
12,689
python
en
code
2
github-code
6
[ { "api_name": "config.Config", "line_number": 15, "usage_type": "name" }, { "api_name": "config.load", "line_number": 17, "usage_type": "call" }, { "api_name": "atexit.register", "line_number": 21, "usage_type": "call" }, { "api_name": "multiprocessing.Array", ...
9224654424
# Usage # python scripts/collect_pickle_states.py -i PICKLE_DATA_PATH import argparse import numpy as np import tqdm import structs def collect_stats(args): data = structs.load(args.input_path) sample_count = [] for key in tqdm.tqdm(data): sample_count.append(data[key]['rst'].shape[0]) values, counts = np.unique(sample_count, return_counts=True) value_counts_pairs = list(zip(values, counts)) for (value, count) in value_counts_pairs: print('{} has {} predictions with counts {}'.format(args.input_path, value, count)) def main(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-i', '--input-path', help='Input pickle file to read') args = parser.parse_args() collect_stats(args) if __name__ == '__main__': main()
Tsinghua-MARS-Lab/InterSim
simulator/prediction/M2I/guilded_m_pred/scripts/collect_pickle_stats.py
collect_pickle_stats.py
py
816
python
en
code
119
github-code
6
[ { "api_name": "structs.load", "line_number": 12, "usage_type": "call" }, { "api_name": "tqdm.tqdm", "line_number": 15, "usage_type": "call" }, { "api_name": "numpy.unique", "line_number": 18, "usage_type": "call" }, { "api_name": "argparse.ArgumentParser", "li...
42168409282
from typing import List class Solution: def optimalArray(self, n : int, ar : List[int]) -> List[int]: # code here res = [] half, full = 0, 0 for i in range(n): full += ar[i] if i&1: res.append(full - 2*half) else: half += ar[i//2] res.append(full - 2*half + ar[i//2]) return res; #{ # Driver Code Starts class IntArray: def __init__(self) -> None: pass def Input(self,n): arr=[int(i) for i in input().strip().split()]#array input return arr def Print(self,arr): for i in arr: print(i,end=" ") print() if __name__=="__main__": t = int(input()) for _ in range(t): n = int(input()) a=IntArray().Input(n) obj = Solution() res = obj.optimalArray(n, a) IntArray().Print(res) # } Driver Code Ends
shane-Coder/DailyCode
Optimal Array - GFG/optimal-array.py
optimal-array.py
py
1,007
python
en
code
0
github-code
6
[ { "api_name": "typing.List", "line_number": 5, "usage_type": "name" } ]
30500996966
# ---------------------------------J.A.R.V.I.S.---------------------------------- import datetime import pyttsx3 import speech_recognition as sr import __name__ import comtypes.client # ----------------------------------------------------------------------------- engine = pyttsx3.init("sapi5") voices = engine.getProperty("voices") engine.setProperty('voice', voices[0].id) def speak(audio): """This function is used to speak the text""" engine.say(audio) engine.runAndWait() def wishMe(): """This function is used to wish me """ hour = int(datetime.datetime.now().hour) if hour >= 0 and hour < 12: speak('Good Morning!') elif hour >= 12 and hour < 18: speak('Good Afternoon!') else: speak('Good Evening!') speak("I am JARVIS Sir. Please tell me how may I help you") def takeCommand(): """This function in used to take microphone input fron the user and return string""" r = sr.Recognizer() with sr.Microphone() as source: print("Listening...") r.pause_threshold = 1 audio = r.listen(source) try: print("Recognizing...") query = r.recognize_google(audio, language='en-in') print(f"User said: {query}\n") except Exception: # print(e) print("Say that again please...") return "None" return query if __name__ != '__main__': pass else: wishMe() query = takeCommand().lower()
ArcTix-09/codes
python/J.A.R.V.I.S..py
J.A.R.V.I.S..py
py
1,509
python
en
code
1
github-code
6
[ { "api_name": "pyttsx3.init", "line_number": 10, "usage_type": "call" }, { "api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 23, "usage_type": "attribute" }, { "api_name": "speech_reco...
25229791277
import pandas as pd from sklearn.metrics.pairwise import linear_kernel from scipy.io import mmwrite, mmread import pickle from gensim.models import Word2Vec # 데이터 가져오기 df_reviews = pd.read_csv('./naver_crawling_data/naver_cleaned_reviews.csv') Tfidf_matrix = mmread('./naver_models/Tfidf_book_review.mtx').tocsr() with open('./naver_models/tfidf.pickle', 'rb') as f: Tfidf = pickle.load(f) def getRecommendation(cosine_sim): simScore = list(enumerate(cosine_sim[-1])) simScore = sorted(simScore, key=lambda x:x[1], reverse=True) simScore = simScore[1:11] bookidx = [i[0] for i in simScore] recBookList = df_reviews.iloc[bookidx] return recBookList embedding_model = Word2Vec.load('./naver_models/Word2VecModel_naver.model') key_word = '감동' sentence = [key_word] * 11 sim_word = embedding_model.wv.most_similar(key_word, topn=10) words = [] for word, _ in sim_word: # 앞에는 단어, 뒤에는 유사도 words.append(word) print(words) for i, word in enumerate(words): sentence += [word] * (10-i) sentence = ' '.join(sentence) # print(sentence) sentence_vec = Tfidf.transform([sentence]) cosine_sim = linear_kernel(sentence_vec, Tfidf_matrix) recommendation = getRecommendation(cosine_sim) print(recommendation['titles'])
sealamen/project_03_book_curator
07_book_recommendation.py
07_book_recommendation.py
py
1,305
python
en
code
0
github-code
6
[ { "api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call" }, { "api_name": "scipy.io.mmread", "line_number": 9, "usage_type": "call" }, { "api_name": "pickle.load", "line_number": 11, "usage_type": "call" }, { "api_name": "gensim.models.Word2Vec.load...
30338122797
from collections import deque def bfs(graph, start): queue = deque([start]) #방문할 노드를 넣어두는 곳 visited = [] #방문한 노드들 while queue: v = queue.popleft() print(v, end=" ") if v not in visited: visited.append(v) queue += graph[v] return visited graph = [ [], [2,3,8], [1,7], [1,4,5], [3,5], [3,4], [7], [2,6,8], [1,7] ] print(bfs(graph, 1))
minju7346/CordingTest
bfs_test2.py
bfs_test2.py
py
472
python
en
code
0
github-code
6
[ { "api_name": "collections.deque", "line_number": 4, "usage_type": "call" } ]
74326542907
import datetime import jsonlines # Appends json to a jsonl file def append_to_jsonl(timeline, file_path): print("Writing contents to jsonl...") # Sort major events array by timestamp sorted_timeline = sorted(timeline, key=lambda event: int(event['date'])) # Pretty print JSON of human datetime for event in sorted_timeline: date = datetime.datetime.fromtimestamp(int(event['date'])/1000).strftime('%c') event['date_pretty'] = date # print(f"[{date}] {event['name']} ({len(event['citationIds'])})") with jsonlines.open(file_path, mode='a') as writer: for item in sorted_timeline: writer.write(item)
jeromechoo/gpt-for-you
helpers/write.py
write.py
py
618
python
en
code
4
github-code
6
[ { "api_name": "datetime.datetime.fromtimestamp", "line_number": 13, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 13, "usage_type": "attribute" }, { "api_name": "jsonlines.open", "line_number": 17, "usage_type": "call" } ]
37211599965
import numpy as np import torch # # np.argwhere的用法 # a=np.zeros((4,3), dtype=np.uint32) # b=np.argwhere(np.zeros((4,3), dtype=np.uint32) == 0) # print(a) # print(b) # print(type(b),b.shape) # # reshape的用法,torch和numpy都有类似的用法 # a = torch.arange(4.) # print(a.shape) # a=torch.reshape(a, (2, 2)) # print(a.shape) # b = torch.rand(4,4,2) # print(b.shape) # b=torch.reshape(b, (-1,2)) # print(b.shape) # # np.arange和np.linspace的一点比较 # print(np.arange(-6, 6, 2)) # I_r_grid_x = (np.arange(-6, 6, 2) + 1.0) / 6 # I_r_grid_y = (np.arange(-6, 6, 2) + 1.0) / 6 # print(I_r_grid_x) # print(I_r_grid_y) # I_r_grid_x=np.linspace(-1,1,6) # print(I_r_grid_x) # # np.concatenate用法 # a = np.concatenate([np.zeros((1, 3)), np.ones((1, 9))], axis=1) # print(a) # # np.fill_diagonal用法 # hat_C=5*np.ones((4,4)) # np.fill_diagonal(hat_C, 1) # print(hat_C) # 参考点生成 # xs = torch.linspace(0, 1020, steps=61) # ys = torch.linspace(0, 1020, steps=61) # x, y = torch.meshgrid(xs, ys, indexing='xy') # P = torch.dstack([x, y]) # print(P) # 归一化测试 # 只有当requires_grad=True时,讨论叶张量才有意义 batch_pt = torch.ones((4, 2, 31, 31), requires_grad=True) batch_pt_norm = batch_pt / 992 batch_pt_norm.sum().backward() print(batch_pt_norm.max()) print("OK") # 赋值测试 a,b,c,d=0,0,0,0 print(a,b,c,d)
hanquansanren/unified_doctransformer
simple_test/unit_test.py
unit_test.py
py
1,371
python
en
code
1
github-code
6
[ { "api_name": "torch.ones", "line_number": 53, "usage_type": "call" } ]
43622986430
# -*- coding: utf-8 -*-

import html

from gi.repository import Gtk

from mcomix.preferences import config


class MessageDialogRemember(Gtk.MessageDialog):
    # NOTE(review): name mangling rewrites self.__dialog_id to
    # _MessageDialogRemember__dialog_id, which does not match these slot
    # names, so the slots are effectively unused; assignment still works
    # because GObject subclasses keep an instance __dict__ — confirm intent.
    __slots__ = ('__dialog_id', '__choices', '__remember_checkbox')

    def __init__(self):
        """ Creates a dialog window """
        super().__init__()
        self.set_modal(True)

        #: Unique dialog identifier (for storing 'Do not ask again')
        self.__dialog_id = None
        #: List of response IDs that should be remembered
        self.__choices = []

        self.__remember_checkbox = Gtk.CheckButton(label='Do not ask again.')
        self.__remember_checkbox.set_no_show_all(True)
        self.__remember_checkbox.set_can_focus(False)
        self.get_message_area().pack_end(
            self.__remember_checkbox, True, True, 6)

        self.set_default_response(Gtk.ResponseType.OK)

    def set_text(self, primary: str, secondary: str = None):
        """ Formats the dialog's text fields

        :param primary: Main text
        :param secondary: Descriptive text """

        markup = f'<span weight="bold" size="larger">{html.escape(primary)}</span>'
        self.set_markup(markup)
        if secondary:
            self.format_secondary_markup(html.escape(secondary))

    def should_remember_choice(self):
        """ :returns: True when the dialog choice should be remembered """

        return self.__remember_checkbox.get_active()

    def set_should_remember_choice(self, dialog_id: str, choices: tuple):
        """ Enables the 'Do not ask again' checkbox

        :param dialog_id: Unique identifier for the dialog (a string)
        :param choices: List of response IDs that should be remembered """

        self.__remember_checkbox.show()
        self.__dialog_id = dialog_id
        self.__choices = [int(choice) for choice in choices]

    def run(self):
        """ Makes the dialog visible and waits for a result; destroys the
        dialog after the result has been returned """

        # a previously remembered answer short-circuits showing the dialog
        if self.__dialog_id in config['STORED_DIALOG_CHOICES']:
            self.destroy()
            return config['STORED_DIALOG_CHOICES'][self.__dialog_id]

        self.show_all()
        # Prevent checkbox from grabbing focus by only enabling it after show
        self.__remember_checkbox.set_can_focus(True)

        result = super().run()

        if self.should_remember_choice() and int(result) in self.__choices:
            config['STORED_DIALOG_CHOICES'][self.__dialog_id] = int(result)

        self.destroy()
        return result
thermitegod/mcomix-lite
mcomix/message_dialog/remember.py
remember.py
py
2,610
python
en
code
2
github-code
6
[ { "api_name": "gi.repository.Gtk.MessageDialog", "line_number": 10, "usage_type": "attribute" }, { "api_name": "gi.repository.Gtk", "line_number": 10, "usage_type": "name" }, { "api_name": "gi.repository.Gtk.CheckButton", "line_number": 27, "usage_type": "call" }, { ...
8670584413
import cv2 import cvzone from cvzone.PoseModule import PoseDetector import numpy as np cap = cv2.VideoCapture(1) detector = PoseDetector() per = 0 a1 = 0 color = (0,0,255) situps = 0 dir = 0 while True: __ , img = cap.read() #assert isinstance(img, object) img = detector.findPose(img) lmlist, bbox = detector.findPosition(img,False) assert isinstance(lmlist, object) if lmlist: a1 = detector.findAngle(img,24,26,28) per = np.interp(a1,(75,160),(100,0)) bar_value = np.interp(a1,(75,160),(15,15+300)) # print(per) cv2.rectangle(img,(580,int(bar_value)),(580 + 20,15 + 350),color,cv2.FILLED) cv2.rectangle(img,(580,15),(580 + 20,15 + 350),(0,0,0),3) cvzone.putTextRect(img,f'{int(per)} %',(575,410),1.2,2,colorT=(),colorR=color,border=3,colorB=()) if per ==100: if dir == 0: situps += 0.5 dir = 1 color = (0,255,0) elif per == 0: if dir == 1: situps += 0.5 dir = 0 color = (0,255,0) else: color = (0,0,255) #print(situps) cvzone.putTextRect(img,f'SIT UPS : {str(int(situps))}',(30,30),2,2,colorT=(),colorR=(255,0,0),border=3,colorB=()) cv2.imshow('Situps Counter', img) if cv2.waitKey(1) == ord('q'): break
adirastogi235/PUSH-UP-COUNTER
main.py
main.py
py
1,392
python
en
code
0
github-code
6
[ { "api_name": "cv2.VideoCapture", "line_number": 6, "usage_type": "call" }, { "api_name": "cvzone.PoseModule.PoseDetector", "line_number": 8, "usage_type": "call" }, { "api_name": "numpy.interp", "line_number": 23, "usage_type": "call" }, { "api_name": "numpy.inte...